[Xen-devel] [RFC Patch v2 06/16] colo: implement restore_callbacks init()/free()
This patch implements the restore callbacks for colo (a brief usage sketch
follows the list below):
1. init(): allocate the memory and resources needed during restore
2. free(): free the memory allocated in init()
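
A minimal sketch (not part of this patch) of how a restore path might
drive these two callbacks; the comm_data variable and the checkpoint
loop placeholder are assumptions standing in for the caller's actual
state:

    void *colo_data = NULL;

    if (colo_init(comm_data, &colo_data) < 0)
        return -1;              /* allocation or evtchn setup failed */

    /* ... per-checkpoint restore work would use colo_data here ... */

    /* colo_free() is also colo_init()'s error path, so it copes with
     * partially-initialised or NULL colo_data. */
    colo_free(comm_data, colo_data);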
Signed-off-by: Ye Wei <wei.ye1987@xxxxxxxxx>
Signed-off-by: Jiang Yunhong <yunhong.jiang@xxxxxxxxx>
Signed-off-by: Wen Congyang <wency@xxxxxxxxxxxxxx>
---
tools/libxc/Makefile | 2 +-
tools/libxc/xc_domain_restore_colo.c | 145 ++++++++++++++++++++++++++++++++++
tools/libxc/xc_save_restore_colo.h | 10 +++
3 files changed, 156 insertions(+), 1 deletions(-)
create mode 100644 tools/libxc/xc_domain_restore_colo.c
create mode 100644 tools/libxc/xc_save_restore_colo.h
diff --git a/tools/libxc/Makefile b/tools/libxc/Makefile
index 512a994..70994b9 100644
--- a/tools/libxc/Makefile
+++ b/tools/libxc/Makefile
@@ -42,7 +42,7 @@ CTRL_SRCS-$(CONFIG_MiniOS) += xc_minios.c
GUEST_SRCS-y :=
GUEST_SRCS-y += xg_private.c xc_suspend.c
ifeq ($(CONFIG_MIGRATE),y)
-GUEST_SRCS-y += xc_domain_restore.c xc_domain_save.c
+GUEST_SRCS-y += xc_domain_restore.c xc_domain_save.c xc_domain_restore_colo.c
GUEST_SRCS-y += xc_offline_page.c xc_compression.c
else
GUEST_SRCS-y += xc_nomigrate.c
diff --git a/tools/libxc/xc_domain_restore_colo.c b/tools/libxc/xc_domain_restore_colo.c
new file mode 100644
index 0000000..674e55e
--- /dev/null
+++ b/tools/libxc/xc_domain_restore_colo.c
@@ -0,0 +1,145 @@
+#include <xc_save_restore_colo.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <xc_bitops.h>
+
+struct restore_colo_data
+{
+    unsigned long max_mem_pfn;
+
+    /* cache of the whole guest memory
+     *
+     * The SVM is running in colo mode, so we should cache the whole
+     * memory of the SVM.
+     */
+    char *pagebase;
+
+    /* bitmap: which pages are dirty? */
+    unsigned long *dirty_pages;
+
+    /* suspend evtchn */
+    int local_port;
+
+    xc_evtchn *xce;
+
+    int first_time;
+
+    /* PV */
+    /* stores the pfn types on the slave side */
+    unsigned long *pfn_type_slaver;
+    xen_pfn_t p2m_fll;
+
+    /* cache of the p2m frame list list */
+    char *p2m_frame_list_list;
+
+    /* cache of the p2m frame list */
+    char *p2m_frame_list;
+
+    /* temp buffers (avoid frequent malloc/free) */
+    unsigned long *pfn_batch_slaver;
+    unsigned long *pfn_type_batch_slaver;
+    unsigned long *p2m_frame_list_temp;
+};
+
+/* We restore only one VM per process, so it is safe to use a global variable. */
+DECLARE_HYPERCALL_BUFFER(unsigned long, dirty_pages);
+
+int colo_init(struct restore_data *comm_data, void **data)
+{
+    xc_dominfo_t info;
+    int i;
+    unsigned long size;
+    xc_interface *xch = comm_data->xch;
+    struct restore_colo_data *colo_data;
+    struct domain_info_context *dinfo = comm_data->dinfo;
+
+    if (dirty_pages)
+        /* colo_init() has been called more than once? */
+        return -1;
+
+    colo_data = calloc(1, sizeof(struct restore_colo_data));
+    if (!colo_data)
+        return -1;
+
+    if (comm_data->hvm)
+    {
+        /* HVM is not supported yet */
+        free(colo_data);
+        return -1;
+    }
+
+    if (xc_domain_getinfo(xch, comm_data->dom, 1, &info) != 1)
+    {
+        PERROR("Could not get domain info");
+        goto err;
+    }
+
+    colo_data->max_mem_pfn = info.max_memkb >> (PAGE_SHIFT - 10);
+
+    colo_data->pfn_type_slaver = calloc(dinfo->p2m_size, sizeof(xen_pfn_t));
+    colo_data->pfn_batch_slaver = calloc(MAX_BATCH_SIZE, sizeof(xen_pfn_t));
+    colo_data->pfn_type_batch_slaver = calloc(MAX_BATCH_SIZE, sizeof(xen_pfn_t));
+    colo_data->p2m_frame_list_temp = malloc(P2M_FL_ENTRIES * sizeof(unsigned long));
+    colo_data->p2m_frame_list_list = malloc(PAGE_SIZE);
+    colo_data->p2m_frame_list = malloc(P2M_FLL_ENTRIES * PAGE_SIZE);
+    if (!colo_data->pfn_type_slaver || !colo_data->pfn_batch_slaver ||
+        !colo_data->pfn_type_batch_slaver || !colo_data->p2m_frame_list_temp ||
+        !colo_data->p2m_frame_list_list || !colo_data->p2m_frame_list) {
+        PERROR("Could not allocate memory for restore colo data");
+        goto err;
+    }
+
+    dirty_pages = xc_hypercall_buffer_alloc_pages(xch, dirty_pages,
+                                       NRPAGES(bitmap_size(dinfo->p2m_size)));
+    colo_data->dirty_pages = dirty_pages;
+
+    size = dinfo->p2m_size * PAGE_SIZE;
+    colo_data->pagebase = malloc(size);
+    if (!colo_data->dirty_pages || !colo_data->pagebase) {
+        PERROR("Could not allocate memory for restore colo data");
+        goto err;
+    }
+
+    colo_data->xce = xc_evtchn_open(NULL, 0);
+    if (!colo_data->xce) {
+        PERROR("Could not open evtchn");
+        goto err;
+    }
+
+    for (i = 0; i < dinfo->p2m_size; i++)
+        comm_data->pfn_type[i] = XEN_DOMCTL_PFINFO_XTAB;
+    memset(dirty_pages, 0xff, bitmap_size(dinfo->p2m_size));
+    colo_data->first_time = 1;
+    colo_data->local_port = -1;
+    *data = colo_data;
+
+    return 0;
+
+err:
+    colo_free(comm_data, colo_data);
+    *data = NULL;
+    return -1;
+}
+
+void colo_free(struct restore_data *comm_data, void *data)
+{
+    struct restore_colo_data *colo_data = data;
+    struct domain_info_context *dinfo = comm_data->dinfo;
+
+    if (!colo_data)
+        return;
+
+    free(colo_data->pfn_type_slaver);
+    free(colo_data->pagebase);
+    free(colo_data->pfn_batch_slaver);
+    free(colo_data->pfn_type_batch_slaver);
+    free(colo_data->p2m_frame_list_temp);
+    free(colo_data->p2m_frame_list);
+    free(colo_data->p2m_frame_list_list);
+    if (dirty_pages)
+        xc_hypercall_buffer_free_pages(comm_data->xch, dirty_pages,
+                                       NRPAGES(bitmap_size(dinfo->p2m_size)));
+    if (colo_data->xce)
+        xc_evtchn_close(colo_data->xce);
+    free(colo_data);
+}
diff --git a/tools/libxc/xc_save_restore_colo.h b/tools/libxc/xc_save_restore_colo.h
new file mode 100644
index 0000000..b5416af
--- /dev/null
+++ b/tools/libxc/xc_save_restore_colo.h
@@ -0,0 +1,10 @@
+#ifndef XC_SAVE_RESTORE_COLO_H
+#define XC_SAVE_RESTORE_COLO_H
+
+#include <xg_save_restore.h>
+#include <xg_private.h>
+
+extern int colo_init(struct restore_data *, void **);
+extern void colo_free(struct restore_data *, void *);
+
+#endif
--
1.7.4