[Xen-devel] [RFC PATCH 04/10] tools: Add support for new HVM params
From: Don Slutz <dslutz@xxxxxxxxxxx>
Signed-off-by: Don Slutz <dslutz@xxxxxxxxxxx>
---
tools/libxc/xc_domain_restore.c | 27 +++++++++++++++++++++++++++
tools/libxc/xc_domain_save.c | 24 ++++++++++++++++++++++++
tools/libxc/xg_save_restore.h | 2 ++
tools/libxl/libxl_create.c | 4 +++-
tools/libxl/libxl_dom.c | 5 +++++
tools/libxl/libxl_types.idl | 2 ++
tools/libxl/xl_cmdimpl.c | 10 ++++++++++
tools/libxl/xl_sxp.c | 4 ++++
xen/arch/x86/hvm/hvm.c | 1 +
9 files changed, 78 insertions(+), 1 deletion(-)
diff --git a/tools/libxc/xc_domain_restore.c b/tools/libxc/xc_domain_restore.c
index 80769a7..9bfb608 100644
--- a/tools/libxc/xc_domain_restore.c
+++ b/tools/libxc/xc_domain_restore.c
@@ -746,6 +746,8 @@ typedef struct {
uint64_t acpi_ioport_location;
uint64_t viridian;
uint64_t vm_generationid_addr;
+ uint64_t vmware_hw;
+ uint64_t vmport_logmask;
struct toolstack_data_t tdata;
} pagebuf_t;
@@ -930,6 +932,26 @@ static int pagebuf_get_one(xc_interface *xch, struct restore_ctx *ctx,
}
return pagebuf_get_one(xch, ctx, buf, fd, dom);
+ case XC_SAVE_ID_HVM_VMWARE_HW:
+ /* Skip the 4 bytes of padding, then read the vmware_hw value. */
+ if ( RDEXACT(fd, &buf->vmware_hw, sizeof(uint32_t)) ||
+ RDEXACT(fd, &buf->vmware_hw, sizeof(uint64_t)) )
+ {
+ PERROR("error read the vmware_hw value");
+ return -1;
+ }
+ return pagebuf_get_one(xch, ctx, buf, fd, dom);
+
+ case XC_SAVE_ID_HVM_VMPORT_LOGMASK:
+ /* Skip the 4 bytes of padding, then read the vmport_logmask value. */
+ if ( RDEXACT(fd, &buf->vmport_logmask, sizeof(uint32_t)) ||
+ RDEXACT(fd, &buf->vmport_logmask, sizeof(uint64_t)) )
+ {
+ PERROR("error read the vmport_logmask flag");
+ return -1;
+ }
+ return pagebuf_get_one(xch, ctx, buf, fd, dom);
+
case XC_SAVE_ID_TOOLSTACK:
{
if ( RDEXACT(fd, &buf->tdata.len, sizeof(buf->tdata.len)) )
@@ -1755,6 +1777,11 @@ int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
if (pagebuf.viridian != 0)
xc_set_hvm_param(xch, dom, HVM_PARAM_VIRIDIAN, 1);
+ if (pagebuf.vmware_hw != 0)
+ xc_set_hvm_param(xch, dom, HVM_PARAM_VMWARE_HW, pagebuf.vmware_hw);
+ if (pagebuf.vmport_logmask != 0)
+ xc_set_hvm_param(xch, dom, HVM_PARAM_VMPORT_LOGMASK, pagebuf.vmport_logmask);
+
if (pagebuf.acpi_ioport_location == 1) {
DBGPRINTF("Use new firmware ioport from the checkpoint\n");
xc_set_hvm_param(xch, dom, HVM_PARAM_ACPI_IOPORTS_LOCATION, 1);
diff --git a/tools/libxc/xc_domain_save.c b/tools/libxc/xc_domain_save.c
index 42c4752..dac7c2c 100644
--- a/tools/libxc/xc_domain_save.c
+++ b/tools/libxc/xc_domain_save.c
@@ -1731,6 +1731,30 @@ int xc_domain_save(xc_interface *xch, int io_fd, uint32_t dom, uint32_t max_iter
PERROR("Error when writing the viridian flag");
goto out;
}
+
+ chunk.id = XC_SAVE_ID_HVM_VMWARE_HW;
+ chunk.data = 0;
+ xc_get_hvm_param(xch, dom, HVM_PARAM_VMWARE_HW,
+ (unsigned long *)&chunk.data);
+
+ if ( (chunk.data != 0) &&
+ wrexact(io_fd, &chunk, sizeof(chunk)) )
+ {
+ PERROR("Error when writing the vmware_hw");
+ goto out;
+ }
+
+ chunk.id = XC_SAVE_ID_HVM_VMPORT_LOGMASK;
+ chunk.data = 0;
+ xc_get_hvm_param(xch, dom, HVM_PARAM_VMPORT_LOGMASK,
+ (unsigned long *)&chunk.data);
+
+ if ( (chunk.data != 0) &&
+ wrexact(io_fd, &chunk, sizeof(chunk)) )
+ {
+ PERROR("Error when writing the vmport_loglvl");
+ goto out;
+ }
}
if ( callbacks != NULL && callbacks->toolstack_save != NULL )
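
(For reference: the two records written above follow the same optional-chunk layout as the other HVM params saved here, i.e. a 32-bit id, 4 bytes of padding, then a 64-bit value, which is also why the restore code earlier reads and discards 4 bytes before reading the 8-byte value. A minimal sketch of that assumed layout, not part of the patch:

    /* Sketch only: assumed wire layout of the XC_SAVE_ID_HVM_* records above. */
    #include <stdint.h>
    struct hvm_param_record {
        int32_t  id;    /* e.g. XC_SAVE_ID_HVM_VMWARE_HW or XC_SAVE_ID_HVM_VMPORT_LOGMASK */
        uint32_t pad;   /* padding skipped by pagebuf_get_one() on restore */
        uint64_t data;  /* the HVM param value */
    };
)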
diff --git a/tools/libxc/xg_save_restore.h b/tools/libxc/xg_save_restore.h
index f859621..69f44de 100644
--- a/tools/libxc/xg_save_restore.h
+++ b/tools/libxc/xg_save_restore.h
@@ -259,6 +259,8 @@
#define XC_SAVE_ID_HVM_ACCESS_RING_PFN -16
#define XC_SAVE_ID_HVM_SHARING_RING_PFN -17
#define XC_SAVE_ID_TOOLSTACK -18 /* Optional toolstack specific info */
+#define XC_SAVE_ID_HVM_VMWARE_HW -19
+#define XC_SAVE_ID_HVM_VMPORT_LOGMASK -20
/*
** We process save/restore/migrate in batches of pages; the below
diff --git a/tools/libxl/libxl_create.c b/tools/libxl/libxl_create.c
index e03bb55..8b08bbd 100644
--- a/tools/libxl/libxl_create.c
+++ b/tools/libxl/libxl_create.c
@@ -388,13 +388,15 @@ int libxl__domain_build(libxl__gc *gc,
vments[4] = "start_time";
vments[5] = libxl__sprintf(gc, "%lu.%02d", start_time.tv_sec,(int)start_time.tv_usec/10000);
- localents = libxl__calloc(gc, 7, sizeof(char *));
+ localents = libxl__calloc(gc, 9, sizeof(char *));
localents[0] = "platform/acpi";
localents[1] = libxl_defbool_val(info->u.hvm.acpi) ? "1" : "0";
localents[2] = "platform/acpi_s3";
localents[3] = libxl_defbool_val(info->u.hvm.acpi_s3) ? "1" : "0";
localents[4] = "platform/acpi_s4";
localents[5] = libxl_defbool_val(info->u.hvm.acpi_s4) ? "1" : "0";
+ localents[6] = "platform/vmware_hw";
+ localents[7] = libxl__sprintf(gc, "%d", info->u.hvm.vmware_hw);
break;
case LIBXL_DOMAIN_TYPE_PV:
diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c
index 078cff1..53ab6a6 100644
--- a/tools/libxl/libxl_dom.c
+++ b/tools/libxl/libxl_dom.c
@@ -496,6 +496,11 @@ static int hvm_build_set_params(xc_interface *handle, uint32_t domid,
xc_set_hvm_param(handle, domid, HVM_PARAM_STORE_EVTCHN, store_evtchn);
xc_set_hvm_param(handle, domid, HVM_PARAM_CONSOLE_EVTCHN, console_evtchn);
+ xc_set_hvm_param(handle, domid, HVM_PARAM_VMWARE_HW,
+ info->u.hvm.vmware_hw);
+ xc_set_hvm_param(handle, domid, HVM_PARAM_VMPORT_LOGMASK,
+ info->u.hvm.vmport_logmask);
+
xc_dom_gnttab_hvm_seed(handle, domid, *console_mfn, *store_mfn,
console_domid, store_domid);
return 0;
}
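
(As a development sanity check, the two params set above can be read back through the same libxc call already used on the save path; a minimal sketch, where xch and domid are placeholders for an open xc_interface handle and a built HVM domain:

    /* Sketch only: read back the params set in hvm_build_set_params() above. */
    unsigned long vmware_hw = 0, vmport_logmask = 0;
    xc_get_hvm_param(xch, domid, HVM_PARAM_VMWARE_HW, &vmware_hw);
    xc_get_hvm_param(xch, domid, HVM_PARAM_VMPORT_LOGMASK, &vmport_logmask);
)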
diff --git a/tools/libxl/libxl_types.idl b/tools/libxl/libxl_types.idl
index 649ce50..71ba64e 100644
--- a/tools/libxl/libxl_types.idl
+++ b/tools/libxl/libxl_types.idl
@@ -346,6 +346,8 @@ libxl_domain_build_info = Struct("domain_build_info",[
("timeoffset", string),
("hpet", libxl_defbool),
("vpt_align", libxl_defbool),
+ ("vmware_hw", integer),
+ ("vmport_logmask", integer),
("timer_mode", libxl_timer_mode),
("nested_hvm", libxl_defbool),
("smbios_firmware", string),
diff --git a/tools/libxl/xl_cmdimpl.c b/tools/libxl/xl_cmdimpl.c
index bd26bcc..013066d 100644
--- a/tools/libxl/xl_cmdimpl.c
+++ b/tools/libxl/xl_cmdimpl.c
@@ -987,6 +987,16 @@ static void parse_config_data(const char *config_source,
xlu_cfg_get_defbool(config, "acpi_s4", &b_info->u.hvm.acpi_s4, 0);
xlu_cfg_get_defbool(config, "nx", &b_info->u.hvm.nx, 0);
xlu_cfg_get_defbool(config, "viridian", &b_info->u.hvm.viridian, 0);
+ if (!xlu_cfg_get_long(config, "vmware_hw", &l, 1)) {
+ b_info->u.hvm.vmware_hw = l;
+ if (dom_info->debug)
+ fprintf(stderr, "vmware_hw: 0x%llx\n", (unsigned long long)
b_info->u.hvm.vmware_hw);
+ }
+ if (!xlu_cfg_get_long(config, "vmport_logmask", &l, 1)) {
+ b_info->u.hvm.vmport_logmask = l;
+ if (dom_info->debug)
+ fprintf(stderr, "vmport_logmask: 0x%llx\n", (unsigned long
long) b_info->u.hvm.vmport_logmask);
+ }
xlu_cfg_get_defbool(config, "hpet", &b_info->u.hvm.hpet, 0);
xlu_cfg_get_defbool(config, "vpt_align", &b_info->u.hvm.vpt_align, 0);
diff --git a/tools/libxl/xl_sxp.c b/tools/libxl/xl_sxp.c
index a16a025..9010c42 100644
--- a/tools/libxl/xl_sxp.c
+++ b/tools/libxl/xl_sxp.c
@@ -102,6 +102,10 @@ void printf_info_sexp(int domid, libxl_domain_config *d_config)
printf("\t\t\t(nx %s)\n", libxl_defbool_to_string(b_info->u.hvm.nx));
printf("\t\t\t(viridian %s)\n",
libxl_defbool_to_string(b_info->u.hvm.viridian));
+ printf("\t\t\t(vmware_hw %d)\n",
+ b_info->u.hvm.vmware_hw);
+ printf("\t\t\t(vmport_logmask %x)\n",
+ b_info->u.hvm.vmport_logmask);
printf("\t\t\t(hpet %s)\n",
libxl_defbool_to_string(b_info->u.hvm.hpet));
printf("\t\t\t(vpt_align %s)\n",
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 6a7a781..38641c4 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -589,6 +589,7 @@ int hvm_domain_initialise(struct domain *d)
d->arch.hvm_domain.params[HVM_PARAM_HPET_ENABLED] = 1;
d->arch.hvm_domain.params[HVM_PARAM_TRIPLE_FAULT_REASON] = SHUTDOWN_reboot;
+ d->arch.hvm_domain.params[HVM_PARAM_VMPORT_RESET_TIME] = 15;
vpic_init(d);
--
1.8.4