# HG changeset patch
# User Keir Fraser <keir@xxxxxxx>
# Date 1288772259 0
# Node ID 16093532f384eee02518520662a38ad16915b063
# Parent b3964f2f70e89887fcb333ba06dcf28929582566
x86: xsave save/restore support for both PV and HVM guests.
Signed-off-by: Shan Haitao <haitao.shan@xxxxxxxxx>
Signed-off-by: Han Weidong <weidong.han@xxxxxxxxx>
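
The PV save stream gains an "xcnt" chunk in the extended-info block,
advertising the per-vcpu extended-state record size; each vcpu's saved
context is then followed by such a record. HVM images gain a
variable-length CPU_XSAVE save record (type code 16). Two new domctls,
XEN_DOMCTL_getvcpuextstate and XEN_DOMCTL_setvcpuextstate, transfer the
state between the hypervisor and the tools.

As an illustrative sketch only (this struct is not defined by the
patch), the PV_XSAVE_SIZE blob exchanged through the new domctls is
laid out as:

    /* Hypothetical view of the buffer handled by
     * XEN_DOMCTL_{get,set}vcpuextstate; PV_XSAVE_SIZE =
     * 2 * sizeof(uint64_t) + xsave_cntxt_size. */
    struct pv_xsave_blob {
        uint64_t xcr0;         /* guest XCR0 at save time */
        uint64_t xcr0_accum;   /* all XCR0 bits the guest ever enabled */
        uint8_t  xsave_area[]; /* xsave_cntxt_size bytes of XSAVE data */
    };

In the save image itself, each record is additionally prefixed with
xfeature_mask and size, which the restore side checks against the host
before accepting the state.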
---
 tools/libxc/xc_domain_restore.c        |   78 ++++++++++++++++---
 tools/libxc/xc_domain_save.c           |   75 +++++++++++++++++-
 xen/arch/x86/domctl.c                  |  135 +++++++++++++++++++++++++++++++++
 xen/arch/x86/hvm/hvm.c                 |  118 ++++++++++++++++++++++++++++
 xen/include/public/arch-x86/hvm/save.h |   25 +++++-
 xen/include/public/domctl.h            |   28 ++++++
 xen/include/xsm/xsm.h                  |    5 +
 xen/xsm/flask/hooks.c                  |   20 ++++
 xen/xsm/flask/include/av_permissions.h |    2
 9 files changed, 473 insertions(+), 13 deletions(-)
diff -r b3964f2f70e8 -r 16093532f384 tools/libxc/xc_domain_restore.c
--- a/tools/libxc/xc_domain_restore.c Wed Nov 03 08:16:47 2010 +0000
+++ b/tools/libxc/xc_domain_restore.c Wed Nov 03 08:17:39 2010 +0000
@@ -189,7 +189,8 @@ static int uncanonicalize_pagetable(
/* Load the p2m frame list, plus potential extended info chunk */
static xen_pfn_t *load_p2m_frame_list(
xc_interface *xch, struct restore_ctx *ctx,
- int io_fd, int *pae_extended_cr3, int *ext_vcpucontext)
+ int io_fd, int *pae_extended_cr3, int *ext_vcpucontext,
+ int *vcpuextstate, uint32_t *vcpuextstate_size)
{
xen_pfn_t *p2m_frame_list;
vcpu_guest_context_any_t ctxt;
@@ -265,6 +266,13 @@ static xen_pfn_t *load_p2m_frame_list(
else if ( !strncmp(chunk_sig, "extv", 4) )
{
*ext_vcpucontext = 1;
+ }
+ else if ( !strncmp(chunk_sig, "xcnt", 4) )
+ {
+ *vcpuextstate = 1;
+ RDEXACT(io_fd, vcpuextstate_size, sizeof(*vcpuextstate_size));
+ tot_bytes -= chunk_bytes;
+ chunk_bytes = 0;
}
/* Any remaining bytes of this chunk: read and discard. */
@@ -449,7 +457,8 @@ static int buffer_tail_hvm(xc_interface
static int buffer_tail_hvm(xc_interface *xch, struct restore_ctx *ctx,
struct tailbuf_hvm *buf, int fd,
unsigned int max_vcpu_id, uint64_t vcpumap,
- int ext_vcpucontext)
+ int ext_vcpucontext,
+ int vcpuextstate, uint32_t vcpuextstate_size)
{
uint8_t *tmp;
unsigned char qemusig[21];
@@ -516,7 +525,9 @@ static int buffer_tail_pv(xc_interface *
static int buffer_tail_pv(xc_interface *xch, struct restore_ctx *ctx,
struct tailbuf_pv *buf, int fd,
unsigned int max_vcpu_id, uint64_t vcpumap,
- int ext_vcpucontext)
+ int ext_vcpucontext,
+ int vcpuextstate,
+ uint32_t vcpuextstate_size)
{
unsigned int i;
size_t pfnlen, vcpulen;
@@ -556,6 +567,9 @@ static int buffer_tail_pv(xc_interface *
: sizeof(vcpu_guest_context_x86_32_t)) * buf->vcpucount;
if ( ext_vcpucontext )
vcpulen += 128 * buf->vcpucount;
+ if ( vcpuextstate ) {
+ vcpulen += vcpuextstate_size * buf->vcpucount;
+ }
if ( !(buf->vcpubuf) ) {
if ( !(buf->vcpubuf = malloc(vcpulen)) ) {
@@ -594,14 +608,17 @@ static int buffer_tail_pv(xc_interface *
static int buffer_tail(xc_interface *xch, struct restore_ctx *ctx,
tailbuf_t *buf, int fd, unsigned int max_vcpu_id,
- uint64_t vcpumap, int ext_vcpucontext)
+ uint64_t vcpumap, int ext_vcpucontext,
+ int vcpuextstate, uint32_t vcpuextstate_size)
{
if ( buf->ishvm )
return buffer_tail_hvm(xch, ctx, &buf->u.hvm, fd, max_vcpu_id, vcpumap,
- ext_vcpucontext);
+ ext_vcpucontext, vcpuextstate,
+ vcpuextstate_size);
else
return buffer_tail_pv(xch, ctx, &buf->u.pv, fd, max_vcpu_id, vcpumap,
- ext_vcpucontext);
+ ext_vcpucontext, vcpuextstate,
+ vcpuextstate_size);
}
static void tailbuf_free_hvm(struct tailbuf_hvm *buf)
@@ -1056,6 +1073,8 @@ int xc_domain_restore(xc_interface *xch,
{
DECLARE_DOMCTL;
int rc = 1, frc, i, j, n, m, pae_extended_cr3 = 0, ext_vcpucontext = 0;
+ int vcpuextstate = 0;
+ uint32_t vcpuextstate_size = 0;
unsigned long mfn, pfn;
unsigned int prev_pc;
int nraces = 0;
@@ -1069,6 +1088,9 @@ int xc_domain_restore(xc_interface *xch,
/* A copy of the CPU context of the guest. */
DECLARE_HYPERCALL_BUFFER(vcpu_guest_context_any_t, ctxt);
+
+ /* A copy of the CPU eXtended States of the guest. */
+ DECLARE_HYPERCALL_BUFFER(void, buffer);
/* A table containing the type of each PFN (/not/ MFN!). */
unsigned long *pfn_type = NULL;
@@ -1156,7 +1178,9 @@ int xc_domain_restore(xc_interface *xch,
{
/* Load the p2m frame list, plus potential extended info chunk */
p2m_frame_list = load_p2m_frame_list(xch, ctx,
- io_fd, &pae_extended_cr3, &ext_vcpucontext);
+ io_fd, &pae_extended_cr3, &ext_vcpucontext,
+ &vcpuextstate, &vcpuextstate_size);
+
if ( !p2m_frame_list )
goto out;
@@ -1303,10 +1327,11 @@ int xc_domain_restore(xc_interface *xch,
if ( !ctx->completed ) {
if ( buffer_tail(xch, ctx, &tailbuf, io_fd, max_vcpu_id, vcpumap,
- ext_vcpucontext) < 0 ) {
+ ext_vcpucontext, vcpuextstate, vcpuextstate_size) < 0 ) {
ERROR ("error buffering image tail");
goto out;
}
+
ctx->completed = 1;
/*
@@ -1332,7 +1357,7 @@ int xc_domain_restore(xc_interface *xch,
memset(&tmptail, 0, sizeof(tmptail));
tmptail.ishvm = hvm;
if ( buffer_tail(xch, ctx, &tmptail, io_fd, max_vcpu_id, vcpumap,
- ext_vcpucontext) < 0 ) {
+ ext_vcpucontext, vcpuextstate, vcpuextstate_size) < 0 ) {
ERROR ("error buffering image tail, finishing");
goto finish;
}
@@ -1653,7 +1678,7 @@ int xc_domain_restore(xc_interface *xch,
}
if ( !ext_vcpucontext )
- continue;
+ goto vcpu_ext_state_restore;
memcpy(&domctl.u.ext_vcpucontext, vcpup, 128);
vcpup += 128;
domctl.cmd = XEN_DOMCTL_set_ext_vcpucontext;
@@ -1664,6 +1689,39 @@ int xc_domain_restore(xc_interface *xch,
PERROR("Couldn't set extended vcpu%d info", i);
goto out;
}
+
+ vcpu_ext_state_restore:
+ if ( !vcpuextstate )
+ continue;
+
+ memcpy(&domctl.u.vcpuextstate.xfeature_mask, vcpup,
+ sizeof(domctl.u.vcpuextstate.xfeature_mask));
+ vcpup += sizeof(domctl.u.vcpuextstate.xfeature_mask);
+ memcpy(&domctl.u.vcpuextstate.size, vcpup,
+ sizeof(domctl.u.vcpuextstate.size));
+ vcpup += sizeof(domctl.u.vcpuextstate.size);
+
+ buffer = xc_hypercall_buffer_alloc(xch, buffer,
+ domctl.u.vcpuextstate.size);
+ if ( !buffer )
+ {
+ PERROR("Could not allocate buffer to restore eXtended States");
+ goto out;
+ }
+ memcpy(buffer, vcpup, domctl.u.vcpuextstate.size);
+ vcpup += domctl.u.vcpuextstate.size;
+
+ domctl.cmd = XEN_DOMCTL_setvcpuextstate;
+ domctl.domain = dom;
+ domctl.u.vcpuextstate.vcpu = i;
+ set_xen_guest_handle(domctl.u.vcpuextstate.buffer, buffer);
+ frc = xc_domctl(xch, &domctl);
+ if ( frc != 0 )
+ {
+ PERROR("Couldn't set eXtended States for vcpu%d", i);
+ goto out;
+ }
+ xc_hypercall_buffer_free(xch, buffer);
}
memcpy(shared_info_page, tailbuf.u.pv.shared_info_page, PAGE_SIZE);
diff -r b3964f2f70e8 -r 16093532f384 tools/libxc/xc_domain_save.c
--- a/tools/libxc/xc_domain_save.c Wed Nov 03 08:16:47 2010 +0000
+++ b/tools/libxc/xc_domain_save.c Wed Nov 03 08:17:39 2010 +0000
@@ -810,14 +810,35 @@ static xen_pfn_t *map_and_save_p2m_table
? sizeof(ctxt.x64)
: sizeof(ctxt.x32));
uint32_t chunk2_sz = 0;
- uint32_t tot_sz = (chunk1_sz + 8) + (chunk2_sz + 8);
+ uint32_t chunk3_sz = 4;
+ uint32_t xcnt_size = 0;
+ uint32_t tot_sz;
+ DECLARE_DOMCTL;
+
+ domctl.cmd = XEN_DOMCTL_getvcpuextstate;
+ domctl.domain = dom;
+ domctl.u.vcpuextstate.vcpu = 0;
+ domctl.u.vcpuextstate.size = 0;
+ domctl.u.vcpuextstate.xfeature_mask = 0;
+ if ( xc_domctl(xch, &domctl) < 0 )
+ {
+ PERROR("No extended context for VCPU%d", i);
+ goto out;
+ }
+ xcnt_size = domctl.u.vcpuextstate.size + 2 * sizeof(uint64_t);
+
+ tot_sz = (chunk1_sz + 8) + (chunk2_sz + 8) + (chunk3_sz + 8);
+
if ( write_exact(io_fd, &signature, sizeof(signature)) ||
write_exact(io_fd, &tot_sz, sizeof(tot_sz)) ||
write_exact(io_fd, "vcpu", 4) ||
write_exact(io_fd, &chunk1_sz, sizeof(chunk1_sz)) ||
write_exact(io_fd, &ctxt, chunk1_sz) ||
write_exact(io_fd, "extv", 4) ||
- write_exact(io_fd, &chunk2_sz, sizeof(chunk2_sz)) )
+ write_exact(io_fd, &chunk2_sz, sizeof(chunk2_sz)) ||
+ write_exact(io_fd, "xcnt", 4) ||
+ write_exact(io_fd, &chunk3_sz, sizeof(chunk3_sz)) ||
+ write_exact(io_fd, &xcnt_size, 4) )
{
PERROR("write: extended info");
goto out;
@@ -904,6 +925,9 @@ int xc_domain_save(xc_interface *xch, in
/* base of the region in which domain memory is mapped */
unsigned char *region_base = NULL;
+
+ /* A copy of the CPU eXtended States of the guest. */
+ DECLARE_HYPERCALL_BUFFER(void, buffer);
/* bitmap of pages:
- that should be sent this iteration (unless later marked as skip);
@@ -1786,6 +1810,53 @@ int xc_domain_save(xc_interface *xch, in
PERROR("Error when writing to state file (2)");
goto out;
}
+
+ /* Start to fetch CPU eXtended States */
+ /* Get buffer size first */
+ domctl.cmd = XEN_DOMCTL_getvcpuextstate;
+ domctl.domain = dom;
+ domctl.u.vcpuextstate.vcpu = i;
+ domctl.u.vcpuextstate.xfeature_mask = 0;
+ domctl.u.vcpuextstate.size = 0;
+ if ( xc_domctl(xch, &domctl) < 0 )
+ {
+ PERROR("No eXtended states (XSAVE) for VCPU%d", i);
+ goto out;
+ }
+
+ /* Getting eXtended states data */
+ buffer = xc_hypercall_buffer_alloc(xch, buffer, domctl.u.vcpuextstate.size);
+ if ( !buffer )
+ {
+ PERROR("Insufficient memory for getting eXtended states for"
+ "VCPU%d", i);
+ goto out;
+ }
+ set_xen_guest_handle(domctl.u.vcpuextstate.buffer, buffer);
+ if ( xc_domctl(xch, &domctl) < 0 )
+ {
+ PERROR("No eXtended states (XSAVE) for VCPU%d", i);
+ goto out;
+ }
+
+ if ( wrexact(io_fd, &domctl.u.vcpuextstate.xfeature_mask,
+ sizeof(domctl.u.vcpuextstate.xfeature_mask)) )
+ {
+ PERROR("Error when writing to state file (2)");
+ goto out;
+ }
+ if ( wrexact(io_fd, &domctl.u.vcpuextstate.size,
+ sizeof(domctl.u.vcpuextstate.size)) )
+ {
+ PERROR("Error when writing to state file (2)");
+ goto out;
+ }
+ if ( wrexact(io_fd, buffer, domctl.u.vcpuextstate.size) )
+ {
+ PERROR("Error when writing to state file (2)");
+ goto out;
+ }
+ xc_hypercall_buffer_free(xch, buffer);
}
/*
diff -r b3964f2f70e8 -r 16093532f384 xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c Wed Nov 03 08:16:47 2010 +0000
+++ b/xen/arch/x86/domctl.c Wed Nov 03 08:17:39 2010 +0000
@@ -33,6 +33,7 @@
#include <asm/mem_event.h>
#include <public/mem_event.h>
#include <asm/mem_sharing.h>
+#include <asm/i387.h>
#ifdef XEN_KDB_CONFIG
#include "../kdb/include/kdbdefs.h"
@@ -1406,6 +1407,135 @@ long arch_do_domctl(
}
break;
+ case XEN_DOMCTL_setvcpuextstate:
+ case XEN_DOMCTL_getvcpuextstate:
+ {
+ struct xen_domctl_vcpuextstate *evc;
+ struct domain *d;
+ struct vcpu *v;
+ uint32_t offset = 0;
+ uint64_t _xfeature_mask = 0;
+ uint64_t _xcr0, _xcr0_accum;
+ void *receive_buf = NULL, *_xsave_area;
+
+#define PV_XSAVE_SIZE (2 * sizeof(uint64_t) + xsave_cntxt_size)
+
+ evc = &domctl->u.vcpuextstate;
+
+ ret = -ESRCH;
+
+ if ( !cpu_has_xsave )
+ break;
+
+ d = rcu_lock_domain_by_id(domctl->domain);
+ if ( d == NULL )
+ break;
+
+ ret = xsm_vcpuextstate(d, domctl->cmd);
+ if ( ret )
+ goto vcpuextstate_out;
+
+ ret = -ESRCH;
+ if ( (evc->vcpu >= d->max_vcpus) ||
+ ((v = d->vcpu[evc->vcpu]) == NULL) )
+ goto vcpuextstate_out;
+
+ if ( domctl->cmd == XEN_DOMCTL_getvcpuextstate )
+ {
+ if ( !evc->size && !evc->xfeature_mask )
+ {
+ evc->xfeature_mask = xfeature_mask;
+ evc->size = PV_XSAVE_SIZE;
+ ret = 0;
+ goto vcpuextstate_out;
+ }
+ if ( evc->size != PV_XSAVE_SIZE ||
+ evc->xfeature_mask != xfeature_mask )
+ {
+ ret = -EINVAL;
+ goto vcpuextstate_out;
+ }
+ if ( copy_to_guest_offset(domctl->u.vcpuextstate.buffer,
+ offset, (void *)&v->arch.xcr0,
+ sizeof(v->arch.xcr0)) )
+ {
+ ret = -EFAULT;
+ goto vcpuextstate_out;
+ }
+ offset += sizeof(v->arch.xcr0);
+ if ( copy_to_guest_offset(domctl->u.vcpuextstate.buffer,
+ offset, (void *)&v->arch.xcr0_accum,
+ sizeof(v->arch.xcr0_accum)) )
+ {
+ ret = -EFAULT;
+ goto vcpuextstate_out;
+ }
+ offset += sizeof(v->arch.xcr0_accum);
+ if ( copy_to_guest_offset(domctl->u.vcpuextstate.buffer,
+ offset, v->arch.xsave_area,
+ xsave_cntxt_size) )
+ {
+ ret = -EFAULT;
+ goto vcpuextstate_out;
+ }
+ }
+ else
+ {
+ ret = -EINVAL;
+
+ _xfeature_mask = evc->xfeature_mask;
+ /* xsave context must be restored on compatible target CPUs */
+ if ( (_xfeature_mask & xfeature_mask) != _xfeature_mask )
+ goto vcpuextstate_out;
+ if ( evc->size > PV_XSAVE_SIZE || evc->size < 2 * sizeof(uint64_t)
)
+ goto vcpuextstate_out;
+
+ receive_buf = xmalloc_bytes(evc->size);
+ if ( !receive_buf )
+ {
+ ret = -ENOMEM;
+ goto vcpuextstate_out;
+ }
+ if ( copy_from_guest_offset(receive_buf, domctl->u.vcpuextstate.buffer,
+ offset, evc->size) )
+ {
+ ret = -EFAULT;
+ xfree(receive_buf);
+ goto vcpuextstate_out;
+ }
+
+ _xcr0 = *(uint64_t *)receive_buf;
+ _xcr0_accum = *(uint64_t *)(receive_buf + sizeof(uint64_t));
+ _xsave_area = receive_buf + 2 * sizeof(uint64_t);
+
+ if ( !(_xcr0 & XSTATE_FP) || _xcr0 & ~xfeature_mask )
+ {
+ xfree(receive_buf);
+ goto vcpuextstate_out;
+ }
+ if ( (_xcr0 & _xcr0_accum) != _xcr0 )
+ {
+ xfree(receive_buf);
+ goto vcpuextstate_out;
+ }
+
+ v->arch.xcr0 = _xcr0;
+ v->arch.xcr0_accum = _xcr0_accum;
+ memcpy(v->arch.xsave_area, _xsave_area, evc->size - 2 *
sizeof(uint64_t) );
+
+ xfree(receive_buf);
+ }
+
+ ret = 0;
+
+ vcpuextstate_out:
+ rcu_unlock_domain(d);
+ if ( (domctl->cmd == XEN_DOMCTL_getvcpuextstate) &&
+ copy_to_guest(u_domctl, domctl, 1) )
+ ret = -EFAULT;
+ }
+ break;
+
#ifdef __x86_64__
case XEN_DOMCTL_mem_event_op:
{
@@ -1454,6 +1584,11 @@ void arch_get_info_guest(struct vcpu *v,
#else
#define c(fld) (c.nat->fld)
#endif
+
+ /* Fill legacy context from xsave area first */
+ if ( cpu_has_xsave )
+ memcpy(v->arch.xsave_area, &v->arch.guest_context.fpu_ctxt,
+ sizeof(v->arch.guest_context.fpu_ctxt));
if ( !is_pv_32on64_domain(v->domain) )
memcpy(c.nat, &v->arch.guest_context, sizeof(*c.nat));
diff -r b3964f2f70e8 -r 16093532f384 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Wed Nov 03 08:16:47 2010 +0000
+++ b/xen/arch/x86/hvm/hvm.c Wed Nov 03 08:17:39 2010 +0000
@@ -758,6 +758,17 @@ static int hvm_load_cpu_ctxt(struct doma
memcpy(&vc->fpu_ctxt, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
+ /* In case xsave-absent save file is restored on a xsave-capable host */
+ if ( cpu_has_xsave )
+ {
+ struct xsave_struct *xsave_area = v->arch.xsave_area;
+
+ memcpy(v->arch.xsave_area, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
+ xsave_area->xsave_hdr.xstate_bv = XSTATE_FP_SSE;
+ v->arch.xcr0_accum = XSTATE_FP_SSE;
+ v->arch.xcr0 = XSTATE_FP_SSE;
+ }
+
vc->user_regs.eax = ctxt.rax;
vc->user_regs.ebx = ctxt.rbx;
vc->user_regs.ecx = ctxt.rcx;
@@ -798,6 +809,113 @@ static int hvm_load_cpu_ctxt(struct doma
HVM_REGISTER_SAVE_RESTORE(CPU, hvm_save_cpu_ctxt, hvm_load_cpu_ctxt,
1, HVMSR_PER_VCPU);
+
+#define HVM_CPU_XSAVE_SIZE (3 * sizeof(uint64_t) + xsave_cntxt_size)
+
+static int hvm_save_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
+{
+ struct vcpu *v;
+ struct hvm_hw_cpu_xsave *ctxt;
+
+ if ( !cpu_has_xsave )
+ return 0; /* do nothing */
+
+ for_each_vcpu ( d, v )
+ {
+ if ( _hvm_init_entry(h, CPU_XSAVE_CODE, v->vcpu_id, HVM_CPU_XSAVE_SIZE) )
+ return 1;
+ ctxt = (struct hvm_hw_cpu_xsave *)&h->data[h->cur];
+ h->cur += HVM_CPU_XSAVE_SIZE;
+ memset(ctxt, 0, HVM_CPU_XSAVE_SIZE);
+
+ ctxt->xfeature_mask = xfeature_mask;
+ ctxt->xcr0 = v->arch.xcr0;
+ ctxt->xcr0_accum = v->arch.xcr0_accum;
+ if ( v->fpu_initialised )
+ memcpy(&ctxt->save_area,
+ v->arch.xsave_area, xsave_cntxt_size);
+ }
+
+ return 0;
+}
+
+static int hvm_load_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
+{
+ int vcpuid;
+ struct vcpu *v;
+ struct hvm_hw_cpu_xsave *ctxt;
+ struct hvm_save_descriptor *desc;
+ uint64_t _xfeature_mask;
+
+ /* XXX: restore fails when an image saved on an xsave-capable host is
+ * loaded on a host without xsave support */
+ if ( !cpu_has_xsave )
+ return -EINVAL;
+
+ /* Which vcpu is this? */
+ vcpuid = hvm_load_instance(h);
+ if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
+ {
+ gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
+ return -EINVAL;
+ }
+
+ /* Customized checking for entry since our entry is of variable length */
+ desc = (struct hvm_save_descriptor *)&h->data[h->cur];
+ if ( sizeof (*desc) > h->size - h->cur)
+ {
+ gdprintk(XENLOG_WARNING,
+ "HVM restore: not enough data left to read descriptpr"
+ "for type %u\n", CPU_XSAVE_CODE);
+ return -1;
+ }
+ if ( desc->length + sizeof (*desc) > h->size - h->cur)
+ {
+ gdprintk(XENLOG_WARNING,
+ "HVM restore: not enough data left to read %u bytes "
+ "for type %u\n", desc->length, CPU_XSAVE_CODE);
+ return -1;
+ }
+ if ( CPU_XSAVE_CODE != desc->typecode || (desc->length > HVM_CPU_XSAVE_SIZE) )
+ {
+ gdprintk(XENLOG_WARNING,
+ "HVM restore mismatch: expected type %u with max length %u, "
+ "saw type %u length %u\n", CPU_XSAVE_CODE,
+ (uint32_t)HVM_CPU_XSAVE_SIZE,
+ desc->typecode, desc->length);
+ return -1;
+ }
+ h->cur += sizeof (*desc);
+ /* Checking finished */
+
+ ctxt = (struct hvm_hw_cpu_xsave *)&h->data[h->cur];
+ h->cur += desc->length;
+
+ _xfeature_mask = ctxt->xfeature_mask;
+ if ( (_xfeature_mask & xfeature_mask) != _xfeature_mask )
+ return -EINVAL;
+
+ v->arch.xcr0 = ctxt->xcr0;
+ v->arch.xcr0_accum = ctxt->xcr0_accum;
+ memcpy(v->arch.xsave_area, &ctxt->save_area, xsave_cntxt_size);
+
+ return 0;
+}
+
+/* We need variable length data chunk for xsave area, hence customized
+ * declaration other than HVM_REGISTER_SAVE_RESTORE.
+ */
+static int __hvm_register_CPU_XSAVE_save_and_restore(void)
+{
+ hvm_register_savevm(CPU_XSAVE_CODE,
+ "CPU_XSAVE",
+ hvm_save_cpu_xsave_states,
+ hvm_load_cpu_xsave_states,
+ HVM_CPU_XSAVE_SIZE + sizeof (struct hvm_save_descriptor),
+ HVMSR_PER_VCPU);
+ return 0;
+}
+__initcall(__hvm_register_CPU_XSAVE_save_and_restore);
int hvm_vcpu_initialise(struct vcpu *v)
{
diff -r b3964f2f70e8 -r 16093532f384 xen/include/public/arch-x86/hvm/save.h
--- a/xen/include/public/arch-x86/hvm/save.h Wed Nov 03 08:16:47 2010 +0000
+++ b/xen/include/public/arch-x86/hvm/save.h Wed Nov 03 08:17:39 2010 +0000
@@ -431,9 +431,32 @@ struct hvm_viridian_context {
DECLARE_HVM_SAVE_TYPE(VIRIDIAN, 15, struct hvm_viridian_context);
+
+/*
+ * The save area of XSAVE/XRSTOR.
+ */
+
+struct hvm_hw_cpu_xsave {
+ uint64_t xfeature_mask;
+ uint64_t xcr0; /* Updated by XSETBV */
+ uint64_t xcr0_accum; /* Updated by XSETBV */
+ struct {
+ struct { char x[512]; } fpu_sse;
+
+ struct {
+ uint64_t xstate_bv; /* Updated by XRSTOR */
+ uint64_t reserved[7];
+ } xsave_hdr; /* The 64-byte header */
+
+ struct { char x[0]; } ymm; /* YMM */
+ } save_area;
+} __attribute__((packed));
+
+#define CPU_XSAVE_CODE 16
+
/*
* Largest type-code in use
*/
-#define HVM_SAVE_CODE_MAX 15
+#define HVM_SAVE_CODE_MAX 16
#endif /* __XEN_PUBLIC_HVM_SAVE_X86_H__ */
diff -r b3964f2f70e8 -r 16093532f384 xen/include/public/domctl.h
--- a/xen/include/public/domctl.h Wed Nov 03 08:16:47 2010 +0000
+++ b/xen/include/public/domctl.h Wed Nov 03 08:17:39 2010 +0000
@@ -780,6 +780,31 @@ struct xen_domctl_mem_sharing_op {
};
typedef struct xen_domctl_mem_sharing_op xen_domctl_mem_sharing_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_sharing_op_t);
+
+#if defined(__i386__) || defined(__x86_64__)
+/* XEN_DOMCTL_setvcpuextstate */
+/* XEN_DOMCTL_getvcpuextstate */
+struct xen_domctl_vcpuextstate {
+ /* IN: VCPU that this call applies to. */
+ uint32_t vcpu;
+ /*
+ * SET: xfeature support mask of struct (IN)
+ * GET: xfeature support mask of struct (IN/OUT)
+ * xfeature mask is served as identifications of the saving format
+ * so that compatible CPUs can have a check on format to decide
+ * whether it can restore.
+ */
+ uint64_aligned_t xfeature_mask;
+ /*
+ * SET: Size of struct (IN)
+ * GET: Size of struct (IN/OUT)
+ */
+ uint64_aligned_t size;
+ XEN_GUEST_HANDLE_64(uint64) buffer;
+};
+typedef struct xen_domctl_vcpuextstate xen_domctl_vcpuextstate_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuextstate_t);
+#endif
struct xen_domctl {
uint32_t cmd;
@@ -841,6 +866,8 @@ struct xen_domctl {
#define XEN_DOMCTL_gettscinfo 59
#define XEN_DOMCTL_settscinfo 60
#define XEN_DOMCTL_getpageframeinfo3 61
+#define XEN_DOMCTL_setvcpuextstate 62
+#define XEN_DOMCTL_getvcpuextstate 63
#define XEN_DOMCTL_gdbsx_guestmemio 1000
#define XEN_DOMCTL_gdbsx_pausevcpu 1001
#define XEN_DOMCTL_gdbsx_unpausevcpu 1002
@@ -891,6 +918,7 @@ struct xen_domctl {
struct xen_domctl_mem_sharing_op mem_sharing_op;
#if defined(__i386__) || defined(__x86_64__)
struct xen_domctl_cpuid cpuid;
+ struct xen_domctl_vcpuextstate vcpuextstate;
#endif
struct xen_domctl_gdbsx_memio gdbsx_guest_memio;
struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
diff -r b3964f2f70e8 -r 16093532f384 xen/include/xsm/xsm.h
--- a/xen/include/xsm/xsm.h Wed Nov 03 08:16:47 2010 +0000
+++ b/xen/include/xsm/xsm.h Wed Nov 03 08:17:39 2010 +0000
@@ -149,6 +149,7 @@ struct xsm_operations {
int (*bind_pt_irq) (struct domain *d, struct xen_domctl_bind_pt_irq *bind);
int (*pin_mem_cacheattr) (struct domain *d);
int (*ext_vcpucontext) (struct domain *d, uint32_t cmd);
+ int (*vcpuextstate) (struct domain *d, uint32_t cmd);
#endif
};
@@ -622,6 +623,10 @@ static inline int xsm_ext_vcpucontext(st
{
return xsm_call(ext_vcpucontext(d, cmd));
}
+static inline int xsm_vcpuextstate(struct domain *d, uint32_t cmd)
+{
+ return xsm_call(vcpuextstate(d, cmd));
+}
#endif /* CONFIG_X86 */
#endif /* __XSM_H */
diff -r b3964f2f70e8 -r 16093532f384 xen/xsm/flask/hooks.c
--- a/xen/xsm/flask/hooks.c Wed Nov 03 08:16:47 2010 +0000
+++ b/xen/xsm/flask/hooks.c Wed Nov 03 08:17:39 2010 +0000
@@ -1173,6 +1173,25 @@ static int flask_ext_vcpucontext (struct
break;
default:
return -EPERM;
+ }
+
+ return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, perm);
+}
+
+static int flask_vcpuextstate (struct domain *d, uint32_t cmd)
+{
+ u32 perm;
+
+ switch ( cmd )
+ {
+ case XEN_DOMCTL_setvcpuextstate:
+ perm = DOMAIN__SETVCPUEXTSTATE;
+ break;
+ case XEN_DOMCTL_getvcpuextstate:
+ perm = DOMAIN__GETVCPUEXTSTATE;
+ break;
+ default:
+ return -EPERM;
}
return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, perm);
@@ -1328,6 +1347,7 @@ static struct xsm_operations flask_ops =
.bind_pt_irq = flask_bind_pt_irq,
.pin_mem_cacheattr = flask_pin_mem_cacheattr,
.ext_vcpucontext = flask_ext_vcpucontext,
+ .vcpuextstate = flask_vcpuextstate,
#endif
};
diff -r b3964f2f70e8 -r 16093532f384 xen/xsm/flask/include/av_permissions.h
--- a/xen/xsm/flask/include/av_permissions.h Wed Nov 03 08:16:47 2010 +0000
+++ b/xen/xsm/flask/include/av_permissions.h Wed Nov 03 08:17:39 2010 +0000
@@ -51,6 +51,8 @@
#define DOMAIN__TRIGGER 0x00800000UL
#define DOMAIN__GETEXTVCPUCONTEXT 0x01000000UL
#define DOMAIN__SETEXTVCPUCONTEXT 0x02000000UL
+#define DOMAIN__GETVCPUEXTSTATE 0x04000000UL
+#define DOMAIN__SETVCPUEXTSTATE 0x08000000UL
#define HVM__SETHVMC 0x00000001UL
#define HVM__GETHVMC 0x00000002UL