[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH] x86: add strict sanity checks for XSAVE/XRSTOR



This patch aims to make future issues around XSAVE/XRSTOR easier
to expose.

Jimmy

x86: add strict sanity checks for XSAVE/XRSTOR

Signed-off-by: Wei Gang <gang.wei@xxxxxxxxx>

diff -r 137ad3347504 xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c     Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/arch/x86/domctl.c     Fri Feb 18 16:00:41 2011 +0800
@@ -1604,8 +1604,13 @@ void arch_get_info_guest(struct vcpu *v,
 
     /* Fill legacy context from xsave area first */
     if ( cpu_has_xsave )
+    {
+        ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
+        ASSERT(v->arch.xsave_area);
+
         memcpy(v->arch.xsave_area, &v->arch.guest_context.fpu_ctxt,
                sizeof(v->arch.guest_context.fpu_ctxt));
+    }
 
     if ( !is_pv_32on64_domain(v->domain) )
         memcpy(c.nat, &v->arch.guest_context, sizeof(*c.nat)); diff -r 
137ad3347504 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/arch/x86/hvm/hvm.c    Fri Feb 18 16:03:23 2011 +0800
@@ -777,6 +777,9 @@ static int hvm_load_cpu_ctxt(struct doma
     {
         struct xsave_struct *xsave_area = v->arch.xsave_area;
 
+        ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
+        ASSERT(v->arch.xsave_area);
+
         memcpy(v->arch.xsave_area, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
         xsave_area->xsave_hdr.xstate_bv = XSTATE_FP_SSE;
        v->arch.xcr0_accum = XSTATE_FP_SSE;
@@ -834,6 +837,7 @@ static int hvm_save_cpu_xsave_states(str
     if ( !cpu_has_xsave )
         return 0;   /* do nothing */
 
+    ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
     for_each_vcpu ( d, v )
     {
        if ( _hvm_init_entry(h, CPU_XSAVE_CODE, v->vcpu_id, HVM_CPU_XSAVE_SIZE) )
@@ -846,8 +850,12 @@ static int hvm_save_cpu_xsave_states(str
         ctxt->xcr0 = v->arch.xcr0;
         ctxt->xcr0_accum = v->arch.xcr0_accum;
         if ( v->fpu_initialised )
+        {
+            ASSERT(v->arch.xsave_area);
+
             memcpy(&ctxt->save_area,
                 v->arch.xsave_area, xsave_cntxt_size);
+        }
     }
 
     return 0;
@@ -873,6 +881,9 @@ static int hvm_load_cpu_xsave_states(str
         gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
         return -EINVAL;
     }
+
+    ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
+    ASSERT(v->arch.xsave_area);
 
     /* Customized checking for entry since our entry is of variable length */
    desc = (struct hvm_save_descriptor *)&h->data[h->cur];

diff -r 137ad3347504 xen/arch/x86/i387.c
--- a/xen/arch/x86/i387.c       Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/arch/x86/i387.c       Fri Feb 18 16:00:41 2011 +0800
@@ -71,6 +71,9 @@ void setup_fpu(struct vcpu *v)
 
     if ( cpu_has_xsave )
     {
+        ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
+        ASSERT(v->arch.xsave_area);
+
         /*
          * XCR0 normally represents what guest OS set. In case of Xen itself, 
          * we set all supported feature mask before doing save/restore.
@@ -118,6 +121,9 @@ void save_init_fpu(struct vcpu *v)
 
     if ( cpu_has_xsave )
     {
+        ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
+        ASSERT(v->arch.xsave_area);
+
         /* XCR0 normally represents what guest OS set. In case of Xen itself,
          * we set all accumulated feature mask before doing save/restore.
          */

Attachment: xsave_sanity_check.patch
Description: xsave_sanity_check.patch

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.