# HG changeset patch
# User Keir Fraser <keir@xxxxxxx>
# Date 1288371886 -3600
# Node ID 1694027134b3d7220d07a6e0d205fd8b1410363e
# Parent fb34514bf76bf575164ff83130b1cf9c8e3fea5a
x86: Clean up existing XSAVE support
Signed-off-by: Han Weidong <weidong.han@xxxxxxxxx>
Signed-off-by: Shan Haitao <haitao.shan@xxxxxxxxx>
---
xen/arch/x86/hvm/hvm.c | 10 +++++-----
xen/arch/x86/hvm/vmx/vmx.c | 7 +++----
xen/arch/x86/i387.c | 16 ++++++----------
xen/include/asm-x86/hvm/vcpu.h | 2 +-
xen/include/asm-x86/i387.h | 24 ++++++++++--------------
5 files changed, 25 insertions(+), 34 deletions(-)
diff -r fb34514bf76b -r 1694027134b3 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Fri Oct 29 10:40:14 2010 +0100
+++ b/xen/arch/x86/hvm/hvm.c Fri Oct 29 18:04:46 2010 +0100
@@ -814,7 +814,7 @@ int hvm_vcpu_initialise(struct vcpu *v)
xsave_init_save_area(xsave_area);
v->arch.hvm_vcpu.xsave_area = xsave_area;
- v->arch.hvm_vcpu.xfeature_mask = XSTATE_FP_SSE;
+ v->arch.hvm_vcpu.xcr0 = XSTATE_FP_SSE;
}
if ( (rc = vlapic_init(v)) != 0 )
@@ -2002,8 +2002,8 @@ void hvm_cpuid(unsigned int input, unsig
if ( cpu_has_xsave )
{
/*
- * Fix up "Processor Extended State Enumeration". We only present
- * FPU(bit0) and SSE(bit1) to HVM guest for now.
+ * Fix up "Processor Extended State Enumeration". We present
+ * FPU(bit0), SSE(bit1) and YMM(bit2) to HVM guest for now.
*/
*eax = *ebx = *ecx = *edx = 0;
switch ( count )
@@ -2012,14 +2012,14 @@ void hvm_cpuid(unsigned int input, unsig
/* No HW defines bit in EDX yet. */
*edx = 0;
/* We only enable the features we know. */
- *eax = xfeature_low;
+ *eax = xfeature_mask;
/* FP/SSE + XSAVE.HEADER + YMM. */
*ecx = 512 + 64 + ((*eax & XSTATE_YMM) ? XSTATE_YMM_SIZE : 0);
/* Let ebx equal ecx at present. */
*ebx = *ecx;
break;
case 2:
- if ( !(xfeature_low & XSTATE_YMM) )
+ if ( !(xfeature_mask & XSTATE_YMM) )
break;
*eax = XSTATE_YMM_SIZE;
*ebx = XSTATE_YMM_OFFSET;
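For illustration only (not part of the changeset), the guest-visible leaf 0xD values produced by the hvm_cpuid() hunk above can be sketched as a standalone C snippet. The helper name cpuid_xstate_leaf and the local constants are invented for the sketch; the YMM size and offset follow the architectural layout (256 bytes at offset 512 + 64):

    #include <stdint.h>
    #include <stdio.h>

    /* Feature bits as used by the XSTATE_* names in the hunk above. */
    #define XSTATE_FP       (1ULL << 0)
    #define XSTATE_SSE      (1ULL << 1)
    #define XSTATE_YMM      (1ULL << 2)
    #define XSTATE_FP_SSE   (XSTATE_FP | XSTATE_SSE)

    /* Architectural YMM save-area geometry: 256 bytes at offset 576. */
    #define XSTATE_YMM_SIZE   256
    #define XSTATE_YMM_OFFSET (512 + 64)

    /* Sketch of the leaf-0xD fix-up done in hvm_cpuid() above. */
    static void cpuid_xstate_leaf(uint64_t xfeature_mask, uint32_t count,
                                  uint32_t *eax, uint32_t *ebx,
                                  uint32_t *ecx, uint32_t *edx)
    {
        *eax = *ebx = *ecx = *edx = 0;
        switch ( count )
        {
        case 0:
            /* State components the guest is allowed to see. */
            *eax = (uint32_t)xfeature_mask;
            /* Legacy FP/SSE region + XSAVE header + YMM if offered. */
            *ecx = 512 + 64 + ((*eax & XSTATE_YMM) ? XSTATE_YMM_SIZE : 0);
            *ebx = *ecx;
            break;
        case 2:
            if ( !(xfeature_mask & XSTATE_YMM) )
                break;
            *eax = XSTATE_YMM_SIZE;
            *ebx = XSTATE_YMM_OFFSET;
            break;
        }
    }

    int main(void)
    {
        uint32_t a, b, c, d;
        cpuid_xstate_leaf(XSTATE_FP_SSE | XSTATE_YMM, 0, &a, &b, &c, &d);
        printf("leaf 0xD[0]: eax=%#x ebx=%u ecx=%u edx=%u\n", a, b, c, d);
        return 0;
    }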
diff -r fb34514bf76b -r 1694027134b3 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Fri Oct 29 10:40:14 2010 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c Fri Oct 29 18:04:46 2010 +0100
@@ -2203,20 +2203,19 @@ static int vmx_handle_xsetbv(u64 new_bv)
static int vmx_handle_xsetbv(u64 new_bv)
{
struct vcpu *v = current;
- u64 xfeature = (((u64)xfeature_high) << 32) | xfeature_low;
struct segment_register sreg;
hvm_get_segment_register(v, x86_seg_ss, &sreg);
if ( sreg.attr.fields.dpl != 0 )
goto err;
- if ( ((new_bv ^ xfeature) & ~xfeature) || !(new_bv & 1) )
+ if ( ((new_bv ^ xfeature_mask) & ~xfeature_mask) || !(new_bv & 1) )
goto err;
- if ( (xfeature & XSTATE_YMM & new_bv) && !(new_bv & XSTATE_SSE) )
+ if ( (xfeature_mask & XSTATE_YMM & new_bv) && !(new_bv & XSTATE_SSE) )
goto err;
- v->arch.hvm_vcpu.xfeature_mask = new_bv;
+ v->arch.hvm_vcpu.xcr0 = new_bv;
set_xcr0(new_bv);
return 0;
err:
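The checks vmx_handle_xsetbv() now applies to the guest-supplied XCR0 value boil down to three conditions. A minimal sketch, with the subset test written in the logically equivalent form new_bv & ~xfeature_mask and a made-up helper name xcr0_value_valid:

    #include <stdbool.h>
    #include <stdint.h>

    #define XSTATE_FP   (1ULL << 0)
    #define XSTATE_SSE  (1ULL << 1)
    #define XSTATE_YMM  (1ULL << 2)

    /* Conditions vmx_handle_xsetbv() enforces before calling set_xcr0(). */
    static bool xcr0_value_valid(uint64_t xfeature_mask, uint64_t new_bv)
    {
        /* The guest may only enable states the hypervisor offers. */
        if ( new_bv & ~xfeature_mask )
            return false;
        /* Bit 0 (x87 state) must always be set in XCR0. */
        if ( !(new_bv & XSTATE_FP) )
            return false;
        /* YMM state can only be enabled together with SSE state. */
        if ( (new_bv & XSTATE_YMM) && !(new_bv & XSTATE_SSE) )
            return false;
        return true;
    }

    int main(void)
    {
        /* Host offers FP+SSE+YMM; a guest enabling all three is accepted. */
        uint64_t offered = XSTATE_FP | XSTATE_SSE | XSTATE_YMM;
        return xcr0_value_valid(offered, offered) ? 0 : 1;
    }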
diff -r fb34514bf76b -r 1694027134b3 xen/arch/x86/i387.c
--- a/xen/arch/x86/i387.c Fri Oct 29 10:40:14 2010 +0100
+++ b/xen/arch/x86/i387.c Fri Oct 29 18:04:46 2010 +0100
@@ -142,7 +142,7 @@ u32 xsave_cntxt_size;
u32 xsave_cntxt_size;
/* A 64-bit bitmask of the XSAVE/XRSTOR features supported by processor. */
-u32 xfeature_low, xfeature_high;
+u64 xfeature_mask;
void xsave_init(void)
{
@@ -186,15 +186,15 @@ void xsave_init(void)
* We know FP/SSE and YMM about eax, and nothing about edx at present.
*/
xsave_cntxt_size = ebx;
- xfeature_low = eax & XCNTXT_MASK;
- xfeature_high = 0;
- printk("%s: using cntxt_size: 0x%x and states: %08x:%08x\n",
- __func__, xsave_cntxt_size, xfeature_high, xfeature_low);
+ xfeature_mask = eax + ((u64)edx << 32);
+ xfeature_mask &= XCNTXT_MASK;
+ printk("%s: using cntxt_size: 0x%x and states: 0x%"PRIx64"\n",
+ __func__, xsave_cntxt_size, xfeature_mask);
}
else
{
BUG_ON(xsave_cntxt_size != ebx);
- BUG_ON(xfeature_low != (eax & XCNTXT_MASK));
+ BUG_ON(xfeature_mask != (xfeature_mask & XCNTXT_MASK));
}
}
@@ -202,11 +202,7 @@ void xsave_init_save_area(void *save_are
{
memset(save_area, 0, xsave_cntxt_size);
- ((u16 *)save_area)[0] = 0x37f; /* FCW */
- ((u16 *)save_area)[2] = 0xffff; /* FTW */
((u32 *)save_area)[6] = 0x1f80; /* MXCSR */
-
- ((struct xsave_struct *)save_area)->xsave_hdr.xstate_bv = XSTATE_FP_SSE;
}
/*
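The way the patched xsave_init() assembles the 64-bit xfeature_mask from CPUID leaf 0xD, sub-leaf 0 can be mimicked in user space. This is a rough sketch that assumes an x86 CPU with XSAVE support; cpuid_count and XCNTXT_MASK are defined locally just for the example:

    #include <stdint.h>
    #include <stdio.h>

    /* Components Xen knows how to handle (x87, SSE, YMM), as in the hunk. */
    #define XCNTXT_MASK  ((1ULL << 0) | (1ULL << 1) | (1ULL << 2))

    /* CPUID with a sub-leaf, as needed for leaf 0xD (x86 only). */
    static void cpuid_count(uint32_t leaf, uint32_t sub, uint32_t *a,
                            uint32_t *b, uint32_t *c, uint32_t *d)
    {
        asm volatile ( "cpuid"
                       : "=a" (*a), "=b" (*b), "=c" (*c), "=d" (*d)
                       : "0" (leaf), "2" (sub) );
    }

    int main(void)
    {
        uint32_t eax, ebx, ecx, edx;
        uint64_t xfeature_mask;

        cpuid_count(0xd, 0, &eax, &ebx, &ecx, &edx);

        /* EDX:EAX enumerate the supported components; keep only the ones
         * Xen understands, exactly as the patched xsave_init() does. */
        xfeature_mask = ((uint64_t)edx << 32) | eax;
        xfeature_mask &= XCNTXT_MASK;

        printf("cntxt_size: %#x, states: %#llx\n",
               ebx, (unsigned long long)xfeature_mask);
        return 0;
    }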
diff -r fb34514bf76b -r 1694027134b3 xen/include/asm-x86/hvm/vcpu.h
--- a/xen/include/asm-x86/hvm/vcpu.h Fri Oct 29 10:40:14 2010 +0100
+++ b/xen/include/asm-x86/hvm/vcpu.h Fri Oct 29 18:04:46 2010 +0100
@@ -56,7 +56,7 @@ struct hvm_vcpu {
* #NM handler, we XRSTOR the states we XSAVE-ed;
*/
void *xsave_area;
- uint64_t xfeature_mask;
+ uint64_t xcr0;
struct vlapic vlapic;
s64 cache_tsc_offset;
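As a rough illustration of the lazy scheme the comment above refers to (extended state is only XRSTOR-ed from the #NM handler on first use), here is a toy model; struct toy_vcpu and handle_nm are invented names standing in for the real vCPU structure and trap handler:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Invented stand-in for the relevant hvm_vcpu fields. */
    struct toy_vcpu {
        bool     fpu_dirtied;   /* extended state currently on the CPU? */
        uint64_t xcr0;          /* components the guest has enabled     */
    };

    /* On first FP/SSE/YMM use after a reschedule, CR0.TS raises #NM and
     * only then is the saved state loaded back (the XRSTOR mentioned in
     * the comment above). */
    static void handle_nm(struct toy_vcpu *v)
    {
        if ( !v->fpu_dirtied )
        {
            /* stand-in for setup_fpu(): clts(); xrstor(v); */
            printf("restoring xstate for xcr0=%#llx\n",
                   (unsigned long long)v->xcr0);
            v->fpu_dirtied = true;
        }
    }

    int main(void)
    {
        struct toy_vcpu v = { .fpu_dirtied = false, .xcr0 = 0x7 };
        handle_nm(&v);   /* first FP use: state is restored */
        handle_nm(&v);   /* later uses: nothing to do       */
        return 0;
    }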
diff -r fb34514bf76b -r 1694027134b3 xen/include/asm-x86/i387.h
--- a/xen/include/asm-x86/i387.h Fri Oct 29 10:40:14 2010 +0100
+++ b/xen/include/asm-x86/i387.h Fri Oct 29 18:04:46 2010 +0100
@@ -15,7 +15,7 @@
#include <asm/processor.h>
extern unsigned int xsave_cntxt_size;
-extern u32 xfeature_low, xfeature_high;
+extern u64 xfeature_mask;
extern void xsave_init(void);
extern void xsave_init_save_area(void *save_area);
@@ -49,45 +49,41 @@ struct xsave_struct
#define REX_PREFIX
#endif
-static inline void xsetbv(u32 index, u64 xfeature_mask)
+static inline void xsetbv(u32 index, u64 xfeatures)
{
- u32 hi = xfeature_mask >> 32;
- u32 lo = (u32)xfeature_mask;
+ u32 hi = xfeatures >> 32;
+ u32 lo = (u32)xfeatures;
asm volatile (".byte 0x0f,0x01,0xd1" :: "c" (index),
"a" (lo), "d" (hi));
}
-static inline void set_xcr0(u64 xfeature_mask)
+static inline void set_xcr0(u64 xfeatures)
{
- xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeature_mask);
+ xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures);
}
static inline void xsave(struct vcpu *v)
{
- u64 mask = v->arch.hvm_vcpu.xfeature_mask | XSTATE_FP_SSE;
- u32 lo = mask, hi = mask >> 32;
struct xsave_struct *ptr;
ptr =(struct xsave_struct *)v->arch.hvm_vcpu.xsave_area;
asm volatile (".byte " REX_PREFIX "0x0f,0xae,0x27"
:
- : "a" (lo), "d" (hi), "D"(ptr)
+ : "a" (-1), "d" (-1), "D"(ptr)
: "memory");
}
static inline void xrstor(struct vcpu *v)
{
- u64 mask = v->arch.hvm_vcpu.xfeature_mask | XSTATE_FP_SSE;
- u32 lo = mask, hi = mask >> 32;
struct xsave_struct *ptr;
ptr =(struct xsave_struct *)v->arch.hvm_vcpu.xsave_area;
asm volatile (".byte " REX_PREFIX "0x0f,0xae,0x2f"
:
- : "m" (*ptr), "a" (lo), "d" (hi), "D"(ptr));
+ : "m" (*ptr), "a" (-1), "d" (-1), "D"(ptr));
}
extern void init_fpu(void);
@@ -117,9 +113,9 @@ static inline void setup_fpu(struct vcpu
if ( !v->fpu_initialised )
v->fpu_initialised = 1;
- set_xcr0(v->arch.hvm_vcpu.xfeature_mask | XSTATE_FP_SSE);
+ set_xcr0(v->arch.hvm_vcpu.xcr0 | XSTATE_FP_SSE);
xrstor(v);
- set_xcr0(v->arch.hvm_vcpu.xfeature_mask);
+ set_xcr0(v->arch.hvm_vcpu.xcr0);
}
else
{
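Why it is safe for the patched xsave()/xrstor() to pass an all-ones EDX:EAX mask: the hardware only operates on the intersection of that mask with XCR0, so the set_xcr0() calls in setup_fpu() above fully determine which components are touched. A small sketch of that rule, with the helper name effective_components made up for the example:

    #include <stdint.h>
    #include <stdio.h>

    #define XSTATE_FP      (1ULL << 0)
    #define XSTATE_SSE     (1ULL << 1)
    #define XSTATE_YMM     (1ULL << 2)
    #define XSTATE_FP_SSE  (XSTATE_FP | XSTATE_SSE)

    /* XSAVE/XRSTOR act on the intersection of the EDX:EAX instruction
     * mask with XCR0, so an all-ones mask defers the choice to XCR0. */
    static uint64_t effective_components(uint64_t xcr0, uint64_t insn_mask)
    {
        return xcr0 & insn_mask;
    }

    int main(void)
    {
        uint64_t guest_xcr0 = XSTATE_FP_SSE | XSTATE_YMM;

        /* setup_fpu() above: widen XCR0 so legacy FP/SSE is restored... */
        uint64_t restore_xcr0 = guest_xcr0 | XSTATE_FP_SSE;
        printf("XRSTOR touches: %#llx\n",
               (unsigned long long)effective_components(restore_xcr0, ~0ULL));

        /* ...then drop back to the guest's own XCR0 value. */
        printf("guest runs with XCR0: %#llx\n",
               (unsigned long long)guest_xcr0);
        return 0;
    }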