|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH v1 7/8] x86/xstate: Stop tracking nonlazy xstate use
With the removal of lazy FPU, the full state is always restored on context
switch, so stop tracking whether nonlazy xstate is used.
Signed-off-by: Ross Lagerwall <ross.lagerwall@xxxxxxxxxx>
---
xen/arch/x86/domctl.c | 1 -
xen/arch/x86/hvm/hvm.c | 1 -
xen/arch/x86/i387.c | 12 +-----------
xen/arch/x86/include/asm/domain.h | 3 ---
xen/arch/x86/include/asm/xstate.h | 6 ++----
xen/arch/x86/xstate.c | 3 ---
6 files changed, 3 insertions(+), 23 deletions(-)
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index d9b08182ac1d..a9fbb2d405b7 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1013,7 +1013,6 @@ long arch_do_domctl(
v->arch.xcr0 = _xcr0;
v->arch.xcr0_accum = _xcr0_accum;
- v->arch.nonlazy_xstate_used = _xcr0_accum & XSTATE_NONLAZY;
compress_xsave_states(v, _xsave_area,
evc->size - PV_XSAVE_HDR_SIZE);
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index d4ba82845146..0e7b66d726ae 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1383,7 +1383,6 @@ static int cf_check hvm_load_cpu_xsave_states(
v->arch.xcr0 = ctxt->xcr0;
v->arch.xcr0_accum = ctxt->xcr0_accum;
- v->arch.nonlazy_xstate_used = ctxt->xcr0_accum & XSTATE_NONLAZY;
compress_xsave_states(v, &ctxt->save_area,
size - offsetof(struct hvm_hw_cpu_xsave, save_area));
diff --git a/xen/arch/x86/i387.c b/xen/arch/x86/i387.c
index 5e893a2aab94..9acaaf4673df 100644
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -108,25 +108,18 @@ static inline void fpu_fxrstor(struct vcpu *v)
/* FPU Save Functions */
/*******************************/
-static inline uint64_t vcpu_xsave_mask(const struct vcpu *v)
-{
- return v->arch.nonlazy_xstate_used ? XSTATE_ALL : XSTATE_LAZY;
-}
-
/* Save x87 extended state */
static inline void fpu_xsave(struct vcpu *v)
{
bool ok;
- uint64_t mask = vcpu_xsave_mask(v);
- ASSERT(mask);
/*
* XCR0 normally represents what guest OS set. In case of Xen itself,
* we set the accumulated feature mask before doing save/restore.
*/
ok = set_xcr0(v->arch.xcr0_accum | XSTATE_FP_SSE);
ASSERT(ok);
- xsave(v, mask);
+ xsave(v, XSTATE_ALL);
ok = set_xcr0(v->arch.xcr0 ?: XSTATE_FP_SSE);
ASSERT(ok);
}
@@ -202,9 +195,6 @@ void vcpu_restore_fpu(struct vcpu *v)
*/
static bool _vcpu_save_fpu(struct vcpu *v)
{
- if ( !v->arch.nonlazy_xstate_used )
- return false;
-
ASSERT(!is_idle_vcpu(v));
/* This can happen, if a paravirtualised guest OS has set its CR0.TS. */
diff --git a/xen/arch/x86/include/asm/domain.h
b/xen/arch/x86/include/asm/domain.h
index bd7b02085ef8..385a6666dafa 100644
--- a/xen/arch/x86/include/asm/domain.h
+++ b/xen/arch/x86/include/asm/domain.h
@@ -667,9 +667,6 @@ struct arch_vcpu
* it explicitly enables it via xcr0.
*/
uint64_t xcr0_accum;
- /* This variable determines whether nonlazy extended state has been used,
- * and thus should be saved/restored. */
- bool nonlazy_xstate_used;
struct vmce vmce;
diff --git a/xen/arch/x86/include/asm/xstate.h
b/xen/arch/x86/include/asm/xstate.h
index ca38c43ec1c3..c96d75e38b25 100644
--- a/xen/arch/x86/include/asm/xstate.h
+++ b/xen/arch/x86/include/asm/xstate.h
@@ -33,12 +33,10 @@ extern uint32_t mxcsr_mask;
#define XSTATE_FP_SSE (X86_XCR0_X87 | X86_XCR0_SSE)
#define XCNTXT_MASK (X86_XCR0_X87 | X86_XCR0_SSE | X86_XCR0_YMM | \
X86_XCR0_OPMASK | X86_XCR0_ZMM | X86_XCR0_HI_ZMM | \
- XSTATE_NONLAZY)
+ X86_XCR0_BNDREGS | X86_XCR0_BNDCSR | X86_XCR0_PKRU | \
+ X86_XCR0_TILE_CFG | X86_XCR0_TILE_DATA)
#define XSTATE_ALL (~(1ULL << 63))
-#define XSTATE_NONLAZY (X86_XCR0_BNDREGS | X86_XCR0_BNDCSR | X86_XCR0_PKRU | \
- X86_XCR0_TILE_CFG | X86_XCR0_TILE_DATA)
-#define XSTATE_LAZY (XSTATE_ALL & ~XSTATE_NONLAZY)
#define XSTATE_XSAVES_ONLY 0
#define XSTATE_COMPACTION_ENABLED (1ULL << 63)
diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c
index 11d390cac985..658f372b8c51 100644
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -991,9 +991,6 @@ int handle_xsetbv(u32 index, u64 new_bv)
curr->arch.xcr0 = new_bv;
curr->arch.xcr0_accum |= new_bv;
- if ( new_bv & XSTATE_NONLAZY )
- curr->arch.nonlazy_xstate_used = 1;
-
mask &= ~XSTATE_FP_SSE;
if ( mask )
{
--
2.53.0
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®. |