|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH v1 5/8] x86: Remove fully_eager_fpu
Since fully_eager_fpu is always true, remove it and adjust the code
accordingly. At the same time, rename vcpu_restore_fpu_nonlazy to
vcpu_restore_fpu, since it now always performs the full restore.
No functional change intended.
Signed-off-by: Ross Lagerwall <ross.lagerwall@xxxxxxxxxx>
---
xen/arch/x86/domain.c | 2 +-
xen/arch/x86/hvm/emulate.c | 18 +--------------
xen/arch/x86/i387.c | 35 ++++++++----------------------
xen/arch/x86/include/asm/domain.h | 3 ---
xen/arch/x86/include/asm/hvm/hvm.h | 2 --
xen/arch/x86/include/asm/i387.h | 2 +-
xen/common/efi/runtime.c | 2 +-
7 files changed, 13 insertions(+), 51 deletions(-)
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 4664264b2f5d..a68b7a583294 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -2125,7 +2125,7 @@ static void __context_switch(void)
if ( cpu_has_xsaves && is_hvm_vcpu(n) )
set_msr_xss(n->arch.msrs->xss.raw);
}
- vcpu_restore_fpu_nonlazy(n, false);
+ vcpu_restore_fpu(n);
nd->arch.ctxt_switch->to(n);
}
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 86b1f7535668..2daea084f15c 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -2630,23 +2630,7 @@ static void cf_check hvmemul_put_fpu(
}
if ( backout == X86EMUL_FPU_fpu )
- {
- /*
- * To back out changes to the register file
- * - in fully eager mode, restore original state immediately,
- * - in lazy mode, simply adjust state such that upon next FPU insn
- * use by the guest we'll reload the state saved (or freshly loaded)
- * by hvmemul_get_fpu().
- */
- if ( curr->arch.fully_eager_fpu )
- vcpu_restore_fpu_nonlazy(curr, false);
- else
- {
- curr->fpu_dirtied = false;
- stts();
- alternative_vcall(hvm_funcs.fpu_leave, curr);
- }
- }
+ vcpu_restore_fpu(curr);
}
static int cf_check hvmemul_tlb_op(
diff --git a/xen/arch/x86/i387.c b/xen/arch/x86/i387.c
index 7da731865f73..88018397b1ad 100644
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -194,12 +194,8 @@ static inline void fpu_fxsave(struct vcpu *v)
/* VCPU FPU Functions */
/*******************************/
/* Restore FPU state whenever VCPU is schduled in. */
-void vcpu_restore_fpu_nonlazy(struct vcpu *v, bool need_stts)
+void vcpu_restore_fpu(struct vcpu *v)
{
- /* Restore nonlazy extended state (i.e. parts not tracked by CR0.TS). */
- if ( !v->arch.fully_eager_fpu && !v->arch.nonlazy_xstate_used )
- goto maybe_stts;
-
ASSERT(!is_idle_vcpu(v));
/* Avoid recursion */
@@ -210,27 +206,16 @@ void vcpu_restore_fpu_nonlazy(struct vcpu *v, bool need_stts)
* above) we also need to restore full state, to prevent subsequently
* saving state belonging to another vCPU.
*/
- if ( v->arch.fully_eager_fpu || xstate_all(v) )
- {
- if ( cpu_has_xsave )
- fpu_xrstor(v, XSTATE_ALL);
- else
- fpu_fxrstor(v);
-
- v->fpu_initialised = 1;
- v->fpu_dirtied = 1;
-
- /* Xen doesn't need TS set, but the guest might. */
- need_stts = is_pv_vcpu(v) && (v->arch.pv.ctrlreg[0] & X86_CR0_TS);
- }
+ if ( cpu_has_xsave )
+ fpu_xrstor(v, XSTATE_ALL);
else
- {
- fpu_xrstor(v, XSTATE_NONLAZY);
- need_stts = true;
- }
+ fpu_fxrstor(v);
- maybe_stts:
- if ( need_stts )
+ v->fpu_initialised = 1;
+ v->fpu_dirtied = 1;
+
+ /* Xen doesn't need TS set, but the guest might. */
+ if ( is_pv_vcpu(v) && (v->arch.pv.ctrlreg[0] & X86_CR0_TS) )
stts();
}
@@ -273,8 +258,6 @@ void save_fpu_enable(void)
/* Initialize FPU's context save area */
int vcpu_init_fpu(struct vcpu *v)
{
- v->arch.fully_eager_fpu = true;
-
return xstate_alloc_save_area(v);
}
diff --git a/xen/arch/x86/include/asm/domain.h b/xen/arch/x86/include/asm/domain.h
index ad7f6adb2cb9..bd7b02085ef8 100644
--- a/xen/arch/x86/include/asm/domain.h
+++ b/xen/arch/x86/include/asm/domain.h
@@ -671,9 +671,6 @@ struct arch_vcpu
* and thus should be saved/restored. */
bool nonlazy_xstate_used;
- /* Restore all FPU state (lazy and non-lazy state) on context switch? */
- bool fully_eager_fpu;
-
struct vmce vmce;
struct paging_vcpu paging;
diff --git a/xen/arch/x86/include/asm/hvm/hvm.h b/xen/arch/x86/include/asm/hvm/hvm.h
index 7d9774df59fe..a9425c8cffe8 100644
--- a/xen/arch/x86/include/asm/hvm/hvm.h
+++ b/xen/arch/x86/include/asm/hvm/hvm.h
@@ -160,8 +160,6 @@ struct hvm_function_table {
void (*cpuid_policy_changed)(struct vcpu *v);
- void (*fpu_leave)(struct vcpu *v);
-
int (*get_guest_pat)(struct vcpu *v, uint64_t *gpat);
int (*set_guest_pat)(struct vcpu *v, uint64_t gpat);
diff --git a/xen/arch/x86/include/asm/i387.h b/xen/arch/x86/include/asm/i387.h
index da0c7e945f95..fe5e4419b6f4 100644
--- a/xen/arch/x86/include/asm/i387.h
+++ b/xen/arch/x86/include/asm/i387.h
@@ -27,7 +27,7 @@ struct ix87_env {
uint16_t fds, _res6;
};
-void vcpu_restore_fpu_nonlazy(struct vcpu *v, bool need_stts);
+void vcpu_restore_fpu(struct vcpu *v);
void vcpu_save_fpu(struct vcpu *v);
void save_fpu_enable(void);
int vcpu_init_fpu(struct vcpu *v);
diff --git a/xen/common/efi/runtime.c b/xen/common/efi/runtime.c
index ba27c62132d2..982e42e8f341 100644
--- a/xen/common/efi/runtime.c
+++ b/xen/common/efi/runtime.c
@@ -157,7 +157,7 @@ void efi_rs_leave(struct efi_rs_state *state)
irq_exit();
efi_rs_on_cpu = NR_CPUS;
spin_unlock(&efi_rs_lock);
- vcpu_restore_fpu_nonlazy(curr, true);
+ vcpu_restore_fpu(curr);
}
bool efi_rs_using_pgtables(void)
--
2.53.0
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |