[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Subject: [PATCH v1 2/8] x86/vmx: Remove lazy FPU support
Remove lazy FPU support from the VMX code since fully_eager_fpu is now
always true.
No functional change intended.
Signed-off-by: Ross Lagerwall <ross.lagerwall@xxxxxxxxxx>
---
xen/arch/x86/hvm/vmx/vmcs.c | 8 +--
xen/arch/x86/hvm/vmx/vmx.c | 70 +------------------------
xen/arch/x86/hvm/vmx/vvmx.c | 15 +-----
xen/arch/x86/include/asm/hvm/vmx/vmcs.h | 2 -
4 files changed, 5 insertions(+), 90 deletions(-)
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index c2e7f9aed39f..8e52ef4d497a 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1247,10 +1247,7 @@ static int construct_vmcs(struct vcpu *v)
__vmwrite(HOST_TR_SELECTOR, TSS_SELECTOR);
/* Host control registers. */
- v->arch.hvm.vmx.host_cr0 = read_cr0() & ~X86_CR0_TS;
- if ( !v->arch.fully_eager_fpu )
- v->arch.hvm.vmx.host_cr0 |= X86_CR0_TS;
- __vmwrite(HOST_CR0, v->arch.hvm.vmx.host_cr0);
+ __vmwrite(HOST_CR0, read_cr0());
__vmwrite(HOST_CR4, mmu_cr4_features);
if ( cpu_has_vmx_efer )
__vmwrite(HOST_EFER, read_efer());
@@ -1330,8 +1327,7 @@ static int construct_vmcs(struct vcpu *v)
__vmwrite(VMCS_LINK_POINTER, ~0UL);
v->arch.hvm.vmx.exception_bitmap = HVM_TRAP_MASK
- | (paging_mode_hap(d) ? 0 : (1U << X86_EXC_PF))
- | (v->arch.fully_eager_fpu ? 0 : (1U << X86_EXC_NM));
+ | (paging_mode_hap(d) ? 0 : (1U << X86_EXC_PF));
if ( cpu_has_vmx_notify_vm_exiting )
__vmwrite(NOTIFY_WINDOW, vm_notify_window);
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 82c55f49aea9..03daf2f52bf2 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -68,7 +68,6 @@ static void cf_check vmx_update_guest_cr(
struct vcpu *v, unsigned int cr, unsigned int flags);
static void cf_check vmx_update_guest_efer(struct vcpu *v);
static void cf_check vmx_wbinvd_intercept(void);
-static void cf_check vmx_fpu_dirty_intercept(void);
static int cf_check vmx_msr_read_intercept(
unsigned int msr, uint64_t *msr_content);
static int cf_check vmx_msr_write_intercept(
@@ -1130,41 +1129,6 @@ static int cf_check vmx_load_vmcs_ctxt(struct vcpu *v,
struct hvm_hw_cpu *ctxt)
return 0;
}
-static void vmx_fpu_enter(struct vcpu *v)
-{
- vcpu_restore_fpu_lazy(v);
- v->arch.hvm.vmx.exception_bitmap &= ~(1u << X86_EXC_NM);
- vmx_update_exception_bitmap(v);
- v->arch.hvm.vmx.host_cr0 &= ~X86_CR0_TS;
- __vmwrite(HOST_CR0, v->arch.hvm.vmx.host_cr0);
-}
-
-static void cf_check vmx_fpu_leave(struct vcpu *v)
-{
- ASSERT(!v->fpu_dirtied);
- ASSERT(read_cr0() & X86_CR0_TS);
-
- if ( !(v->arch.hvm.vmx.host_cr0 & X86_CR0_TS) )
- {
- v->arch.hvm.vmx.host_cr0 |= X86_CR0_TS;
- __vmwrite(HOST_CR0, v->arch.hvm.vmx.host_cr0);
- }
-
- /*
- * If the guest does not have TS enabled then we must cause and handle an
- * exception on first use of the FPU. If the guest *does* have TS enabled
- * then this is not necessary: no FPU activity can occur until the guest
- * clears CR0.TS, and we will initialise the FPU when that happens.
- */
- if ( !(v->arch.hvm.guest_cr[0] & X86_CR0_TS) )
- {
- v->arch.hvm.hw_cr[0] |= X86_CR0_TS;
- __vmwrite(GUEST_CR0, v->arch.hvm.hw_cr[0]);
- v->arch.hvm.vmx.exception_bitmap |= (1u << X86_EXC_NM);
- vmx_update_exception_bitmap(v);
- }
-}
-
static void cf_check vmx_ctxt_switch_from(struct vcpu *v)
{
/*
@@ -1187,8 +1151,6 @@ static void cf_check vmx_ctxt_switch_from(struct vcpu *v)
vmx_vmcs_reload(v);
}
- if ( !v->arch.fully_eager_fpu )
- vmx_fpu_leave(v);
vmx_save_guest_msrs(v);
vmx_restore_host_msrs();
vmx_save_dr(v);
@@ -1771,17 +1733,6 @@ static void cf_check vmx_update_guest_cr(
else
nvmx_set_cr_read_shadow(v, 0);
- if ( !(v->arch.hvm.guest_cr[0] & X86_CR0_TS) )
- {
- if ( v != current )
- {
- if ( !v->arch.fully_eager_fpu )
- hw_cr0_mask |= X86_CR0_TS;
- }
- else if ( v->arch.hvm.hw_cr[0] & X86_CR0_TS )
- vmx_fpu_enter(v);
- }
-
realmode = !(v->arch.hvm.guest_cr[0] & X86_CR0_PE);
if ( !vmx_unrestricted_guest(v) &&
@@ -2915,7 +2866,6 @@ static struct hvm_function_table __initdata_cf_clobber
vmx_function_table = {
.update_guest_cr = vmx_update_guest_cr,
.update_guest_efer = vmx_update_guest_efer,
.cpuid_policy_changed = vmx_cpuid_policy_changed,
- .fpu_leave = vmx_fpu_leave,
.set_guest_pat = vmx_set_guest_pat,
.get_guest_pat = vmx_get_guest_pat,
.set_tsc_offset = vmx_set_tsc_offset,
@@ -2927,7 +2877,6 @@ static struct hvm_function_table __initdata_cf_clobber
vmx_function_table = {
.cpu_up = vmx_cpu_up,
.cpu_down = vmx_cpu_down,
.wbinvd_intercept = vmx_wbinvd_intercept,
- .fpu_dirty_intercept = vmx_fpu_dirty_intercept,
.msr_read_intercept = vmx_msr_read_intercept,
.msr_write_intercept = vmx_msr_write_intercept,
.handle_cd = vmx_handle_cd,
@@ -3281,20 +3230,6 @@ void update_guest_eip(void)
hvm_inject_hw_exception(X86_EXC_DB, X86_EVENT_NO_EC);
}
-static void cf_check vmx_fpu_dirty_intercept(void)
-{
- struct vcpu *curr = current;
-
- vmx_fpu_enter(curr);
-
- /* Disable TS in guest CR0 unless the guest wants the exception too. */
- if ( !(curr->arch.hvm.guest_cr[0] & X86_CR0_TS) )
- {
- curr->arch.hvm.hw_cr[0] &= ~X86_CR0_TS;
- __vmwrite(GUEST_CR0, curr->arch.hvm.hw_cr[0]);
- }
-}
-
static void vmx_dr_access(unsigned long exit_qualification,
struct cpu_user_regs *regs)
{
@@ -4543,10 +4478,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs
*regs)
domain_pause_for_debugger();
}
break;
- case X86_EXC_NM:
- TRACE(TRC_HVM_TRAP, vector);
- vmx_fpu_dirty_intercept();
- break;
+
case X86_EXC_PF:
__vmread(EXIT_QUALIFICATION, &exit_qualification);
__vmread(VM_EXIT_INTR_ERROR_CODE, &ecode);
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 38952f06961e..e4cdfe55c18e 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1238,9 +1238,6 @@ static void virtual_vmentry(struct cpu_user_regs *regs)
regs->rsp = get_vvmcs(v, GUEST_RSP);
regs->rflags = get_vvmcs(v, GUEST_RFLAGS);
- /* updating host cr0 to sync TS bit */
- __vmwrite(HOST_CR0, v->arch.hvm.vmx.host_cr0);
-
/* Setup virtual ETP for L2 guest*/
if ( nestedhvm_paging_mode_hap(v) )
/* This will setup the initial np2m for the nested vCPU */
@@ -1468,9 +1465,6 @@ static void virtual_vmexit(struct cpu_user_regs *regs)
/* VM exit clears all bits except bit 1 */
regs->rflags = X86_EFLAGS_MBS;
- /* updating host cr0 to sync TS bit */
- __vmwrite(HOST_CR0, v->arch.hvm.vmx.host_cr0);
-
if ( cpu_has_vmx_virtual_intr_delivery )
nvmx_update_apicv(v);
@@ -2458,19 +2452,14 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
__vmread(VM_EXIT_INTR_INFO, &intr_info);
vector = intr_info & INTR_INFO_VECTOR_MASK;
/*
- * decided by L0 and L1 exception bitmap, if the vetor is set by
- * both, L0 has priority on #PF and #NM, L1 has priority on others
+ * decided by L0 and L1 exception bitmap, if the vector is set by
+ * both, L0 has priority on #PF, L1 has priority on others
*/
if ( vector == X86_EXC_PF )
{
if ( paging_mode_hap(v->domain) )
nvcpu->nv_vmexit_pending = 1;
}
- else if ( vector == X86_EXC_NM )
- {
- if ( v->fpu_dirtied )
- nvcpu->nv_vmexit_pending = 1;
- }
else if ( (intr_info & valid_mask) == valid_mask )
{
exec_bitmap = get_vvmcs(v, EXCEPTION_BITMAP);
diff --git a/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
b/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
index 879ec10cefd0..88bded5190c9 100644
--- a/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
+++ b/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
@@ -149,8 +149,6 @@ struct vmx_vcpu {
DECLARE_BITMAP(eoi_exit_bitmap, X86_IDT_VECTORS);
struct pi_desc pi_desc;
- unsigned long host_cr0;
-
/* Do we need to tolerate a spurious EPT_MISCONFIG VM exit? */
bool ept_spurious_misconfig;
--
2.53.0
Lists.xenproject.org is hosted with RackSpace, monitoring our [remainder of footer truncated in archive]