[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH v1 5/8] x86: Remove fully_eager_fpu


  • To: xen-devel@xxxxxxxxxxxxxxxxxxxx
  • From: Ross Lagerwall <ross.lagerwall@xxxxxxxxxx>
  • Date: Thu, 19 Mar 2026 13:29:21 +0000
  • Arc-authentication-results: i=1; mx.microsoft.com 1; spf=pass smtp.mailfrom=citrix.com; dmarc=pass action=none header.from=citrix.com; dkim=pass header.d=citrix.com; arc=none
  • Arc-message-signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com; s=arcselector10001; h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1; bh=B7cITq8S4SbzEQfFivGFMCvfmxslS6DmBYa6hzW1TBw=; b=HOhXpVtU8Jf1vEVxYbivCTOGWvGAQZZjQBAyRrVlqrW4RrYXOEgoDbcn9Zas3IF+e/CchhD3rfIMuB5OcvUsajo9EmMWEAkhYS0vCAyT493PcoHdPJ3hztWnbpTbj6SKt741uq+UXgZwFiAgV+0o0HxzWWFYYhFnp2I2VdXPzONhWg5Vu2lT2S1V4P0Ch1/gvhO5NJuPpAQupnoltoOs3FDGythl5J/x/nx3zPmI/xXExqfZUcJcKBJ3thkZYKfsVQs2SmRis4SkVihWD9cdHcU9+qoTrLIUamPKVSiK04bqO8vAvGwwrqpHmCHCiEXItNYmGMdEIDYpTJ3An8q0xw==
  • Arc-seal: i=1; a=rsa-sha256; s=arcselector10001; d=microsoft.com; cv=none; b=ExD+OMXRCvsZguSFISVx2VoyHSRdpClxcaRMulIYjyRZR7x3P99OWvn9cp2IaAR9uPlKFCQF3e6JYkVFWooz3HBQbrRJwc9S8mi+HrZzYsZkTbqTLRavXj1YM3hHztzKimo3U/W5sEe6DsoQGgirN/+voJeS0jlj6NQLtu2MnVC+15JrVEtDz2bBL8BE9n3+s0hYReJH463nMy6t7J6bSluyYOcSjwxIU/SVI67fw4hXSxzWOSeTlit2cqaqSm1gy9TJO/VcjIg/Oumed3lPIIxypaUldBJkMw3foQZcA0S1WL2KfqGK2iQlLu7hWJba1HVDFlZcSRThF4t+G//UIA==
  • Authentication-results: dkim=none (message not signed) header.d=none;dmarc=none action=none header.from=citrix.com;
  • Cc: Jan Beulich <jbeulich@xxxxxxxx>, Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>, "Daniel P. Smith" <dpsmith@xxxxxxxxxxxxxxxxxxxx>, Marek Marczykowski-Górecki <marmarek@xxxxxxxxxxxxxxxxxxxxxx>, Ross Lagerwall <ross.lagerwall@xxxxxxxxxx>
  • Delivery-date: Thu, 19 Mar 2026 13:29:51 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Since fully_eager_fpu is always true, remove it and adjust the code
accordingly. At the same time, rename vcpu_restore_fpu_nonlazy since it
always does the full restore.

No functional change intended.

Signed-off-by: Ross Lagerwall <ross.lagerwall@xxxxxxxxxx>
---
 xen/arch/x86/domain.c              |  2 +-
 xen/arch/x86/hvm/emulate.c         | 18 +--------------
 xen/arch/x86/i387.c                | 35 ++++++++----------------------
 xen/arch/x86/include/asm/domain.h  |  3 ---
 xen/arch/x86/include/asm/hvm/hvm.h |  2 --
 xen/arch/x86/include/asm/i387.h    |  2 +-
 xen/common/efi/runtime.c           |  2 +-
 7 files changed, 13 insertions(+), 51 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 4664264b2f5d..a68b7a583294 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -2125,7 +2125,7 @@ static void __context_switch(void)
             if ( cpu_has_xsaves && is_hvm_vcpu(n) )
                 set_msr_xss(n->arch.msrs->xss.raw);
         }
-        vcpu_restore_fpu_nonlazy(n, false);
+        vcpu_restore_fpu(n);
         nd->arch.ctxt_switch->to(n);
     }
 
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 86b1f7535668..2daea084f15c 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -2630,23 +2630,7 @@ static void cf_check hvmemul_put_fpu(
     }
 
     if ( backout == X86EMUL_FPU_fpu )
-    {
-        /*
-         * To back out changes to the register file
-         * - in fully eager mode, restore original state immediately,
-         * - in lazy mode, simply adjust state such that upon next FPU insn
-         *   use by the guest we'll reload the state saved (or freshly loaded)
-         *   by hvmemul_get_fpu().
-         */
-        if ( curr->arch.fully_eager_fpu )
-            vcpu_restore_fpu_nonlazy(curr, false);
-        else
-        {
-            curr->fpu_dirtied = false;
-            stts();
-            alternative_vcall(hvm_funcs.fpu_leave, curr);
-        }
-    }
+        vcpu_restore_fpu(curr);
 }
 
 static int cf_check hvmemul_tlb_op(
diff --git a/xen/arch/x86/i387.c b/xen/arch/x86/i387.c
index 7da731865f73..88018397b1ad 100644
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -194,12 +194,8 @@ static inline void fpu_fxsave(struct vcpu *v)
 /*       VCPU FPU Functions    */
 /*******************************/
 /* Restore FPU state whenever VCPU is schduled in. */
-void vcpu_restore_fpu_nonlazy(struct vcpu *v, bool need_stts)
+void vcpu_restore_fpu(struct vcpu *v)
 {
-    /* Restore nonlazy extended state (i.e. parts not tracked by CR0.TS). */
-    if ( !v->arch.fully_eager_fpu && !v->arch.nonlazy_xstate_used )
-        goto maybe_stts;
-
     ASSERT(!is_idle_vcpu(v));
 
     /* Avoid recursion */
@@ -210,27 +206,16 @@ void vcpu_restore_fpu_nonlazy(struct vcpu *v, bool need_stts)
      * above) we also need to restore full state, to prevent subsequently
      * saving state belonging to another vCPU.
      */
-    if ( v->arch.fully_eager_fpu || xstate_all(v) )
-    {
-        if ( cpu_has_xsave )
-            fpu_xrstor(v, XSTATE_ALL);
-        else
-            fpu_fxrstor(v);
-
-        v->fpu_initialised = 1;
-        v->fpu_dirtied = 1;
-
-        /* Xen doesn't need TS set, but the guest might. */
-        need_stts = is_pv_vcpu(v) && (v->arch.pv.ctrlreg[0] & X86_CR0_TS);
-    }
+    if ( cpu_has_xsave )
+        fpu_xrstor(v, XSTATE_ALL);
     else
-    {
-        fpu_xrstor(v, XSTATE_NONLAZY);
-        need_stts = true;
-    }
+        fpu_fxrstor(v);
 
- maybe_stts:
-    if ( need_stts )
+    v->fpu_initialised = 1;
+    v->fpu_dirtied = 1;
+
+    /* Xen doesn't need TS set, but the guest might. */
+    if ( is_pv_vcpu(v) && (v->arch.pv.ctrlreg[0] & X86_CR0_TS) )
         stts();
 }
 
@@ -273,8 +258,6 @@ void save_fpu_enable(void)
 /* Initialize FPU's context save area */
 int vcpu_init_fpu(struct vcpu *v)
 {
-    v->arch.fully_eager_fpu = true;
-
     return xstate_alloc_save_area(v);
 }
 
diff --git a/xen/arch/x86/include/asm/domain.h b/xen/arch/x86/include/asm/domain.h
index ad7f6adb2cb9..bd7b02085ef8 100644
--- a/xen/arch/x86/include/asm/domain.h
+++ b/xen/arch/x86/include/asm/domain.h
@@ -671,9 +671,6 @@ struct arch_vcpu
      * and thus should be saved/restored. */
     bool nonlazy_xstate_used;
 
-    /* Restore all FPU state (lazy and non-lazy state) on context switch? */
-    bool fully_eager_fpu;
-
     struct vmce vmce;
 
     struct paging_vcpu paging;
diff --git a/xen/arch/x86/include/asm/hvm/hvm.h b/xen/arch/x86/include/asm/hvm/hvm.h
index 7d9774df59fe..a9425c8cffe8 100644
--- a/xen/arch/x86/include/asm/hvm/hvm.h
+++ b/xen/arch/x86/include/asm/hvm/hvm.h
@@ -160,8 +160,6 @@ struct hvm_function_table {
 
     void (*cpuid_policy_changed)(struct vcpu *v);
 
-    void (*fpu_leave)(struct vcpu *v);
-
     int  (*get_guest_pat)(struct vcpu *v, uint64_t *gpat);
     int  (*set_guest_pat)(struct vcpu *v, uint64_t gpat);
 
diff --git a/xen/arch/x86/include/asm/i387.h b/xen/arch/x86/include/asm/i387.h
index da0c7e945f95..fe5e4419b6f4 100644
--- a/xen/arch/x86/include/asm/i387.h
+++ b/xen/arch/x86/include/asm/i387.h
@@ -27,7 +27,7 @@ struct ix87_env {
     uint16_t fds, _res6;
 };
 
-void vcpu_restore_fpu_nonlazy(struct vcpu *v, bool need_stts);
+void vcpu_restore_fpu(struct vcpu *v);
 void vcpu_save_fpu(struct vcpu *v);
 void save_fpu_enable(void);
 int vcpu_init_fpu(struct vcpu *v);
diff --git a/xen/common/efi/runtime.c b/xen/common/efi/runtime.c
index ba27c62132d2..982e42e8f341 100644
--- a/xen/common/efi/runtime.c
+++ b/xen/common/efi/runtime.c
@@ -157,7 +157,7 @@ void efi_rs_leave(struct efi_rs_state *state)
     irq_exit();
     efi_rs_on_cpu = NR_CPUS;
     spin_unlock(&efi_rs_lock);
-    vcpu_restore_fpu_nonlazy(curr, true);
+    vcpu_restore_fpu(curr);
 }
 
 bool efi_rs_using_pgtables(void)
-- 
2.53.0




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.