
[Xen-devel] [PATCH RFC] x86/EFI: allow FPU/XMM use in runtime service functions



UEFI spec update 2.4B introduced a requirement to enter runtime service
functions with CR0.TS (and CR0.EM) clear, thus making viable the
previously stated permission for these functions to use some of the XMM
registers. Enforce this requirement (along with the related ones on the
FPU control word and MXCSR) by going through a full FPU save cycle (if
the FPU was dirty) in efi_rs_enter(), along with loading the specified
default values into those two registers.
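
For reference, a minimal sketch (purely illustrative, not part of the
patch; the wrapper name and result handling are invented) of how a
runtime service wrapper in runtime.c brackets a firmware call, and
where the new FPU handling takes effect:

/* Illustrative only: efi_rs_enter()/efi_rs_leave()/efi_rs exist in
 * runtime.c; the rest of this wrapper is made up for the example. */
static unsigned long example_efi_call(void)
{
    EFI_TIME time;
    unsigned long ok = 0;
    unsigned long cr3 = efi_rs_enter(); /* saves FPU state if dirty, clears
                                           CR0.TS, loads FCW/MXCSR defaults */

    if ( efi_rs->GetTime(&time, NULL) == EFI_SUCCESS )
        ok = 1;                         /* firmware may freely use XMM here */

    efi_rs_leave(cr3);                  /* restores CR3 and sets CR0.TS again
                                           via stts(), re-arming lazy restore */
    return ok;
}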

Note that the UEFI spec mandates that extension registers other than
the XMM ones (for our purposes, all those that get restored eagerly) be
preserved across runtime function calls, hence there is nothing we need
to restore in efi_rs_leave() (they do get saved, but only for
simplicity's sake).

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
RFC because the patch is yet to be tested on a system where XMM
registers are actually being used by runtime service calls.
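
To aid such testing, a helper along these lines could temporarily be
added and called from efi_rs_enter() right after the two loads (again
purely illustrative, not part of the patch):

/* Illustrative debugging aid: read back the just-loaded values with the
 * store counterparts of fldcw/ldmxcsr and warn on a mismatch. */
static void check_fpu_env(void)
{
    u16 fcw;
    u32 mxcsr;

    asm volatile ( "fnstcw %0" : "=m" (fcw) );
    asm volatile ( "stmxcsr %0" : "=m" (mxcsr) );

    if ( fcw != FCW_DEFAULT || mxcsr != MXCSR_DEFAULT )
        printk(XENLOG_WARNING "EFI: unexpected FCW %#x / MXCSR %#x\n",
               fcw, mxcsr);
}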

--- a/xen/arch/x86/efi/runtime.c
+++ b/xen/arch/x86/efi/runtime.c
@@ -10,6 +10,8 @@ DEFINE_XEN_GUEST_HANDLE(CHAR16);
 
 #ifndef COMPAT
 
+# include <asm/i387.h>
+# include <asm/xstate.h>
 # include <public/platform.h>
 
 const bool_t efi_enabled = 1;
@@ -45,8 +47,14 @@ const struct efi_pci_rom *__read_mostly 
 
 unsigned long efi_rs_enter(void)
 {
+    static const u16 fcw = FCW_DEFAULT;
+    static const u32 mxcsr = MXCSR_DEFAULT;
     unsigned long cr3 = read_cr3();
 
+    save_fpu_enable();
+    asm volatile ( "fldcw %0" :: "m" (fcw) );
+    asm volatile ( "ldmxcsr %0" :: "m" (mxcsr) );
+
     spin_lock(&efi_rs_lock);
 
     /* prevent fixup_page_fault() from doing anything */
@@ -82,6 +90,7 @@ void efi_rs_leave(unsigned long cr3)
     }
     irq_exit();
     spin_unlock(&efi_rs_lock);
+    stts();
 }
 
 unsigned long efi_get_time(void)
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -266,10 +266,10 @@ void vcpu_restore_fpu_lazy(struct vcpu *
  * On each context switch, save the necessary FPU info of VCPU being switch 
  * out. It dispatches saving operation based on CPU's capability.
  */
-void vcpu_save_fpu(struct vcpu *v)
+static bool_t _vcpu_save_fpu(struct vcpu *v)
 {
     if ( !v->fpu_dirtied && !v->arch.nonlazy_xstate_used )
-        return;
+        return 0;
 
     ASSERT(!is_idle_vcpu(v));
 
@@ -284,9 +284,22 @@ void vcpu_save_fpu(struct vcpu *v)
         fpu_fsave(v);
 
     v->fpu_dirtied = 0;
+
+    return 1;
+}
+
+void vcpu_save_fpu(struct vcpu *v)
+{
+    _vcpu_save_fpu(v);
     stts();
 }
 
+void save_fpu_enable(void)
+{
+    if ( !_vcpu_save_fpu(current) )
+        clts();
+}
+
 /* Initialize FPU's context save area */
 int vcpu_init_fpu(struct vcpu *v)
 {
--- a/xen/include/asm-x86/i387.h
+++ b/xen/include/asm-x86/i387.h
@@ -38,6 +38,7 @@ struct ix87_state {
 void vcpu_restore_fpu_eager(struct vcpu *v);
 void vcpu_restore_fpu_lazy(struct vcpu *v);
 void vcpu_save_fpu(struct vcpu *v);
+void save_fpu_enable(void);
 
 int vcpu_init_fpu(struct vcpu *v);
 void vcpu_destroy_fpu(struct vcpu *v);


