ChangeSet 1.1445, 2005/05/11 21:20:57+01:00, cl349@xxxxxxxxxxxxxxxxxxxx
vmx.h, i387.h, vmx_vmcs.c, vmx_io.c, vmx.c, traps.c:
Implement an eager save/lazy restore algorithm for dealing with the
FP state of a VMX guest.
Signed-off-by: Xin B Li <xin.b.li@xxxxxxxxx>
Signed-off-by: Asit Mallick <asit.k.mallick@xxxxxxxxx>
Signed-off-by: Arun Sharma <arun.sharma@xxxxxxxxx>
Signed-off-by: Christian Limpach <Christian.Limpach@xxxxxxxxxxxx>
arch/x86/traps.c | 8 +-------
arch/x86/vmx.c | 23 +++++++++++++++++++++++
arch/x86/vmx_io.c | 1 +
arch/x86/vmx_vmcs.c | 3 +++
include/asm-x86/i387.h | 12 ++++++++++++
include/asm-x86/vmx.h | 16 ++++++++++++++++
6 files changed, 56 insertions(+), 7 deletions(-)
diff -Nru a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c 2005-05-11 17:05:19 -04:00
+++ b/xen/arch/x86/traps.c 2005-05-11 17:05:19 -04:00
@@ -919,13 +919,7 @@
/* Prevent recursion. */
clts();
- if ( !test_and_set_bit(EDF_USEDFPU, &current->flags) )
- {
- if ( test_bit(EDF_DONEFPUINIT, &current->flags) )
- restore_fpu(current);
- else
- init_fpu();
- }
+ setup_fpu(current);
if ( test_and_clear_bit(EDF_GUEST_STTS, &current->flags) )
{
diff -Nru a/xen/arch/x86/vmx.c b/xen/arch/x86/vmx.c
--- a/xen/arch/x86/vmx.c 2005-05-11 17:05:19 -04:00
+++ b/xen/arch/x86/vmx.c 2005-05-11 17:05:19 -04:00
@@ -154,6 +154,21 @@
return result;
}
+static void vmx_do_no_device_fault()
+{
+ unsigned long cr0;
+
+ clts();
+ setup_fpu(current);
+ __vmread(CR0_READ_SHADOW, &cr0);
+ if (!(cr0 & X86_CR0_TS)) {
+ __vmread(GUEST_CR0, &cr0);
+ cr0 &= ~X86_CR0_TS;
+ __vmwrite(GUEST_CR0, cr0);
+ }
+ __vmwrite(EXCEPTION_BITMAP, MONITOR_DEFAULT_EXCEPTION_BITMAP);
+}
+
static void vmx_do_general_protection_fault(struct cpu_user_regs *regs)
{
unsigned long eip, error_code;
@@ -894,6 +909,9 @@
mov_from_cr(cr, gp, regs);
break;
case TYPE_CLTS:
+ clts();
+ setup_fpu(current);
+
__vmread(GUEST_CR0, &value);
value &= ~X86_CR0_TS; /* clear TS */
__vmwrite(GUEST_CR0, value);
@@ -1093,6 +1111,11 @@
break;
}
#endif
+ case TRAP_no_device:
+ {
+ vmx_do_no_device_fault();
+ break;
+ }
case TRAP_gp_fault:
{
vmx_do_general_protection_fault(&regs);
diff -Nru a/xen/arch/x86/vmx_io.c b/xen/arch/x86/vmx_io.c
--- a/xen/arch/x86/vmx_io.c 2005-05-11 17:05:19 -04:00
+++ b/xen/arch/x86/vmx_io.c 2005-05-11 17:05:19 -04:00
@@ -429,6 +429,7 @@
void vmx_do_resume(struct exec_domain *d)
{
+ vmx_stts();
if ( test_bit(VMX_CPU_STATE_PG_ENABLED, &d->arch.arch_vmx.cpu_state) )
__vmwrite(GUEST_CR3, pagetable_val(d->arch.shadow_table));
else
diff -Nru a/xen/arch/x86/vmx_vmcs.c b/xen/arch/x86/vmx_vmcs.c
--- a/xen/arch/x86/vmx_vmcs.c 2005-05-11 17:05:19 -04:00
+++ b/xen/arch/x86/vmx_vmcs.c 2005-05-11 17:05:19 -04:00
@@ -164,6 +164,9 @@
struct pfn_info *page;
struct cpu_user_regs *regs = get_cpu_user_regs();
+ vmx_stts();
+ set_bit(EDF_GUEST_STTS, &ed->flags);
+
cpu = smp_processor_id();
page = (struct pfn_info *) alloc_domheap_page(NULL);
diff -Nru a/xen/include/asm-x86/i387.h b/xen/include/asm-x86/i387.h
--- a/xen/include/asm-x86/i387.h 2005-05-11 17:05:19 -04:00
+++ b/xen/include/asm-x86/i387.h 2005-05-11 17:05:19 -04:00
@@ -28,4 +28,16 @@
__asm__ __volatile__ ( "ldmxcsr %0" : : "m" (__mxcsr) ); \
} while ( 0 )
+/* Make domain the FPU owner */
+static inline void setup_fpu(struct exec_domain *ed)
+{
+ if ( !test_and_set_bit(EDF_USEDFPU, &ed->flags) )
+ {
+ if ( test_bit(EDF_DONEFPUINIT, &ed->flags) )
+ restore_fpu(ed);
+ else
+ init_fpu();
+ }
+}
+
#endif /* __ASM_I386_I387_H */
diff -Nru a/xen/include/asm-x86/vmx.h b/xen/include/asm-x86/vmx.h
--- a/xen/include/asm-x86/vmx.h 2005-05-11 17:05:19 -04:00
+++ b/xen/include/asm-x86/vmx.h 2005-05-11 17:05:19 -04:00
@@ -24,6 +24,7 @@
#include <asm/regs.h>
#include <asm/processor.h>
#include <asm/vmx_vmcs.h>
+#include <asm/i387.h>
extern void vmx_asm_vmexit_handler(struct cpu_user_regs);
extern void vmx_asm_do_resume(void);
@@ -251,4 +252,19 @@
return 0;
}
+/* Make sure that xen intercepts any FP accesses from current */
+static inline void vmx_stts()
+{
+ unsigned long cr0;
+
+ __vmread(GUEST_CR0, &cr0);
+ if (!(cr0 & X86_CR0_TS))
+ __vmwrite(GUEST_CR0, cr0 | X86_CR0_TS);
+
+ __vmread(CR0_READ_SHADOW, &cr0);
+ if (!(cr0 & X86_CR0_TS))
+ __vmwrite(EXCEPTION_BITMAP, MONITOR_DEFAULT_EXCEPTION_BITMAP |
+ EXCEPTION_BITMAP_NM);
+}
+
#endif /* __ASM_X86_VMX_H__ */
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|