# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 5e3827f7a93a96a1d043cb0e523a0f821830af94
# Parent e3af1912794bcd4d6266e969fba3029e8d3a20ab
[HVM][VMX] Interrupts must be kept disabled when entering Xen for
external interrupt processing. Remove the code that immediately
re-enabled interrupt delivery on VMEXIT.
Signed-off-by: Steven Smith <sos22@xxxxxxxxx>
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
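
For reference, the resulting control flow in vmx_vmexit_handler() looks
roughly like the sketch below. This is an illustrative, simplified
condensation of the hunks that follow, not part of the patch itself:

    /* Sketch only: simplified from the vmx.c hunk below. */
    asmlinkage void vmx_vmexit_handler(struct cpu_user_regs *regs)
    {
        unsigned long exit_reason;

        /* Interrupts are still disabled here: the assembly stubs no
         * longer force EFLAGS.IF on via HVM_MONITOR_EFLAGS/RFLAGS. */
        BUG_ON(__vmread(VM_EXIT_REASON, &exit_reason));

        /* Re-enable interrupts only when the exit was not caused by an
         * external interrupt, so the pending interrupt is delivered to
         * Xen with IRQs still masked and handled exactly once. */
        if ( exit_reason != EXIT_REASON_EXTERNAL_INTERRUPT )
            local_irq_enable();

        /* ... dispatch on exit_reason as before ... */
    }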
---
xen/arch/x86/hvm/vmx/vmx.c | 22 ++++++-----
xen/arch/x86/hvm/vmx/x86_32/exits.S | 35 ++++++++---------
xen/arch/x86/hvm/vmx/x86_64/exits.S | 71 +++++++++++++++++-------------------
3 files changed, 62 insertions(+), 66 deletions(-)
diff -r e3af1912794b -r 5e3827f7a93a xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Mon Jun 05 17:03:19 2006 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c Mon Jun 05 17:17:27 2006 +0100
@@ -1970,7 +1970,6 @@ static inline void vmx_vmexit_do_extint(
__hvm_bug(regs);
vector &= INTR_INFO_VECTOR_MASK;
- local_irq_disable();
TRACE_VMEXIT(1,vector);
switch(vector) {
@@ -2065,30 +2064,33 @@ asmlinkage void vmx_vmexit_handler(struc
struct vcpu *v = current;
int error;
- if ((error = __vmread(VM_EXIT_REASON, &exit_reason)))
- __hvm_bug(&regs);
+ error = __vmread(VM_EXIT_REASON, &exit_reason);
+ BUG_ON(error);
perfc_incra(vmexits, exit_reason);
- /* don't bother H/W interrutps */
- if (exit_reason != EXIT_REASON_EXTERNAL_INTERRUPT &&
- exit_reason != EXIT_REASON_VMCALL &&
- exit_reason != EXIT_REASON_IO_INSTRUCTION)
+ if ( (exit_reason != EXIT_REASON_EXTERNAL_INTERRUPT) &&
+ (exit_reason != EXIT_REASON_VMCALL) &&
+ (exit_reason != EXIT_REASON_IO_INSTRUCTION) )
HVM_DBG_LOG(DBG_LEVEL_0, "exit reason = %x", exit_reason);
- if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
+ if ( exit_reason != EXIT_REASON_EXTERNAL_INTERRUPT )
+ local_irq_enable();
+
+ if ( unlikely(exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) )
+ {
printk("Failed vm entry (reason 0x%x)\n", exit_reason);
printk("*********** VMCS Area **************\n");
vmcs_dump_vcpu();
printk("**************************************\n");
domain_crash_synchronous();
- return;
}
__vmread(GUEST_RIP, &eip);
TRACE_VMEXIT(0,exit_reason);
- switch (exit_reason) {
+ switch ( exit_reason )
+ {
case EXIT_REASON_EXCEPTION_NMI:
{
/*
diff -r e3af1912794b -r 5e3827f7a93a xen/arch/x86/hvm/vmx/x86_32/exits.S
--- a/xen/arch/x86/hvm/vmx/x86_32/exits.S Mon Jun 05 17:03:19 2006 +0100
+++ b/xen/arch/x86/hvm/vmx/x86_32/exits.S Mon Jun 05 17:17:27 2006 +0100
@@ -55,29 +55,26 @@
* domain pointer, DS, ES, FS, GS. Therefore, we effectively skip 6 registers.
*/
-#define HVM_MONITOR_EFLAGS 0x202 /* IF on */
#define NR_SKIPPED_REGS 6 /* See the above explanation */
-#define HVM_SAVE_ALL_NOSEGREGS \
- pushl $HVM_MONITOR_EFLAGS; \
- popf; \
- subl $(NR_SKIPPED_REGS*4), %esp; \
+#define HVM_SAVE_ALL_NOSEGREGS \
+ subl $(NR_SKIPPED_REGS*4), %esp; \
movl $0, 0xc(%esp); /* XXX why do we need to force eflags==0 ?? */ \
- pushl %eax; \
- pushl %ebp; \
- pushl %edi; \
- pushl %esi; \
- pushl %edx; \
- pushl %ecx; \
+ pushl %eax; \
+ pushl %ebp; \
+ pushl %edi; \
+ pushl %esi; \
+ pushl %edx; \
+ pushl %ecx; \
pushl %ebx;
-#define HVM_RESTORE_ALL_NOSEGREGS \
- popl %ebx; \
- popl %ecx; \
- popl %edx; \
- popl %esi; \
- popl %edi; \
- popl %ebp; \
- popl %eax; \
+#define HVM_RESTORE_ALL_NOSEGREGS \
+ popl %ebx; \
+ popl %ecx; \
+ popl %edx; \
+ popl %esi; \
+ popl %edi; \
+ popl %ebp; \
+ popl %eax; \
addl $(NR_SKIPPED_REGS*4), %esp
ALIGN
diff -r e3af1912794b -r 5e3827f7a93a xen/arch/x86/hvm/vmx/x86_64/exits.S
--- a/xen/arch/x86/hvm/vmx/x86_64/exits.S Mon Jun 05 17:03:19 2006 +0100
+++ b/xen/arch/x86/hvm/vmx/x86_64/exits.S Mon Jun 05 17:17:27 2006 +0100
@@ -51,45 +51,42 @@
* (2/1) u32 entry_vector;
* (1/1) u32 error_code;
*/
-#define HVM_MONITOR_RFLAGS 0x202 /* IF on */
#define NR_SKIPPED_REGS 6 /* See the above explanation */
-#define HVM_SAVE_ALL_NOSEGREGS \
- pushq $HVM_MONITOR_RFLAGS; \
- popfq; \
- subq $(NR_SKIPPED_REGS*8), %rsp; \
- pushq %rdi; \
- pushq %rsi; \
- pushq %rdx; \
- pushq %rcx; \
- pushq %rax; \
- pushq %r8; \
- pushq %r9; \
- pushq %r10; \
- pushq %r11; \
- pushq %rbx; \
- pushq %rbp; \
- pushq %r12; \
- pushq %r13; \
- pushq %r14; \
- pushq %r15; \
+#define HVM_SAVE_ALL_NOSEGREGS \
+ subq $(NR_SKIPPED_REGS*8), %rsp; \
+ pushq %rdi; \
+ pushq %rsi; \
+ pushq %rdx; \
+ pushq %rcx; \
+ pushq %rax; \
+ pushq %r8; \
+ pushq %r9; \
+ pushq %r10; \
+ pushq %r11; \
+ pushq %rbx; \
+ pushq %rbp; \
+ pushq %r12; \
+ pushq %r13; \
+ pushq %r14; \
+ pushq %r15;
-#define HVM_RESTORE_ALL_NOSEGREGS \
- popq %r15; \
- popq %r14; \
- popq %r13; \
- popq %r12; \
- popq %rbp; \
- popq %rbx; \
- popq %r11; \
- popq %r10; \
- popq %r9; \
- popq %r8; \
- popq %rax; \
- popq %rcx; \
- popq %rdx; \
- popq %rsi; \
- popq %rdi; \
- addq $(NR_SKIPPED_REGS*8), %rsp; \
+#define HVM_RESTORE_ALL_NOSEGREGS \
+ popq %r15; \
+ popq %r14; \
+ popq %r13; \
+ popq %r12; \
+ popq %rbp; \
+ popq %rbx; \
+ popq %r11; \
+ popq %r10; \
+ popq %r9; \
+ popq %r8; \
+ popq %rax; \
+ popq %rcx; \
+ popq %rdx; \
+ popq %rsi; \
+ popq %rdi; \
+ addq $(NR_SKIPPED_REGS*8), %rsp;
ENTRY(vmx_asm_vmexit_handler)
/* selectors are restored/saved by VMX */