[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH] x86: machine check exception handling



Properly handle MCE (connecting the existing, but so far unused vendor
specific handlers). HVM guests don't own CR4.MCE (and hence can't
suppress the exception) anymore, preventing silent machine shutdown.

This patch won't apply or work without the patch removing i386's NMI
deferral.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>

Index: 2007-06-18/xen/arch/x86/cpu/mcheck/k7.c
===================================================================
--- 2007-06-18.orig/xen/arch/x86/cpu/mcheck/k7.c        2007-06-18 
11:48:56.000000000 +0200
+++ 2007-06-18/xen/arch/x86/cpu/mcheck/k7.c     2007-06-18 11:52:48.000000000 
+0200
@@ -16,7 +16,7 @@
 #include "mce.h"
 
 /* Machine Check Handler For AMD Athlon/Duron */
-static fastcall void k7_machine_check(struct cpu_user_regs * regs, long 
error_code)
+static fastcall void k7_machine_check(struct cpu_user_regs * regs)
 {
        int recover=1;
        u32 alow, ahigh, high, low;
Index: 2007-06-18/xen/arch/x86/cpu/mcheck/mce.c
===================================================================
--- 2007-06-18.orig/xen/arch/x86/cpu/mcheck/mce.c       2007-06-18 
11:48:56.000000000 +0200
+++ 2007-06-18/xen/arch/x86/cpu/mcheck/mce.c    2007-06-18 11:52:48.000000000 
+0200
@@ -18,13 +18,13 @@ int mce_disabled = 0;
 int nr_mce_banks;
 
 /* Handle unconfigured int18 (should never happen) */
-static fastcall void unexpected_machine_check(struct cpu_user_regs * regs, 
long error_code)
+static fastcall void unexpected_machine_check(struct cpu_user_regs * regs)
 {      
        printk(KERN_ERR "CPU#%d: Unexpected int18 (Machine Check).\n", 
smp_processor_id());
 }
 
 /* Call the installed machine check handler for this CPU setup. */
-void fastcall (*machine_check_vector)(struct cpu_user_regs *, long error_code) 
= unexpected_machine_check;
+void fastcall (*machine_check_vector)(struct cpu_user_regs *) = 
unexpected_machine_check;
 
 /* This has to be run for each processor */
 void mcheck_init(struct cpuinfo_x86 *c)
Index: 2007-06-18/xen/arch/x86/cpu/mcheck/mce.h
===================================================================
--- 2007-06-18.orig/xen/arch/x86/cpu/mcheck/mce.h       2007-06-18 
11:48:56.000000000 +0200
+++ 2007-06-18/xen/arch/x86/cpu/mcheck/mce.h    2007-06-18 11:52:48.000000000 
+0200
@@ -1,4 +1,5 @@
 #include <xen/init.h>
+#include <asm/processor.h>
 
 void amd_mcheck_init(struct cpuinfo_x86 *c);
 void intel_p4_mcheck_init(struct cpuinfo_x86 *c);
@@ -6,9 +7,6 @@ void intel_p5_mcheck_init(struct cpuinfo
 void intel_p6_mcheck_init(struct cpuinfo_x86 *c);
 void winchip_mcheck_init(struct cpuinfo_x86 *c);
 
-/* Call the installed machine check handler for this CPU setup. */
-extern fastcall void (*machine_check_vector)(struct cpu_user_regs *, long 
error_code);
-
 extern int mce_disabled __initdata;
 extern int nr_mce_banks;
 
Index: 2007-06-18/xen/arch/x86/cpu/mcheck/p4.c
===================================================================
--- 2007-06-18.orig/xen/arch/x86/cpu/mcheck/p4.c        2007-06-18 
11:48:56.000000000 +0200
+++ 2007-06-18/xen/arch/x86/cpu/mcheck/p4.c     2007-06-18 11:52:48.000000000 
+0200
@@ -158,7 +158,7 @@ done:
        return mce_num_extended_msrs;
 }
 
-static fastcall void intel_machine_check(struct cpu_user_regs * regs, long 
error_code)
+static fastcall void intel_machine_check(struct cpu_user_regs * regs)
 {
        int recover=1;
        u32 alow, ahigh, high, low;
Index: 2007-06-18/xen/arch/x86/cpu/mcheck/p5.c
===================================================================
--- 2007-06-18.orig/xen/arch/x86/cpu/mcheck/p5.c        2007-06-18 
11:48:56.000000000 +0200
+++ 2007-06-18/xen/arch/x86/cpu/mcheck/p5.c     2007-06-18 11:52:48.000000000 
+0200
@@ -15,7 +15,7 @@
 #include "mce.h"
 
 /* Machine check handler for Pentium class Intel */
-static fastcall void pentium_machine_check(struct cpu_user_regs * regs, long 
error_code)
+static fastcall void pentium_machine_check(struct cpu_user_regs * regs)
 {
        u32 loaddr, hi, lotype;
        rdmsr(MSR_IA32_P5_MC_ADDR, loaddr, hi);
Index: 2007-06-18/xen/arch/x86/cpu/mcheck/p6.c
===================================================================
--- 2007-06-18.orig/xen/arch/x86/cpu/mcheck/p6.c        2007-06-18 
11:48:56.000000000 +0200
+++ 2007-06-18/xen/arch/x86/cpu/mcheck/p6.c     2007-06-18 11:52:48.000000000 
+0200
@@ -15,7 +15,7 @@
 #include "mce.h"
 
 /* Machine Check Handler For PII/PIII */
-static fastcall void intel_machine_check(struct cpu_user_regs * regs, long 
error_code)
+static fastcall void intel_machine_check(struct cpu_user_regs * regs)
 {
        int recover=1;
        u32 alow, ahigh, high, low;
Index: 2007-06-18/xen/arch/x86/cpu/mcheck/winchip.c
===================================================================
--- 2007-06-18.orig/xen/arch/x86/cpu/mcheck/winchip.c   2007-06-18 
11:48:56.000000000 +0200
+++ 2007-06-18/xen/arch/x86/cpu/mcheck/winchip.c        2007-06-18 
11:52:48.000000000 +0200
@@ -16,7 +16,7 @@
 #include "mce.h"
 
 /* Machine check handler for WinChip C6 */
-static fastcall void winchip_machine_check(struct cpu_user_regs * regs, long 
error_code)
+static fastcall void winchip_machine_check(struct cpu_user_regs * regs)
 {
        printk(KERN_EMERG "CPU0: Machine Check Exception.\n");
        add_taint(TAINT_MACHINE_CHECK);
Index: 2007-06-18/xen/arch/x86/hvm/svm/svm.c
===================================================================
--- 2007-06-18.orig/xen/arch/x86/hvm/svm/svm.c  2007-06-18 11:48:56.000000000 
+0200
+++ 2007-06-18/xen/arch/x86/hvm/svm/svm.c       2007-06-18 11:52:49.000000000 
+0200
@@ -409,7 +409,7 @@ int svm_vmcb_restore(struct vcpu *v, str
     }
 
  skip_cr3:
-    vmcb->cr4 = c->cr4 | SVM_CR4_HOST_MASK;
+    vmcb->cr4 = c->cr4 | HVM_CR4_HOST_MASK;
     v->arch.hvm_svm.cpu_shadow_cr4 = c->cr4;
     
     vmcb->idtr.limit = c->idtr_limit;
@@ -466,7 +466,8 @@ int svm_vmcb_restore(struct vcpu *v, str
     /* update VMCB for nested paging restore */
     if ( paging_mode_hap(v->domain) ) {
         vmcb->cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
-        vmcb->cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
+        vmcb->cr4 = v->arch.hvm_svm.cpu_shadow_cr4 |
+                    (HVM_CR4_HOST_MASK & ~X86_CR4_PAE);
         vmcb->cr3 = c->cr3;
         vmcb->np_enable = 1;
         vmcb->g_pat = 0x0007040600070406ULL; /* guest PAT */
@@ -1826,9 +1827,19 @@ static int mov_to_cr(int gpreg, int cr, 
         break;
 
     case 4: /* CR4 */
+        if ( value & ~mmu_cr4_features )
+        {
+            HVM_DBG_LOG(DBG_LEVEL_1, "Guest attempts to enable unsupported "
+                        "CR4 features %lx (host %lx)",
+                        value, mmu_cr4_features);
+            svm_inject_exception(v, TRAP_gp_fault, 1, 0);
+            break;
+        }
+
         if ( paging_mode_hap(v->domain) )
         {
-            vmcb->cr4 = v->arch.hvm_svm.cpu_shadow_cr4 = value;
+            v->arch.hvm_svm.cpu_shadow_cr4 = value;
+            vmcb->cr4 = value | (HVM_CR4_HOST_MASK & ~X86_CR4_PAE);
             paging_update_paging_modes(v);
             /* signal paging update to ASID handler */
             svm_asid_g_update_paging (v);
@@ -1878,7 +1889,7 @@ static int mov_to_cr(int gpreg, int cr, 
         }
 
         v->arch.hvm_svm.cpu_shadow_cr4 = value;
-        vmcb->cr4 = value | SVM_CR4_HOST_MASK;
+        vmcb->cr4 = value | HVM_CR4_HOST_MASK;
   
         /*
          * Writing to CR4 to modify the PSE, PGE, or PAE flag invalidates
@@ -2265,12 +2276,13 @@ static int svm_reset_to_realmode(struct 
     vmcb->cr2 = 0;
     vmcb->efer = EFER_SVME;
 
-    vmcb->cr4 = SVM_CR4_HOST_MASK;
+    vmcb->cr4 = HVM_CR4_HOST_MASK;
     v->arch.hvm_svm.cpu_shadow_cr4 = 0;
 
     if ( paging_mode_hap(v->domain) ) {
         vmcb->cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
-        vmcb->cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
+        vmcb->cr4 = v->arch.hvm_svm.cpu_shadow_cr4 |
+                    (HVM_CR4_HOST_MASK & ~X86_CR4_PAE);
     }
 
     /* This will jump to ROMBIOS */
@@ -2411,6 +2423,12 @@ asmlinkage void svm_vmexit_handler(struc
         break;
     }
 
+    case VMEXIT_EXCEPTION_MC:
+        HVMTRACE_0D(MCE, v);
+        svm_store_cpu_guest_regs(v, regs, NULL);
+        machine_check_vector(regs);
+        break;
+
     case VMEXIT_VINTR:
         vmcb->vintr.fields.irq = 0;
         vmcb->general1_intercepts &= ~GENERAL1_INTERCEPT_VINTR;
Index: 2007-06-18/xen/arch/x86/hvm/svm/vmcb.c
===================================================================
--- 2007-06-18.orig/xen/arch/x86/hvm/svm/vmcb.c 2007-06-18 11:48:56.000000000 
+0200
+++ 2007-06-18/xen/arch/x86/hvm/svm/vmcb.c      2007-06-18 11:52:49.000000000 
+0200
@@ -224,7 +224,7 @@ static int construct_vmcb(struct vcpu *v
     /* Guest CR4. */
     arch_svm->cpu_shadow_cr4 =
         read_cr4() & ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE);
-    vmcb->cr4 = arch_svm->cpu_shadow_cr4 | SVM_CR4_HOST_MASK;
+    vmcb->cr4 = arch_svm->cpu_shadow_cr4 | HVM_CR4_HOST_MASK;
 
     paging_update_paging_modes(v);
     vmcb->cr3 = v->arch.hvm_vcpu.hw_cr3; 
@@ -235,7 +235,9 @@ static int construct_vmcb(struct vcpu *v
         vmcb->np_enable = 1; /* enable nested paging */
         vmcb->g_pat = 0x0007040600070406ULL; /* guest PAT */
         vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
-        vmcb->cr4 = arch_svm->cpu_shadow_cr4 = 0;
+        vmcb->cr4 = arch_svm->cpu_shadow_cr4 =
+                    (HVM_CR4_HOST_MASK & ~X86_CR4_PAE);
+        vmcb->exception_intercepts = HVM_TRAP_MASK;
 
         /* No point in intercepting CR0/3/4 reads, because the hardware 
          * will return the guest versions anyway. */
@@ -249,7 +251,7 @@ static int construct_vmcb(struct vcpu *v
     }
     else
     {
-        vmcb->exception_intercepts = 1U << TRAP_page_fault;
+        vmcb->exception_intercepts = HVM_TRAP_MASK | (1U << TRAP_page_fault);
     }
 
     return 0;
Index: 2007-06-18/xen/arch/x86/hvm/vmx/vmcs.c
===================================================================
--- 2007-06-18.orig/xen/arch/x86/hvm/vmx/vmcs.c 2007-06-18 11:48:56.000000000 
+0200
+++ 2007-06-18/xen/arch/x86/hvm/vmx/vmcs.c      2007-06-18 11:52:49.000000000 
+0200
@@ -421,7 +421,7 @@ static void construct_vmcs(struct vcpu *
     __vmwrite(VMCS_LINK_POINTER_HIGH, ~0UL);
 #endif
 
-    __vmwrite(EXCEPTION_BITMAP, 1U << TRAP_page_fault);
+    __vmwrite(EXCEPTION_BITMAP, HVM_TRAP_MASK | (1U << TRAP_page_fault));
 
     /* Guest CR0. */
     cr0 = read_cr0();
Index: 2007-06-18/xen/arch/x86/hvm/vmx/vmx.c
===================================================================
--- 2007-06-18.orig/xen/arch/x86/hvm/vmx/vmx.c  2007-06-18 11:48:56.000000000 
+0200
+++ 2007-06-18/xen/arch/x86/hvm/vmx/vmx.c       2007-06-18 11:52:49.000000000 
+0200
@@ -615,7 +615,7 @@ int vmx_vmcs_restore(struct vcpu *v, str
     }
 #endif
 
-    __vmwrite(GUEST_CR4, (c->cr4 | VMX_CR4_HOST_MASK));
+    __vmwrite(GUEST_CR4, (c->cr4 | HVM_CR4_HOST_MASK));
     v->arch.hvm_vmx.cpu_shadow_cr4 = c->cr4;
     __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
 
@@ -1986,7 +1986,7 @@ static int vmx_world_restore(struct vcpu
     else
         HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %x", c->cr3);
 
-    __vmwrite(GUEST_CR4, (c->cr4 | VMX_CR4_HOST_MASK));
+    __vmwrite(GUEST_CR4, (c->cr4 | HVM_CR4_HOST_MASK));
     v->arch.hvm_vmx.cpu_shadow_cr4 = c->cr4;
     __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
 
@@ -2385,6 +2385,14 @@ static int mov_to_cr(int gp, int cr, str
     case 4: /* CR4 */
         old_cr = v->arch.hvm_vmx.cpu_shadow_cr4;
 
+        if ( value & ~mmu_cr4_features )
+        {
+            HVM_DBG_LOG(DBG_LEVEL_1, "Guest attempts to enable unsupported "
+                        "CR4 features %lx (host %lx)",
+                        value, mmu_cr4_features);
+            vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
+            break;
+        }
         if ( (value & X86_CR4_PAE) && !(old_cr & X86_CR4_PAE) )
         {
             if ( vmx_pgbit_test(v) )
@@ -2425,7 +2433,7 @@ static int mov_to_cr(int gp, int cr, str
             }
         }
 
-        __vmwrite(GUEST_CR4, value| VMX_CR4_HOST_MASK);
+        __vmwrite(GUEST_CR4, value | HVM_CR4_HOST_MASK);
         v->arch.hvm_vmx.cpu_shadow_cr4 = value;
         __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
 
@@ -2797,7 +2805,8 @@ static void vmx_reflect_exception(struct
     }
 }
 
-static void vmx_failed_vmentry(unsigned int exit_reason)
+static void vmx_failed_vmentry(unsigned int exit_reason,
+                               struct cpu_user_regs *regs)
 {
     unsigned int failed_vmentry_reason = (uint16_t)exit_reason;
     unsigned long exit_qualification;
@@ -2814,6 +2823,9 @@ static void vmx_failed_vmentry(unsigned 
         break;
     case EXIT_REASON_MACHINE_CHECK:
         printk("caused by machine check.\n");
+        HVMTRACE_0D(MCE, current);
+        vmx_store_cpu_guest_regs(current, regs, NULL);
+        machine_check_vector(regs);
         break;
     default:
         printk("reason not known yet!");
@@ -2839,11 +2851,12 @@ asmlinkage void vmx_vmexit_handler(struc
 
     perfc_incra(vmexits, exit_reason);
 
-    if ( exit_reason != EXIT_REASON_EXTERNAL_INTERRUPT )
-        local_irq_enable();
-
     if ( unlikely(exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) )
-        return vmx_failed_vmentry(exit_reason);
+        return vmx_failed_vmentry(exit_reason, regs);
+
+    if ( exit_reason != EXIT_REASON_EXTERNAL_INTERRUPT &&
+         exit_reason != EXIT_REASON_EXCEPTION_NMI )
+        local_irq_enable();
 
     switch ( exit_reason )
     {
@@ -2863,6 +2876,9 @@ asmlinkage void vmx_vmexit_handler(struc
 
         perfc_incra(cause_vector, vector);
 
+        if ( vector != TRAP_nmi && vector != TRAP_machine_check )
+            local_irq_enable();
+
         switch ( vector )
         {
         case TRAP_debug:
@@ -2900,6 +2916,11 @@ asmlinkage void vmx_vmexit_handler(struc
             else
                 vmx_reflect_exception(v);
             break;
+        case TRAP_machine_check:
+            HVMTRACE_0D(MCE, v);
+            vmx_store_cpu_guest_regs(v, regs, NULL);
+            machine_check_vector(regs);
+            break;
         default:
             goto exit_and_crash;
         }
Index: 2007-06-18/xen/arch/x86/traps.c
===================================================================
--- 2007-06-18.orig/xen/arch/x86/traps.c        2007-06-18 11:48:56.000000000 
+0200
+++ 2007-06-18/xen/arch/x86/traps.c     2007-06-18 11:52:49.000000000 +0200
@@ -721,12 +721,6 @@ asmlinkage int do_int3(struct cpu_user_r
     return do_guest_trap(TRAP_int3, regs, 0);
 }
 
-asmlinkage int do_machine_check(struct cpu_user_regs *regs)
-{
-    fatal_trap(TRAP_machine_check, regs);
-    return 0;
-}
-
 void propagate_page_fault(unsigned long addr, u16 error_code)
 {
     struct trap_info *ti;
Index: 2007-06-18/xen/arch/x86/x86_32/entry.S
===================================================================
--- 2007-06-18.orig/xen/arch/x86/x86_32/entry.S 2007-06-18 11:51:56.000000000 
+0200
+++ 2007-06-18/xen/arch/x86/x86_32/entry.S      2007-06-18 11:54:00.000000000 
+0200
@@ -557,10 +557,6 @@ ENTRY(page_fault)
         movw  $TRAP_page_fault,2(%esp)
         jmp   handle_exception
 
-ENTRY(machine_check)
-        pushl $TRAP_machine_check<<16
-        jmp   handle_exception
-
 ENTRY(spurious_interrupt_bug)
         pushl $TRAP_spurious_int<<16
         jmp   handle_exception
@@ -614,6 +610,46 @@ ENTRY(nmi)
         jmp   .Lnmi_common
 #endif /* !CONFIG_X86_SUPERVISOR_MODE_KERNEL */
 
+ENTRY(machine_check)
+        # See NMI handler for explanations.
+#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
+        iret
+#else
+        pushl $TRAP_machine_check<<16
+        SAVE_ALL(.Lmce_xen,.Lmce_common)
+.Lmce_common:
+        movl  %esp,%eax
+        pushl %eax
+        call  *machine_check_vector
+        addl  $4,%esp
+        jmp   ret_from_intr
+.Lmce_xen:
+        GET_GUEST_REGS(%ebx)
+        testl $X86_EFLAGS_VM,%ss:UREGS_eflags(%ebx)
+        mov   %ds,%eax
+        mov   %es,%edx
+        jnz   .Lmce_vm86
+        cmpw  %ax,%cx
+        mov   %ecx,%ds
+        cmovel UREGS_ds(%ebx),%eax
+        cmpw  %dx,%cx
+        movl  %eax,UREGS_ds(%ebx)
+        cmovel UREGS_es(%ebx),%edx
+        mov   %ecx,%es
+        movl  $.Lrestore_sregs_guest,%ecx
+        movl  %edx,UREGS_es(%ebx)
+        cmpl  %ecx,UREGS_eip(%esp)
+        jbe   .Lmce_common
+        cmpl  $.Lrestore_iret_guest,UREGS_eip(%esp)
+        ja    .Lmce_common
+        movl  %ecx,UREGS_eip(%esp)
+        jmp   .Lmce_common
+.Lmce_vm86:
+        mov   %ecx,%ds
+        mov   %ecx,%es
+        jmp   .Lmce_common
+#endif /* !CONFIG_X86_SUPERVISOR_MODE_KERNEL */
+
 ENTRY(setup_vm86_frame)
         mov %ecx,%ds
         mov %ecx,%es
@@ -650,7 +686,7 @@ ENTRY(exception_table)
         .long do_spurious_interrupt_bug
         .long do_coprocessor_error
         .long do_alignment_check
-        .long do_machine_check
+        .long 0 # machine_check
         .long do_simd_coprocessor_error
 
 ENTRY(hypercall_table)
Index: 2007-06-18/xen/arch/x86/x86_64/entry.S
===================================================================
--- 2007-06-18.orig/xen/arch/x86/x86_64/entry.S 2007-06-18 11:48:56.000000000 
+0200
+++ 2007-06-18/xen/arch/x86/x86_64/entry.S      2007-06-18 11:52:49.000000000 
+0200
@@ -505,11 +505,6 @@ ENTRY(page_fault)
         movl  $TRAP_page_fault,4(%rsp)
         jmp   handle_exception
 
-ENTRY(machine_check)
-        pushq $0
-        movl  $TRAP_machine_check,4(%rsp)
-        jmp   handle_exception
-
 ENTRY(spurious_interrupt_bug)
         pushq $0
         movl  $TRAP_spurious_int,4(%rsp)
@@ -546,6 +541,23 @@ nmi_in_hypervisor_mode:
         call  do_nmi
         jmp   ret_from_intr
 
+ENTRY(machine_check)
+        pushq $0
+        movl  $TRAP_machine_check,4(%rsp)
+        SAVE_ALL
+        testb $3,UREGS_cs(%rsp)
+        jz    .Lmce_in_hypervisor_mode
+        /* Interrupted guest context. Copy the context to stack bottom. */
+        GET_GUEST_REGS(%rdi)
+        movq  %rsp,%rsi
+        movl  $UREGS_kernel_sizeof/8,%ecx
+        movq  %rdi,%rsp
+        rep movsq
+.Lmce_in_hypervisor_mode:
+        movq  %rsp,%rdi
+        call  *machine_check_vector(%rip)
+        jmp   ret_from_intr
+
 .data
 
 ENTRY(exception_table)
@@ -567,7 +579,7 @@ ENTRY(exception_table)
         .quad do_spurious_interrupt_bug
         .quad do_coprocessor_error
         .quad do_alignment_check
-        .quad do_machine_check
+        .quad 0 # machine_check
         .quad do_simd_coprocessor_error
 
 ENTRY(hypercall_table)
Index: 2007-06-18/xen/arch/x86/x86_64/traps.c
===================================================================
--- 2007-06-18.orig/xen/arch/x86/x86_64/traps.c 2007-06-18 11:48:56.000000000 
+0200
+++ 2007-06-18/xen/arch/x86/x86_64/traps.c      2007-06-18 11:52:49.000000000 
+0200
@@ -296,6 +296,7 @@ void __init percpu_traps_init(void)
         set_intr_gate(TRAP_double_fault, &double_fault);
         idt_table[TRAP_double_fault].a |= 1UL << 32; /* IST1 */
         idt_table[TRAP_nmi].a          |= 2UL << 32; /* IST2 */
+        idt_table[TRAP_machine_check].a|= 3UL << 32; /* IST3 */
 
         /*
          * The 32-on-64 hypercall entry vector is only accessible from ring 1.
@@ -310,7 +311,10 @@ void __init percpu_traps_init(void)
     stack_bottom = (char *)get_stack_bottom();
     stack        = (char *)((unsigned long)stack_bottom & ~(STACK_SIZE - 1));
 
-    /* Double-fault handler has its own per-CPU 2kB stack. */
+    /* Machine Check handler has its own per-CPU 1kB stack. */
+    init_tss[cpu].ist[2] = (unsigned long)&stack[1024];
+
+    /* Double-fault handler has its own per-CPU 1kB stack. */
     init_tss[cpu].ist[0] = (unsigned long)&stack[2048];
 
     /* NMI handler has its own per-CPU 1kB stack. */
Index: 2007-06-18/xen/include/asm-x86/hvm/hvm.h
===================================================================
--- 2007-06-18.orig/xen/include/asm-x86/hvm/hvm.h       2007-06-18 
11:48:56.000000000 +0200
+++ 2007-06-18/xen/include/asm-x86/hvm/hvm.h    2007-06-18 11:52:49.000000000 
+0200
@@ -279,4 +279,11 @@ static inline int hvm_event_injection_fa
     return hvm_funcs.event_injection_faulted(v);
 }
 
+/* These bits in the CR4 are owned by the host */
+#define HVM_CR4_HOST_MASK (mmu_cr4_features & \
+    (X86_CR4_VMXE | X86_CR4_PAE | X86_CR4_MCE))
+
+/* These exceptions must always be intercepted. */
+#define HVM_TRAP_MASK (1U << TRAP_machine_check)
+
 #endif /* __ASM_X86_HVM_HVM_H__ */
Index: 2007-06-18/xen/include/asm-x86/hvm/svm/vmcb.h
===================================================================
--- 2007-06-18.orig/xen/include/asm-x86/hvm/svm/vmcb.h  2007-06-18 
11:48:56.000000000 +0200
+++ 2007-06-18/xen/include/asm-x86/hvm/svm/vmcb.h       2007-06-18 
11:52:49.000000000 +0200
@@ -464,14 +464,6 @@ void svm_destroy_vmcb(struct vcpu *v);
 
 void setup_vmcb_dump(void);
 
-/* These bits in the CR4 are owned by the host */
-#if CONFIG_PAGING_LEVELS >= 3
-#define SVM_CR4_HOST_MASK (X86_CR4_PAE)
-#else
-#define SVM_CR4_HOST_MASK 0
-#endif
-
-
 #endif /* ASM_X86_HVM_SVM_VMCS_H__ */
 
 /*
Index: 2007-06-18/xen/include/asm-x86/hvm/trace.h
===================================================================
--- 2007-06-18.orig/xen/include/asm-x86/hvm/trace.h     2007-06-18 
11:48:56.000000000 +0200
+++ 2007-06-18/xen/include/asm-x86/hvm/trace.h  2007-06-18 11:52:49.000000000 
+0200
@@ -21,6 +21,7 @@
 #define DO_TRC_HVM_CPUID       1
 #define DO_TRC_HVM_INTR        1
 #define DO_TRC_HVM_NMI         1
+#define DO_TRC_HVM_MCE         1
 #define DO_TRC_HVM_SMI         1
 #define DO_TRC_HVM_VMMCALL     1
 #define DO_TRC_HVM_HLT         1
Index: 2007-06-18/xen/include/asm-x86/hvm/vmx/vmx.h
===================================================================
--- 2007-06-18.orig/xen/include/asm-x86/hvm/vmx/vmx.h   2007-06-18 
11:48:56.000000000 +0200
+++ 2007-06-18/xen/include/asm-x86/hvm/vmx/vmx.h        2007-06-18 
11:52:49.000000000 +0200
@@ -143,13 +143,6 @@ void vmx_vlapic_msr_changed(struct vcpu 
 #define X86_SEG_AR_GRANULARITY  (1u << 15) /* 15, granularity */
 #define X86_SEG_AR_SEG_UNUSABLE (1u << 16) /* 16, segment unusable */
 
-/* These bits in the CR4 are owned by the host */
-#if CONFIG_PAGING_LEVELS >= 3
-#define VMX_CR4_HOST_MASK (X86_CR4_VMXE | X86_CR4_PAE)
-#else
-#define VMX_CR4_HOST_MASK (X86_CR4_VMXE)
-#endif
-
 #define VMCALL_OPCODE   ".byte 0x0f,0x01,0xc1\n"
 #define VMCLEAR_OPCODE  ".byte 0x66,0x0f,0xc7\n"        /* reg/opcode: /6 */
 #define VMLAUNCH_OPCODE ".byte 0x0f,0x01,0xc2\n"
Index: 2007-06-18/xen/include/asm-x86/processor.h
===================================================================
--- 2007-06-18.orig/xen/include/asm-x86/processor.h     2007-06-18 
11:48:56.000000000 +0200
+++ 2007-06-18/xen/include/asm-x86/processor.h  2007-06-18 11:52:49.000000000 
+0200
@@ -567,6 +567,7 @@ extern void mtrr_ap_init(void);
 extern void mtrr_bp_init(void);
 
 extern void mcheck_init(struct cpuinfo_x86 *c);
+extern asmlinkage void (*machine_check_vector)(struct cpu_user_regs *);
 
 int cpuid_hypervisor_leaves(
     uint32_t idx, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
Index: 2007-06-18/xen/include/public/trace.h
===================================================================
--- 2007-06-18.orig/xen/include/public/trace.h  2007-06-18 11:48:56.000000000 
+0200
+++ 2007-06-18/xen/include/public/trace.h       2007-06-18 11:52:49.000000000 
+0200
@@ -88,6 +88,7 @@
 #define TRC_HVM_VMMCALL         (TRC_HVM_HANDLER + 0x12)
 #define TRC_HVM_HLT             (TRC_HVM_HANDLER + 0x13)
 #define TRC_HVM_INVLPG          (TRC_HVM_HANDLER + 0x14)
+#define TRC_HVM_MCE             (TRC_HVM_HANDLER + 0x15)
 
 /* This structure represents a single trace buffer record. */
 struct t_rec {



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.