[Xen-changelog] [xen-unstable] x86: machine check exception handling

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] x86: machine check exception handling
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 21 Jun 2007 16:40:29 -0700
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1182435004 -3600
# Node ID 3cf5052ba5e50555eb4893b2576859853d1b9523
# Parent  899a44cb6ef605d0920acf68cfd55c72b112c033
x86: machine check exception handling

Properly handle MCE (connecting the existing, but so far unused,
vendor-specific handlers). HVM guests no longer own CR4.MCE (and hence
can no longer suppress the exception), preventing silent machine
shutdown.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
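[ Illustrative note, not part of the patch: a minimal, self-contained C
sketch of the CR4 policy this change introduces. guest_write_cr4() is a
hypothetical helper condensing the CR4 cases of the SVM/VMX mov_to_cr()
handlers changed below; HVM_CR4_HOST_MASK and mmu_cr4_features are taken
from the hvm.h hunk. ]

#include <stdio.h>

#define X86_CR4_PAE   0x00000020UL
#define X86_CR4_MCE   0x00000040UL
#define X86_CR4_VMXE  0x00002000UL

/* Host CR4 features; in Xen this is mmu_cr4_features, set at boot.
 * Initialised here with example values for the sketch. */
static unsigned long mmu_cr4_features = X86_CR4_PAE | X86_CR4_MCE;

/* These bits in CR4 are owned by the host (as defined in hvm.h below). */
#define HVM_CR4_HOST_MASK (mmu_cr4_features & \
    (X86_CR4_VMXE | X86_CR4_PAE | X86_CR4_MCE))

static int guest_write_cr4(unsigned long value,
                           unsigned long *shadow_cr4,
                           unsigned long *hw_cr4)
{
    /* Enabling a CR4 feature the host lacks now raises #GP. */
    if (value & ~mmu_cr4_features)
        return -1;                       /* caller injects TRAP_gp_fault */

    *shadow_cr4 = value;                 /* what the guest reads back */
    *hw_cr4 = value | HVM_CR4_HOST_MASK; /* host-owned bits stay set */
    return 0;
}

int main(void)
{
    unsigned long shadow, hw;
    /* Guest tries to clear CR4.MCE: the write succeeds, but the
     * hardware CR4 keeps MCE set, so #MC cannot be suppressed. */
    if (guest_write_cr4(X86_CR4_PAE, &shadow, &hw) == 0)
        printf("shadow=%#lx hw=%#lx\n", shadow, hw); /* hw keeps MCE */
    return 0;
}
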
 xen/arch/x86/hvm/svm/svm.c         |   44 ++++++++++++++++++++++++++---------
 xen/arch/x86/hvm/svm/vmcb.c        |    8 ++++--
 xen/arch/x86/hvm/vmx/vmcs.c        |    2 -
 xen/arch/x86/hvm/vmx/vmx.c         |   32 ++++++++++++++++++++-----
 xen/arch/x86/traps.c               |   11 ++++----
 xen/arch/x86/x86_32/entry.S        |   46 ++++++++++++++++++++-----------------
 xen/arch/x86/x86_64/entry.S        |   40 ++++++++++++++++----------------
 xen/arch/x86/x86_64/traps.c        |   10 +++++---
 xen/include/asm-x86/hvm/hvm.h      |    7 +++++
 xen/include/asm-x86/hvm/svm/vmcb.h |    8 ------
 xen/include/asm-x86/hvm/trace.h    |    1 
 xen/include/asm-x86/hvm/vmx/vmx.h  |    7 -----
 xen/include/asm-x86/processor.h    |    3 +-
 xen/include/public/trace.h         |    1 
 14 files changed, 136 insertions(+), 84 deletions(-)

diff -r 899a44cb6ef6 -r 3cf5052ba5e5 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Thu Jun 21 14:03:57 2007 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Thu Jun 21 15:10:04 2007 +0100
@@ -391,7 +391,7 @@ int svm_vmcb_restore(struct vcpu *v, str
     }
 
  skip_cr3:
-    vmcb->cr4 = c->cr4 | SVM_CR4_HOST_MASK;
+    vmcb->cr4 = c->cr4 | HVM_CR4_HOST_MASK;
     v->arch.hvm_svm.cpu_shadow_cr4 = c->cr4;
     
     vmcb->idtr.limit = c->idtr_limit;
@@ -448,7 +448,8 @@ int svm_vmcb_restore(struct vcpu *v, str
     /* update VMCB for nested paging restore */
     if ( paging_mode_hap(v->domain) ) {
         vmcb->cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
-        vmcb->cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
+        vmcb->cr4 = v->arch.hvm_svm.cpu_shadow_cr4 |
+                    (HVM_CR4_HOST_MASK & ~X86_CR4_PAE);
         vmcb->cr3 = c->cr3;
         vmcb->np_enable = 1;
         vmcb->g_pat = 0x0007040600070406ULL; /* guest PAT */
@@ -805,8 +806,10 @@ static void svm_ctxt_switch_from(struct 
         : : "a" (__pa(root_vmcb[cpu])) );
 
 #ifdef __x86_64__
-    /* Resume use of IST2 for NMIs now that the host TR is reinstated. */
-    idt_tables[cpu][TRAP_nmi].a |= 2UL << 32;
+    /* Resume use of ISTs now that the host TR is reinstated. */
+    idt_tables[cpu][TRAP_double_fault].a  |= 1UL << 32; /* IST1 */
+    idt_tables[cpu][TRAP_nmi].a           |= 2UL << 32; /* IST2 */
+    idt_tables[cpu][TRAP_machine_check].a |= 3UL << 32; /* IST3 */
 #endif
 }
 
@@ -826,10 +829,12 @@ static void svm_ctxt_switch_to(struct vc
     set_segment_register(ss, 0);
 
     /*
-     * Cannot use IST2 for NMIs while we are running with the guest TR. But
-     * this doesn't matter: the IST is only needed to handle SYSCALL/SYSRET.
+     * Cannot use ISTs for NMI/#MC/#DF while we are running with the guest TR.
+     * But this doesn't matter: the IST is only req'd to handle SYSCALL/SYSRET.
      */
-    idt_tables[cpu][TRAP_nmi].a &= ~(2UL << 32);
+    idt_tables[cpu][TRAP_double_fault].a  &= ~(3UL << 32);
+    idt_tables[cpu][TRAP_nmi].a           &= ~(3UL << 32);
+    idt_tables[cpu][TRAP_machine_check].a &= ~(3UL << 32);
 #endif
 
     svm_restore_dr(v);
@@ -1823,9 +1828,19 @@ static int mov_to_cr(int gpreg, int cr, 
         break;
 
     case 4: /* CR4 */
+        if ( value & ~mmu_cr4_features )
+        {
+            HVM_DBG_LOG(DBG_LEVEL_1, "Guest attempts to enable unsupported "
+                        "CR4 features %lx (host %lx)",
+                        value, mmu_cr4_features);
+            svm_inject_exception(v, TRAP_gp_fault, 1, 0);
+            break;
+        }
+
         if ( paging_mode_hap(v->domain) )
         {
-            vmcb->cr4 = v->arch.hvm_svm.cpu_shadow_cr4 = value;
+            v->arch.hvm_svm.cpu_shadow_cr4 = value;
+            vmcb->cr4 = value | (HVM_CR4_HOST_MASK & ~X86_CR4_PAE);
             paging_update_paging_modes(v);
             /* signal paging update to ASID handler */
             svm_asid_g_update_paging (v);
@@ -1875,7 +1890,7 @@ static int mov_to_cr(int gpreg, int cr, 
         }
 
         v->arch.hvm_svm.cpu_shadow_cr4 = value;
-        vmcb->cr4 = value | SVM_CR4_HOST_MASK;
+        vmcb->cr4 = value | HVM_CR4_HOST_MASK;
   
         /*
          * Writing to CR4 to modify the PSE, PGE, or PAE flag invalidates
@@ -2265,12 +2280,13 @@ static int svm_reset_to_realmode(struct 
     vmcb->cr2 = 0;
     vmcb->efer = EFER_SVME;
 
-    vmcb->cr4 = SVM_CR4_HOST_MASK;
+    vmcb->cr4 = HVM_CR4_HOST_MASK;
     v->arch.hvm_svm.cpu_shadow_cr4 = 0;
 
     if ( paging_mode_hap(v->domain) ) {
         vmcb->cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
-        vmcb->cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
+        vmcb->cr4 = v->arch.hvm_svm.cpu_shadow_cr4 |
+                    (HVM_CR4_HOST_MASK & ~X86_CR4_PAE);
     }
 
     /* This will jump to ROMBIOS */
@@ -2411,6 +2427,12 @@ asmlinkage void svm_vmexit_handler(struc
         break;
     }
 
+    case VMEXIT_EXCEPTION_MC:
+        HVMTRACE_0D(MCE, v);
+        svm_store_cpu_guest_regs(v, regs, NULL);
+        do_machine_check(regs);
+        break;
+
     case VMEXIT_VINTR:
         vmcb->vintr.fields.irq = 0;
         vmcb->general1_intercepts &= ~GENERAL1_INTERCEPT_VINTR;
diff -r 899a44cb6ef6 -r 3cf5052ba5e5 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c       Thu Jun 21 14:03:57 2007 +0100
+++ b/xen/arch/x86/hvm/svm/vmcb.c       Thu Jun 21 15:10:04 2007 +0100
@@ -224,7 +224,7 @@ static int construct_vmcb(struct vcpu *v
     /* Guest CR4. */
     arch_svm->cpu_shadow_cr4 =
         read_cr4() & ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE);
-    vmcb->cr4 = arch_svm->cpu_shadow_cr4 | SVM_CR4_HOST_MASK;
+    vmcb->cr4 = arch_svm->cpu_shadow_cr4 | HVM_CR4_HOST_MASK;
 
     paging_update_paging_modes(v);
     vmcb->cr3 = v->arch.hvm_vcpu.hw_cr3; 
@@ -235,7 +235,9 @@ static int construct_vmcb(struct vcpu *v
         vmcb->np_enable = 1; /* enable nested paging */
         vmcb->g_pat = 0x0007040600070406ULL; /* guest PAT */
         vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
-        vmcb->cr4 = arch_svm->cpu_shadow_cr4 = 0;
+        vmcb->cr4 = arch_svm->cpu_shadow_cr4 =
+                    (HVM_CR4_HOST_MASK & ~X86_CR4_PAE);
+        vmcb->exception_intercepts = HVM_TRAP_MASK;
 
         /* No point in intercepting CR0/3/4 reads, because the hardware 
          * will return the guest versions anyway. */
@@ -249,7 +251,7 @@ static int construct_vmcb(struct vcpu *v
     }
     else
     {
-        vmcb->exception_intercepts = 1U << TRAP_page_fault;
+        vmcb->exception_intercepts = HVM_TRAP_MASK | (1U << TRAP_page_fault);
     }
 
     return 0;
diff -r 899a44cb6ef6 -r 3cf5052ba5e5 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c       Thu Jun 21 14:03:57 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmcs.c       Thu Jun 21 15:10:04 2007 +0100
@@ -421,7 +421,7 @@ static void construct_vmcs(struct vcpu *
     __vmwrite(VMCS_LINK_POINTER_HIGH, ~0UL);
 #endif
 
-    __vmwrite(EXCEPTION_BITMAP, 1U << TRAP_page_fault);
+    __vmwrite(EXCEPTION_BITMAP, HVM_TRAP_MASK | (1U << TRAP_page_fault));
 
     /* Guest CR0. */
     cr0 = read_cr0();
diff -r 899a44cb6ef6 -r 3cf5052ba5e5 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Thu Jun 21 14:03:57 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Thu Jun 21 15:10:04 2007 +0100
@@ -615,7 +615,7 @@ int vmx_vmcs_restore(struct vcpu *v, str
     }
 #endif
 
-    __vmwrite(GUEST_CR4, (c->cr4 | VMX_CR4_HOST_MASK));
+    __vmwrite(GUEST_CR4, (c->cr4 | HVM_CR4_HOST_MASK));
     v->arch.hvm_vmx.cpu_shadow_cr4 = c->cr4;
     __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
 
@@ -2001,7 +2001,7 @@ static int vmx_world_restore(struct vcpu
     else
         HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %x", c->cr3);
 
-    __vmwrite(GUEST_CR4, (c->cr4 | VMX_CR4_HOST_MASK));
+    __vmwrite(GUEST_CR4, (c->cr4 | HVM_CR4_HOST_MASK));
     v->arch.hvm_vmx.cpu_shadow_cr4 = c->cr4;
     __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
 
@@ -2400,6 +2400,14 @@ static int mov_to_cr(int gp, int cr, str
     case 4: /* CR4 */
         old_cr = v->arch.hvm_vmx.cpu_shadow_cr4;
 
+        if ( value & ~mmu_cr4_features )
+        {
+            HVM_DBG_LOG(DBG_LEVEL_1, "Guest attempts to enable unsupported "
+                        "CR4 features %lx (host %lx)",
+                        value, mmu_cr4_features);
+            vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
+            break;
+        }
         if ( (value & X86_CR4_PAE) && !(old_cr & X86_CR4_PAE) )
         {
             if ( vmx_pgbit_test(v) )
@@ -2440,7 +2448,7 @@ static int mov_to_cr(int gp, int cr, str
             }
         }
 
-        __vmwrite(GUEST_CR4, value| VMX_CR4_HOST_MASK);
+        __vmwrite(GUEST_CR4, value | HVM_CR4_HOST_MASK);
         v->arch.hvm_vmx.cpu_shadow_cr4 = value;
         __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
 
@@ -2826,7 +2834,8 @@ static void vmx_reflect_exception(struct
     }
 }
 
-static void vmx_failed_vmentry(unsigned int exit_reason)
+static void vmx_failed_vmentry(unsigned int exit_reason,
+                               struct cpu_user_regs *regs)
 {
     unsigned int failed_vmentry_reason = (uint16_t)exit_reason;
     unsigned long exit_qualification;
@@ -2843,6 +2852,9 @@ static void vmx_failed_vmentry(unsigned 
         break;
     case EXIT_REASON_MACHINE_CHECK:
         printk("caused by machine check.\n");
+        HVMTRACE_0D(MCE, current);
+        vmx_store_cpu_guest_regs(current, regs, NULL);
+        do_machine_check(regs);
         break;
     default:
         printk("reason not known yet!");
@@ -2872,7 +2884,7 @@ asmlinkage void vmx_vmexit_handler(struc
         local_irq_enable();
 
     if ( unlikely(exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) )
-        return vmx_failed_vmentry(exit_reason);
+        return vmx_failed_vmentry(exit_reason, regs);
 
     switch ( exit_reason )
     {
@@ -2923,11 +2935,19 @@ asmlinkage void vmx_vmexit_handler(struc
             vmx_inject_hw_exception(v, TRAP_page_fault, regs->error_code);
             break;
         case TRAP_nmi:
-            HVMTRACE_0D(NMI, v);
             if ( (intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI )
+            {
+                HVMTRACE_0D(NMI, v);
+                vmx_store_cpu_guest_regs(v, regs, NULL);
                 do_nmi(regs); /* Real NMI, vector 2: normal processing. */
+            }
             else
                 vmx_reflect_exception(v);
+            break;
+        case TRAP_machine_check:
+            HVMTRACE_0D(MCE, v);
+            vmx_store_cpu_guest_regs(v, regs, NULL);
+            do_machine_check(regs);
             break;
         default:
             goto exit_and_crash;
diff -r 899a44cb6ef6 -r 3cf5052ba5e5 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c      Thu Jun 21 14:03:57 2007 +0100
+++ b/xen/arch/x86/traps.c      Thu Jun 21 15:10:04 2007 +0100
@@ -86,6 +86,7 @@ asmlinkage int do_ ## _name(struct cpu_u
 asmlinkage int do_ ## _name(struct cpu_user_regs *regs)
 
 asmlinkage void nmi(void);
+asmlinkage void machine_check(void);
 DECLARE_TRAP_HANDLER(divide_error);
 DECLARE_TRAP_HANDLER(debug);
 DECLARE_TRAP_HANDLER(int3);
@@ -103,7 +104,6 @@ DECLARE_TRAP_HANDLER(simd_coprocessor_er
 DECLARE_TRAP_HANDLER(simd_coprocessor_error);
 DECLARE_TRAP_HANDLER(alignment_check);
 DECLARE_TRAP_HANDLER(spurious_interrupt_bug);
-DECLARE_TRAP_HANDLER(machine_check);
 
 long do_set_debugreg(int reg, unsigned long value);
 unsigned long do_get_debugreg(int reg);
@@ -731,10 +731,11 @@ asmlinkage int do_int3(struct cpu_user_r
     return do_guest_trap(TRAP_int3, regs, 0);
 }
 
-asmlinkage int do_machine_check(struct cpu_user_regs *regs)
-{
-    fatal_trap(TRAP_machine_check, regs);
-    return 0;
+asmlinkage void do_machine_check(struct cpu_user_regs *regs)
+{
+    extern fastcall void (*machine_check_vector)(
+        struct cpu_user_regs *, long error_code);
+    machine_check_vector(regs, regs->error_code);
 }
 
 void propagate_page_fault(unsigned long addr, u16 error_code)
diff -r 899a44cb6ef6 -r 3cf5052ba5e5 xen/arch/x86/x86_32/entry.S
--- a/xen/arch/x86/x86_32/entry.S       Thu Jun 21 14:03:57 2007 +0100
+++ b/xen/arch/x86/x86_32/entry.S       Thu Jun 21 15:10:04 2007 +0100
@@ -534,10 +534,6 @@ ENTRY(page_fault)
         movw  $TRAP_page_fault,2(%esp)
         jmp   handle_exception
 
-ENTRY(machine_check)
-        pushl $TRAP_machine_check<<16
-        jmp   handle_exception
-
 ENTRY(spurious_interrupt_bug)
         pushl $TRAP_spurious_int<<16
         jmp   handle_exception
@@ -550,18 +546,20 @@ 1:      movl  %esp,%eax
         addl  $4,%esp
         jmp   restore_all_xen
 
-ENTRY(nmi)
+handle_nmi_mce:
 #ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
-        # NMI entry protocol is incompatible with guest kernel in ring 0.
+        # NMI/MCE entry protocol is incompatible with guest kernel in ring 0.
+        addl  $4,%esp
         iret
 #else
         # Save state but do not trash the segment registers!
-        pushl $TRAP_nmi<<16
-        SAVE_ALL(.Lnmi_xen,.Lnmi_common)
-.Lnmi_common:
-        movl  %esp,%eax
-        pushl %eax
-        call  do_nmi
+        SAVE_ALL(.Lnmi_mce_xen,.Lnmi_mce_common)
+.Lnmi_mce_common:
+        xorl  %eax,%eax
+        movw  UREGS_entry_vector(%esp),%ax
+        movl  %esp,%edx
+        pushl %edx
+        call  *exception_table(,%eax,4)
         addl  $4,%esp
         /* 
          * NB. We may return to Xen context with polluted %ds/%es. But in such
@@ -569,13 +567,13 @@ ENTRY(nmi)
          * be detected by SAVE_ALL(), or we have rolled back restore_guest.
          */
         jmp   ret_from_intr
-.Lnmi_xen:
+.Lnmi_mce_xen:
         /* Check the outer (guest) context for %ds/%es state validity. */
         GET_GUEST_REGS(%ebx)
         testl $X86_EFLAGS_VM,%ss:UREGS_eflags(%ebx)
         mov   %ds,%eax
         mov   %es,%edx
-        jnz   .Lnmi_vm86
+        jnz   .Lnmi_mce_vm86
         /* We may have interrupted Xen while messing with %ds/%es... */
         cmpw  %ax,%cx
         mov   %ecx,%ds             /* Ensure %ds is valid */
@@ -587,18 +585,26 @@ ENTRY(nmi)
         movl  $.Lrestore_sregs_guest,%ecx
         movl  %edx,UREGS_es(%ebx)  /* Ensure guest frame contains guest ES */
         cmpl  %ecx,UREGS_eip(%esp)
-        jbe   .Lnmi_common
+        jbe   .Lnmi_mce_common
         cmpl  $.Lrestore_iret_guest,UREGS_eip(%esp)
-        ja    .Lnmi_common
+        ja    .Lnmi_mce_common
         /* Roll outer context restore_guest back to restoring %ds/%es. */
         movl  %ecx,UREGS_eip(%esp)
-        jmp   .Lnmi_common
-.Lnmi_vm86:
+        jmp   .Lnmi_mce_common
+.Lnmi_mce_vm86:
         /* vm86 is easy: the CPU saved %ds/%es so we can safely stomp them. */
         mov   %ecx,%ds
         mov   %ecx,%es
-        jmp   .Lnmi_common
+        jmp   .Lnmi_mce_common
 #endif /* !CONFIG_X86_SUPERVISOR_MODE_KERNEL */
+
+ENTRY(nmi)
+        pushl $TRAP_nmi<<16
+        jmp   handle_nmi_mce
+
+ENTRY(machine_check)
+        pushl $TRAP_machine_check<<16
+        jmp   handle_nmi_mce
 
 ENTRY(setup_vm86_frame)
         mov %ecx,%ds
@@ -620,7 +626,7 @@ ENTRY(exception_table)
 ENTRY(exception_table)
         .long do_divide_error
         .long do_debug
-        .long 0 # nmi
+        .long do_nmi
         .long do_int3
         .long do_overflow
         .long do_bounds
diff -r 899a44cb6ef6 -r 3cf5052ba5e5 xen/arch/x86/x86_64/entry.S
--- a/xen/arch/x86/x86_64/entry.S       Thu Jun 21 14:03:57 2007 +0100
+++ b/xen/arch/x86/x86_64/entry.S       Thu Jun 21 15:10:04 2007 +0100
@@ -505,11 +505,6 @@ ENTRY(page_fault)
         movl  $TRAP_page_fault,4(%rsp)
         jmp   handle_exception
 
-ENTRY(machine_check)
-        pushq $0
-        movl  $TRAP_machine_check,4(%rsp)
-        jmp   handle_exception
-
 ENTRY(spurious_interrupt_bug)
         pushq $0
         movl  $TRAP_spurious_int,4(%rsp)
@@ -527,31 +522,38 @@ ENTRY(early_page_fault)
         call  do_early_page_fault
         jmp   restore_all_xen
 
-ENTRY(nmi)
-        pushq $0
+handle_ist_exception:
         SAVE_ALL
         testb $3,UREGS_cs(%rsp)
-        jz    nmi_in_hypervisor_mode
+        jz    1f
         /* Interrupted guest context. Copy the context to stack bottom. */
-        GET_GUEST_REGS(%rbx)
+        GET_GUEST_REGS(%rdi)
+        movq  %rsp,%rsi
         movl  $UREGS_kernel_sizeof/8,%ecx
-1:      popq  %rax
-        movq  %rax,(%rbx)
-        addq  $8,%rbx
-        loop  1b
-        subq  $UREGS_kernel_sizeof,%rbx
-        movq  %rbx,%rsp
-nmi_in_hypervisor_mode:
-        movq  %rsp,%rdi
-        call  do_nmi
+        movq  %rdi,%rsp
+        rep   movsq
+1:      movq  %rsp,%rdi
+        movl  UREGS_entry_vector(%rsp),%eax
+        leaq  exception_table(%rip),%rdx
+        callq *(%rdx,%rax,8)
         jmp   ret_from_intr
+
+ENTRY(nmi)
+        pushq $0
+        movl  $TRAP_nmi,4(%rsp)
+        jmp   handle_ist_exception
+
+ENTRY(machine_check)
+        pushq $0
+        movl  $TRAP_machine_check,4(%rsp)
+        jmp   handle_ist_exception
 
 .data
 
 ENTRY(exception_table)
         .quad do_divide_error
         .quad do_debug
-        .quad 0 # nmi
+        .quad do_nmi
         .quad do_int3
         .quad do_overflow
         .quad do_bounds
diff -r 899a44cb6ef6 -r 3cf5052ba5e5 xen/arch/x86/x86_64/traps.c
--- a/xen/arch/x86/x86_64/traps.c       Thu Jun 21 14:03:57 2007 +0100
+++ b/xen/arch/x86/x86_64/traps.c       Thu Jun 21 15:10:04 2007 +0100
@@ -294,8 +294,9 @@ void __init percpu_traps_init(void)
     {
         /* Specify dedicated interrupt stacks for NMIs and double faults. */
         set_intr_gate(TRAP_double_fault, &double_fault);
-        idt_table[TRAP_double_fault].a |= 1UL << 32; /* IST1 */
-        idt_table[TRAP_nmi].a          |= 2UL << 32; /* IST2 */
+        idt_table[TRAP_double_fault].a  |= 1UL << 32; /* IST1 */
+        idt_table[TRAP_nmi].a           |= 2UL << 32; /* IST2 */
+        idt_table[TRAP_machine_check].a |= 3UL << 32; /* IST3 */
 
         /*
          * The 32-on-64 hypercall entry vector is only accessible from ring 1.
@@ -310,7 +311,10 @@ void __init percpu_traps_init(void)
     stack_bottom = (char *)get_stack_bottom();
     stack        = (char *)((unsigned long)stack_bottom & ~(STACK_SIZE - 1));
 
-    /* Double-fault handler has its own per-CPU 2kB stack. */
+    /* Machine Check handler has its own per-CPU 1kB stack. */
+    init_tss[cpu].ist[2] = (unsigned long)&stack[1024];
+
+    /* Double-fault handler has its own per-CPU 1kB stack. */
     init_tss[cpu].ist[0] = (unsigned long)&stack[2048];
 
     /* NMI handler has its own per-CPU 1kB stack. */
diff -r 899a44cb6ef6 -r 3cf5052ba5e5 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Thu Jun 21 14:03:57 2007 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h     Thu Jun 21 15:10:04 2007 +0100
@@ -302,4 +302,11 @@ static inline int hvm_event_injection_fa
     return hvm_funcs.event_injection_faulted(v);
 }
 
+/* These bits in the CR4 are owned by the host */
+#define HVM_CR4_HOST_MASK (mmu_cr4_features & \
+    (X86_CR4_VMXE | X86_CR4_PAE | X86_CR4_MCE))
+
+/* These exceptions must always be intercepted. */
+#define HVM_TRAP_MASK (1U << TRAP_machine_check)
+
 #endif /* __ASM_X86_HVM_HVM_H__ */
diff -r 899a44cb6ef6 -r 3cf5052ba5e5 xen/include/asm-x86/hvm/svm/vmcb.h
--- a/xen/include/asm-x86/hvm/svm/vmcb.h        Thu Jun 21 14:03:57 2007 +0100
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h        Thu Jun 21 15:10:04 2007 +0100
@@ -464,14 +464,6 @@ void svm_destroy_vmcb(struct vcpu *v);
 
 void setup_vmcb_dump(void);
 
-/* These bits in the CR4 are owned by the host */
-#if CONFIG_PAGING_LEVELS >= 3
-#define SVM_CR4_HOST_MASK (X86_CR4_PAE)
-#else
-#define SVM_CR4_HOST_MASK 0
-#endif
-
-
 #endif /* ASM_X86_HVM_SVM_VMCS_H__ */
 
 /*
diff -r 899a44cb6ef6 -r 3cf5052ba5e5 xen/include/asm-x86/hvm/trace.h
--- a/xen/include/asm-x86/hvm/trace.h   Thu Jun 21 14:03:57 2007 +0100
+++ b/xen/include/asm-x86/hvm/trace.h   Thu Jun 21 15:10:04 2007 +0100
@@ -21,6 +21,7 @@
 #define DO_TRC_HVM_CPUID       1
 #define DO_TRC_HVM_INTR        1
 #define DO_TRC_HVM_NMI         1
+#define DO_TRC_HVM_MCE         1
 #define DO_TRC_HVM_SMI         1
 #define DO_TRC_HVM_VMMCALL     1
 #define DO_TRC_HVM_HLT         1
diff -r 899a44cb6ef6 -r 3cf5052ba5e5 xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h Thu Jun 21 14:03:57 2007 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h Thu Jun 21 15:10:04 2007 +0100
@@ -143,13 +143,6 @@ void vmx_vlapic_msr_changed(struct vcpu 
 #define X86_SEG_AR_GRANULARITY  (1u << 15) /* 15, granularity */
 #define X86_SEG_AR_SEG_UNUSABLE (1u << 16) /* 16, segment unusable */
 
-/* These bits in the CR4 are owned by the host */
-#if CONFIG_PAGING_LEVELS >= 3
-#define VMX_CR4_HOST_MASK (X86_CR4_VMXE | X86_CR4_PAE)
-#else
-#define VMX_CR4_HOST_MASK (X86_CR4_VMXE)
-#endif
-
 #define VMCALL_OPCODE   ".byte 0x0f,0x01,0xc1\n"
 #define VMCLEAR_OPCODE  ".byte 0x66,0x0f,0xc7\n"        /* reg/opcode: /6 */
 #define VMLAUNCH_OPCODE ".byte 0x0f,0x01,0xc2\n"
diff -r 899a44cb6ef6 -r 3cf5052ba5e5 xen/include/asm-x86/processor.h
--- a/xen/include/asm-x86/processor.h   Thu Jun 21 14:03:57 2007 +0100
+++ b/xen/include/asm-x86/processor.h   Thu Jun 21 15:10:04 2007 +0100
@@ -566,7 +566,8 @@ extern void mtrr_ap_init(void);
 extern void mtrr_ap_init(void);
 extern void mtrr_bp_init(void);
 
-extern void mcheck_init(struct cpuinfo_x86 *c);
+void mcheck_init(struct cpuinfo_x86 *c);
+asmlinkage void do_machine_check(struct cpu_user_regs *regs);
 
 int cpuid_hypervisor_leaves(
     uint32_t idx, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
diff -r 899a44cb6ef6 -r 3cf5052ba5e5 xen/include/public/trace.h
--- a/xen/include/public/trace.h        Thu Jun 21 14:03:57 2007 +0100
+++ b/xen/include/public/trace.h        Thu Jun 21 15:10:04 2007 +0100
@@ -88,6 +88,7 @@
 #define TRC_HVM_VMMCALL         (TRC_HVM_HANDLER + 0x12)
 #define TRC_HVM_HLT             (TRC_HVM_HANDLER + 0x13)
 #define TRC_HVM_INVLPG          (TRC_HVM_HANDLER + 0x14)
+#define TRC_HVM_MCE             (TRC_HVM_HANDLER + 0x15)
 
 /* This structure represents a single trace buffer record. */
 struct t_rec {
