To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] hvm vmx: Support 'virtual NMI' feature of VMX.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Tue, 03 Jul 2007 13:37:08 -0700
Delivery-date: Tue, 03 Jul 2007 13:35:27 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1183484760 -3600
# Node ID e6d5e4709466b66146d2538574df9704ecb9a5e1
# Parent  9fa9346e1c700d0ea81a99318c564c4b9bccaa8a
hvm vmx: Support 'virtual NMI' feature of VMX.
Signed-off-by: Haitao Shan <haitao.shan@xxxxxxxxx>
Signed-off-by: Eddie Dong <eddie.dong@xxxxxxxxx>
Signed-off-by: Dexuan Cui <dexuan.cui@xxxxxxxxx>
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/vmx/intr.c        |   65 ++++++++++++++++++++++++++-----------
 xen/arch/x86/hvm/vmx/vmcs.c        |    2 -
 xen/arch/x86/hvm/vmx/vmx.c         |   21 +++++++++--
 xen/include/asm-x86/hvm/vmx/vmcs.h |    2 +
 xen/include/asm-x86/hvm/vmx/vmx.h  |    2 +
 5 files changed, 69 insertions(+), 23 deletions(-)

diff -r 9fa9346e1c70 -r e6d5e4709466 xen/arch/x86/hvm/vmx/intr.c
--- a/xen/arch/x86/hvm/vmx/intr.c       Tue Jul 03 17:22:17 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/intr.c       Tue Jul 03 18:46:00 2007 +0100
@@ -71,13 +71,38 @@
  * the effect is cleared. (i.e., MOV-SS-blocking 'dominates' STI-blocking).
  */
 
-static void enable_irq_window(struct vcpu *v)
-{
-    u32  *cpu_exec_control = &v->arch.hvm_vcpu.u.vmx.exec_control;
-    
-    if ( !(*cpu_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING) )
-    {
-        *cpu_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
+static void enable_intr_window(struct vcpu *v, enum hvm_intack intr_source)
+{
+    u32 *cpu_exec_control = &v->arch.hvm_vcpu.u.vmx.exec_control;
+    u32 ctl = CPU_BASED_VIRTUAL_INTR_PENDING;
+
+    if ( unlikely(intr_source == hvm_intack_none) )
+        return;
+
+    if ( unlikely(intr_source == hvm_intack_nmi) && cpu_has_vmx_vnmi )
+    {
+        /*
+         * We set MOV-SS blocking in lieu of STI blocking when delivering an
+         * NMI. This is because it is processor-specific whether STI-blocking
+         * blocks NMIs. Hence we *must* check for STI-blocking on NMI delivery
+         * (otherwise vmentry will fail on processors that check for STI-
+         * blocking) but if the processor does not check for STI-blocking then
+         * we may immediately vmexit and hence make no progress!
+         * (see SDM 3B 21.3, "Other Causes of VM Exits").
+         */
+        u32 intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
+        if ( intr_shadow & VMX_INTR_SHADOW_STI )
+        {
+            /* Having both STI-blocking and MOV-SS-blocking fails vmentry. */
+            intr_shadow &= ~VMX_INTR_SHADOW_STI;
+            intr_shadow |= VMX_INTR_SHADOW_MOV_SS;
+        }
+        ctl = CPU_BASED_VIRTUAL_NMI_PENDING;
+    }
+
+    if ( !(*cpu_exec_control & ctl) )
+    {
+        *cpu_exec_control |= ctl;
         __vmwrite(CPU_BASED_VM_EXEC_CONTROL, *cpu_exec_control);
     }
 }
@@ -120,8 +145,7 @@ asmlinkage void vmx_intr_assist(void)
         if ( unlikely(v->arch.hvm_vmx.vector_injected) )
         {
             v->arch.hvm_vmx.vector_injected = 0;
-            if ( unlikely(intr_source != hvm_intack_none) )
-                enable_irq_window(v);
+            enable_intr_window(v, intr_source);
             return;
         }
 
@@ -129,7 +153,9 @@ asmlinkage void vmx_intr_assist(void)
         idtv_info_field = __vmread(IDT_VECTORING_INFO_FIELD);
         if ( unlikely(idtv_info_field & INTR_INFO_VALID_MASK) )
         {
-            __vmwrite(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
+            /* See SDM 3B 25.7.1.1 and .2 for info about masking resvd bits. */
+            __vmwrite(VM_ENTRY_INTR_INFO_FIELD,
+                      idtv_info_field & ~INTR_INFO_RESVD_BITS_MASK);
 
             /*
              * Safe: the length will only be interpreted for software
@@ -143,8 +169,16 @@ asmlinkage void vmx_intr_assist(void)
             if ( unlikely(idtv_info_field & 0x800) ) /* valid error code */
                 __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE,
                           __vmread(IDT_VECTORING_ERROR_CODE));
-            if ( unlikely(intr_source != hvm_intack_none) )
-                enable_irq_window(v);
+            enable_intr_window(v, intr_source);
+
+            /*
+             * Clear NMI-blocking interruptibility info if an NMI delivery
+             * faulted. Re-delivery will re-set it (see SDM 3B 25.7.1.2).
+             */
+            if ( (idtv_info_field&INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI )
+                __vmwrite(GUEST_INTERRUPTIBILITY_INFO,
+                          __vmread(GUEST_INTERRUPTIBILITY_INFO) &
+                          ~VMX_INTR_SHADOW_NMI);
 
             HVM_DBG_LOG(DBG_LEVEL_1, "idtv_info_field=%x", idtv_info_field);
             return;
@@ -153,14 +187,9 @@ asmlinkage void vmx_intr_assist(void)
         if ( likely(intr_source == hvm_intack_none) )
             return;
 
-        /*
-         * TODO: Better NMI handling. Shouldn't wait for EFLAGS.IF==1, but
-         * should wait for exit from 'NMI blocking' window (NMI injection to
-         * next IRET). This requires us to use the new 'virtual NMI' support.
-         */
         if ( !hvm_interrupts_enabled(v, intr_source) )
         {
-            enable_irq_window(v);
+            enable_intr_window(v, intr_source);
             return;
         }
     } while ( !hvm_vcpu_ack_pending_irq(v, intr_source, &intr_vector) );
diff -r 9fa9346e1c70 -r e6d5e4709466 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c       Tue Jul 03 17:22:17 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmcs.c       Tue Jul 03 18:46:00 2007 +0100
@@ -75,7 +75,7 @@ void vmx_init_vmcs_config(void)
 
     min = (PIN_BASED_EXT_INTR_MASK |
            PIN_BASED_NMI_EXITING);
-    opt = 0; /*PIN_BASED_VIRTUAL_NMIS*/
+    opt = PIN_BASED_VIRTUAL_NMIS;
     _vmx_pin_based_exec_control = adjust_vmx_controls(
         min, opt, MSR_IA32_VMX_PINBASED_CTLS);
 
diff -r 9fa9346e1c70 -r e6d5e4709466 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Tue Jul 03 17:22:17 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Tue Jul 03 18:46:00 2007 +0100
@@ -1106,15 +1106,17 @@ static int vmx_interrupts_enabled(struct
 
     ASSERT(v == current);
 
-    intr_shadow  = __vmread(GUEST_INTERRUPTIBILITY_INFO);
-    intr_shadow &= VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS;
+    intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
 
     if ( type == hvm_intack_nmi )
-        return !intr_shadow;
+        return !(intr_shadow & (VMX_INTR_SHADOW_STI|
+                                VMX_INTR_SHADOW_MOV_SS|
+                                VMX_INTR_SHADOW_NMI));
 
     ASSERT((type == hvm_intack_pic) || (type == hvm_intack_lapic));
     eflags = __vmread(GUEST_RFLAGS);
-    return !irq_masked(eflags) && !intr_shadow;
+    return (!irq_masked(eflags) &&
+            !(intr_shadow & (VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS)));
 }
 
 static void vmx_update_host_cr3(struct vcpu *v)
@@ -2911,6 +2913,17 @@ asmlinkage void vmx_vmexit_handler(struc
 
         vector = intr_info & INTR_INFO_VECTOR_MASK;
 
+        /*
+         * Re-set the NMI shadow if vmexit caused by a guest IRET fault (see 3B
+         * 25.7.1.2, "Resuming Guest Software after Handling an Exception").
+         * (NB. If we emulate this IRET for any reason, we should re-clear!)
+         */
+        if ( unlikely(intr_info & INTR_INFO_NMI_UNBLOCKED_BY_IRET) &&
+             !(__vmread(IDT_VECTORING_INFO_FIELD) & INTR_INFO_VALID_MASK) &&
+             (vector != TRAP_double_fault) )
+            __vmwrite(GUEST_INTERRUPTIBILITY_INFO,
+                    __vmread(GUEST_INTERRUPTIBILITY_INFO)|VMX_INTR_SHADOW_NMI);
+
         perfc_incra(cause_vector, vector);
 
         switch ( vector )
diff -r 9fa9346e1c70 -r e6d5e4709466 xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h        Tue Jul 03 17:22:17 2007 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h        Tue Jul 03 18:46:00 2007 +0100
@@ -137,6 +137,8 @@ extern bool_t cpu_has_vmx_ins_outs_instr
     (vmx_secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
 #define cpu_has_vmx_tpr_shadow \
     (vmx_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)
+#define cpu_has_vmx_vnmi \
+    (vmx_pin_based_exec_control & PIN_BASED_VIRTUAL_NMIS)
 #define cpu_has_vmx_msr_bitmap \
     (vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_MSR_BITMAP)
 extern char *vmx_msr_bitmap;
diff -r 9fa9346e1c70 -r e6d5e4709466 xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h Tue Jul 03 17:22:17 2007 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h Tue Jul 03 18:46:00 2007 +0100
@@ -90,7 +90,9 @@ void vmx_vlapic_msr_changed(struct vcpu 
 #define INTR_INFO_VECTOR_MASK           0xff            /* 7:0 */
 #define INTR_INFO_INTR_TYPE_MASK        0x700           /* 10:8 */
 #define INTR_INFO_DELIVER_CODE_MASK     0x800           /* 11 */
+#define INTR_INFO_NMI_UNBLOCKED_BY_IRET 0x1000          /* 12 */
 #define INTR_INFO_VALID_MASK            0x80000000      /* 31 */
+#define INTR_INFO_RESVD_BITS_MASK       0x7ffff000
 
 #define INTR_TYPE_EXT_INTR              (0 << 8)    /* external interrupt */
 #define INTR_TYPE_NMI                   (2 << 8)    /* NMI                */
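
For readers skimming the diff, the following self-contained C sketch condenses the core decision made by the new enable_intr_window(): when the pending event is an NMI and the CPU advertises virtual NMIs, request an NMI-window exit (CPU_BASED_VIRTUAL_NMI_PENDING) rather than an interrupt-window exit, converting any STI-blocking into MOV-SS-blocking as the comment in the hunk explains. It is not part of the changeset; the constants (taken from the SDM-defined bit positions, assumed here), the cpu_has_vmx_vnmi flag, and the global "VMCS" variables are simplified stand-ins for the real Xen definitions so the example compiles on its own.

/* Illustrative sketch only -- not part of the patch above. */
#include <stdint.h>
#include <stdio.h>

/* Assumed VMX bit values (primary exec controls / interruptibility state). */
#define CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004u  /* interrupt-window exiting */
#define CPU_BASED_VIRTUAL_NMI_PENDING  0x00400000u  /* NMI-window exiting */
#define VMX_INTR_SHADOW_STI            0x00000001u
#define VMX_INTR_SHADOW_MOV_SS         0x00000002u

enum hvm_intack { hvm_intack_none, hvm_intack_pic, hvm_intack_lapic, hvm_intack_nmi };

/* Stand-ins for VMCS state that Xen accesses via __vmread()/__vmwrite(). */
static uint32_t exec_control;
static uint32_t intr_shadow = VMX_INTR_SHADOW_STI;
static int cpu_has_vmx_vnmi = 1;

/* Mirrors the control selection performed by the new enable_intr_window(). */
static void sketch_enable_intr_window(enum hvm_intack intr_source)
{
    uint32_t ctl = CPU_BASED_VIRTUAL_INTR_PENDING;

    if (intr_source == hvm_intack_none)
        return;

    if (intr_source == hvm_intack_nmi && cpu_has_vmx_vnmi) {
        /* Wait for the NMI-blocking window to close, not for EFLAGS.IF. */
        ctl = CPU_BASED_VIRTUAL_NMI_PENDING;
        if (intr_shadow & VMX_INTR_SHADOW_STI) {
            /* STI- and MOV-SS-blocking set together would fail vmentry. */
            intr_shadow &= ~VMX_INTR_SHADOW_STI;
            intr_shadow |= VMX_INTR_SHADOW_MOV_SS;
        }
    }

    exec_control |= ctl;  /* Xen then writes CPU_BASED_VM_EXEC_CONTROL */
}

int main(void)
{
    sketch_enable_intr_window(hvm_intack_nmi);
    printf("exec_control=0x%08x intr_shadow=0x%08x\n",
           (unsigned)exec_control, (unsigned)intr_shadow);
    return 0;
}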

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
