
[Xen-devel] [PATCH][HVM] fix migration from NX-capable machine to non-NX-capable machine



Hi Keir -

The attached patch (against 3.1.1-rc2) fixes a hypervisor crash that we're seeing when migrating an HVM guest from a machine that supports the NX bit to one that doesn't (e.g., because NX is disabled in the BIOS). It keeps the guest's copy of EFER as is, so the guest still sees EFER_NX if it previously set it -- we just don't propagate that EFER bit to a non-NX-capable host.
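
To make the intended masking concrete, here's a minimal standalone sketch of the idea (the EFER bit definitions, the cpu_has_nx flag and the read_efer()/write_efer() stubs below are illustrative stand-ins, not the real Xen definitions):

/*
 * Sketch: propagate only the host-supported bits of the guest's EFER
 * to the physical MSR. EFER_NX is written through only when the host
 * CPU actually supports NX; the guest's own EFER copy is untouched.
 */
#include <stdint.h>
#include <stdio.h>

#define EFER_SCE (1ULL << 0)   /* syscall enable */
#define EFER_NX  (1ULL << 11)  /* no-execute enable */

static int cpu_has_nx = 0;     /* pretend the destination host has NX disabled */

static uint64_t host_efer;     /* stands in for the real EFER MSR */

static uint64_t read_efer(void) { return host_efer; }
static void write_efer(uint64_t val) { host_efer = val; }

static void sync_host_efer(uint64_t guest_efer)
{
    uint64_t efer_mask = EFER_SCE | (cpu_has_nx ? EFER_NX : 0);

    /* Only touch the MSR if a host-supported bit actually differs. */
    if ( (guest_efer ^ read_efer()) & efer_mask )
        write_efer((read_efer() & ~efer_mask) | (guest_efer & efer_mask));
}

int main(void)
{
    uint64_t guest_efer = EFER_SCE | EFER_NX;  /* guest set NX on the old host */

    sync_host_efer(guest_efer);
    /* EFER_NX never reaches the host MSR, so no #GP on the NX-less machine. */
    printf("host EFER = %#llx\n", (unsigned long long)host_efer);
    return 0;
}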

Signed-off-by: David Lively <dlively@xxxxxxxxxxxxxxx>

I'm not sure whether this problem exists for AMD-V, which has an EFER shadow in its VMCB. Does their vmentry mechanism take care of this masking, or will we take a GPF when trying to propagate the shadowed EFER_NX bit to the real one (when NX is disabled)? I'll follow up on this and submit a separate AMD patch if necessary.

Note this doesn't apply to unstable. I'll do an unstable version if/when this one settles out.

Dave



diff -r a1db76ddfa68 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Thu Sep 27 13:21:28 2007 -0400
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Thu Sep 27 16:13:06 2007 -0400
@@ -369,6 +369,7 @@ static int long_mode_do_msr_write(struct
     struct vcpu *v = current;
     struct vmx_msr_state *guest_msr_state = &v->arch.hvm_vmx.msr_state;
     struct vmx_msr_state *host_msr_state = &this_cpu(host_msr_state);
+    u64 efer_mask = EFER_SCE | (cpu_has_nx ? EFER_NX : 0);
 
     HVM_DBG_LOG(DBG_LEVEL_0, "msr 0x%x content 0x%"PRIx64, ecx, msr_content);
 
@@ -406,9 +407,9 @@ static int long_mode_do_msr_write(struct
             }
         }
 
-        if ( (msr_content ^ v->arch.hvm_vmx.efer) & (EFER_NX|EFER_SCE) )
-            write_efer((read_efer() & ~(EFER_NX|EFER_SCE)) |
-                       (msr_content & (EFER_NX|EFER_SCE)));
+        if ( (msr_content ^ v->arch.hvm_vmx.efer) & efer_mask )
+            write_efer((read_efer() & ~efer_mask) |
+                       (msr_content & efer_mask));
 
         v->arch.hvm_vmx.efer = msr_content;
         break;
@@ -497,6 +498,7 @@ static void vmx_restore_guest_msrs(struc
     struct vmx_msr_state *guest_msr_state, *host_msr_state;
     unsigned long guest_flags;
     int i;
+    u64 efer_mask = EFER_SCE | (cpu_has_nx ? EFER_NX : 0);
 
     guest_msr_state = &v->arch.hvm_vmx.msr_state;
     host_msr_state = &this_cpu(host_msr_state);
@@ -517,13 +519,13 @@ static void vmx_restore_guest_msrs(struc
         clear_bit(i, &guest_flags);
     }
 
-    if ( (v->arch.hvm_vmx.efer ^ read_efer()) & (EFER_NX | EFER_SCE) )
+    if ( (v->arch.hvm_vmx.efer ^ read_efer()) & efer_mask )
     {
         HVM_DBG_LOG(DBG_LEVEL_2,
                     "restore guest's EFER with value %lx",
                     v->arch.hvm_vmx.efer);
-        write_efer((read_efer() & ~(EFER_NX | EFER_SCE)) |
-                   (v->arch.hvm_vmx.efer & (EFER_NX | EFER_SCE)));
+        write_efer((read_efer() & ~efer_mask) |
+                   (v->arch.hvm_vmx.efer & efer_mask));
     }
 }
 
@@ -550,7 +552,7 @@ static void vmx_restore_host_msrs(void)
 
 static void vmx_restore_guest_msrs(struct vcpu *v)
 {
-    if ( (v->arch.hvm_vmx.efer ^ read_efer()) & EFER_NX )
+    if ( cpu_has_nx && ((v->arch.hvm_vmx.efer ^ read_efer()) & EFER_NX) )
     {
         HVM_DBG_LOG(DBG_LEVEL_2,
                     "restore guest's EFER with value %lx",
@@ -598,7 +600,7 @@ static int long_mode_do_msr_write(struct
             return 0;
         }
 
-        if ( (msr_content ^ v->arch.hvm_vmx.efer) & EFER_NX )
+        if ( cpu_has_nx && ((msr_content ^ v->arch.hvm_vmx.efer) & EFER_NX) )
             write_efer((read_efer() & ~EFER_NX) | (msr_content & EFER_NX));
 
         v->arch.hvm_vmx.efer = msr_content;

 

