WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] hvm: Clean up EFER handling. Check CR0/CR4/EFER on HVM restore.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Mon, 01 Oct 2007 03:10:15 -0700
Delivery-date: Mon, 01 Oct 2007 03:11:53 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir@xxxxxxxxxxxxx>
# Date 1190991644 -3600
# Node ID ff4ff3e3ebbee196efdaf036688fed89cd19e721
# Parent  96f46954c7f1e47624f4fd762ccbaf87378fcc7d
hvm: Clean up EFER handling. Check CR0/CR4/EFER on HVM restore.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c            |   61 ++++++++++++++++++++++++++++
 xen/arch/x86/hvm/svm/svm.c        |   80 ++++----------------------------------
 xen/arch/x86/hvm/vmx/vmx.c        |   61 ++++------------------------
 xen/include/asm-x86/hvm/support.h |    1 +
 4 files changed, 82 insertions(+), 121 deletions(-)
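
Before the patch itself: the EFER write rule that this change centralises is compact enough to read in isolation. The stand-alone C sketch below mirrors the validation logic of the new hvm_set_efer() (see the hvm.c hunk that follows); efer_value_ok() and the hard-coded cpu_has_* flags are illustrative stand-ins for this sketch only, not Xen code.

    #include <stdint.h>
    #include <stdio.h>

    #define EFER_SCE (1ULL << 0)   /* SYSCALL/SYSRET enable */
    #define EFER_LME (1ULL << 8)   /* long mode enable */
    #define EFER_LMA (1ULL << 10)  /* long mode active; hypervisor-controlled */
    #define EFER_NX  (1ULL << 11)  /* no-execute enable */

    static const int cpu_has_nx = 1;       /* assumed host features, sketch only */
    static const int cpu_has_syscall = 1;

    /* Returns 1 if 'value' passes the checks, 0 if it would raise #GP. */
    static int efer_value_ok(uint64_t value)
    {
        value &= ~EFER_LMA;             /* guests cannot set LMA directly */
        if (value & ~(EFER_LME | EFER_NX | EFER_SCE))
            return 0;                   /* reserved bit set */
        if (sizeof(long) != 8 && (value & EFER_LME))
            return 0;                   /* no long mode on a 32-bit hypervisor */
        if (!cpu_has_nx && (value & EFER_NX))
            return 0;
        if (!cpu_has_syscall && (value & EFER_SCE))
            return 0;
        return 1;
    }

    int main(void)
    {
        printf("NX|SCE   -> %d\n", efer_value_ok(EFER_NX | EFER_SCE)); /* 1 */
        printf("bit 1    -> %d\n", efer_value_ok(1ULL << 1));          /* 0 */
        printf("LMA only -> %d\n", efer_value_ok(EFER_LMA));           /* 1 */
        return 0;
    }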

diff -r 96f46954c7f1 -r ff4ff3e3ebbe xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Fri Sep 28 15:05:11 2007 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Fri Sep 28 16:00:44 2007 +0100
@@ -337,6 +337,34 @@ static int hvm_load_cpu_ctxt(struct doma
     if ( hvm_load_entry(CPU, h, &ctxt) != 0 ) 
         return -EINVAL;
 
+    /* Sanity check some control registers. */
+    if ( (ctxt.cr0 & HVM_CR0_GUEST_RESERVED_BITS) ||
+         !(ctxt.cr0 & X86_CR0_ET) ||
+         ((ctxt.cr0 & (X86_CR0_PE|X86_CR0_PG)) == X86_CR0_PG) )
+    {
+        gdprintk(XENLOG_ERR, "HVM restore: bad CR0 0x%"PRIx64"\n",
+                 ctxt.cr0);
+        return -EINVAL;
+    }
+
+    if ( ctxt.cr4 & HVM_CR4_GUEST_RESERVED_BITS )
+    {
+        gdprintk(XENLOG_ERR, "HVM restore: bad CR4 0x%"PRIx64"\n",
+                 ctxt.cr4);
+        return -EINVAL;
+    }
+
+    if ( (ctxt.msr_efer & ~(EFER_LME | EFER_NX | EFER_SCE)) ||
+         ((sizeof(long) != 8) && (ctxt.msr_efer & EFER_LME)) ||
+         (!cpu_has_nx && (ctxt.msr_efer & EFER_NX)) ||
+         (!cpu_has_syscall && (ctxt.msr_efer & EFER_SCE)) ||
+         ((ctxt.msr_efer & (EFER_LME|EFER_LMA)) == EFER_LMA) )
+    {
+        gdprintk(XENLOG_ERR, "HVM restore: bad EFER 0x%"PRIx64"\n",
+                 ctxt.msr_efer);
+        return -EINVAL;
+    }
+
     /* Architecture-specific vmcs/vmcb bits */
     if ( hvm_funcs.load_cpu_ctxt(v, &ctxt) < 0 )
         return -EINVAL;
@@ -530,6 +558,39 @@ void hvm_triple_fault(void)
     gdprintk(XENLOG_INFO, "Triple fault on VCPU%d - "
              "invoking HVM system reset.\n", v->vcpu_id);
     domain_shutdown(v->domain, SHUTDOWN_reboot);
+}
+
+int hvm_set_efer(uint64_t value)
+{
+    struct vcpu *v = current;
+
+    value &= ~EFER_LMA;
+
+    if ( (value & ~(EFER_LME | EFER_NX | EFER_SCE)) ||
+         ((sizeof(long) != 8) && (value & EFER_LME)) ||
+         (!cpu_has_nx && (value & EFER_NX)) ||
+         (!cpu_has_syscall && (value & EFER_SCE)) )
+    {
+        gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
+                 "EFER: %"PRIx64"\n", value);
+        hvm_inject_exception(TRAP_gp_fault, 0, 0);
+        return 0;
+    }
+
+    if ( ((value ^ v->arch.hvm_vcpu.guest_efer) & EFER_LME) &&
+         hvm_paging_enabled(v) )
+    {
+        gdprintk(XENLOG_WARNING,
+                 "Trying to change EFER.LME with paging enabled\n");
+        hvm_inject_exception(TRAP_gp_fault, 0, 0);
+        return 0;
+    }
+
+    value |= v->arch.hvm_vcpu.guest_efer & EFER_LMA;
+    v->arch.hvm_vcpu.guest_efer = value;
+    hvm_update_guest_efer(v);
+
+    return 1;
 }
 
 int hvm_set_cr0(unsigned long value)
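
One subtlety in the hvm_set_efer() hunk above is worth spelling out: EFER.LMA is managed by the hypervisor, so a guest write can neither set nor clear it; the handler masks off the guest's LMA bit and re-applies whatever LMA value is currently in force. A minimal illustration (apply_efer_write() is a hypothetical helper invented for this sketch):

    #include <assert.h>
    #include <stdint.h>

    #define EFER_LME (1ULL << 8)
    #define EFER_LMA (1ULL << 10)
    #define EFER_NX  (1ULL << 11)

    /* new_efer is the (already validated) guest write; old_efer is the
     * current guest EFER. Only the hypervisor's LMA survives. */
    static uint64_t apply_efer_write(uint64_t old_efer, uint64_t new_efer)
    {
        new_efer &= ~EFER_LMA;            /* discard the guest's LMA bit... */
        new_efer |= old_efer & EFER_LMA;  /* ...and keep the existing one */
        return new_efer;
    }

    int main(void)
    {
        /* A guest in long mode cannot clear LMA by writing 0 to it. */
        assert(apply_efer_write(EFER_LME | EFER_LMA, EFER_LME | EFER_NX)
               == (EFER_LME | EFER_NX | EFER_LMA));
        /* A guest outside long mode cannot set LMA by writing 1 to it. */
        assert(apply_efer_write(0, EFER_LMA | EFER_NX) == EFER_NX);
        return 0;
    }
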
diff -r 96f46954c7f1 -r ff4ff3e3ebbe xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Fri Sep 28 15:05:11 2007 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Fri Sep 28 16:00:44 2007 +0100
@@ -69,6 +69,8 @@ static void *hsa[NR_CPUS] __read_mostly;
 /* vmcb used for extended host state */
 static void *root_vmcb[NR_CPUS] __read_mostly;
 
+static void svm_update_guest_efer(struct vcpu *v);
+
 static void inline __update_guest_eip(
     struct cpu_user_regs *regs, int inst_len) 
 {
@@ -103,22 +105,10 @@ static void svm_cpu_down(void)
     write_efer(read_efer() & ~EFER_SVME);
 }
 
-static int svm_lme_is_set(struct vcpu *v)
-{
-#ifdef __x86_64__
-    u64 guest_efer = v->arch.hvm_vcpu.guest_efer;
-    return guest_efer & EFER_LME;
-#else
-    return 0;
-#endif
-}
-
 static enum handler_return long_mode_do_msr_write(struct cpu_user_regs *regs)
 {
     u64 msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
     u32 ecx = regs->ecx;
-    struct vcpu *v = current;
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
     HVM_DBG_LOG(DBG_LEVEL_0, "msr %x msr_content %"PRIx64,
                 ecx, msr_content);
@@ -126,47 +116,8 @@ static enum handler_return long_mode_do_
     switch ( ecx )
     {
     case MSR_EFER:
-        /* Offending reserved bit will cause #GP. */
-#ifdef __x86_64__
-        if ( (msr_content & ~(EFER_LME | EFER_LMA | EFER_NX | EFER_SCE)) ||
-#else
-        if ( (msr_content & ~(EFER_NX | EFER_SCE)) ||
-#endif
-             (!cpu_has_nx && (msr_content & EFER_NX)) ||
-             (!cpu_has_syscall && (msr_content & EFER_SCE)) )
-        {
-            gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
-                     "EFER: %"PRIx64"\n", msr_content);
-            goto gp_fault;
-        }
-
-        if ( (msr_content & EFER_LME) && !svm_lme_is_set(v) )
-        {
-            /* EFER.LME transition from 0 to 1. */
-            if ( hvm_paging_enabled(v) ||
-                 !(v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE) )
-            {
-                gdprintk(XENLOG_WARNING, "Trying to set LME bit when "
-                         "in paging mode or PAE bit is not set\n");
-                goto gp_fault;
-            }
-        }
-        else if ( !(msr_content & EFER_LME) && svm_lme_is_set(v) )
-        {
-            /* EFER.LME transition from 1 to 0. */
-            if ( hvm_paging_enabled(v) )
-            {
-                gdprintk(XENLOG_WARNING, 
-                         "Trying to clear EFER.LME while paging enabled\n");
-                goto gp_fault;
-            }
-        }
-
-        v->arch.hvm_vcpu.guest_efer = msr_content;
-        vmcb->efer = msr_content | EFER_SVME;
-        if ( !hvm_paging_enabled(v) )
-            vmcb->efer &= ~(EFER_LME | EFER_LMA);
-
+        if ( !hvm_set_efer(msr_content) )
+            return HNDL_exception_raised;
         break;
 
     case MSR_K8_MC4_MISC: /* Threshold register */
@@ -182,10 +133,6 @@ static enum handler_return long_mode_do_
     }
 
     return HNDL_done;
-
- gp_fault:
-    svm_inject_exception(v, TRAP_gp_fault, 1, 0);
-    return HNDL_exception_raised;
 }
 
 
@@ -449,11 +396,7 @@ static void svm_load_cpu_state(struct vc
     vmcb->cstar      = data->msr_cstar;
     vmcb->sfmask     = data->msr_syscall_mask;
     v->arch.hvm_vcpu.guest_efer = data->msr_efer;
-    vmcb->efer       = data->msr_efer | EFER_SVME;
-    /* VMCB's EFER.LME isn't set unless we're actually in long mode
-     * (see long_mode_do_msr_write()) */
-    if ( !(vmcb->efer & EFER_LMA) )
-        vmcb->efer &= ~EFER_LME;
+    svm_update_guest_efer(v);
 
     hvm_set_guest_time(v, data->tsc);
 }
@@ -543,14 +486,11 @@ static void svm_update_guest_cr(struct v
 
 static void svm_update_guest_efer(struct vcpu *v)
 {
-#ifdef __x86_64__
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-
-    if ( v->arch.hvm_vcpu.guest_efer & EFER_LMA )
-        vmcb->efer |= EFER_LME | EFER_LMA;
-    else
-        vmcb->efer &= ~(EFER_LME | EFER_LMA);
-#endif
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+    vmcb->efer = (v->arch.hvm_vcpu.guest_efer | EFER_SVME) & ~EFER_LME;
+    if ( vmcb->efer & EFER_LMA )
+        vmcb->efer |= EFER_LME;
 }
 
 static void svm_flush_guest_tlbs(void)
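
The rewritten svm_update_guest_efer() above encodes two SVM-specific rules: the VMCB copy of EFER must always carry EFER_SVME, and EFER.LME is kept hidden from the VMCB until long mode is actually active (EFER.LMA set), which is why svm_load_cpu_state() can now simply delegate to it. A compact stand-alone restatement (vmcb_efer_from_guest() is a made-up name for this sketch):

    #include <assert.h>
    #include <stdint.h>

    #define EFER_LME  (1ULL << 8)
    #define EFER_LMA  (1ULL << 10)
    #define EFER_SVME (1ULL << 12)  /* must be set to run SVM guests */

    /* Mirrors svm_update_guest_efer(): force SVME on, expose LME only
     * once long mode is actually active. */
    static uint64_t vmcb_efer_from_guest(uint64_t guest_efer)
    {
        uint64_t efer = (guest_efer | EFER_SVME) & ~EFER_LME;
        if (efer & EFER_LMA)
            efer |= EFER_LME;
        return efer;
    }

    int main(void)
    {
        /* LME requested but long mode not yet active: LME stays hidden. */
        assert(vmcb_efer_from_guest(EFER_LME) == EFER_SVME);
        /* Long mode active: LME reappears alongside LMA and SVME. */
        assert(vmcb_efer_from_guest(EFER_LME | EFER_LMA)
               == (EFER_SVME | EFER_LME | EFER_LMA));
        return 0;
    }
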
diff -r 96f46954c7f1 -r ff4ff3e3ebbe xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Fri Sep 28 15:05:11 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Fri Sep 28 16:00:44 2007 +0100
@@ -199,42 +199,8 @@ static enum handler_return long_mode_do_
     switch ( ecx )
     {
     case MSR_EFER:
-        /* offending reserved bit will cause #GP */
-        if ( (msr_content & ~(EFER_LME | EFER_LMA | EFER_NX | EFER_SCE)) ||
-             (!cpu_has_nx && (msr_content & EFER_NX)) ||
-             (!cpu_has_syscall && (msr_content & EFER_SCE)) )
-        {
-            gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
-                     "EFER: %"PRIx64"\n", msr_content);
-            goto gp_fault;
-        }
-
-        if ( (msr_content & EFER_LME)
-             &&  !(v->arch.hvm_vcpu.guest_efer & EFER_LME) )
-        {
-            if ( unlikely(hvm_paging_enabled(v)) )
-            {
-                gdprintk(XENLOG_WARNING,
-                         "Trying to set EFER.LME with paging enabled\n");
-                goto gp_fault;
-            }
-        }
-        else if ( !(msr_content & EFER_LME)
-                  && (v->arch.hvm_vcpu.guest_efer & EFER_LME) )
-        {
-            if ( unlikely(hvm_paging_enabled(v)) )
-            {
-                gdprintk(XENLOG_WARNING,
-                         "Trying to clear EFER.LME with paging enabled\n");
-                goto gp_fault;
-            }
-        }
-
-        if ( (msr_content ^ v->arch.hvm_vcpu.guest_efer) & (EFER_NX|EFER_SCE) )
-            write_efer((read_efer() & ~(EFER_NX|EFER_SCE)) |
-                       (msr_content & (EFER_NX|EFER_SCE)));
-
-        v->arch.hvm_vcpu.guest_efer = msr_content;
+        if ( !hvm_set_efer(msr_content) )
+            goto exception_raised;
         break;
 
     case MSR_FS_BASE:
@@ -285,6 +251,7 @@ static enum handler_return long_mode_do_
     HVM_DBG_LOG(DBG_LEVEL_0, "Not cano address of msr write %x", ecx);
  gp_fault:
     vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
+ exception_raised:
     return HNDL_exception_raised;
 }
 
@@ -380,7 +347,8 @@ static enum handler_return long_mode_do_
     u64 msr_content = 0;
     struct vcpu *v = current;
 
-    switch ( regs->ecx ) {
+    switch ( regs->ecx )
+    {
     case MSR_EFER:
         msr_content = v->arch.hvm_vcpu.guest_efer;
         break;
@@ -398,25 +366,12 @@ static enum handler_return long_mode_do_
 static enum handler_return long_mode_do_msr_write(struct cpu_user_regs *regs)
 {
     u64 msr_content = regs->eax | ((u64)regs->edx << 32);
-    struct vcpu *v = current;
 
     switch ( regs->ecx )
     {
     case MSR_EFER:
-        /* offending reserved bit will cause #GP */
-        if ( (msr_content & ~EFER_NX) ||
-             (!cpu_has_nx && (msr_content & EFER_NX)) )
-        {
-            gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
-                     "EFER: %"PRIx64"\n", msr_content);
-            vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
+        if ( !hvm_set_efer(msr_content) )
             return HNDL_exception_raised;
-        }
-
-        if ( (msr_content ^ v->arch.hvm_vcpu.guest_efer) & EFER_NX )
-            write_efer((read_efer() & ~EFER_NX) | (msr_content & EFER_NX));
-
-        v->arch.hvm_vcpu.guest_efer = msr_content;
         break;
 
     default:
@@ -1096,6 +1051,10 @@ static void vmx_update_guest_efer(struct
 
     vmx_vmcs_exit(v);
 #endif
+
+    if ( v == current )
+        write_efer((read_efer() & ~(EFER_NX|EFER_SCE)) |
+                   (v->arch.hvm_vcpu.guest_efer & (EFER_NX|EFER_SCE)));
 }
 
 static void vmx_flush_guest_tlbs(void)
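
The tail added to vmx_update_guest_efer() above keeps the physical EFER in sync when the vCPU being updated is the one currently running: the guest's NX and SCE choices are folded into the host EFER while every other host bit is preserved, replacing the ad-hoc write_efer() calls deleted from the two MSR-write paths. A stand-alone restatement (read_efer()/write_efer() here are local stand-ins for the real MSR accessors):

    #include <assert.h>
    #include <stdint.h>

    #define EFER_SCE (1ULL << 0)
    #define EFER_LME (1ULL << 8)
    #define EFER_NX  (1ULL << 11)

    static uint64_t host_efer;  /* stands in for the physical EFER MSR */
    static uint64_t read_efer(void)        { return host_efer; }
    static void     write_efer(uint64_t v) { host_efer = v; }

    /* Adopt the guest's NX/SCE settings, preserving all other host bits. */
    static void sync_nx_sce(uint64_t guest_efer)
    {
        write_efer((read_efer() & ~(EFER_NX | EFER_SCE)) |
                   (guest_efer & (EFER_NX | EFER_SCE)));
    }

    int main(void)
    {
        host_efer = EFER_NX | EFER_LME;  /* e.g. a 64-bit host with NX on */
        sync_nx_sce(EFER_SCE);           /* guest runs with SCE, without NX */
        assert(host_efer == (EFER_SCE | EFER_LME));
        return 0;
    }
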
diff -r 96f46954c7f1 -r ff4ff3e3ebbe xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Fri Sep 28 15:05:11 2007 +0100
+++ b/xen/include/asm-x86/hvm/support.h Fri Sep 28 16:00:44 2007 +0100
@@ -234,6 +234,7 @@ void hvm_hlt(unsigned long rflags);
 void hvm_hlt(unsigned long rflags);
 void hvm_triple_fault(void);
 
+int hvm_set_efer(uint64_t value);
 int hvm_set_cr0(unsigned long value);
 int hvm_set_cr3(unsigned long value);
 int hvm_set_cr4(unsigned long value);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
