WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] hvm: Clean up control-register and EFER handling.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 16 Aug 2007 07:40:06 -0700
Delivery-date: Thu, 16 Aug 2007 07:41:22 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1186504209 -3600
# Node ID 0f541efbb6d6e3883d103647ff6c8c0d332199dc
# Parent  7953164cebb6dfbbee08d06c91f424b63d87ed71
hvm: Clean up control-register and EFER handling.
No semantic changes. :-)
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c              |   60 ++++++
 xen/arch/x86/hvm/svm/svm.c          |  274 +++++++++----------------------
 xen/arch/x86/hvm/svm/vmcb.c         |   17 -
 xen/arch/x86/hvm/vmx/vmcs.c         |   12 -
 xen/arch/x86/hvm/vmx/vmx.c          |  310 ++++++++++++------------------------
 xen/arch/x86/hvm/vmx/x86_32/exits.S |    2 
 xen/arch/x86/hvm/vmx/x86_64/exits.S |    2 
 xen/arch/x86/mm.c                   |    4 
 xen/arch/x86/mm/hap/guest_walk.c    |    2 
 xen/arch/x86/mm/hap/hap.c           |    2 
 xen/arch/x86/mm/shadow/common.c     |    4 
 xen/arch/x86/mm/shadow/multi.c      |   16 -
 xen/arch/x86/x86_32/asm-offsets.c   |    2 
 xen/arch/x86/x86_64/asm-offsets.c   |    2 
 xen/include/asm-x86/hvm/hvm.h       |   56 +-----
 xen/include/asm-x86/hvm/support.h   |    2 
 xen/include/asm-x86/hvm/svm/asid.h  |    9 -
 xen/include/asm-x86/hvm/svm/vmcb.h  |    5 
 xen/include/asm-x86/hvm/vcpu.h      |   12 +
 xen/include/asm-x86/hvm/vmx/vmcs.h  |    6 
 xen/include/asm-x86/hvm/vmx/vmx.h   |    4 
 21 files changed, 307 insertions(+), 496 deletions(-)
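
The bulk of the diff below is mechanical: the per-vendor copies of the guest's control registers and EFER (arch.hvm_svm.cpu_shadow_cr0, arch.hvm_vmx.cpu_cr3, arch.hvm_vmx.efer, and friends) are folded into one set of fields on the common hvm_vcpu structure, and the duplicated CR3-write paths in svm.c and vmx.c are replaced by a single shared hvm_set_cr3() in hvm.c. As a rough sketch of the shared state only — field names taken from the hunks below, not the exact declaration in xen/include/asm-x86/hvm/vcpu.h, which carries additional members:

    /* Sketch (hypothetical struct name) of the common per-vCPU register
     * state this changeset introduces. */
    #include <stdint.h>

    struct hvm_vcpu_sketch {
        unsigned long guest_cr[5]; /* CR values as the guest sees them
                                      (indexed by register number) */
        unsigned long hw_cr[5];    /* values actually programmed into the
                                      hardware (VMCS GUEST_CR*, VMCB cr*) */
        uint64_t      guest_efer;  /* guest view of EFER; the hardware EFER
                                      may keep extra bits such as SVME set */
    };

With these common fields, code that used to read v->arch.hvm_svm.cpu_shadow_cr0 or v->arch.hvm_vmx.cpu_cr3 now reads v->arch.hvm_vcpu.guest_cr[0] / guest_cr[3], so predicates such as hvm_paging_enabled() and hvm_long_mode_enabled() can be evaluated in common code rather than through per-vendor hvm_funcs hooks (hence the removal of .paging_enabled, .long_mode_enabled, .pae_enabled, .nx_enabled and .get_guest_ctrl_reg below).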

diff -r 7953164cebb6 -r 0f541efbb6d6 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Tue Aug 07 09:07:29 2007 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Tue Aug 07 17:30:09 2007 +0100
@@ -520,6 +520,63 @@ void hvm_triple_fault(void)
     domain_shutdown(v->domain, SHUTDOWN_reboot);
 }
 
+int hvm_set_cr3(unsigned long value)
+{
+    unsigned long old_base_mfn, mfn;
+    struct vcpu *v = current;
+
+    if ( paging_mode_hap(v->domain) )
+    {
+        v->arch.hvm_vcpu.guest_cr[3] = value;
+        hvm_update_guest_cr3(v, value);
+        goto success;
+    }
+
+    if ( !hvm_paging_enabled(v) )
+    {
+        v->arch.hvm_vcpu.guest_cr[3] = value;
+        goto success;
+    }
+
+    if ( value == v->arch.hvm_vcpu.guest_cr[3] )
+    {
+        /* 
+         * This is simple TLB flush, implying the guest has removed some
+         * translation or changed page attributes. Invalidate the shadow.
+         */
+        mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
+        if ( mfn != pagetable_get_pfn(v->arch.guest_table) )
+            goto bad_cr3;
+    }
+    else 
+    {
+        /* Make a shadow. Check that the PDBR is valid first. */
+        HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
+        mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
+        if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
+            goto bad_cr3;
+
+        old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
+        v->arch.guest_table = pagetable_from_pfn(mfn);
+
+        if ( old_base_mfn )
+            put_page(mfn_to_page(old_base_mfn));
+
+        v->arch.hvm_vcpu.guest_cr[3] = value;
+        HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
+    }
+
+    paging_update_cr3(v);
+
+ success:
+    return 1;
+
+ bad_cr3:
+    gdprintk(XENLOG_ERR, "Invalid CR3\n");
+    domain_crash(v->domain);
+    return 0;
+}
+
 /*
  * __hvm_copy():
  *  @buf  = hypervisor buffer
@@ -668,7 +725,6 @@ static hvm_hypercall_t *hvm_hypercall32_
 static hvm_hypercall_t *hvm_hypercall32_table[NR_hypercalls] = {
     HYPERCALL(memory_op),
     [ __HYPERVISOR_grant_table_op ] = (hvm_hypercall_t *)hvm_grant_table_op,
-    HYPERCALL(multicall),
     HYPERCALL(xen_version),
     HYPERCALL(grant_table_op),
     HYPERCALL(event_channel_op),
@@ -815,7 +871,7 @@ int hvm_do_hypercall(struct cpu_user_reg
 
 void hvm_update_guest_cr3(struct vcpu *v, unsigned long guest_cr3)
 {
-    v->arch.hvm_vcpu.hw_cr3 = guest_cr3;
+    v->arch.hvm_vcpu.hw_cr[3] = guest_cr3;
     hvm_funcs.update_guest_cr3(v);
 }
 
diff -r 7953164cebb6 -r 0f541efbb6d6 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Tue Aug 07 09:07:29 2007 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Tue Aug 07 17:30:09 2007 +0100
@@ -78,7 +78,7 @@ static void svm_inject_exception(
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
     if ( trap == TRAP_page_fault )
-        HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_svm.cpu_cr2, error_code);
+        HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_vcpu.guest_cr[2], error_code);
     else
         HVMTRACE_2D(INJ_EXC, v, trap, error_code);
 
@@ -97,55 +97,14 @@ static void svm_cpu_down(void)
     write_efer(read_efer() & ~EFER_SVME);
 }
 
+static int svm_lme_is_set(struct vcpu *v)
+{
 #ifdef __x86_64__
-
-static int svm_lme_is_set(struct vcpu *v)
-{
-    u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
+    u64 guest_efer = v->arch.hvm_vcpu.guest_efer;
     return guest_efer & EFER_LME;
-}
-
-static int svm_long_mode_enabled(struct vcpu *v)
-{
-    u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
-    return guest_efer & EFER_LMA;
-}
-
-#else /* __i386__ */
-
-static int svm_lme_is_set(struct vcpu *v)
-{ return 0; }
-static int svm_long_mode_enabled(struct vcpu *v)
-{ return 0; }
-
+#else
+    return 0;
 #endif
-
-static int svm_cr4_pae_is_set(struct vcpu *v)
-{
-    unsigned long guest_cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
-    return guest_cr4 & X86_CR4_PAE;
-}
-
-static int svm_paging_enabled(struct vcpu *v)
-{
-    unsigned long guest_cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
-    return (guest_cr0 & X86_CR0_PE) && (guest_cr0 & X86_CR0_PG);
-}
-
-static int svm_pae_enabled(struct vcpu *v)
-{
-    unsigned long guest_cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
-    return svm_paging_enabled(v) && (guest_cr4 & X86_CR4_PAE);
-}
-
-static int svm_nx_enabled(struct vcpu *v)
-{
-    return v->arch.hvm_svm.cpu_shadow_efer & EFER_NX;
-}
-
-static int svm_pgbit_test(struct vcpu *v)
-{
-    return v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_PG;
 }
 
 static void svm_store_cpu_guest_regs(
@@ -165,10 +124,10 @@ static void svm_store_cpu_guest_regs(
     if ( crs != NULL )
     {
         /* Returning the guest's regs */
-        crs[0] = v->arch.hvm_svm.cpu_shadow_cr0;
-        crs[2] = v->arch.hvm_svm.cpu_cr2;
-        crs[3] = v->arch.hvm_svm.cpu_cr3;
-        crs[4] = v->arch.hvm_svm.cpu_shadow_cr4;
+        crs[0] = v->arch.hvm_vcpu.guest_cr[0];
+        crs[2] = v->arch.hvm_vcpu.guest_cr[2];
+        crs[3] = v->arch.hvm_vcpu.guest_cr[3];
+        crs[4] = v->arch.hvm_vcpu.guest_cr[4];
     }
 }
 
@@ -202,7 +161,8 @@ static enum handler_return long_mode_do_
         if ( (msr_content & EFER_LME) && !svm_lme_is_set(v) )
         {
             /* EFER.LME transition from 0 to 1. */
-            if ( svm_paging_enabled(v) || !svm_cr4_pae_is_set(v) )
+            if ( hvm_paging_enabled(v) ||
+                 !(v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE) )
             {
                 gdprintk(XENLOG_WARNING, "Trying to set LME bit when "
                          "in paging mode or PAE bit is not set\n");
@@ -212,7 +172,7 @@ static enum handler_return long_mode_do_
         else if ( !(msr_content & EFER_LME) && svm_lme_is_set(v) )
         {
             /* EFER.LME transistion from 1 to 0. */
-            if ( svm_paging_enabled(v) )
+            if ( hvm_paging_enabled(v) )
             {
                 gdprintk(XENLOG_WARNING, 
                          "Trying to clear EFER.LME while paging enabled\n");
@@ -220,9 +180,9 @@ static enum handler_return long_mode_do_
             }
         }
 
-        v->arch.hvm_svm.cpu_shadow_efer = msr_content;
+        v->arch.hvm_vcpu.guest_efer = msr_content;
         vmcb->efer = msr_content | EFER_SVME;
-        if ( !svm_paging_enabled(v) )
+        if ( !hvm_paging_enabled(v) )
             vmcb->efer &= ~(EFER_LME | EFER_LMA);
 
         break;
@@ -297,10 +257,10 @@ int svm_vmcb_save(struct vcpu *v, struct
     c->rsp = vmcb->rsp;
     c->rflags = vmcb->rflags;
 
-    c->cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
-    c->cr2 = v->arch.hvm_svm.cpu_cr2;
-    c->cr3 = v->arch.hvm_svm.cpu_cr3;
-    c->cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
+    c->cr0 = v->arch.hvm_vcpu.guest_cr[0];
+    c->cr2 = v->arch.hvm_vcpu.guest_cr[2];
+    c->cr3 = v->arch.hvm_vcpu.guest_cr[3];
+    c->cr4 = v->arch.hvm_vcpu.guest_cr[4];
 
 #ifdef HVM_DEBUG_SUSPEND
     printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
@@ -383,10 +343,10 @@ int svm_vmcb_restore(struct vcpu *v, str
     vmcb->rsp    = c->rsp;
     vmcb->rflags = c->rflags;
 
-    v->arch.hvm_svm.cpu_shadow_cr0 = c->cr0;
+    v->arch.hvm_vcpu.guest_cr[0] = c->cr0;
     vmcb->cr0 = c->cr0 | X86_CR0_WP | X86_CR0_ET | X86_CR0_PG;
 
-    v->arch.hvm_svm.cpu_cr2 = c->cr2;
+    v->arch.hvm_vcpu.guest_cr[2] = c->cr2;
 
 #ifdef HVM_DEBUG_SUSPEND
     printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
@@ -396,13 +356,13 @@ int svm_vmcb_restore(struct vcpu *v, str
             c->cr4);
 #endif
 
-    if ( !svm_paging_enabled(v) ) 
+    if ( !hvm_paging_enabled(v) ) 
     {
         printk("%s: paging not enabled.\n", __func__);
         goto skip_cr3;
     }
 
-    if ( c->cr3 == v->arch.hvm_svm.cpu_cr3 ) 
+    if ( c->cr3 == v->arch.hvm_vcpu.guest_cr[3] ) 
     {
         /*
          * This is simple TLB flush, implying the guest has
@@ -428,12 +388,12 @@ int svm_vmcb_restore(struct vcpu *v, str
         v->arch.guest_table = pagetable_from_pfn(mfn);
         if (old_base_mfn)
              put_page(mfn_to_page(old_base_mfn));
-        v->arch.hvm_svm.cpu_cr3 = c->cr3;
+        v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
     }
 
  skip_cr3:
     vmcb->cr4 = c->cr4 | HVM_CR4_HOST_MASK;
-    v->arch.hvm_svm.cpu_shadow_cr4 = c->cr4;
+    v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
     
     vmcb->idtr.limit = c->idtr_limit;
     vmcb->idtr.base  = c->idtr_base;
@@ -488,8 +448,8 @@ int svm_vmcb_restore(struct vcpu *v, str
 
     if ( paging_mode_hap(v->domain) )
     {
-        vmcb->cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
-        vmcb->cr4 = (v->arch.hvm_svm.cpu_shadow_cr4 |
+        vmcb->cr0 = v->arch.hvm_vcpu.guest_cr[0];
+        vmcb->cr4 = (v->arch.hvm_vcpu.guest_cr[4] |
                      (HVM_CR4_HOST_MASK & ~X86_CR4_PAE));
         vmcb->cr3 = c->cr3;
         vmcb->np_enable = 1;
@@ -540,7 +500,7 @@ static void svm_save_cpu_state(struct vc
     data->msr_star         = vmcb->star;
     data->msr_cstar        = vmcb->cstar;
     data->msr_syscall_mask = vmcb->sfmask;
-    data->msr_efer         = v->arch.hvm_svm.cpu_shadow_efer;
+    data->msr_efer         = v->arch.hvm_vcpu.guest_efer;
     data->msr_flags        = -1ULL;
 
     data->tsc = hvm_get_guest_time(v);
@@ -556,7 +516,7 @@ static void svm_load_cpu_state(struct vc
     vmcb->star       = data->msr_star;
     vmcb->cstar      = data->msr_cstar;
     vmcb->sfmask     = data->msr_syscall_mask;
-    v->arch.hvm_svm.cpu_shadow_efer = data->msr_efer;
+    v->arch.hvm_vcpu.guest_efer = data->msr_efer;
     vmcb->efer       = data->msr_efer | EFER_SVME;
     /* VMCB's EFER.LME isn't set unless we're actually in long mode
      * (see long_mode_do_msr_write()) */
@@ -605,11 +565,11 @@ static int svm_guest_x86_mode(struct vcp
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
-    if ( unlikely(!(v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_PE)) )
+    if ( unlikely(!(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE)) )
         return 0;
     if ( unlikely(vmcb->rflags & X86_EFLAGS_VM) )
         return 1;
-    if ( svm_long_mode_enabled(v) && likely(vmcb->cs.attr.fields.l) )
+    if ( hvm_long_mode_enabled(v) && likely(vmcb->cs.attr.fields.l) )
         return 8;
     return (likely(vmcb->cs.attr.fields.db) ? 4 : 2);
 }
@@ -621,7 +581,8 @@ static void svm_update_host_cr3(struct v
 
 static void svm_update_guest_cr3(struct vcpu *v)
 {
-    v->arch.hvm_svm.vmcb->cr3 = v->arch.hvm_vcpu.hw_cr3; 
+    v->arch.hvm_svm.vmcb->cr3 = v->arch.hvm_vcpu.hw_cr[3];
+    svm_asid_inv_asid(v);
 }
 
 static void svm_flush_guest_tlbs(void)
@@ -639,24 +600,6 @@ static void svm_update_vtpr(struct vcpu 
     vmcb->vintr.fields.tpr = value & 0x0f;
 }
 
-static unsigned long svm_get_ctrl_reg(struct vcpu *v, unsigned int num)
-{
-    switch ( num )
-    {
-    case 0:
-        return v->arch.hvm_svm.cpu_shadow_cr0;
-    case 2:
-        return v->arch.hvm_svm.cpu_cr2;
-    case 3:
-        return v->arch.hvm_svm.cpu_cr3;
-    case 4:
-        return v->arch.hvm_svm.cpu_shadow_cr4;
-    default:
-        BUG();
-    }
-    return 0;                   /* dummy */
-}
-
 static void svm_sync_vmcb(struct vcpu *v)
 {
     struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
@@ -674,7 +617,7 @@ static unsigned long svm_get_segment_bas
 static unsigned long svm_get_segment_base(struct vcpu *v, enum x86_segment seg)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-    int long_mode = vmcb->cs.attr.fields.l && svm_long_mode_enabled(v);
+    int long_mode = vmcb->cs.attr.fields.l && hvm_long_mode_enabled(v);
 
     switch ( seg )
     {
@@ -748,7 +691,7 @@ static void svm_stts(struct vcpu *v)
      * then this is not necessary: no FPU activity can occur until the guest 
      * clears CR0.TS, and we will initialise the FPU when that happens.
      */
-    if ( !(v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_TS) )
+    if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
     {
         v->arch.hvm_svm.vmcb->exception_intercepts |= 1U << TRAP_no_device;
         vmcb->cr0 |= X86_CR0_TS;
@@ -949,7 +892,7 @@ static void svm_hvm_inject_exception(
 {
     struct vcpu *v = current;
     if ( trapnr == TRAP_page_fault )
-        v->arch.hvm_svm.vmcb->cr2 = v->arch.hvm_svm.cpu_cr2 = cr2;
+        v->arch.hvm_svm.vmcb->cr2 = v->arch.hvm_vcpu.guest_cr[2] = cr2;
     svm_inject_exception(v, trapnr, (errcode != -1), errcode);
 }
 
@@ -970,13 +913,8 @@ static struct hvm_function_table svm_fun
     .load_cpu_guest_regs  = svm_load_cpu_guest_regs,
     .save_cpu_ctxt        = svm_save_vmcb_ctxt,
     .load_cpu_ctxt        = svm_load_vmcb_ctxt,
-    .paging_enabled       = svm_paging_enabled,
-    .long_mode_enabled    = svm_long_mode_enabled,
-    .pae_enabled          = svm_pae_enabled,
-    .nx_enabled           = svm_nx_enabled,
     .interrupts_enabled   = svm_interrupts_enabled,
     .guest_x86_mode       = svm_guest_x86_mode,
-    .get_guest_ctrl_reg   = svm_get_ctrl_reg,
     .get_segment_base     = svm_get_segment_base,
     .get_segment_register = svm_get_segment_register,
     .update_host_cr3      = svm_update_host_cr3,
@@ -1075,7 +1013,7 @@ static void svm_do_no_device_fault(struc
     setup_fpu(v);    
     vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
 
-    if ( !(v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_TS) )
+    if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
         vmcb->cr0 &= ~X86_CR0_TS;
 }
 
@@ -1347,7 +1285,7 @@ static int svm_get_io_address(
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
     /* If we're in long mode, don't check the segment presence & limit */
-    long_mode = vmcb->cs.attr.fields.l && svm_long_mode_enabled(v);
+    long_mode = vmcb->cs.attr.fields.l && hvm_long_mode_enabled(v);
 
     /* d field of cs.attr is 1 for 32-bit, 0 for 16 or 64 bit. 
      * l field combined with EFER_LMA says whether it's 16 or 64 bit. 
@@ -1650,7 +1588,7 @@ static int svm_set_cr0(unsigned long val
 static int svm_set_cr0(unsigned long value)
 {
     struct vcpu *v = current;
-    unsigned long mfn, old_value = v->arch.hvm_svm.cpu_shadow_cr0;
+    unsigned long mfn, old_value = v->arch.hvm_vcpu.guest_cr[0];
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     unsigned long old_base_mfn;
   
@@ -1687,25 +1625,25 @@ static int svm_set_cr0(unsigned long val
     {
         if ( svm_lme_is_set(v) )
         {
-            if ( !svm_cr4_pae_is_set(v) )
+            if ( !(v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE) )
             {
                 HVM_DBG_LOG(DBG_LEVEL_1, "Enable paging before PAE enable");
                 svm_inject_exception(v, TRAP_gp_fault, 1, 0);
                 return 0;
             }
             HVM_DBG_LOG(DBG_LEVEL_1, "Enable the Long mode");
-            v->arch.hvm_svm.cpu_shadow_efer |= EFER_LMA;
+            v->arch.hvm_vcpu.guest_efer |= EFER_LMA;
             vmcb->efer |= EFER_LMA | EFER_LME;
         }
 
         if ( !paging_mode_hap(v->domain) )
         {
             /* The guest CR3 must be pointing to the guest physical. */
-            mfn = get_mfn_from_gpfn(v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT);
+            mfn = get_mfn_from_gpfn(v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT);
             if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain))
             {
                 gdprintk(XENLOG_ERR, "Invalid CR3 value = %lx (mfn=%lx)\n", 
-                         v->arch.hvm_svm.cpu_cr3, mfn);
+                         v->arch.hvm_vcpu.guest_cr[3], mfn);
                 domain_crash(v->domain);
                 return 0;
             }
@@ -1717,27 +1655,27 @@ static int svm_set_cr0(unsigned long val
                 put_page(mfn_to_page(old_base_mfn));
 
             HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
-                        v->arch.hvm_vmx.cpu_cr3, mfn);
+                        v->arch.hvm_vcpu.guest_cr[3], mfn);
         }
     }
     else if ( !(value & X86_CR0_PG) && (old_value & X86_CR0_PG) )
     {
         /* When CR0.PG is cleared, LMA is cleared immediately. */
-        if ( svm_long_mode_enabled(v) )
+        if ( hvm_long_mode_enabled(v) )
         {
             vmcb->efer &= ~(EFER_LME | EFER_LMA);
-            v->arch.hvm_svm.cpu_shadow_efer &= ~EFER_LMA;
-        }
-
-        if ( !paging_mode_hap(v->domain) && v->arch.hvm_svm.cpu_cr3 )
+            v->arch.hvm_vcpu.guest_efer &= ~EFER_LMA;
+        }
+
+        if ( !paging_mode_hap(v->domain) && v->arch.hvm_vcpu.guest_cr[3] )
         {
             put_page(mfn_to_page(get_mfn_from_gpfn(
-                v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT)));
+                v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT)));
             v->arch.guest_table = pagetable_null();
         }
     }
 
-    vmcb->cr0 = v->arch.hvm_svm.cpu_shadow_cr0 = value;
+    vmcb->cr0 = v->arch.hvm_vcpu.guest_cr[0] = value;
     if ( !paging_mode_hap(v->domain) )
         vmcb->cr0 |= X86_CR0_PG | X86_CR0_WP;
 
@@ -1763,16 +1701,16 @@ static void mov_from_cr(int cr, int gp, 
     switch ( cr )
     {
     case 0:
-        value = v->arch.hvm_svm.cpu_shadow_cr0;
+        value = v->arch.hvm_vcpu.guest_cr[0];
         break;
     case 2:
         value = vmcb->cr2;
         break;
     case 3:
-        value = (unsigned long)v->arch.hvm_svm.cpu_cr3;
+        value = (unsigned long)v->arch.hvm_vcpu.guest_cr[3];
         break;
     case 4:
-        value = (unsigned long)v->arch.hvm_svm.cpu_shadow_cr4;
+        value = (unsigned long)v->arch.hvm_vcpu.guest_cr[4];
         break;
     case 8:
         value = (unsigned long)vlapic_get_reg(vlapic, APIC_TASKPRI);
@@ -1797,7 +1735,7 @@ static void mov_from_cr(int cr, int gp, 
  */
 static int mov_to_cr(int gpreg, int cr, struct cpu_user_regs *regs)
 {
-    unsigned long value, old_cr, old_base_mfn, mfn;
+    unsigned long value, old_cr;
     struct vcpu *v = current;
     struct vlapic *vlapic = vcpu_vlapic(v);
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
@@ -1815,60 +1753,9 @@ static int mov_to_cr(int gpreg, int cr, 
         return svm_set_cr0(value);
 
     case 3:
-        if ( paging_mode_hap(v->domain) )
-        {
-            vmcb->cr3 = v->arch.hvm_svm.cpu_cr3 = value;
-            break;
-        }
-
-        /* If paging is not enabled yet, simply copy the value to CR3. */
-        if ( !svm_paging_enabled(v) )
-        {
-            v->arch.hvm_svm.cpu_cr3 = value;
-            break;
-        }
-
-        /* We make a new one if the shadow does not exist. */
-        if ( value == v->arch.hvm_svm.cpu_cr3 )
-        {
-            /* 
-             * This is simple TLB flush, implying the guest has 
-             * removed some translation or changed page attributes.
-             * We simply invalidate the shadow.
-             */
-            mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
-            if ( mfn != pagetable_get_pfn(v->arch.guest_table) )
-                goto bad_cr3;
-            paging_update_cr3(v);
-            /* signal paging update to ASID handler */
-            svm_asid_g_mov_to_cr3 (v);
-        }
-        else 
-        {
-            /*
-             * If different, make a shadow. Check if the PDBR is valid
-             * first.
-             */
-            HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
-            mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
-            if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
-                goto bad_cr3;
-
-            old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
-            v->arch.guest_table = pagetable_from_pfn(mfn);
-
-            if ( old_base_mfn )
-                put_page(mfn_to_page(old_base_mfn));
-
-            v->arch.hvm_svm.cpu_cr3 = value;
-            update_cr3(v);
-            HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
-            /* signal paging update to ASID handler */
-            svm_asid_g_mov_to_cr3 (v);
-        }
-        break;
-
-    case 4: /* CR4 */
+        return hvm_set_cr3(value);
+
+    case 4:
         if ( value & HVM_CR4_GUEST_RESERVED_BITS )
         {
             HVM_DBG_LOG(DBG_LEVEL_1,
@@ -1880,7 +1767,7 @@ static int mov_to_cr(int gpreg, int cr, 
 
         if ( paging_mode_hap(v->domain) )
         {
-            v->arch.hvm_svm.cpu_shadow_cr4 = value;
+            v->arch.hvm_vcpu.guest_cr[4] = value;
             vmcb->cr4 = value | (HVM_CR4_HOST_MASK & ~X86_CR4_PAE);
             paging_update_paging_modes(v);
             /* signal paging update to ASID handler */
@@ -1888,15 +1775,15 @@ static int mov_to_cr(int gpreg, int cr, 
             break;
         }
 
-        old_cr = v->arch.hvm_svm.cpu_shadow_cr4;
+        old_cr = v->arch.hvm_vcpu.guest_cr[4];
         if ( value & X86_CR4_PAE && !(old_cr & X86_CR4_PAE) )
         {
-            if ( svm_pgbit_test(v) )
+            if ( hvm_paging_enabled(v) )
             {
 #if CONFIG_PAGING_LEVELS >= 3
                 /* The guest is a 32-bit PAE guest. */
                 unsigned long mfn, old_base_mfn;
-                mfn = get_mfn_from_gpfn(v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT);
+                mfn = get_mfn_from_gpfn(v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT);
                 if ( !mfn_valid(mfn) || 
                      !get_page(mfn_to_page(mfn), v->domain) )
                     goto bad_cr3;
@@ -1914,19 +1801,19 @@ static int mov_to_cr(int gpreg, int cr, 
 
                 HVM_DBG_LOG(DBG_LEVEL_VMMU, 
                             "Update CR3 value = %lx, mfn = %lx",
-                            v->arch.hvm_svm.cpu_cr3, mfn);
+                            v->arch.hvm_vcpu.guest_cr[3], mfn);
 #endif
             }
         } 
         else if ( !(value & X86_CR4_PAE) )
         {
-            if ( svm_long_mode_enabled(v) )
+            if ( hvm_long_mode_enabled(v) )
             {
                 svm_inject_exception(v, TRAP_gp_fault, 1, 0);
             }
         }
 
-        v->arch.hvm_svm.cpu_shadow_cr4 = value;
+        v->arch.hvm_vcpu.guest_cr[4] = value;
         vmcb->cr4 = value | HVM_CR4_HOST_MASK;
   
         /*
@@ -2025,18 +1912,18 @@ static int svm_cr_access(struct vcpu *v,
         setup_fpu(current);
         vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
         vmcb->cr0 &= ~X86_CR0_TS; /* clear TS */
-        v->arch.hvm_svm.cpu_shadow_cr0 &= ~X86_CR0_TS; /* clear TS */
+        v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS; /* clear TS */
         break;
 
     case INSTR_LMSW:
         gpreg = decode_src_reg(prefix, buffer[index+2]);
         value = get_reg(gpreg, regs, vmcb) & 0xF;
-        value = (v->arch.hvm_svm.cpu_shadow_cr0 & ~0xF) | value;
+        value = (v->arch.hvm_vcpu.guest_cr[0] & ~0xF) | value;
         result = svm_set_cr0(value);
         break;
 
     case INSTR_SMSW:
-        value = v->arch.hvm_svm.cpu_shadow_cr0 & 0xFFFF;
+        value = v->arch.hvm_vcpu.guest_cr[0] & 0xFFFF;
         modrm = buffer[index+2];
         addr_size = svm_guest_x86_mode(v);
         if ( addr_size < 2 )
@@ -2129,7 +2016,7 @@ static void svm_do_msr_access(
             break;
 
         case MSR_EFER:
-            msr_content = v->arch.hvm_svm.cpu_shadow_efer;
+            msr_content = v->arch.hvm_vcpu.guest_efer;
             break;
 
         case MSR_K8_MC4_MISC: /* Threshold register */
@@ -2335,29 +2222,28 @@ static int svm_reset_to_realmode(struct 
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
-    /* clear the vmcb and user regs */
     memset(regs, 0, sizeof(struct cpu_user_regs));
-   
-    /* VMCB State */
+
     vmcb->cr0 = X86_CR0_ET | X86_CR0_PG | X86_CR0_WP;
-    v->arch.hvm_svm.cpu_shadow_cr0 = X86_CR0_ET;
+    v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_ET;
 
     vmcb->cr2 = 0;
     vmcb->efer = EFER_SVME;
 
     vmcb->cr4 = HVM_CR4_HOST_MASK;
-    v->arch.hvm_svm.cpu_shadow_cr4 = 0;
-
-    if ( paging_mode_hap(v->domain) ) {
-        vmcb->cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
-        vmcb->cr4 = v->arch.hvm_svm.cpu_shadow_cr4 |
-                    (HVM_CR4_HOST_MASK & ~X86_CR4_PAE);
+    v->arch.hvm_vcpu.guest_cr[4] = 0;
+
+    if ( paging_mode_hap(v->domain) )
+    {
+        vmcb->cr0 = v->arch.hvm_vcpu.guest_cr[0];
+        vmcb->cr4 = (v->arch.hvm_vcpu.guest_cr[4] |
+                     (HVM_CR4_HOST_MASK & ~X86_CR4_PAE));
     }
 
     /* This will jump to ROMBIOS */
     vmcb->rip = 0xFFF0;
 
-    /* setup the segment registers and all their hidden states */
+    /* Set up the segment registers and all their hidden states. */
     vmcb->cs.sel = 0xF000;
     vmcb->cs.attr.bytes = 0x089b;
     vmcb->cs.limit = 0xffff;
@@ -2495,7 +2381,7 @@ asmlinkage void svm_vmexit_handler(struc
             break;
         }
 
-        v->arch.hvm_svm.cpu_cr2 = vmcb->cr2 = va;
+        v->arch.hvm_vcpu.guest_cr[2] = vmcb->cr2 = va;
         svm_inject_exception(v, TRAP_page_fault, 1, regs->error_code);
         break;
     }
diff -r 7953164cebb6 -r 0f541efbb6d6 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c       Tue Aug 07 09:07:29 2007 +0100
+++ b/xen/arch/x86/hvm/svm/vmcb.c       Tue Aug 07 17:30:09 2007 +0100
@@ -111,7 +111,7 @@ static int construct_vmcb(struct vcpu *v
     svm_segment_attributes_t attrib;
 
     /* TLB control, and ASID assigment. */
-    svm_asid_init_vcpu (v);
+    svm_asid_init_vcpu(v);
 
     vmcb->general1_intercepts = 
         GENERAL1_INTERCEPT_INTR         | GENERAL1_INTERCEPT_NMI         |
@@ -218,25 +218,24 @@ static int construct_vmcb(struct vcpu *v
 
     /* Guest CR0. */
     vmcb->cr0 = read_cr0();
-    arch_svm->cpu_shadow_cr0 = vmcb->cr0 & ~(X86_CR0_PG | X86_CR0_TS);
-    vmcb->cr0 |= X86_CR0_WP;
+    v->arch.hvm_vcpu.guest_cr[0] = vmcb->cr0 & ~(X86_CR0_PG | X86_CR0_TS);
 
     /* Guest CR4. */
-    arch_svm->cpu_shadow_cr4 =
+    v->arch.hvm_vcpu.guest_cr[4] =
         read_cr4() & ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE);
-    vmcb->cr4 = arch_svm->cpu_shadow_cr4 | HVM_CR4_HOST_MASK;
+    vmcb->cr4 = v->arch.hvm_vcpu.guest_cr[4] | HVM_CR4_HOST_MASK;
 
     paging_update_paging_modes(v);
-    vmcb->cr3 = v->arch.hvm_vcpu.hw_cr3; 
+    vmcb->cr3 = v->arch.hvm_vcpu.hw_cr[3]; 
 
     if ( paging_mode_hap(v->domain) )
     {
-        vmcb->cr0 = arch_svm->cpu_shadow_cr0;
+        vmcb->cr0 = v->arch.hvm_vcpu.guest_cr[0];
         vmcb->np_enable = 1; /* enable nested paging */
         vmcb->g_pat = 0x0007040600070406ULL; /* guest PAT */
         vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
-        vmcb->cr4 = arch_svm->cpu_shadow_cr4 =
-                    (HVM_CR4_HOST_MASK & ~X86_CR4_PAE);
+        vmcb->cr4 = v->arch.hvm_vcpu.guest_cr[4] =
+            HVM_CR4_HOST_MASK & ~X86_CR4_PAE;
         vmcb->exception_intercepts = HVM_TRAP_MASK;
 
         /* No point in intercepting CR3/4 reads, because the hardware 
diff -r 7953164cebb6 -r 0f541efbb6d6 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c       Tue Aug 07 09:07:29 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmcs.c       Tue Aug 07 17:30:09 2007 +0100
@@ -506,17 +506,17 @@ static void construct_vmcs(struct vcpu *
 
     /* Guest CR0. */
     cr0 = read_cr0();
-    v->arch.hvm_vmx.cpu_cr0 = cr0;
-    __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
-    v->arch.hvm_vmx.cpu_shadow_cr0 = cr0 & ~(X86_CR0_PG | X86_CR0_TS);
-    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
+    v->arch.hvm_vcpu.hw_cr[0] = cr0;
+    __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
+    v->arch.hvm_vcpu.guest_cr[0] = cr0 & ~(X86_CR0_PG | X86_CR0_TS);
+    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
 
     /* Guest CR4. */
     cr4 = read_cr4();
     __vmwrite(GUEST_CR4, cr4 & ~X86_CR4_PSE);
-    v->arch.hvm_vmx.cpu_shadow_cr4 =
+    v->arch.hvm_vcpu.guest_cr[4] =
         cr4 & ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE);
-    __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
+    __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[4]);
 
     if ( cpu_has_vmx_tpr_shadow )
     {
diff -r 7953164cebb6 -r 0f541efbb6d6 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Tue Aug 07 09:07:29 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Tue Aug 07 17:30:09 2007 +0100
@@ -100,39 +100,11 @@ static void vmx_vcpu_destroy(struct vcpu
     vmx_destroy_vmcs(v);
 }
 
-static int vmx_paging_enabled(struct vcpu *v)
-{
-    unsigned long cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
-    return (cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
-}
-
-static int vmx_pgbit_test(struct vcpu *v)
-{
-    unsigned long cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
-    return cr0 & X86_CR0_PG;
-}
-
-static int vmx_pae_enabled(struct vcpu *v)
-{
-    unsigned long cr4 = v->arch.hvm_vmx.cpu_shadow_cr4;
-    return vmx_paging_enabled(v) && (cr4 & X86_CR4_PAE);
-}
-
-static int vmx_nx_enabled(struct vcpu *v)
-{
-    return v->arch.hvm_vmx.efer & EFER_NX;
-}
-
 #ifdef __x86_64__
 
 static int vmx_lme_is_set(struct vcpu *v)
 {
-    return v->arch.hvm_vmx.efer & EFER_LME;
-}
-
-static int vmx_long_mode_enabled(struct vcpu *v)
-{
-    return v->arch.hvm_vmx.efer & EFER_LMA;
+    return v->arch.hvm_vcpu.guest_efer & EFER_LME;
 }
 
 static void vmx_enable_long_mode(struct vcpu *v)
@@ -143,7 +115,7 @@ static void vmx_enable_long_mode(struct 
     vm_entry_value |= VM_ENTRY_IA32E_MODE;
     __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
 
-    v->arch.hvm_vmx.efer |= EFER_LMA;
+    v->arch.hvm_vcpu.guest_efer |= EFER_LMA;
 }
 
 static void vmx_disable_long_mode(struct vcpu *v)
@@ -154,7 +126,7 @@ static void vmx_disable_long_mode(struct
     vm_entry_value &= ~VM_ENTRY_IA32E_MODE;
     __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
 
-    v->arch.hvm_vmx.efer &= ~EFER_LMA;
+    v->arch.hvm_vcpu.guest_efer &= ~EFER_LMA;
 }
 
 static DEFINE_PER_CPU(struct vmx_msr_state, host_msr_state);
@@ -190,7 +162,7 @@ static enum handler_return long_mode_do_
     switch ( ecx )
     {
     case MSR_EFER:
-        msr_content = v->arch.hvm_vmx.efer;
+        msr_content = v->arch.hvm_vcpu.guest_efer;
         break;
 
     case MSR_FS_BASE:
@@ -204,7 +176,7 @@ static enum handler_return long_mode_do_
     case MSR_SHADOW_GS_BASE:
         msr_content = v->arch.hvm_vmx.shadow_gs;
     check_long_mode:
-        if ( !(vmx_long_mode_enabled(v)) )
+        if ( !(hvm_long_mode_enabled(v)) )
         {
             vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
             return HNDL_exception_raised;
@@ -263,9 +235,9 @@ static enum handler_return long_mode_do_
         }
 
         if ( (msr_content & EFER_LME)
-             &&  !(v->arch.hvm_vmx.efer & EFER_LME) )
-        {
-            if ( unlikely(vmx_paging_enabled(v)) )
+             &&  !(v->arch.hvm_vcpu.guest_efer & EFER_LME) )
+        {
+            if ( unlikely(hvm_paging_enabled(v)) )
             {
                 gdprintk(XENLOG_WARNING,
                          "Trying to set EFER.LME with paging enabled\n");
@@ -273,9 +245,9 @@ static enum handler_return long_mode_do_
             }
         }
         else if ( !(msr_content & EFER_LME)
-                  && (v->arch.hvm_vmx.efer & EFER_LME) )
-        {
-            if ( unlikely(vmx_paging_enabled(v)) )
+                  && (v->arch.hvm_vcpu.guest_efer & EFER_LME) )
+        {
+            if ( unlikely(hvm_paging_enabled(v)) )
             {
                 gdprintk(XENLOG_WARNING,
                          "Trying to clear EFER.LME with paging enabled\n");
@@ -283,17 +255,17 @@ static enum handler_return long_mode_do_
             }
         }
 
-        if ( (msr_content ^ v->arch.hvm_vmx.efer) & (EFER_NX|EFER_SCE) )
+        if ( (msr_content ^ v->arch.hvm_vcpu.guest_efer) & (EFER_NX|EFER_SCE) )
             write_efer((read_efer() & ~(EFER_NX|EFER_SCE)) |
                        (msr_content & (EFER_NX|EFER_SCE)));
 
-        v->arch.hvm_vmx.efer = msr_content;
+        v->arch.hvm_vcpu.guest_efer = msr_content;
         break;
 
     case MSR_FS_BASE:
     case MSR_GS_BASE:
     case MSR_SHADOW_GS_BASE:
-        if ( !vmx_long_mode_enabled(v) )
+        if ( !hvm_long_mode_enabled(v) )
             goto gp_fault;
 
         if ( !is_canonical_address(msr_content) )
@@ -394,21 +366,19 @@ static void vmx_restore_guest_msrs(struc
         clear_bit(i, &guest_flags);
     }
 
-    if ( (v->arch.hvm_vmx.efer ^ read_efer()) & (EFER_NX | EFER_SCE) )
+    if ( (v->arch.hvm_vcpu.guest_efer ^ read_efer()) & (EFER_NX | EFER_SCE) )
     {
         HVM_DBG_LOG(DBG_LEVEL_2,
                     "restore guest's EFER with value %lx",
-                    v->arch.hvm_vmx.efer);
+                    v->arch.hvm_vcpu.guest_efer);
         write_efer((read_efer() & ~(EFER_NX | EFER_SCE)) |
-                   (v->arch.hvm_vmx.efer & (EFER_NX | EFER_SCE)));
+                   (v->arch.hvm_vcpu.guest_efer & (EFER_NX | EFER_SCE)));
     }
 }
 
 #else  /* __i386__ */
 
 static int vmx_lme_is_set(struct vcpu *v)
-{ return 0; }
-static int vmx_long_mode_enabled(struct vcpu *v)
 { return 0; }
 static void vmx_enable_long_mode(struct vcpu *v)
 { BUG(); }
@@ -427,13 +397,13 @@ static void vmx_restore_host_msrs(void)
 
 static void vmx_restore_guest_msrs(struct vcpu *v)
 {
-    if ( (v->arch.hvm_vmx.efer ^ read_efer()) & EFER_NX )
+    if ( (v->arch.hvm_vcpu.guest_efer ^ read_efer()) & EFER_NX )
     {
         HVM_DBG_LOG(DBG_LEVEL_2,
                     "restore guest's EFER with value %lx",
-                    v->arch.hvm_vmx.efer);
+                    v->arch.hvm_vcpu.guest_efer);
         write_efer((read_efer() & ~EFER_NX) |
-                   (v->arch.hvm_vmx.efer & EFER_NX));
+                   (v->arch.hvm_vcpu.guest_efer & EFER_NX));
     }
 }
 
@@ -444,7 +414,7 @@ static enum handler_return long_mode_do_
 
     switch ( regs->ecx ) {
     case MSR_EFER:
-        msr_content = v->arch.hvm_vmx.efer;
+        msr_content = v->arch.hvm_vcpu.guest_efer;
         break;
 
     default:
@@ -475,10 +445,10 @@ static enum handler_return long_mode_do_
             return HNDL_exception_raised;
         }
 
-        if ( (msr_content ^ v->arch.hvm_vmx.efer) & EFER_NX )
+        if ( (msr_content ^ v->arch.hvm_vcpu.guest_efer) & EFER_NX )
             write_efer((read_efer() & ~EFER_NX) | (msr_content & EFER_NX));
 
-        v->arch.hvm_vmx.efer = msr_content;
+        v->arch.hvm_vcpu.guest_efer = msr_content;
         break;
 
     default:
@@ -501,12 +471,12 @@ static int vmx_guest_x86_mode(struct vcp
 
     ASSERT(v == current);
 
-    if ( unlikely(!(v->arch.hvm_vmx.cpu_shadow_cr0 & X86_CR0_PE)) )
+    if ( unlikely(!(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE)) )
         return 0;
     if ( unlikely(__vmread(GUEST_RFLAGS) & X86_EFLAGS_VM) )
         return 1;
     cs_ar_bytes = __vmread(GUEST_CS_AR_BYTES);
-    if ( vmx_long_mode_enabled(v) &&
+    if ( hvm_long_mode_enabled(v) &&
          likely(cs_ar_bytes & X86_SEG_AR_CS_LM_ACTIVE) )
         return 8;
     return (likely(cs_ar_bytes & X86_SEG_AR_DEF_OP_SIZE) ? 4 : 2);
@@ -551,12 +521,12 @@ void vmx_vmcs_save(struct vcpu *v, struc
     c->rsp = __vmread(GUEST_RSP);
     c->rflags = __vmread(GUEST_RFLAGS);
 
-    c->cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
-    c->cr2 = v->arch.hvm_vmx.cpu_cr2;
-    c->cr3 = v->arch.hvm_vmx.cpu_cr3;
-    c->cr4 = v->arch.hvm_vmx.cpu_shadow_cr4;
-
-    c->msr_efer = v->arch.hvm_vmx.efer;
+    c->cr0 = v->arch.hvm_vcpu.guest_cr[0];
+    c->cr2 = v->arch.hvm_vcpu.guest_cr[2];
+    c->cr3 = v->arch.hvm_vcpu.guest_cr[3];
+    c->cr4 = v->arch.hvm_vcpu.guest_cr[4];
+
+    c->msr_efer = v->arch.hvm_vcpu.guest_efer;
 
 #ifdef HVM_DEBUG_SUSPEND
     printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
@@ -635,22 +605,22 @@ int vmx_vmcs_restore(struct vcpu *v, str
     __vmwrite(GUEST_RSP, c->rsp);
     __vmwrite(GUEST_RFLAGS, c->rflags);
 
-    v->arch.hvm_vmx.cpu_cr0 = (c->cr0 | X86_CR0_PE | X86_CR0_PG |
-                               X86_CR0_NE | X86_CR0_WP | X86_CR0_ET);
-    __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
-    v->arch.hvm_vmx.cpu_shadow_cr0 = c->cr0;
-    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
-
-    v->arch.hvm_vmx.cpu_cr2 = c->cr2;
-
-    v->arch.hvm_vmx.efer = c->msr_efer;
+    v->arch.hvm_vcpu.hw_cr[0] = (c->cr0 | X86_CR0_PE | X86_CR0_PG |
+                                 X86_CR0_NE | X86_CR0_WP | X86_CR0_ET);
+    __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
+    v->arch.hvm_vcpu.guest_cr[0] = c->cr0;
+    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
+
+    v->arch.hvm_vcpu.guest_cr[2] = c->cr2;
+
+    v->arch.hvm_vcpu.guest_efer = c->msr_efer;
 
 #ifdef HVM_DEBUG_SUSPEND
     printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
            __func__, c->cr3, c->cr0, c->cr4);
 #endif
 
-    if ( !vmx_paging_enabled(v) )
+    if ( !hvm_paging_enabled(v) )
     {
         HVM_DBG_LOG(DBG_LEVEL_VMMU, "%s: paging not enabled.", __func__);
         goto skip_cr3;
@@ -672,14 +642,14 @@ int vmx_vmcs_restore(struct vcpu *v, str
         put_page(mfn_to_page(old_base_mfn));
 
  skip_cr3:
-    v->arch.hvm_vmx.cpu_cr3 = c->cr3;
-
-    if ( vmx_long_mode_enabled(v) )
+    v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
+
+    if ( hvm_long_mode_enabled(v) )
         vmx_enable_long_mode(v);
 
     __vmwrite(GUEST_CR4, (c->cr4 | HVM_CR4_HOST_MASK));
-    v->arch.hvm_vmx.cpu_shadow_cr4 = c->cr4;
-    __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
+    v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
+    __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[4]);
 
     __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
     __vmwrite(GUEST_IDTR_BASE, c->idtr_base);
@@ -884,10 +854,10 @@ static void vmx_store_cpu_guest_regs(
 
     if ( crs != NULL )
     {
-        crs[0] = v->arch.hvm_vmx.cpu_shadow_cr0;
-        crs[2] = v->arch.hvm_vmx.cpu_cr2;
-        crs[3] = v->arch.hvm_vmx.cpu_cr3;
-        crs[4] = v->arch.hvm_vmx.cpu_shadow_cr4;
+        crs[0] = v->arch.hvm_vcpu.guest_cr[0];
+        crs[2] = v->arch.hvm_vcpu.guest_cr[2];
+        crs[3] = v->arch.hvm_vcpu.guest_cr[3];
+        crs[4] = v->arch.hvm_vcpu.guest_cr[4];
     }
 
     vmx_vmcs_exit(v);
@@ -928,24 +898,6 @@ static void vmx_load_cpu_guest_regs(stru
     vmx_vmcs_exit(v);
 }
 
-static unsigned long vmx_get_ctrl_reg(struct vcpu *v, unsigned int num)
-{
-    switch ( num )
-    {
-    case 0:
-        return v->arch.hvm_vmx.cpu_cr0;
-    case 2:
-        return v->arch.hvm_vmx.cpu_cr2;
-    case 3:
-        return v->arch.hvm_vmx.cpu_cr3;
-    case 4:
-        return v->arch.hvm_vmx.cpu_shadow_cr4;
-    default:
-        BUG();
-    }
-    return 0;                   /* dummy */
-}
-
 static unsigned long vmx_get_segment_base(struct vcpu *v, enum x86_segment seg)
 {
     unsigned long base = 0;
@@ -953,7 +905,7 @@ static unsigned long vmx_get_segment_bas
 
     ASSERT(v == current);
 
-    if ( vmx_long_mode_enabled(v) &&
+    if ( hvm_long_mode_enabled(v) &&
          (__vmread(GUEST_CS_AR_BYTES) & X86_SEG_AR_CS_LM_ACTIVE) )
         long_mode = 1;
 
@@ -1059,10 +1011,10 @@ static void vmx_stts(struct vcpu *v)
      * then this is not necessary: no FPU activity can occur until the guest
      * clears CR0.TS, and we will initialise the FPU when that happens.
      */
-    if ( !(v->arch.hvm_vmx.cpu_shadow_cr0 & X86_CR0_TS) )
-    {
-        v->arch.hvm_vmx.cpu_cr0 |= X86_CR0_TS;
-        __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
+    if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
+    {
+        v->arch.hvm_vcpu.hw_cr[0] |= X86_CR0_TS;
+        __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
         __vm_set_bit(EXCEPTION_BITMAP, TRAP_no_device);
     }
 }
@@ -1139,7 +1091,7 @@ static void vmx_update_guest_cr3(struct 
 {
     ASSERT((v == current) || !vcpu_runnable(v));
     vmx_vmcs_enter(v);
-    __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr3);
+    __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr[3]);
     vmx_vmcs_exit(v);
 }
 
@@ -1156,7 +1108,7 @@ static void vmx_inject_exception(
     struct vcpu *v = current;
     vmx_inject_hw_exception(v, trapnr, errcode);
     if ( trapnr == TRAP_page_fault )
-        v->arch.hvm_vmx.cpu_cr2 = cr2;
+        v->arch.hvm_vcpu.guest_cr[2] = cr2;
 }
 
 static void vmx_update_vtpr(struct vcpu *v, unsigned long value)
@@ -1200,13 +1152,8 @@ static struct hvm_function_table vmx_fun
     .load_cpu_guest_regs  = vmx_load_cpu_guest_regs,
     .save_cpu_ctxt        = vmx_save_vmcs_ctxt,
     .load_cpu_ctxt        = vmx_load_vmcs_ctxt,
-    .paging_enabled       = vmx_paging_enabled,
-    .long_mode_enabled    = vmx_long_mode_enabled,
-    .pae_enabled          = vmx_pae_enabled,
-    .nx_enabled           = vmx_nx_enabled,
     .interrupts_enabled   = vmx_interrupts_enabled,
     .guest_x86_mode       = vmx_guest_x86_mode,
-    .get_guest_ctrl_reg   = vmx_get_ctrl_reg,
     .get_segment_base     = vmx_get_segment_base,
     .get_segment_register = vmx_get_segment_register,
     .update_host_cr3      = vmx_update_host_cr3,
@@ -1315,10 +1262,10 @@ static void vmx_do_no_device_fault(void)
     __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);
 
     /* Disable TS in guest CR0 unless the guest wants the exception too. */
-    if ( !(v->arch.hvm_vmx.cpu_shadow_cr0 & X86_CR0_TS) )
-    {
-        v->arch.hvm_vmx.cpu_cr0 &= ~X86_CR0_TS;
-        __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
+    if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
+    {
+        v->arch.hvm_vcpu.hw_cr[0] &= ~X86_CR0_TS;
+        __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
     }
 }
 
@@ -1773,7 +1720,7 @@ static void vmx_do_str_pio(unsigned long
 
     sign = regs->eflags & X86_EFLAGS_DF ? -1 : 1;
     ar_bytes = __vmread(GUEST_CS_AR_BYTES);
-    if ( vmx_long_mode_enabled(current) &&
+    if ( hvm_long_mode_enabled(current) &&
          (ar_bytes & X86_SEG_AR_CS_LM_ACTIVE) )
         long_mode = 1;
     addr = __vmread(GUEST_LINEAR_ADDRESS);
@@ -1900,9 +1847,9 @@ static void vmx_world_save(struct vcpu *
     c->esp = __vmread(GUEST_RSP);
     c->eflags = __vmread(GUEST_RFLAGS) & ~X86_EFLAGS_RF;
 
-    c->cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
-    c->cr3 = v->arch.hvm_vmx.cpu_cr3;
-    c->cr4 = v->arch.hvm_vmx.cpu_shadow_cr4;
+    c->cr0 = v->arch.hvm_vcpu.guest_cr[0];
+    c->cr3 = v->arch.hvm_vcpu.guest_cr[3];
+    c->cr4 = v->arch.hvm_vcpu.guest_cr[4];
 
     c->idtr_limit = __vmread(GUEST_IDTR_LIMIT);
     c->idtr_base = __vmread(GUEST_IDTR_BASE);
@@ -1959,13 +1906,13 @@ static int vmx_world_restore(struct vcpu
     __vmwrite(GUEST_RSP, c->esp);
     __vmwrite(GUEST_RFLAGS, c->eflags);
 
-    v->arch.hvm_vmx.cpu_shadow_cr0 = c->cr0;
-    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
-
-    if ( !vmx_paging_enabled(v) )
+    v->arch.hvm_vcpu.guest_cr[0] = c->cr0;
+    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
+
+    if ( !hvm_paging_enabled(v) )
         goto skip_cr3;
 
-    if ( c->cr3 == v->arch.hvm_vmx.cpu_cr3 )
+    if ( c->cr3 == v->arch.hvm_vcpu.guest_cr[3] )
     {
         /*
          * This is simple TLB flush, implying the guest has
@@ -1990,18 +1937,18 @@ static int vmx_world_restore(struct vcpu
         v->arch.guest_table = pagetable_from_pfn(mfn);
         if ( old_base_mfn )
              put_page(mfn_to_page(old_base_mfn));
-        v->arch.hvm_vmx.cpu_cr3 = c->cr3;
+        v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
     }
 
  skip_cr3:
-    if ( !vmx_paging_enabled(v) )
+    if ( !hvm_paging_enabled(v) )
         HVM_DBG_LOG(DBG_LEVEL_VMMU, "switching to vmxassist. use phys table");
     else
         HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %x", c->cr3);
 
     __vmwrite(GUEST_CR4, (c->cr4 | HVM_CR4_HOST_MASK));
-    v->arch.hvm_vmx.cpu_shadow_cr4 = c->cr4;
-    __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
+    v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
+    __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[4]);
 
     __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
     __vmwrite(GUEST_IDTR_BASE, c->idtr_base);
@@ -2184,22 +2131,22 @@ static int vmx_set_cr0(unsigned long val
         __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);
     }
 
-    old_cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
+    old_cr0 = v->arch.hvm_vcpu.guest_cr[0];
     paging_enabled = old_cr0 & X86_CR0_PG;
 
-    v->arch.hvm_vmx.cpu_cr0 = (value | X86_CR0_PE | X86_CR0_PG
-                               | X86_CR0_NE | X86_CR0_WP);
-    __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
-
-    v->arch.hvm_vmx.cpu_shadow_cr0 = value;
-    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
+    v->arch.hvm_vcpu.hw_cr[0] = (value | X86_CR0_PE | X86_CR0_PG |
+                                 X86_CR0_NE | X86_CR0_WP);
+    __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
+
+    v->arch.hvm_vcpu.guest_cr[0] = value;
+    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
 
     /* Trying to enable paging. */
     if ( (value & X86_CR0_PE) && (value & X86_CR0_PG) && !paging_enabled )
     {
-        if ( vmx_lme_is_set(v) && !vmx_long_mode_enabled(v) )
-        {
-            if ( !(v->arch.hvm_vmx.cpu_shadow_cr4 & X86_CR4_PAE) )
+        if ( vmx_lme_is_set(v) && !hvm_long_mode_enabled(v) )
+        {
+            if ( !(v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE) )
             {
                 HVM_DBG_LOG(DBG_LEVEL_1, "Guest enabled paging "
                             "with EFER.LME set but not CR4.PAE");
@@ -2214,11 +2161,11 @@ static int vmx_set_cr0(unsigned long val
         /*
          * The guest CR3 must be pointing to the guest physical.
          */
-        mfn = get_mfn_from_gpfn(v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT);
+        mfn = get_mfn_from_gpfn(v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT);
         if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
         {
             gdprintk(XENLOG_ERR, "Invalid CR3 value = %lx (mfn=%lx)\n",
-                     v->arch.hvm_vmx.cpu_cr3, mfn);
+                     v->arch.hvm_vcpu.guest_cr[3], mfn);
             domain_crash(v->domain);
             return 0;
         }
@@ -2232,7 +2179,7 @@ static int vmx_set_cr0(unsigned long val
             put_page(mfn_to_page(old_base_mfn));
 
         HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
-                    v->arch.hvm_vmx.cpu_cr3, mfn);
+                    v->arch.hvm_vcpu.guest_cr[3], mfn);
 
         paging_update_paging_modes(v);
     }
@@ -2242,13 +2189,13 @@ static int vmx_set_cr0(unsigned long val
          paging_enabled )
     {
         /* When CR0.PG is cleared, LMA is cleared immediately. */
-        if ( vmx_long_mode_enabled(v) )
+        if ( hvm_long_mode_enabled(v) )
             vmx_disable_long_mode(v);
 
-        if ( v->arch.hvm_vmx.cpu_cr3 )
+        if ( v->arch.hvm_vcpu.guest_cr[3] )
         {
             put_page(mfn_to_page(get_mfn_from_gpfn(
-                      v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT)));
+                      v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT)));
             v->arch.guest_table = pagetable_null();
         }
     }
@@ -2321,7 +2268,7 @@ static int vmx_set_cr0(unsigned long val
  */
 static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
 {
-    unsigned long value, old_cr, old_base_mfn, mfn;
+    unsigned long value, old_cr;
     struct vcpu *v = current;
     struct vlapic *vlapic = vcpu_vlapic(v);
 
@@ -2353,49 +2300,10 @@ static int mov_to_cr(int gp, int cr, str
         return vmx_set_cr0(value);
 
     case 3:
-        /*
-         * If paging is not enabled yet, simply copy the value to CR3.
-         */
-        if ( !vmx_paging_enabled(v) )
-        {
-            v->arch.hvm_vmx.cpu_cr3 = value;
-            break;
-        }
-
-        /*
-         * We make a new one if the shadow does not exist.
-         */
-        if ( value == v->arch.hvm_vmx.cpu_cr3 ) {
-            /*
-             * This is simple TLB flush, implying the guest has
-             * removed some translation or changed page attributes.
-             * We simply invalidate the shadow.
-             */
-            mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
-            if ( mfn != pagetable_get_pfn(v->arch.guest_table) )
-                goto bad_cr3;
-            paging_update_cr3(v);
-        } else {
-            /*
-             * If different, make a shadow. Check if the PDBR is valid
-             * first.
-             */
-            HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
-            mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
-            if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
-                goto bad_cr3;
-            old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
-            v->arch.guest_table = pagetable_from_pfn(mfn);
-            if ( old_base_mfn )
-                put_page(mfn_to_page(old_base_mfn));
-            v->arch.hvm_vmx.cpu_cr3 = value;
-            update_cr3(v);
-            HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
-        }
-        break;
-
-    case 4: /* CR4 */
-        old_cr = v->arch.hvm_vmx.cpu_shadow_cr4;
+        return hvm_set_cr3(value);
+
+    case 4:
+        old_cr = v->arch.hvm_vcpu.guest_cr[4];
 
         if ( value & HVM_CR4_GUEST_RESERVED_BITS )
         {
@@ -2408,12 +2316,12 @@ static int mov_to_cr(int gp, int cr, str
 
         if ( (value & X86_CR4_PAE) && !(old_cr & X86_CR4_PAE) )
         {
-            if ( vmx_pgbit_test(v) )
+            if ( hvm_paging_enabled(v) )
             {
 #if CONFIG_PAGING_LEVELS >= 3
                 /* The guest is a 32-bit PAE guest. */
                 unsigned long mfn, old_base_mfn;
-                mfn = get_mfn_from_gpfn(v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT);
+                mfn = get_mfn_from_gpfn(v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT);
                 if ( !mfn_valid(mfn) ||
                      !get_page(mfn_to_page(mfn), v->domain) )
                     goto bad_cr3;
@@ -2428,13 +2336,13 @@ static int mov_to_cr(int gp, int cr, str
 
                 HVM_DBG_LOG(DBG_LEVEL_VMMU,
                             "Update CR3 value = %lx, mfn = %lx",
-                            v->arch.hvm_vmx.cpu_cr3, mfn);
+                            v->arch.hvm_vcpu.guest_cr[3], mfn);
 #endif
             }
         }
         else if ( !(value & X86_CR4_PAE) )
         {
-            if ( unlikely(vmx_long_mode_enabled(v)) )
+            if ( unlikely(hvm_long_mode_enabled(v)) )
             {
                 HVM_DBG_LOG(DBG_LEVEL_1, "Guest cleared CR4.PAE while "
                             "EFER.LMA is set");
@@ -2444,8 +2352,8 @@ static int mov_to_cr(int gp, int cr, str
         }
 
         __vmwrite(GUEST_CR4, value | HVM_CR4_HOST_MASK);
-        v->arch.hvm_vmx.cpu_shadow_cr4 = value;
-        __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
+        v->arch.hvm_vcpu.guest_cr[4] = value;
+        __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[4]);
 
         /*
          * Writing to CR4 to modify the PSE, PGE, or PAE flag invalidates
@@ -2487,7 +2395,7 @@ static void mov_from_cr(int cr, int gp, 
     switch ( cr )
     {
     case 3:
-        value = (unsigned long)v->arch.hvm_vmx.cpu_cr3;
+        value = (unsigned long)v->arch.hvm_vcpu.guest_cr[3];
         break;
     case 8:
         value = (unsigned long)vlapic_get_reg(vlapic, APIC_TASKPRI);
@@ -2545,14 +2453,14 @@ static int vmx_cr_access(unsigned long e
         setup_fpu(v);
         __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);
 
-        v->arch.hvm_vmx.cpu_cr0 &= ~X86_CR0_TS; /* clear TS */
-        __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
-
-        v->arch.hvm_vmx.cpu_shadow_cr0 &= ~X86_CR0_TS; /* clear TS */
-        __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
+        v->arch.hvm_vcpu.hw_cr[0] &= ~X86_CR0_TS; /* clear TS */
+        __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
+
+        v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS; /* clear TS */
+        __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
         break;
     case TYPE_LMSW:
-        value = v->arch.hvm_vmx.cpu_shadow_cr0;
+        value = v->arch.hvm_vcpu.guest_cr[0];
         value = (value & ~0xF) |
             (((exit_qualification & LMSW_SOURCE_DATA) >> 16) & 0xF);
         return vmx_set_cr0(value);
@@ -2943,7 +2851,7 @@ asmlinkage void vmx_vmexit_handler(struc
                 break;
             }
 
-            v->arch.hvm_vmx.cpu_cr2 = exit_qualification;
+            v->arch.hvm_vcpu.guest_cr[2] = exit_qualification;
             vmx_inject_hw_exception(v, TRAP_page_fault, regs->error_code);
             break;
         case TRAP_nmi:
diff -r 7953164cebb6 -r 0f541efbb6d6 xen/arch/x86/hvm/vmx/x86_32/exits.S
--- a/xen/arch/x86/hvm/vmx/x86_32/exits.S       Tue Aug 07 09:07:29 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/x86_32/exits.S       Tue Aug 07 17:30:09 2007 +0100
@@ -74,7 +74,7 @@ ENTRY(vmx_asm_do_vmentry)
         jnz  vmx_process_softirqs
 
         call vmx_intr_assist
-        movl VCPU_vmx_cr2(%ebx),%eax
+        movl VCPU_hvm_guest_cr2(%ebx),%eax
         movl %eax,%cr2
         call vmx_trace_vmentry
 
diff -r 7953164cebb6 -r 0f541efbb6d6 xen/arch/x86/hvm/vmx/x86_64/exits.S
--- a/xen/arch/x86/hvm/vmx/x86_64/exits.S       Tue Aug 07 09:07:29 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/x86_64/exits.S       Tue Aug 07 17:30:09 2007 +0100
@@ -88,7 +88,7 @@ ENTRY(vmx_asm_do_vmentry)
         jnz   vmx_process_softirqs
 
         call vmx_intr_assist
-        movq VCPU_vmx_cr2(%rbx),%rax
+        movq VCPU_hvm_guest_cr2(%rbx),%rax
         movq %rax,%cr2
         call vmx_trace_vmentry
 
diff -r 7953164cebb6 -r 0f541efbb6d6 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Tue Aug 07 09:07:29 2007 +0100
+++ b/xen/arch/x86/mm.c Tue Aug 07 17:30:09 2007 +0100
@@ -394,8 +394,8 @@ void write_ptbase(struct vcpu *v)
     write_cr3(v->arch.cr3);
 }
 
-/* Should be called after CR3 is updated.
- * Updates vcpu->arch.cr3 and, for HVM guests, vcpu->arch.hvm_vcpu.cpu_cr3.
+/*
+ * Should be called after CR3 is updated.
  * 
  * Uses values found in vcpu->arch.(guest_table and guest_table_user), and
  * for HVM guests, arch.monitor_table and hvm's guest CR3.
diff -r 7953164cebb6 -r 0f541efbb6d6 xen/arch/x86/mm/hap/guest_walk.c
--- a/xen/arch/x86/mm/hap/guest_walk.c  Tue Aug 07 09:07:29 2007 +0100
+++ b/xen/arch/x86/mm/hap/guest_walk.c  Tue Aug 07 17:30:09 2007 +0100
@@ -62,7 +62,7 @@ unsigned long hap_gva_to_gfn(GUEST_PAGIN
 unsigned long hap_gva_to_gfn(GUEST_PAGING_LEVELS)(
     struct vcpu *v, unsigned long gva)
 {
-    unsigned long gcr3 = hvm_get_guest_ctrl_reg(v, 3);
+    unsigned long gcr3 = v->arch.hvm_vcpu.guest_cr[3];
     int mode = GUEST_PAGING_LEVELS;
     int lev, index;
     paddr_t gpa = 0;
diff -r 7953164cebb6 -r 0f541efbb6d6 xen/arch/x86/mm/hap/hap.c
--- a/xen/arch/x86/mm/hap/hap.c Tue Aug 07 09:07:29 2007 +0100
+++ b/xen/arch/x86/mm/hap/hap.c Tue Aug 07 17:30:09 2007 +0100
@@ -636,7 +636,7 @@ static void hap_update_paging_modes(stru
         v->arch.paging.mode = &hap_paging_real_mode;
     }
 
-    v->arch.paging.translate_enabled = !!hvm_paging_enabled(v);
+    v->arch.paging.translate_enabled = hvm_paging_enabled(v);
 
     if ( pagetable_is_null(v->arch.monitor_table) )
     {
diff -r 7953164cebb6 -r 0f541efbb6d6 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Tue Aug 07 09:07:29 2007 +0100
+++ b/xen/arch/x86/mm/shadow/common.c   Tue Aug 07 17:30:09 2007 +0100
@@ -2266,7 +2266,7 @@ static void sh_update_paging_modes(struc
         ASSERT(shadow_mode_translate(d));
         ASSERT(shadow_mode_external(d));
 
-        v->arch.paging.translate_enabled = !!hvm_paging_enabled(v);
+        v->arch.paging.translate_enabled = hvm_paging_enabled(v);
         if ( !v->arch.paging.translate_enabled )
         {
             /* Set v->arch.guest_table to use the p2m map, and choose
@@ -2347,7 +2347,7 @@ static void sh_update_paging_modes(struc
             SHADOW_PRINTK("new paging mode: d=%u v=%u pe=%d g=%u s=%u "
                           "(was g=%u s=%u)\n",
                           d->domain_id, v->vcpu_id,
-                          is_hvm_domain(d) ? !!hvm_paging_enabled(v) : 1,
+                          is_hvm_domain(d) ? hvm_paging_enabled(v) : 1,
                           v->arch.paging.mode->guest_levels,
                           v->arch.paging.mode->shadow.shadow_levels,
                           old_mode ? old_mode->guest_levels : 0,
diff -r 7953164cebb6 -r 0f541efbb6d6 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Tue Aug 07 09:07:29 2007 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c    Tue Aug 07 17:30:09 2007 +0100
@@ -175,7 +175,7 @@ guest_supports_superpages(struct vcpu *v
     /* The _PAGE_PSE bit must be honoured in HVM guests, whenever
      * CR4.PSE is set or the guest is in PAE or long mode */
     return (is_hvm_vcpu(v) && (GUEST_PAGING_LEVELS != 2 
-                             || (hvm_get_guest_ctrl_reg(v, 4) & X86_CR4_PSE)));
+                             || (v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PSE)));
 }
 
 static inline int
@@ -3525,7 +3525,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
         // Is paging enabled on this vcpu?
         if ( paging_vcpu_mode_translate(v) )
         {
-            gfn = _gfn(paddr_to_pfn(hvm_get_guest_ctrl_reg(v, 3)));
+            gfn = _gfn(paddr_to_pfn(v->arch.hvm_vcpu.guest_cr[3]));
             gmfn = vcpu_gfn_to_mfn(v, gfn);
             ASSERT(mfn_valid(gmfn));
             ASSERT(pagetable_get_pfn(v->arch.guest_table) == mfn_x(gmfn));
@@ -3576,11 +3576,11 @@ sh_update_cr3(struct vcpu *v, int do_loc
  
      if ( shadow_mode_external(d) && paging_vcpu_mode_translate(v) ) 
          /* Paging enabled: find where in the page the l3 table is */
-         guest_idx = guest_index((void *)hvm_get_guest_ctrl_reg(v, 3));
-    else
-        /* Paging disabled or PV: l3 is at the start of a page */ 
-        guest_idx = 0; 
-     
+         guest_idx = guest_index((void *)v->arch.hvm_vcpu.guest_cr[3]);
+     else
+         /* Paging disabled or PV: l3 is at the start of a page */ 
+         guest_idx = 0; 
+
      // Ignore the low 2 bits of guest_idx -- they are really just
      // cache control.
      guest_idx &= ~3;
@@ -3718,7 +3718,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
 
 
     ///
-    /// v->arch.hvm_vcpu.hw_cr3
+    /// v->arch.hvm_vcpu.hw_cr[3]
     ///
     if ( shadow_mode_external(d) )
     {
diff -r 7953164cebb6 -r 0f541efbb6d6 xen/arch/x86/x86_32/asm-offsets.c
--- a/xen/arch/x86/x86_32/asm-offsets.c Tue Aug 07 09:07:29 2007 +0100
+++ b/xen/arch/x86/x86_32/asm-offsets.c Tue Aug 07 17:30:09 2007 +0100
@@ -85,7 +85,7 @@ void __dummy__(void)
     BLANK();
 
     OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
-    OFFSET(VCPU_vmx_cr2, struct vcpu, arch.hvm_vmx.cpu_cr2);
+    OFFSET(VCPU_hvm_guest_cr2, struct vcpu, arch.hvm_vcpu.guest_cr[2]);
     BLANK();
 
     OFFSET(VMCB_rax, struct vmcb_struct, rax);
diff -r 7953164cebb6 -r 0f541efbb6d6 xen/arch/x86/x86_64/asm-offsets.c
--- a/xen/arch/x86/x86_64/asm-offsets.c Tue Aug 07 09:07:29 2007 +0100
+++ b/xen/arch/x86/x86_64/asm-offsets.c Tue Aug 07 17:30:09 2007 +0100
@@ -88,7 +88,7 @@ void __dummy__(void)
     BLANK();
 
     OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
-    OFFSET(VCPU_vmx_cr2, struct vcpu, arch.hvm_vmx.cpu_cr2);
+    OFFSET(VCPU_hvm_guest_cr2, struct vcpu, arch.hvm_vcpu.guest_cr[2]);
     BLANK();
 
     OFFSET(DOMAIN_is_32bit_pv, struct domain, arch.is_32bit_pv);
diff -r 7953164cebb6 -r 0f541efbb6d6 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Tue Aug 07 09:07:29 2007 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h     Tue Aug 07 17:30:09 2007 +0100
@@ -95,23 +95,13 @@ struct hvm_function_table {
 
     /*
      * Examine specifics of the guest state:
-     * 1) determine whether paging is enabled,
-     * 2) determine whether long mode is enabled,
-     * 3) determine whether PAE paging is enabled,
-     * 4) determine whether NX is enabled,
-     * 5) determine whether interrupts are enabled or not,
-     * 6) determine the mode the guest is running in,
-     * 7) return the current guest control-register value
-     * 8) return the current guest segment descriptor base
-     * 9) return the current guest segment descriptor
-     */
-    int (*paging_enabled)(struct vcpu *v);
-    int (*long_mode_enabled)(struct vcpu *v);
-    int (*pae_enabled)(struct vcpu *v);
-    int (*nx_enabled)(struct vcpu *v);
+     * 1) determine whether interrupts are enabled or not
+     * 2) determine the mode the guest is running in
+     * 3) return the current guest segment descriptor base
+     * 4) return the current guest segment descriptor
+     */
     int (*interrupts_enabled)(struct vcpu *v, enum hvm_intack);
     int (*guest_x86_mode)(struct vcpu *v);
-    unsigned long (*get_guest_ctrl_reg)(struct vcpu *v, unsigned int num);
     unsigned long (*get_segment_base)(struct vcpu *v, enum x86_segment seg);
     void (*get_segment_register)(struct vcpu *v, enum x86_segment seg,
                                  struct segment_register *reg);
@@ -189,38 +179,24 @@ void hvm_set_guest_time(struct vcpu *v, 
 void hvm_set_guest_time(struct vcpu *v, u64 gtime);
 u64 hvm_get_guest_time(struct vcpu *v);
 
-static inline int
-hvm_paging_enabled(struct vcpu *v)
-{
-    return hvm_funcs.paging_enabled(v);
-}
+#define hvm_paging_enabled(v) \
+    (!!((v)->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PG))
+#define hvm_pae_enabled(v) \
+    (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE))
+#define hvm_nx_enabled(v) \
+    (!!((v)->arch.hvm_vcpu.guest_efer & EFER_NX))
 
 #ifdef __x86_64__
-static inline int
-hvm_long_mode_enabled(struct vcpu *v)
-{
-    return hvm_funcs.long_mode_enabled(v);
-}
+#define hvm_long_mode_enabled(v) \
+    ((v)->arch.hvm_vcpu.guest_efer & EFER_LMA)
 #else
 #define hvm_long_mode_enabled(v) (v,0)
 #endif
 
 static inline int
-hvm_pae_enabled(struct vcpu *v)
-{
-    return hvm_funcs.pae_enabled(v);
-}
-
-static inline int
 hvm_interrupts_enabled(struct vcpu *v, enum hvm_intack type)
 {
     return hvm_funcs.interrupts_enabled(v, type);
-}
-
-static inline int
-hvm_nx_enabled(struct vcpu *v)
-{
-    return hvm_funcs.nx_enabled(v);
 }
 
 static inline int
@@ -255,12 +231,6 @@ hvm_flush_guest_tlbs(void)
 
 void hvm_hypercall_page_initialise(struct domain *d,
                                    void *hypercall_page);
-
-static inline unsigned long
-hvm_get_guest_ctrl_reg(struct vcpu *v, unsigned int num)
-{
-    return hvm_funcs.get_guest_ctrl_reg(v, num);
-}
 
 static inline unsigned long
 hvm_get_segment_base(struct vcpu *v, enum x86_segment seg)
diff -r 7953164cebb6 -r 0f541efbb6d6 xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Tue Aug 07 09:07:29 2007 +0100
+++ b/xen/include/asm-x86/hvm/support.h Tue Aug 07 17:30:09 2007 +0100
@@ -234,4 +234,6 @@ void hvm_hlt(unsigned long rflags);
 void hvm_hlt(unsigned long rflags);
 void hvm_triple_fault(void);
 
+int hvm_set_cr3(unsigned long value);
+
 #endif /* __ASM_X86_HVM_SUPPORT_H__ */
diff -r 7953164cebb6 -r 0f541efbb6d6 xen/include/asm-x86/hvm/svm/asid.h
--- a/xen/include/asm-x86/hvm/svm/asid.h        Tue Aug 07 09:07:29 2007 +0100
+++ b/xen/include/asm-x86/hvm/svm/asid.h        Tue Aug 07 17:30:09 2007 +0100
@@ -32,16 +32,7 @@ void svm_asid_inv_asid(struct vcpu *v);
 void svm_asid_inv_asid(struct vcpu *v);
 void svm_asid_inc_generation(void);
 
-/*
- * ASID related, guest triggered events.
- */
-
 static inline void svm_asid_g_update_paging(struct vcpu *v)
-{
-    svm_asid_inv_asid(v);
-}
-
-static inline void svm_asid_g_mov_to_cr3(struct vcpu *v)
 {
     svm_asid_inv_asid(v);
 }
diff -r 7953164cebb6 -r 0f541efbb6d6 xen/include/asm-x86/hvm/svm/vmcb.h
--- a/xen/include/asm-x86/hvm/svm/vmcb.h        Tue Aug 07 09:07:29 2007 +0100
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h        Tue Aug 07 17:30:09 2007 +0100
@@ -440,11 +440,6 @@ struct arch_svm_struct {
     u32                *msrpm;
     int                 launch_core;
     bool_t              vmcb_in_sync;     /* VMCB sync'ed with VMSAVE? */
-    unsigned long       cpu_shadow_cr0;   /* Guest value for CR0 */
-    unsigned long       cpu_shadow_cr4;   /* Guest value for CR4 */
-    unsigned long       cpu_shadow_efer;  /* Guest value for EFER */
-    unsigned long       cpu_cr2;
-    unsigned long       cpu_cr3;
 };
 
 struct vmcb_struct *alloc_vmcb(void);
diff -r 7953164cebb6 -r 0f541efbb6d6 xen/include/asm-x86/hvm/vcpu.h
--- a/xen/include/asm-x86/hvm/vcpu.h    Tue Aug 07 09:07:29 2007 +0100
+++ b/xen/include/asm-x86/hvm/vcpu.h    Tue Aug 07 17:30:09 2007 +0100
@@ -29,7 +29,17 @@
 #define HVM_VCPU_INIT_SIPI_SIPI_STATE_WAIT_SIPI     1
 
 struct hvm_vcpu {
-    unsigned long       hw_cr3;     /* value we give to HW to use */
+    /* Guest control-register and EFER values, just as the guest sees them. */
+    unsigned long       guest_cr[5];
+    unsigned long       guest_efer;
+
+    /*
+     * Processor-visible CR0-4 while guest executes.
+     * Only CR3 is guaranteed to be valid: all other array entries are private
+     * to the specific HVM implementation (e.g., VMX, SVM).
+     */
+    unsigned long       hw_cr[5];
+
     struct hvm_io_op    io_op;
     struct vlapic       vlapic;
     s64                 cache_tsc_offset;
diff -r 7953164cebb6 -r 0f541efbb6d6 xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h        Tue Aug 07 09:07:29 2007 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h        Tue Aug 07 17:30:09 2007 +0100
@@ -67,17 +67,11 @@ struct arch_vmx_struct {
     /* Cache of cpu execution control. */
     u32                  exec_control;
 
-    unsigned long        cpu_cr0; /* copy of guest CR0 */
-    unsigned long        cpu_shadow_cr0; /* copy of guest read shadow CR0 */
-    unsigned long        cpu_shadow_cr4; /* copy of guest read shadow CR4 */
-    unsigned long        cpu_cr2; /* save CR2 */
-    unsigned long        cpu_cr3;
 #ifdef __x86_64__
     struct vmx_msr_state msr_state;
     unsigned long        shadow_gs;
     unsigned long        cstar;
 #endif
-    unsigned long        efer;
 
     /* Following fields are all specific to vmxassist. */
     unsigned long        vmxassist_enabled:1;
diff -r 7953164cebb6 -r 0f541efbb6d6 xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h Tue Aug 07 09:07:29 2007 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h Tue Aug 07 17:30:09 2007 +0100
@@ -279,8 +279,8 @@ static inline void __vmx_inject_exceptio
 
     __vmwrite(VM_ENTRY_INTR_INFO, intr_fields);
 
-    if (trap == TRAP_page_fault)
-        HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_vmx.cpu_cr2, error_code);
+    if ( trap == TRAP_page_fault )
+        HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_vcpu.guest_cr[2], error_code);
     else
         HVMTRACE_2D(INJ_EXC, v, trap, error_code);
 }
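
For readers tracking the interface change: after this cleanup the per-implementation
copies (arch.hvm_vmx.cpu_cr3, cpu_shadow_cr0/cr4, cpu_cr2, efer and the SVM
equivalents) are gone, and common code reads the architected guest state from
v->arch.hvm_vcpu.guest_cr[]/guest_efer or the new predicate macros. The sketch
below is illustrative only and is not part of the changeset; the field names and
macros (guest_cr[], guest_efer, hvm_paging_enabled() and friends) come from the
patch, while the helper function itself is hypothetical.

    /* Illustrative sketch only -- not part of the changeset above. */
    static void example_dump_guest_paging_state(struct vcpu *v)
    {
        /* Architected guest values, exactly as the guest sees them. */
        unsigned long cr0 = v->arch.hvm_vcpu.guest_cr[0];
        unsigned long cr3 = v->arch.hvm_vcpu.guest_cr[3];
        unsigned long cr4 = v->arch.hvm_vcpu.guest_cr[4];

        /* The old hvm_funcs.paging_enabled()/pae_enabled()/nx_enabled()
         * hooks are now plain macros over this common state. */
        printk("paging=%d pae=%d nx=%d cr0=%lx cr3=%lx cr4=%lx efer=%lx\n",
               hvm_paging_enabled(v), hvm_pae_enabled(v), hvm_nx_enabled(v),
               cr0, cr3, cr4, v->arch.hvm_vcpu.guest_efer);
    }

Note that hw_cr[] holds the values actually loaded into the processor while the
guest runs; per the new comment in vcpu.h, only hw_cr[3] is meaningful to common
code, the other entries being private to the VMX/SVM implementations.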

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
