[Xen-changelog] [xen-unstable] vmx: Clean up and fix guest MSR load/save handling

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] vmx: Clean up and fix guest MSR load/save handling:
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 19 Jun 2008 07:10:14 -0700
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1213870150 -3600
# Node ID 3da148fb7d9b21afd6a8c023a8e787aec86d1621
# Parent  b55f6d42668d862170df03dc42995a0600f93fc6
vmx: Clean up and fix guest MSR load/save handling:
 1. msr_bitmap/msr_area/msr_host_area must be freed when a vcpu is
    destroyed
 2. vmx_create_vmcs()/vmx_destroy_vmcs() are only ever called once,
    and can hence be simplified slightly
 3. Change the vmx_*_msr() interfaces to make it crystal clear that they
    operate only on current: its VMCS is guaranteed to be loaded, so the
    internal __vmwrite() is safe, and there are no cross-vcpu
    concurrency races.
 4. Change vmx_add_*_msr() implementation to make it crystal clear
    that msr_area is not dereferenced before it is allocated.

Only (1) is a bug fix. (2)-(4) are for code clarity.
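
For reference, a minimal caller-side sketch of the reworked interfaces
(function names are taken from the patch below; the wrapper
handle_msr_write() is hypothetical, not an actual call site). The
helpers now act implicitly on current, so they may only be invoked from
that vcpu's own context:

    /* Hedged sketch: a hypothetical write-intercept path running on
     * 'current'. vmx_add_guest_msr() lazily allocates msr_area on
     * first use and returns 0 if the MSR is already tracked;
     * vmx_write_guest_msr() returns -ESRCH for an untracked MSR. */
    static int handle_msr_write(u32 msr, u64 val)
    {
        int rc = vmx_add_guest_msr(msr);      /* 0, -ENOMEM or -ENOSPC */

        if ( rc != 0 )
            return rc;

        return vmx_write_guest_msr(msr, val); /* 0 on success */
    }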

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/hvm/vmx/vmcs.c        |  100 ++++++++++++++++++-------------------
 xen/arch/x86/hvm/vmx/vmx.c         |    8 +-
 xen/arch/x86/hvm/vmx/vpmu_core2.c  |   20 +++----
 xen/include/asm-x86/hvm/vmx/vmcs.h |    8 +-
 4 files changed, 68 insertions(+), 68 deletions(-)

diff -r b55f6d42668d -r 3da148fb7d9b xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c       Wed Jun 18 14:17:10 2008 +0100
+++ b/xen/arch/x86/hvm/vmx/vmcs.c       Thu Jun 19 11:09:10 2008 +0100
@@ -677,10 +677,11 @@ static int construct_vmcs(struct vcpu *v
     return 0;
 }
 
-int vmx_read_guest_msr(struct vcpu *v, u32 msr, u64 *val)
-{
-    unsigned int i, msr_count = v->arch.hvm_vmx.msr_count;
-    const struct vmx_msr_entry *msr_area = v->arch.hvm_vmx.msr_area;
+int vmx_read_guest_msr(u32 msr, u64 *val)
+{
+    struct vcpu *curr = current;
+    unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
+    const struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
 
     for ( i = 0; i < msr_count; i++ )
     {
@@ -694,10 +695,11 @@ int vmx_read_guest_msr(struct vcpu *v, u
     return -ESRCH;
 }
 
-int vmx_write_guest_msr(struct vcpu *v, u32 msr, u64 val)
-{
-    unsigned int i, msr_count = v->arch.hvm_vmx.msr_count;
-    struct vmx_msr_entry *msr_area = v->arch.hvm_vmx.msr_area;
+int vmx_write_guest_msr(u32 msr, u64 val)
+{
+    struct vcpu *curr = current;
+    unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
+    struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
 
     for ( i = 0; i < msr_count; i++ )
     {
@@ -711,10 +713,20 @@ int vmx_write_guest_msr(struct vcpu *v, 
     return -ESRCH;
 }
 
-int vmx_add_guest_msr(struct vcpu *v, u32 msr)
-{
-    unsigned int i, msr_count = v->arch.hvm_vmx.msr_count;
-    struct vmx_msr_entry *msr_area = v->arch.hvm_vmx.msr_area;
+int vmx_add_guest_msr(u32 msr)
+{
+    struct vcpu *curr = current;
+    unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
+    struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
+
+    if ( msr_area == NULL )
+    {
+        if ( (msr_area = alloc_xenheap_page()) == NULL )
+            return -ENOMEM;
+        curr->arch.hvm_vmx.msr_area = msr_area;
+        __vmwrite(VM_EXIT_MSR_STORE_ADDR, virt_to_maddr(msr_area));
+        __vmwrite(VM_ENTRY_MSR_LOAD_ADDR, virt_to_maddr(msr_area));
+    }
 
     for ( i = 0; i < msr_count; i++ )
         if ( msr_area[i].index == msr )
@@ -723,29 +735,29 @@ int vmx_add_guest_msr(struct vcpu *v, u3
     if ( msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
         return -ENOSPC;
 
-    if ( msr_area == NULL )
-    {
-        if ( (msr_area = alloc_xenheap_page()) == NULL )
-            return -ENOMEM;
-        v->arch.hvm_vmx.msr_area = msr_area;
-        __vmwrite(VM_EXIT_MSR_STORE_ADDR, virt_to_maddr(msr_area));
-        __vmwrite(VM_ENTRY_MSR_LOAD_ADDR, virt_to_maddr(msr_area));
-    }
-
     msr_area[msr_count].index = msr;
     msr_area[msr_count].mbz   = 0;
     msr_area[msr_count].data  = 0;
-    v->arch.hvm_vmx.msr_count = ++msr_count;
+    curr->arch.hvm_vmx.msr_count = ++msr_count;
     __vmwrite(VM_EXIT_MSR_STORE_COUNT, msr_count);
     __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, msr_count);
 
     return 0;
 }
 
-int vmx_add_host_load_msr(struct vcpu *v, u32 msr)
-{
-    unsigned int i, msr_count = v->arch.hvm_vmx.host_msr_count;
-    struct vmx_msr_entry *msr_area = v->arch.hvm_vmx.host_msr_area;
+int vmx_add_host_load_msr(u32 msr)
+{
+    struct vcpu *curr = current;
+    unsigned int i, msr_count = curr->arch.hvm_vmx.host_msr_count;
+    struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.host_msr_area;
+
+    if ( msr_area == NULL )
+    {
+        if ( (msr_area = alloc_xenheap_page()) == NULL )
+            return -ENOMEM;
+        curr->arch.hvm_vmx.host_msr_area = msr_area;
+        __vmwrite(VM_EXIT_MSR_LOAD_ADDR, virt_to_maddr(msr_area));
+    }
 
     for ( i = 0; i < msr_count; i++ )
         if ( msr_area[i].index == msr )
@@ -754,18 +766,10 @@ int vmx_add_host_load_msr(struct vcpu *v
     if ( msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
         return -ENOSPC;
 
-    if ( msr_area == NULL )
-    {
-        if ( (msr_area = alloc_xenheap_page()) == NULL )
-            return -ENOMEM;
-        v->arch.hvm_vmx.host_msr_area = msr_area;
-        __vmwrite(VM_EXIT_MSR_LOAD_ADDR, virt_to_maddr(msr_area));
-    }
-
     msr_area[msr_count].index = msr;
     msr_area[msr_count].mbz   = 0;
     rdmsrl(msr, msr_area[msr_count].data);
-    v->arch.hvm_vmx.host_msr_count = ++msr_count;
+    curr->arch.hvm_vmx.host_msr_count = ++msr_count;
     __vmwrite(VM_EXIT_MSR_LOAD_COUNT, msr_count);
 
     return 0;
@@ -776,21 +780,17 @@ int vmx_create_vmcs(struct vcpu *v)
     struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
     int rc;
 
-    if ( arch_vmx->vmcs == NULL )
-    {
-        if ( (arch_vmx->vmcs = vmx_alloc_vmcs()) == NULL )
-            return -ENOMEM;
-
-        INIT_LIST_HEAD(&arch_vmx->active_list);
-        __vmpclear(virt_to_maddr(arch_vmx->vmcs));
-        arch_vmx->active_cpu = -1;
-        arch_vmx->launched   = 0;
-    }
+    if ( (arch_vmx->vmcs = vmx_alloc_vmcs()) == NULL )
+        return -ENOMEM;
+
+    INIT_LIST_HEAD(&arch_vmx->active_list);
+    __vmpclear(virt_to_maddr(arch_vmx->vmcs));
+    arch_vmx->active_cpu = -1;
+    arch_vmx->launched   = 0;
 
     if ( (rc = construct_vmcs(v)) != 0 )
     {
         vmx_free_vmcs(arch_vmx->vmcs);
-        arch_vmx->vmcs = NULL;
         return rc;
     }
 
@@ -801,13 +801,13 @@ void vmx_destroy_vmcs(struct vcpu *v)
 {
     struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
 
-    if ( arch_vmx->vmcs == NULL )
-        return;
-
     vmx_clear_vmcs(v);
 
     vmx_free_vmcs(arch_vmx->vmcs);
-    arch_vmx->vmcs = NULL;
+
+    free_xenheap_page(v->arch.hvm_vmx.host_msr_area);
+    free_xenheap_page(v->arch.hvm_vmx.msr_area);
+    free_xenheap_page(v->arch.hvm_vmx.msr_bitmap);
 }
 
 void vm_launch_fail(void)
diff -r b55f6d42668d -r 3da148fb7d9b xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Wed Jun 18 14:17:10 2008 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Thu Jun 19 11:09:10 2008 +0100
@@ -1655,7 +1655,7 @@ static int vmx_msr_read_intercept(struct
                 goto done;
         }
 
-        if ( vmx_read_guest_msr(v, ecx, &msr_content) == 0 )
+        if ( vmx_read_guest_msr(ecx, &msr_content) == 0 )
             break;
 
         if ( is_last_branch_msr(ecx) )
@@ -1817,12 +1817,12 @@ static int vmx_msr_write_intercept(struc
 
             for ( ; (rc == 0) && lbr->count; lbr++ )
                 for ( i = 0; (rc == 0) && (i < lbr->count); i++ )
-                    if ( (rc = vmx_add_guest_msr(v, lbr->base + i)) == 0 )
+                    if ( (rc = vmx_add_guest_msr(lbr->base + i)) == 0 )
                         vmx_disable_intercept_for_msr(v, lbr->base + i);
         }
 
         if ( (rc < 0) ||
-             (vmx_add_host_load_msr(v, ecx) < 0) )
+             (vmx_add_host_load_msr(ecx) < 0) )
             vmx_inject_hw_exception(v, TRAP_machine_check, 0);
         else
         {
@@ -1842,7 +1842,7 @@ static int vmx_msr_write_intercept(struc
         switch ( long_mode_do_msr_write(regs) )
         {
             case HNDL_unhandled:
-                if ( (vmx_write_guest_msr(v, ecx, msr_content) != 0) &&
+                if ( (vmx_write_guest_msr(ecx, msr_content) != 0) &&
                      !is_last_branch_msr(ecx) )
                     wrmsr_hypervisor_regs(ecx, regs->eax, regs->edx);
                 break;
diff -r b55f6d42668d -r 3da148fb7d9b xen/arch/x86/hvm/vmx/vpmu_core2.c
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c Wed Jun 18 14:17:10 2008 +0100
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c Thu Jun 19 11:09:10 2008 +0100
@@ -219,12 +219,12 @@ static int core2_vpmu_alloc_resource(str
         return 0;
 
     wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
-    if ( vmx_add_host_load_msr(v, MSR_CORE_PERF_GLOBAL_CTRL) )
-        return 0;
-
-    if ( vmx_add_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL) )
-        return 0;
-    vmx_write_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, -1ULL);
+    if ( vmx_add_host_load_msr(MSR_CORE_PERF_GLOBAL_CTRL) )
+        return 0;
+
+    if ( vmx_add_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL) )
+        return 0;
+    vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, -1ULL);
 
     pmu_enable = xmalloc_bytes(sizeof(struct core2_pmu_enable) +
                  (core2_get_pmc_count()-1)*sizeof(char));
@@ -347,7 +347,7 @@ static int core2_vpmu_do_wrmsr(struct cp
         break;
     case MSR_CORE_PERF_FIXED_CTR_CTRL:
         non_global_ctrl = msr_content;
-        vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
+        vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
         global_ctrl >>= 32;
         for ( i = 0; i < 3; i++ )
         {
@@ -359,7 +359,7 @@ static int core2_vpmu_do_wrmsr(struct cp
         break;
     default:
         tmp = ecx - MSR_P6_EVNTSEL0;
-        vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
+        vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
         if ( tmp >= 0 && tmp < core2_get_pmc_count() )
             core2_vpmu_cxt->pmu_enable->arch_pmc_enable[tmp] =
                 (global_ctrl >> tmp) & (msr_content >> 22) & 1;
@@ -385,7 +385,7 @@ static int core2_vpmu_do_wrmsr(struct cp
     if ( type != MSR_TYPE_GLOBAL )
         wrmsrl(ecx, msr_content);
     else
-        vmx_write_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
+        vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
 
     return 1;
 }
@@ -410,7 +410,7 @@ static int core2_vpmu_do_rdmsr(struct cp
         msr_content = core2_vpmu_cxt->global_ovf_status;
         break;
     case MSR_CORE_PERF_GLOBAL_CTRL:
-        vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, &msr_content);
+        vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &msr_content);
         break;
     default:
         rdmsrl(regs->ecx, msr_content);
diff -r b55f6d42668d -r 3da148fb7d9b xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h        Wed Jun 18 14:17:10 2008 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h        Thu Jun 19 11:09:10 2008 +0100
@@ -333,10 +333,10 @@ enum vmcs_field {
 #define VMCS_VPID_WIDTH 16
 
 void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr);
-int vmx_read_guest_msr(struct vcpu *v, u32 msr, u64 *val);
-int vmx_write_guest_msr(struct vcpu *v, u32 msr, u64 val);
-int vmx_add_guest_msr(struct vcpu *v, u32 msr);
-int vmx_add_host_load_msr(struct vcpu *v, u32 msr);
+int vmx_read_guest_msr(u32 msr, u64 *val);
+int vmx_write_guest_msr(u32 msr, u64 val);
+int vmx_add_guest_msr(u32 msr);
+int vmx_add_host_load_msr(u32 msr);
 
 #endif /* ASM_X86_HVM_VMX_VMCS_H__ */
 
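A note on the bug fix in (1): the new vmx_destroy_vmcs() frees
host_msr_area, msr_area and msr_bitmap unconditionally, which relies on
free_xenheap_page() being a no-op for a NULL pointer (a vcpu that never
tracked an MSR leaves those fields NULL). A hedged sketch of the
lifetime this establishes (vcpu_track_msr() is an illustrative name,
not an actual hypervisor call site):

    /* Allocation is lazy: the first vmx_add_*_msr() call made while
     * the vcpu is 'current' allocates the corresponding page and
     * points the VMCS load/save fields at it. */
    static int vcpu_track_msr(u32 msr)
    {
        int rc = vmx_add_host_load_msr(msr);  /* may alloc host_msr_area */

        if ( rc == 0 )
            rc = vmx_add_guest_msr(msr);      /* may alloc msr_area */

        return rc;
    }

    /* Teardown happens exactly once, in vmx_destroy_vmcs(v): both MSR
     * areas and the MSR bitmap are freed; NULL frees are assumed safe. */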

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
