
[Xen-devel] [PATCH v1 6/6] x86/vvmx: fix I/O and MSR bitmaps mapping



Currently Xen tries to map the I/O and MSR bitmaps during the emulation
of vmptrld and vmwrite. This is wrong: the guest can store arbitrary
values in the corresponding VMCS fields.

Make bitmap mapping happen only during a nested vmentry, and only if
the appropriate execution controls are enabled by the L1 hypervisor.
With this change, mapping happens only:

    1. During the first nested vmentry
    2. After L1 has changed an appropriate vmcs field
    3. After nvmx_purge_vvmcs() has been called
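
For review convenience, here is a minimal standalone sketch of the
intended vmentry-time behaviour. The types, helpers and control-bit
values below are simplified stand-ins for illustration only, not the
real Xen definitions:

    /* Standalone illustration only; not the real Xen types or helpers. */
    #include <stdbool.h>

    #define SKETCH_CTRL_IO_BITMAP   (1u << 25)  /* ~ CPU_BASED_ACTIVATE_IO_BITMAP */
    #define SKETCH_CTRL_MSR_BITMAP  (1u << 28)  /* ~ CPU_BASED_ACTIVATE_MSR_BITMAP */

    struct sketch_nvmx {
        void *iobitmap[2];   /* NULL until mapped; cleared again by vmwrite/purge */
        void *msrbitmap;     /* likewise */
    };

    /* Stand-ins for map_io_bitmap_all() and _map_msr_bitmap(). */
    static bool sketch_map_io(struct sketch_nvmx *n)
    {
        static char a[4096], b[4096];
        n->iobitmap[0] = a;
        n->iobitmap[1] = b;
        return true;
    }

    static bool sketch_map_msr(struct sketch_nvmx *n)
    {
        static char m[4096];
        n->msrbitmap = m;
        return true;
    }

    /*
     * Called only on nested vmentry.  A bitmap is (re)mapped only when the
     * corresponding execution control is enabled by L1 and the bitmap is
     * not mapped already; a mapping failure is the "invalid control state"
     * case.
     */
    static bool sketch_vmentry_map_bitmaps(struct sketch_nvmx *n,
                                           unsigned int exec_ctrl)
    {
        if ( (exec_ctrl & SKETCH_CTRL_IO_BITMAP) &&
             (!n->iobitmap[0] || !n->iobitmap[1]) && !sketch_map_io(n) )
            return false;

        if ( (exec_ctrl & SKETCH_CTRL_MSR_BITMAP) &&
             !n->msrbitmap && !sketch_map_msr(n) )
            return false;

        return true;
    }

In the patch itself the failure case is reported to L1 as
VMX_INSN_INVALID_CONTROL_STATE via vmfail_valid().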

Signed-off-by: Sergey Dyasli <sergey.dyasli@xxxxxxxxxx>
---
 xen/arch/x86/hvm/vmx/vvmx.c | 104 +++++++++++++++++++++++-------------
 1 file changed, 67 insertions(+), 37 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 2c2ba36d94..e10ed79f66 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -750,6 +750,17 @@ static void __clear_current_vvmcs(struct vcpu *v)
         __vmpclear(nvcpu->nv_n2vmcx_pa);
 }
 
+static void unmap_msr_bitmap(struct vcpu *v)
+{
+    struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
+
+    if ( nvmx->msrbitmap )
+    {
+        hvm_unmap_guest_frame(nvmx->msrbitmap, 1);
+        nvmx->msrbitmap = NULL;
+    }
+}
+
 /*
  * Refreshes the MSR bitmap mapping for the current nested vcpu.  Returns true
  * for a successful mapping, and returns false for MSR_BITMAP parameter errors
@@ -760,12 +771,7 @@ static bool __must_check _map_msr_bitmap(struct vcpu *v)
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
     uint64_t gpa;
 
-    if ( nvmx->msrbitmap )
-    {
-        hvm_unmap_guest_frame(nvmx->msrbitmap, 1);
-        nvmx->msrbitmap = NULL;
-    }
-
+    unmap_msr_bitmap(v);
     gpa = get_vvmcs(v, MSR_BITMAP);
 
     if ( !IS_ALIGNED(gpa, PAGE_SIZE) )
@@ -776,6 +782,17 @@ static bool __must_check _map_msr_bitmap(struct vcpu *v)
     return nvmx->msrbitmap != NULL;
 }
 
+static void unmap_io_bitmap(struct vcpu *v, unsigned int idx)
+{
+    struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
+
+    if ( nvmx->iobitmap[idx] )
+    {
+        hvm_unmap_guest_frame(nvmx->iobitmap[idx], 1);
+        nvmx->iobitmap[idx] = NULL;
+    }
+}
+
 static bool_t __must_check _map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
 {
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
@@ -783,8 +800,7 @@ static bool_t __must_check _map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
     int index;
 
     index = vmcs_reg == IO_BITMAP_A ? 0 : 1;
-    if (nvmx->iobitmap[index])
-        hvm_unmap_guest_frame(nvmx->iobitmap[index], 1);
+    unmap_io_bitmap(v, index);
     gpa = get_vvmcs(v, vmcs_reg);
     nvmx->iobitmap[index] = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 1);
 
@@ -799,7 +815,6 @@ static inline bool_t __must_check map_io_bitmap_all(struct vcpu *v)
 
 static void nvmx_purge_vvmcs(struct vcpu *v)
 {
-    struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
     int i;
 
@@ -809,16 +824,11 @@ static void nvmx_purge_vvmcs(struct vcpu *v)
     nvcpu->nv_vvmcx = NULL;
     nvcpu->nv_vvmcxaddr = INVALID_PADDR;
     v->arch.hvm.vmx.vmcs_shadow_maddr = 0;
-    for (i=0; i<2; i++) {
-        if ( nvmx->iobitmap[i] ) {
-            hvm_unmap_guest_frame(nvmx->iobitmap[i], 1);
-            nvmx->iobitmap[i] = NULL;
-        }
-    }
-    if ( nvmx->msrbitmap ) {
-        hvm_unmap_guest_frame(nvmx->msrbitmap, 1);
-        nvmx->msrbitmap = NULL;
-    }
+
+    for ( i = 0; i < 2; i++ )
+        unmap_io_bitmap(v, i);
+
+    unmap_msr_bitmap(v);
 }
 
 u64 nvmx_get_tsc_offset(struct vcpu *v)
@@ -1594,20 +1604,34 @@ static void clear_vvmcs_launched(struct list_head *launched_list,
     }
 }
 
-static int nvmx_vmresume(struct vcpu *v, struct cpu_user_regs *regs)
+static enum vmx_insn_errno nvmx_vmresume(struct vcpu *v)
 {
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+    unsigned int exec_ctrl;
 
-    /* check VMCS is valid and IO BITMAP is set */
-    if ( vvmcx_valid(v) &&
-            ((nvmx->iobitmap[0] && nvmx->iobitmap[1]) ||
-            !(__n2_exec_control(v) & CPU_BASED_ACTIVATE_IO_BITMAP) ) )
-        nvcpu->nv_vmentry_pending = 1;
-    else
-        vmfail_invalid(regs);
+    ASSERT(vvmcx_valid(v));
+    exec_ctrl = __n2_exec_control(v);
 
-    return X86EMUL_OKAY;
+    if ( exec_ctrl & CPU_BASED_ACTIVATE_IO_BITMAP )
+    {
+        if ( (nvmx->iobitmap[0] == NULL || nvmx->iobitmap[1] == NULL) &&
+             !map_io_bitmap_all(v) )
+            goto invalid_control_state;
+    }
+
+    if ( exec_ctrl & CPU_BASED_ACTIVATE_MSR_BITMAP )
+    {
+        if ( nvmx->msrbitmap == NULL && !_map_msr_bitmap(v) )
+            goto invalid_control_state;
+    }
+
+    nvcpu->nv_vmentry_pending = 1;
+
+    return VMX_INSN_SUCCEED;
+
+invalid_control_state:
+    return VMX_INSN_INVALID_CONTROL_STATE;
 }
 
 int nvmx_handle_vmresume(struct cpu_user_regs *regs)
@@ -1641,7 +1665,12 @@ int nvmx_handle_vmresume(struct cpu_user_regs *regs)
         vmfail_valid(regs, VMX_INSN_VMRESUME_NONLAUNCHED_VMCS);
         return X86EMUL_OKAY;
     }
-    return nvmx_vmresume(v,regs);
+
+    rc = nvmx_vmresume(v);
+    if ( rc )
+        vmfail_valid(regs, rc);
+
+    return X86EMUL_OKAY;
 }
 
 int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
@@ -1676,13 +1705,16 @@ int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
         return X86EMUL_OKAY;
     }
     else {
-        rc = nvmx_vmresume(v,regs);
-        if ( rc == X86EMUL_OKAY )
+        rc = nvmx_vmresume(v);
+        if ( rc )
+            vmfail_valid(regs, rc);
+        else
         {
             if ( set_vvmcs_launched(&nvmx->launched_list,
                                     PFN_DOWN(v->arch.hvm.vmx.vmcs_shadow_maddr)) < 0 )
                 return X86EMUL_UNHANDLEABLE;
         }
+        rc = X86EMUL_OKAY;
     }
     return rc;
 }
@@ -1746,9 +1778,7 @@ int nvmx_handle_vmptrld(struct cpu_user_regs *regs)
                 vvmcx = NULL;
             }
         }
-        if ( !vvmcx ||
-             !map_io_bitmap_all(v) ||
-             !_map_msr_bitmap(v) )
+        else
         {
             vmfail(regs, VMX_INSN_VMPTRLD_INVALID_PHYADDR);
             goto out;
@@ -1926,13 +1956,13 @@ int nvmx_handle_vmwrite(struct cpu_user_regs *regs)
     switch ( vmcs_encoding & ~VMCS_HIGH(0) )
     {
     case IO_BITMAP_A:
-        okay = _map_io_bitmap(v, IO_BITMAP_A);
+        unmap_io_bitmap(v, 0);
         break;
     case IO_BITMAP_B:
-        okay = _map_io_bitmap(v, IO_BITMAP_B);
+        unmap_io_bitmap(v, 1);
         break;
     case MSR_BITMAP:
-        okay = _map_msr_bitmap(v);
+        unmap_msr_bitmap(v);
         break;
     }
 
-- 
2.17.1

