[Xen-devel] [PATCH 7/9] x86/vvmx: Use correct sizes when reading operands



The sizes of VMX operands are defined in the Intel SDM and have nothing
to do with the addr_size field of struct vmx_inst_info:

    invept:   r32/r64, m128
    invvpid:  r32/r64, m128
    vmclear:  m64
    vmptrld:  m64
    vmptrst:  m64
    vmread:   r32/r64 or m32/m64, r32/r64
    vmwrite:  r32/r64, r32/r64 or m32/m64
    vmxon:    m64

* Register operands are 32-bit or 64-bit depending on the guest mode.

* Memory operands are almost always of fixed size, usually 64-bit, but
  for vmread and vmwrite their size depends on the guest mode.

* invept has a 128-bit memory operand, but the upper 64 bits are reserved
  and therefore need not be read.

* invvpid has a 128-bit memory operand, but we only require the VPID value,
  which lies in the lower 64 bits (see the layout sketch after this list).
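
For reference, a rough sketch of the SDM descriptor layouts (the struct
names below are illustrative only and are not introduced by this patch);
it shows why a 64-bit read of the low quadword is sufficient:

    /* INVEPT descriptor: only the low 64 bits carry information. */
    struct invept_desc {
        uint64_t eptp;               /* bits 0-63:   EPT pointer */
        uint64_t reserved;           /* bits 64-127: reserved, must be zero */
    };

    /* INVVPID descriptor: the VPID lives in bits 0-15. */
    struct invvpid_desc {
        uint64_t vpid:16, rsvd:48;   /* bits 0-63:   VPID + reserved bits */
        uint64_t linear_addr;        /* bits 64-127: not needed here */
    };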

When reading variable-size operands, we pass the operand size calculated
by decode_vmx_inst() and stored in struct vmx_inst_op.  When reading
fixed-size operands, we pass the size of the variable into which the
operand is to be read.
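
For example (fragments mirroring the hunks below; operand_read() and the
variable names are those already used in vvmx.c):

    uint64_t gpa;
    /* Fixed-size m64 operand (vmclear/vmptrld/vmxon): read sizeof(gpa) bytes. */
    rc = operand_read(&gpa, &decode.op[0], regs, sizeof(gpa));

    /* Variable-size r/m32/64 operand (vmread/vmwrite): read the decoded size. */
    rc = operand_read(&vmcs_encoding, &decode.op[1], regs, decode.op[1].bytes);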

Signed-off-by: Euan Harris <euan.harris@xxxxxxxxxx>
---
 xen/arch/x86/hvm/vmx/vvmx.c | 48 +++++++++++++++++++++++----------------------
 1 file changed, 25 insertions(+), 23 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index fc2123c7c0..9a4e6177ad 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -197,11 +197,9 @@ struct vmx_inst_decoded {
 #define VMX_INST_MEMREG_TYPE_REG    1
     struct vmx_inst_op {
         int type;
+        unsigned int bytes;
         union {
-            struct {
-                unsigned long mem;
-                unsigned int  len;
-            };
+            unsigned long mem;
             unsigned int reg_idx;
         };
     } op[2];
@@ -464,6 +462,8 @@ static int decode_vmx_inst(struct cpu_user_regs *regs,
     unsigned long base, index, seg_base, disp, offset;
     int scale, size;
 
+    unsigned int bytes = vmx_guest_x86_mode(v);
+
     if ( vmx_inst_check_privilege(regs, vmxon_check) != X86EMUL_OKAY )
         return X86EMUL_EXCEPTION;
 
@@ -473,10 +473,11 @@ static int decode_vmx_inst(struct cpu_user_regs *regs,
     if ( info.fields.memreg ) {
         decode->op[0].type = VMX_INST_MEMREG_TYPE_REG;
         decode->op[0].reg_idx = info.fields.reg1;
+        decode->op[0].bytes = bytes;
     }
     else
     {
-        bool mode_64bit = (vmx_guest_x86_mode(v) == 8);
+        bool mode_64bit = (bytes == 8);
 
         decode->op[0].type = VMX_INST_MEMREG_TYPE_MEMORY;
 
@@ -508,11 +509,12 @@ static int decode_vmx_inst(struct cpu_user_regs *regs,
             goto gp_fault;
 
         decode->op[0].mem = base;
-        decode->op[0].len = size;
+        decode->op[0].bytes = bytes;
     }
 
     decode->op[1].type = VMX_INST_MEMREG_TYPE_REG;
     decode->op[1].reg_idx = info.fields.reg2;
+    decode->op[1].bytes = bytes;
 
     return X86EMUL_OKAY;
 
@@ -1494,7 +1496,7 @@ int nvmx_handle_vmxon(struct cpu_user_regs *regs)
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
     struct vmx_inst_decoded decode;
-    unsigned long gpa = 0;
+    uint64_t gpa;
     uint32_t nvmcs_revid;
     int rc;
 
@@ -1502,7 +1504,7 @@ int nvmx_handle_vmxon(struct cpu_user_regs *regs)
     if ( rc != X86EMUL_OKAY )
         return rc;
 
-    rc = operand_read(&gpa, &decode.op[0], regs, decode.op[0].len);
+    rc = operand_read(&gpa, &decode.op[0], regs, sizeof(gpa));
     if ( rc != X86EMUL_OKAY )
         return rc;
 
@@ -1715,14 +1717,14 @@ int nvmx_handle_vmptrld(struct cpu_user_regs *regs)
     struct vcpu *v = current;
     struct vmx_inst_decoded decode;
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
-    unsigned long gpa = 0;
+    uint64_t gpa;
     int rc;
 
     rc = decode_vmx_inst(regs, &decode, 0);
     if ( rc != X86EMUL_OKAY )
         return rc;
 
-    rc = operand_read(&gpa, &decode.op[0], regs, decode.op[0].len);
+    rc = operand_read(&gpa, &decode.op[0], regs, sizeof(gpa));
     if ( rc != X86EMUL_OKAY )
         return rc;
 
@@ -1801,7 +1803,7 @@ int nvmx_handle_vmptrst(struct cpu_user_regs *regs)
     gpa = nvcpu->nv_vvmcxaddr;
 
     rc = hvm_copy_to_guest_linear(decode.op[0].mem, &gpa,
-                                  decode.op[0].len, 0, &pfinfo);
+                                  decode.op[0].bytes, 0, &pfinfo);
     if ( rc == HVMTRANS_bad_linear_to_gfn )
         hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
     if ( rc != HVMTRANS_okay )
@@ -1817,7 +1819,7 @@ int nvmx_handle_vmclear(struct cpu_user_regs *regs)
     struct vmx_inst_decoded decode;
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
-    unsigned long gpa = 0;
+    uint64_t gpa;
     void *vvmcs;
     int rc;
 
@@ -1825,7 +1827,7 @@ int nvmx_handle_vmclear(struct cpu_user_regs *regs)
     if ( rc != X86EMUL_OKAY )
         return rc;
 
-    rc = operand_read(&gpa, &decode.op[0], regs, decode.op[0].len);
+    rc = operand_read(&gpa, &decode.op[0], regs, sizeof(gpa));
     if ( rc != X86EMUL_OKAY )
         return rc;
 
@@ -1886,7 +1888,7 @@ int nvmx_handle_vmread(struct cpu_user_regs *regs)
         return X86EMUL_OKAY;
     }
 
-    rc = operand_read(&vmcs_encoding, &decode.op[1], regs, decode.op[1].len);
+    rc = operand_read(&vmcs_encoding, &decode.op[1], regs, decode.op[1].bytes);
     if ( rc != X86EMUL_OKAY )
         return rc;
 
@@ -1900,7 +1902,7 @@ int nvmx_handle_vmread(struct cpu_user_regs *regs)
     switch ( decode.op[0].type ) {
     case VMX_INST_MEMREG_TYPE_MEMORY:
         rc = hvm_copy_to_guest_linear(decode.op[0].mem, &value,
-                                      decode.op[0].len, 0, &pfinfo);
+                                      decode.op[0].bytes, 0, &pfinfo);
         if ( rc == HVMTRANS_bad_linear_to_gfn )
             hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
         if ( rc != HVMTRANS_okay )
@@ -1928,7 +1930,7 @@ int nvmx_handle_vmwrite(struct cpu_user_regs *regs)
     if ( decode_vmx_inst(regs, &decode, 0) != X86EMUL_OKAY )
         return X86EMUL_EXCEPTION;
 
-    rc = operand_read(&operand, &decode.op[0], regs, decode.op[0].len);
+    rc = operand_read(&operand, &decode.op[0], regs, decode.op[0].bytes);
     if ( rc != X86EMUL_OKAY )
         return rc;
 
@@ -1938,7 +1940,7 @@ int nvmx_handle_vmwrite(struct cpu_user_regs *regs)
         return X86EMUL_OKAY;
     }
 
-    rc = operand_read(&vmcs_encoding, &decode.op[1], regs, decode.op[1].len);
+    rc = operand_read(&vmcs_encoding, &decode.op[1], regs, decode.op[1].bytes);
     if ( rc != X86EMUL_OKAY )
         return rc;
 
@@ -1973,13 +1975,13 @@ int nvmx_handle_vmwrite(struct cpu_user_regs *regs)
 int nvmx_handle_invept(struct cpu_user_regs *regs)
 {
     struct vmx_inst_decoded decode;
-    unsigned long invept_type = 0;
+    uint64_t invept_type;
     int ret;
 
     if ( (ret = decode_vmx_inst(regs, &decode, 0)) != X86EMUL_OKAY )
         return ret;
 
-    ret = operand_read(&invept_type, &decode.op[1], regs, decode.op[1].len);
+    ret = operand_read(&invept_type, &decode.op[1], regs, decode.op[1].bytes);
     if ( ret != X86EMUL_OKAY )
         return ret;
 
@@ -1987,9 +1989,9 @@ int nvmx_handle_invept(struct cpu_user_regs *regs)
     {
     case INVEPT_SINGLE_CONTEXT:
     {
-        unsigned long eptp;
+        uint64_t eptp;
 
-        ret = operand_read(&eptp, &decode.op[0], regs, decode.op[0].len);
+        ret = operand_read(&eptp, &decode.op[0], regs, sizeof(eptp));
         if ( ret )
             return ret;
 
@@ -2011,13 +2013,13 @@ int nvmx_handle_invept(struct cpu_user_regs *regs)
 int nvmx_handle_invvpid(struct cpu_user_regs *regs)
 {
     struct vmx_inst_decoded decode;
-    unsigned long invvpid_type = 0;
+    uint64_t invvpid_type;
     int ret;
 
     if ( (ret = decode_vmx_inst(regs, &decode, 0)) != X86EMUL_OKAY )
         return ret;
 
-    ret = operand_read(&invvpid_type, &decode.op[1], regs, decode.op[1].len);
+    ret = operand_read(&invvpid_type, &decode.op[1], regs, decode.op[1].bytes);
     if ( ret != X86EMUL_OKAY )
         return ret;
 
-- 
2.13.6

