
[PATCH v2 7/8] x86emul: move various utility functions to separate source files


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Jan Beulich <jbeulich@xxxxxxxx>
  • Date: Wed, 15 Jun 2022 12:01:08 +0200
  • Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Wei Liu <wl@xxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>
  • Delivery-date: Wed, 15 Jun 2022 10:01:16 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Many of these utility functions are needed by the hypervisor only - move
them to one file (util-xen.c) dedicated to that purpose. Some are also
needed by the test harness (but not the fuzzer) - move those to another
file (util.c).

The moved code is slightly adjusted in a few places, e.g. replacing
"state" with "s" (as was done for other code that has been split off).

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v2: Re-base.
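
For readers less familiar with these interfaces, here is a minimal,
purely illustrative sketch (not part of the patch) of a consumer of the
two memory-access predicates that move to x86_emulate/util.c.  The
wrapper function is hypothetical; only the x86_insn_is_mem_access() /
x86_insn_is_mem_write() declarations and the (s, ctxt) pair produced by
the emulator's decode path are taken from the code below.

/*
 * Hypothetical helper, for illustration only: classify an already
 * decoded instruction using the predicates moved to x86_emulate/util.c
 * (built for the hypervisor and the test harness, but not the fuzzer).
 */
static void hypothetical_classify_insn(const struct x86_emulate_state *s,
                                       const struct x86_emulate_ctxt *ctxt,
                                       bool *mem_access, bool *mem_write)
{
    *mem_access = x86_insn_is_mem_access(s, ctxt);
    *mem_write  = x86_insn_is_mem_write(s, ctxt);
}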

--- a/tools/tests/x86_emulator/Makefile
+++ b/tools/tests/x86_emulator/Makefile
@@ -252,7 +252,7 @@ endif # 32-bit override
 
 OBJS := x86-emulate.o cpuid.o test_x86_emulator.o evex-disp8.o predicates.o wrappers.o
 OBJS += x86_emulate/0f01.o x86_emulate/0fae.o x86_emulate/0fc7.o
-OBJS += x86_emulate/blk.o x86_emulate/decode.o x86_emulate/fpu.o
+OBJS += x86_emulate/blk.o x86_emulate/decode.o x86_emulate/fpu.o x86_emulate/util.o
 
 $(TARGET): $(OBJS)
        $(HOSTCC) $(HOSTCFLAGS) -o $@ $^
--- a/xen/arch/x86/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate.c
@@ -14,7 +14,6 @@
 #include <asm/processor.h> /* current_cpu_info */
 #include <asm/xstate.h>
 #include <asm/amd.h> /* cpu_has_amd_erratum() */
-#include <asm/debugreg.h>
 
 /* Avoid namespace pollution. */
 #undef cmpxchg
@@ -26,129 +25,6 @@
 
 #include "x86_emulate/x86_emulate.c"
 
-int cf_check x86emul_read_xcr(
-    unsigned int reg, uint64_t *val, struct x86_emulate_ctxt *ctxt)
-{
-    switch ( reg )
-    {
-    case 0:
-        *val = current->arch.xcr0;
-        return X86EMUL_OKAY;
-
-    case 1:
-        if ( current->domain->arch.cpuid->xstate.xgetbv1 )
-            break;
-        /* fall through */
-    default:
-        x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt);
-        return X86EMUL_EXCEPTION;
-    }
-
-    *val = xgetbv(reg);
-
-    return X86EMUL_OKAY;
-}
-
-/* Note: May be called with ctxt=NULL. */
-int cf_check x86emul_write_xcr(
-    unsigned int reg, uint64_t val, struct x86_emulate_ctxt *ctxt)
-{
-    switch ( reg )
-    {
-    case 0:
-        break;
-
-    default:
-    gp_fault:
-        if ( ctxt )
-            x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt);
-        return X86EMUL_EXCEPTION;
-    }
-
-    if ( unlikely(handle_xsetbv(reg, val) != 0) )
-        goto gp_fault;
-
-    return X86EMUL_OKAY;
-}
-
-#ifdef CONFIG_PV
-/* Called with NULL ctxt in hypercall context. */
-int cf_check x86emul_read_dr(
-    unsigned int reg, unsigned long *val, struct x86_emulate_ctxt *ctxt)
-{
-    struct vcpu *curr = current;
-
-    /* HVM support requires a bit more plumbing before it will work. */
-    ASSERT(is_pv_vcpu(curr));
-
-    switch ( reg )
-    {
-    case 0 ... 3:
-        *val = array_access_nospec(curr->arch.dr, reg);
-        break;
-
-    case 4:
-        if ( curr->arch.pv.ctrlreg[4] & X86_CR4_DE )
-            goto ud_fault;
-
-        /* Fallthrough */
-    case 6:
-        *val = curr->arch.dr6;
-        break;
-
-    case 5:
-        if ( curr->arch.pv.ctrlreg[4] & X86_CR4_DE )
-            goto ud_fault;
-
-        /* Fallthrough */
-    case 7:
-        *val = curr->arch.dr7 | curr->arch.pv.dr7_emul;
-        break;
-
-    ud_fault:
-    default:
-        if ( ctxt )
-            x86_emul_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC, ctxt);
-
-        return X86EMUL_EXCEPTION;
-    }
-
-    return X86EMUL_OKAY;
-}
-
-int cf_check x86emul_write_dr(
-    unsigned int reg, unsigned long val, struct x86_emulate_ctxt *ctxt)
-{
-    struct vcpu *curr = current;
-
-    /* HVM support requires a bit more plumbing before it will work. */
-    ASSERT(is_pv_vcpu(curr));
-
-    switch ( set_debugreg(curr, reg, val) )
-    {
-    case 0:
-        return X86EMUL_OKAY;
-
-    case -ENODEV:
-        x86_emul_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC, ctxt);
-        return X86EMUL_EXCEPTION;
-
-    default:
-        x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt);
-        return X86EMUL_EXCEPTION;
-    }
-}
-#endif /* CONFIG_PV */
-
-int cf_check x86emul_cpuid(
-    uint32_t leaf, uint32_t subleaf, struct cpuid_leaf *res,
-    struct x86_emulate_ctxt *ctxt)
-{
-    guest_cpuid(current, leaf, subleaf, res);
-
-    return X86EMUL_OKAY;
-}
-
 /*
  * Local variables:
  * mode: C
--- a/xen/arch/x86/x86_emulate/Makefile
+++ b/xen/arch/x86/x86_emulate/Makefile
@@ -4,3 +4,5 @@ obj-y += 0fc7.o
 obj-y += blk.o
 obj-y += decode.o
 obj-$(CONFIG_HVM) += fpu.o
+obj-y += util.o
+obj-y += util-xen.o
--- a/xen/arch/x86/x86_emulate/private.h
+++ b/xen/arch/x86/x86_emulate/private.h
@@ -331,6 +331,13 @@ struct x86_emulate_state {
 #endif
 };
 
+static inline void check_state(const struct x86_emulate_state *s)
+{
+#if defined(__XEN__) && !defined(NDEBUG)
+    ASSERT(s->caller);
+#endif
+}
+
 typedef union {
     uint64_t mmx;
     uint64_t __attribute__ ((aligned(16))) xmm[2];
--- /dev/null
+++ b/xen/arch/x86/x86_emulate/util.c
@@ -0,0 +1,298 @@
+/******************************************************************************
+ * util.c
+ *
+ * Generic x86 (32-bit and 64-bit) instruction decoder and emulator utility
+ * functions.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "private.h"
+
+unsigned int x86_insn_length(const struct x86_emulate_state *s,
+                             const struct x86_emulate_ctxt *ctxt)
+{
+    check_state(s);
+
+    return s->ip - ctxt->regs->r(ip);
+}
+
+/*
+ * This function means to return 'true' for all supported insns with explicit
+ * accesses to memory.  This means also insns which don't have an explicit
+ * memory operand (like POP), but it does not mean e.g. segment selector
+ * loads, where the descriptor table access is considered an implicit one.
+ */
+bool cf_check x86_insn_is_mem_access(const struct x86_emulate_state *s,
+                                     const struct x86_emulate_ctxt *ctxt)
+{
+    if ( mode_64bit() && s->not_64bit )
+        return false;
+
+    if ( s->ea.type == OP_MEM )
+    {
+        switch ( ctxt->opcode )
+        {
+        case 0x8d: /* LEA */
+        case X86EMUL_OPC(0x0f, 0x0d): /* PREFETCH */
+        case X86EMUL_OPC(0x0f, 0x18)
+         ... X86EMUL_OPC(0x0f, 0x1f): /* NOP space */
+        case X86EMUL_OPC_66(0x0f, 0x18)
+         ... X86EMUL_OPC_66(0x0f, 0x1f): /* NOP space */
+        case X86EMUL_OPC_F3(0x0f, 0x18)
+         ... X86EMUL_OPC_F3(0x0f, 0x1f): /* NOP space */
+        case X86EMUL_OPC_F2(0x0f, 0x18)
+         ... X86EMUL_OPC_F2(0x0f, 0x1f): /* NOP space */
+        case X86EMUL_OPC(0x0f, 0xb9): /* UD1 */
+        case X86EMUL_OPC(0x0f, 0xff): /* UD0 */
+        case X86EMUL_OPC_EVEX_66(0x0f38, 0xc6): /* V{GATH,SCATT}ERPF*D* */
+        case X86EMUL_OPC_EVEX_66(0x0f38, 0xc7): /* V{GATH,SCATT}ERPF*Q* */
+            return false;
+
+        case X86EMUL_OPC(0x0f, 0x01):
+            return (s->modrm_reg & 7) != 7; /* INVLPG */
+
+        case X86EMUL_OPC(0x0f, 0xae):
+            return (s->modrm_reg & 7) != 7; /* CLFLUSH */
+
+        case X86EMUL_OPC_66(0x0f, 0xae):
+            return (s->modrm_reg & 7) < 6; /* CLWB, CLFLUSHOPT */
+        }
+
+        return true;
+    }
+
+    switch ( ctxt->opcode )
+    {
+    case 0x06 ... 0x07:                  /* PUSH / POP %es */
+    case 0x0e:                           /* PUSH %cs */
+    case 0x16 ... 0x17:                  /* PUSH / POP %ss */
+    case 0x1e ... 0x1f:                  /* PUSH / POP %ds */
+    case 0x50 ... 0x5f:                  /* PUSH / POP reg */
+    case 0x60 ... 0x61:                  /* PUSHA / POPA */
+    case 0x68: case 0x6a:                /* PUSH imm */
+    case 0x6c ... 0x6f:                  /* INS / OUTS */
+    case 0x8f:                           /* POP r/m */
+    case 0x9a:                           /* CALL (far, direct) */
+    case 0x9c ... 0x9d:                  /* PUSHF / POPF */
+    case 0xa4 ... 0xa7:                  /* MOVS / CMPS */
+    case 0xaa ... 0xaf:                  /* STOS / LODS / SCAS */
+    case 0xc2 ... 0xc3:                  /* RET (near) */
+    case 0xc8 ... 0xc9:                  /* ENTER / LEAVE */
+    case 0xca ... 0xcb:                  /* RET (far) */
+    case 0xd7:                           /* XLAT */
+    case 0xe8:                           /* CALL (near, direct) */
+    case X86EMUL_OPC(0x0f, 0xa0):        /* PUSH %fs */
+    case X86EMUL_OPC(0x0f, 0xa1):        /* POP %fs */
+    case X86EMUL_OPC(0x0f, 0xa8):        /* PUSH %gs */
+    case X86EMUL_OPC(0x0f, 0xa9):        /* POP %gs */
+    case X86EMUL_OPC(0x0f, 0xf7):        /* MASKMOVQ */
+    case X86EMUL_OPC_66(0x0f, 0xf7):     /* MASKMOVDQU */
+    case X86EMUL_OPC_VEX_66(0x0f, 0xf7): /* VMASKMOVDQU */
+        return true;
+
+    case 0xff:
+        switch ( s->modrm_reg & 7 )
+        {
+        case 2: /* CALL (near, indirect) */
+        case 6: /* PUSH r/m */
+            return true;
+        }
+        break;
+
+    case X86EMUL_OPC(0x0f, 0x01):
+        /* Cover CLZERO. */
+        return (s->modrm_rm & 7) == 4 && (s->modrm_reg & 7) == 7;
+    }
+
+    return false;
+}
+
+/*
+ * This function means to return 'true' for all supported insns with explicit
+ * writes to memory.  This means also insns which don't have an explicit
+ * memory operand (like PUSH), but it does not mean e.g. segment selector
+ * loads, where the (possible) descriptor table write is considered an
+ * implicit access.
+ */
+bool cf_check x86_insn_is_mem_write(const struct x86_emulate_state *s,
+                                    const struct x86_emulate_ctxt *ctxt)
+{
+    if ( mode_64bit() && s->not_64bit )
+        return false;
+
+    switch ( s->desc & DstMask )
+    {
+    case DstMem:
+        /* The SrcMem check is to cover {,V}MASKMOV{Q,DQU}. */
+        return s->modrm_mod != 3 || (s->desc & SrcMask) == SrcMem;
+
+    case DstBitBase:
+    case DstImplicit:
+        break;
+
+    default:
+        switch ( ctxt->opcode )
+        {
+        case 0x63:                         /* ARPL */
+            return !mode_64bit();
+
+        case X86EMUL_OPC_66(0x0f38, 0xf8): /* MOVDIR64B */
+        case X86EMUL_OPC_F2(0x0f38, 0xf8): /* ENQCMD */
+        case X86EMUL_OPC_F3(0x0f38, 0xf8): /* ENQCMDS */
+            return true;
+
+        case X86EMUL_OPC_EVEX_F3(0x0f38, 0x10) ...
+             X86EMUL_OPC_EVEX_F3(0x0f38, 0x15): /* VPMOVUS* */
+        case X86EMUL_OPC_EVEX_F3(0x0f38, 0x20) ...
+             X86EMUL_OPC_EVEX_F3(0x0f38, 0x25): /* VPMOVS* */
+        case X86EMUL_OPC_EVEX_F3(0x0f38, 0x30) ...
+             X86EMUL_OPC_EVEX_F3(0x0f38, 0x35): /* VPMOV{D,Q,W}* */
+            return s->modrm_mod != 3;
+        }
+
+        return false;
+    }
+
+    if ( s->modrm_mod == 3 )
+    {
+        switch ( ctxt->opcode )
+        {
+        case 0xff: /* Grp5 */
+            break;
+
+        case X86EMUL_OPC(0x0f, 0x01): /* CLZERO is the odd one. */
+            return (s->modrm_rm & 7) == 4 && (s->modrm_reg & 7) == 7;
+
+        default:
+            return false;
+        }
+    }
+
+    switch ( ctxt->opcode )
+    {
+    case 0x06:                           /* PUSH %es */
+    case 0x0e:                           /* PUSH %cs */
+    case 0x16:                           /* PUSH %ss */
+    case 0x1e:                           /* PUSH %ds */
+    case 0x50 ... 0x57:                  /* PUSH reg */
+    case 0x60:                           /* PUSHA */
+    case 0x68: case 0x6a:                /* PUSH imm */
+    case 0x6c: case 0x6d:                /* INS */
+    case 0x9a:                           /* CALL (far, direct) */
+    case 0x9c:                           /* PUSHF */
+    case 0xa4: case 0xa5:                /* MOVS */
+    case 0xaa: case 0xab:                /* STOS */
+    case 0xc8:                           /* ENTER */
+    case 0xe8:                           /* CALL (near, direct) */
+    case X86EMUL_OPC(0x0f, 0xa0):        /* PUSH %fs */
+    case X86EMUL_OPC(0x0f, 0xa8):        /* PUSH %gs */
+    case X86EMUL_OPC(0x0f, 0xab):        /* BTS */
+    case X86EMUL_OPC(0x0f, 0xb3):        /* BTR */
+    case X86EMUL_OPC(0x0f, 0xbb):        /* BTC */
+        return true;
+
+    case 0xd9:
+        switch ( s->modrm_reg & 7 )
+        {
+        case 2: /* FST m32fp */
+        case 3: /* FSTP m32fp */
+        case 6: /* FNSTENV */
+        case 7: /* FNSTCW */
+            return true;
+        }
+        break;
+
+    case 0xdb:
+        switch ( s->modrm_reg & 7 )
+        {
+        case 1: /* FISTTP m32i */
+        case 2: /* FIST m32i */
+        case 3: /* FISTP m32i */
+        case 7: /* FSTP m80fp */
+            return true;
+        }
+        break;
+
+    case 0xdd:
+        switch ( s->modrm_reg & 7 )
+        {
+        case 1: /* FISTTP m64i */
+        case 2: /* FST m64fp */
+        case 3: /* FSTP m64fp */
+        case 6: /* FNSAVE */
+        case 7: /* FNSTSW */
+            return true;
+        }
+        break;
+
+    case 0xdf:
+        switch ( s->modrm_reg & 7 )
+        {
+        case 1: /* FISTTP m16i */
+        case 2: /* FIST m16i */
+        case 3: /* FISTP m16i */
+        case 6: /* FBSTP */
+        case 7: /* FISTP m64i */
+            return true;
+        }
+        break;
+
+    case 0xff:
+        switch ( s->modrm_reg & 7 )
+        {
+        case 2: /* CALL (near, indirect) */
+        case 3: /* CALL (far, indirect) */
+        case 6: /* PUSH r/m */
+            return true;
+        }
+        break;
+
+    case X86EMUL_OPC(0x0f, 0x01):
+        switch ( s->modrm_reg & 7 )
+        {
+        case 0: /* SGDT */
+        case 1: /* SIDT */
+        case 4: /* SMSW */
+            return true;
+        }
+        break;
+
+    case X86EMUL_OPC(0x0f, 0xae):
+        switch ( s->modrm_reg & 7 )
+        {
+        case 0: /* FXSAVE */
+        /* case 3: STMXCSR - handled above */
+        case 4: /* XSAVE */
+        case 6: /* XSAVEOPT */
+            return true;
+        }
+        break;
+
+    case X86EMUL_OPC(0x0f, 0xba):
+        return (s->modrm_reg & 7) > 4; /* BTS / BTR / BTC */
+
+    case X86EMUL_OPC(0x0f, 0xc7):
+        switch ( s->modrm_reg & 7 )
+        {
+        case 1: /* CMPXCHG{8,16}B */
+        case 4: /* XSAVEC */
+        case 5: /* XSAVES */
+            return true;
+        }
+        break;
+    }
+
+    return false;
+}
--- /dev/null
+++ b/xen/arch/x86/x86_emulate/util-xen.c
@@ -0,0 +1,250 @@
+/******************************************************************************
+ * util-xen.c
+ *
+ * Generic x86 (32-bit and 64-bit) instruction decoder and emulator hypervisor-
+ * only utility functions.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "private.h"
+
+#include <xen/nospec.h>
+#include <xen/sched.h>
+#include <asm/debugreg.h>
+#include <asm/xstate.h>
+
+#ifndef NDEBUG
+void x86_emulate_free_state(struct x86_emulate_state *s)
+{
+    check_state(s);
+    s->caller = NULL;
+}
+#endif
+
+unsigned int x86_insn_opsize(const struct x86_emulate_state *s)
+{
+    check_state(s);
+
+    return s->op_bytes << 3;
+}
+
+int x86_insn_modrm(const struct x86_emulate_state *s,
+                   unsigned int *rm, unsigned int *reg)
+{
+    check_state(s);
+
+    if ( unlikely(s->modrm_mod > 3) )
+    {
+        if ( rm )
+            *rm = ~0U;
+        if ( reg )
+            *reg = ~0U;
+        return -EINVAL;
+    }
+
+    if ( rm )
+        *rm = s->modrm_rm;
+    if ( reg )
+        *reg = s->modrm_reg;
+
+    return s->modrm_mod;
+}
+
+unsigned long x86_insn_operand_ea(const struct x86_emulate_state *s,
+                                  enum x86_segment *seg)
+{
+    *seg = s->ea.type == OP_MEM ? s->ea.mem.seg : x86_seg_none;
+
+    check_state(s);
+
+    return s->ea.mem.off;
+}
+
+bool cf_check x86_insn_is_portio(const struct x86_emulate_state *s,
+                                 const struct x86_emulate_ctxt *ctxt)
+{
+    switch ( ctxt->opcode )
+    {
+    case 0x6c ... 0x6f: /* INS / OUTS */
+    case 0xe4 ... 0xe7: /* IN / OUT imm8 */
+    case 0xec ... 0xef: /* IN / OUT %dx */
+        return true;
+    }
+
+    return false;
+}
+
+bool cf_check x86_insn_is_cr_access(const struct x86_emulate_state *s,
+                                    const struct x86_emulate_ctxt *ctxt)
+{
+    switch ( ctxt->opcode )
+    {
+        unsigned int ext;
+
+    case X86EMUL_OPC(0x0f, 0x01):
+        if ( x86_insn_modrm(s, NULL, &ext) >= 0
+             && (ext & 5) == 4 ) /* SMSW / LMSW */
+            return true;
+        break;
+
+    case X86EMUL_OPC(0x0f, 0x06): /* CLTS */
+    case X86EMUL_OPC(0x0f, 0x20): /* MOV from CRn */
+    case X86EMUL_OPC(0x0f, 0x22): /* MOV to CRn */
+        return true;
+    }
+
+    return false;
+}
+
+unsigned long x86_insn_immediate(const struct x86_emulate_state *s,
+                                 unsigned int nr)
+{
+    check_state(s);
+
+    switch ( nr )
+    {
+    case 0:
+        return s->imm1;
+    case 1:
+        return s->imm2;
+    }
+
+    return 0;
+}
+
+int cf_check x86emul_read_xcr(unsigned int reg, uint64_t *val,
+                              struct x86_emulate_ctxt *ctxt)
+{
+    switch ( reg )
+    {
+    case 0:
+        *val = current->arch.xcr0;
+        return X86EMUL_OKAY;
+
+    case 1:
+        if ( current->domain->arch.cpuid->xstate.xgetbv1 )
+            break;
+        /* fall through */
+    default:
+        x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt);
+        return X86EMUL_EXCEPTION;
+    }
+
+    *val = xgetbv(reg);
+
+    return X86EMUL_OKAY;
+}
+
+/* Note: May be called with ctxt=NULL. */
+int cf_check x86emul_write_xcr(unsigned int reg, uint64_t val,
+                               struct x86_emulate_ctxt *ctxt)
+{
+    switch ( reg )
+    {
+    case 0:
+        break;
+
+    default:
+    gp_fault:
+        if ( ctxt )
+            x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt);
+        return X86EMUL_EXCEPTION;
+    }
+
+    if ( unlikely(handle_xsetbv(reg, val) != 0) )
+        goto gp_fault;
+
+    return X86EMUL_OKAY;
+}
+
+#ifdef CONFIG_PV
+
+/* Called with NULL ctxt in hypercall context. */
+int cf_check x86emul_read_dr(unsigned int reg, unsigned long *val,
+                             struct x86_emulate_ctxt *ctxt)
+{
+    struct vcpu *curr = current;
+
+    /* HVM support requires a bit more plumbing before it will work. */
+    ASSERT(is_pv_vcpu(curr));
+
+    switch ( reg )
+    {
+    case 0 ... 3:
+        *val = array_access_nospec(curr->arch.dr, reg);
+        break;
+
+    case 4:
+        if ( curr->arch.pv.ctrlreg[4] & X86_CR4_DE )
+            goto ud_fault;
+
+        /* Fallthrough */
+    case 6:
+        *val = curr->arch.dr6;
+        break;
+
+    case 5:
+        if ( curr->arch.pv.ctrlreg[4] & X86_CR4_DE )
+            goto ud_fault;
+
+        /* Fallthrough */
+    case 7:
+        *val = curr->arch.dr7 | curr->arch.pv.dr7_emul;
+        break;
+
+    ud_fault:
+    default:
+        if ( ctxt )
+            x86_emul_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC, ctxt);
+
+        return X86EMUL_EXCEPTION;
+    }
+
+    return X86EMUL_OKAY;
+}
+
+int cf_check x86emul_write_dr(unsigned int reg, unsigned long val,
+                              struct x86_emulate_ctxt *ctxt)
+{
+    struct vcpu *curr = current;
+
+    /* HVM support requires a bit more plumbing before it will work. */
+    ASSERT(is_pv_vcpu(curr));
+
+    switch ( set_debugreg(curr, reg, val) )
+    {
+    case 0:
+        return X86EMUL_OKAY;
+
+    case -ENODEV:
+        x86_emul_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC, ctxt);
+        return X86EMUL_EXCEPTION;
+
+    default:
+        x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt);
+        return X86EMUL_EXCEPTION;
+    }
+}
+
+#endif /* CONFIG_PV */
+
+int cf_check x86emul_cpuid(uint32_t leaf, uint32_t subleaf,
+                           struct cpuid_leaf *res,
+                           struct x86_emulate_ctxt *ctxt)
+{
+    guest_cpuid(current, leaf, subleaf, res);
+
+    return X86EMUL_OKAY;
+}
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -8435,393 +8435,3 @@ int x86_emulate_wrapper(
     return rc;
 }
 #endif
-
-static inline void check_state(const struct x86_emulate_state *state)
-{
-#if defined(__XEN__) && !defined(NDEBUG)
-    ASSERT(state->caller);
-#endif
-}
-
-#if defined(__XEN__) && !defined(NDEBUG)
-void x86_emulate_free_state(struct x86_emulate_state *state)
-{
-    check_state(state);
-    state->caller = NULL;
-}
-#endif
-
-unsigned int
-x86_insn_opsize(const struct x86_emulate_state *state)
-{
-    check_state(state);
-
-    return state->op_bytes << 3;
-}
-
-int
-x86_insn_modrm(const struct x86_emulate_state *state,
-               unsigned int *rm, unsigned int *reg)
-{
-    check_state(state);
-
-    if ( unlikely(state->modrm_mod > 3) )
-    {
-        if ( rm )
-            *rm = ~0U;
-        if ( reg )
-            *reg = ~0U;
-        return -EINVAL;
-    }
-
-    if ( rm )
-        *rm = state->modrm_rm;
-    if ( reg )
-        *reg = state->modrm_reg;
-
-    return state->modrm_mod;
-}
-
-unsigned long
-x86_insn_operand_ea(const struct x86_emulate_state *state,
-                    enum x86_segment *seg)
-{
-    *seg = state->ea.type == OP_MEM ? state->ea.mem.seg : x86_seg_none;
-
-    check_state(state);
-
-    return state->ea.mem.off;
-}
-
-/*
- * This function means to return 'true' for all supported insns with explicit
- * accesses to memory.  This means also insns which don't have an explicit
- * memory operand (like POP), but it does not mean e.g. segment selector
- * loads, where the descriptor table access is considered an implicit one.
- */
-bool cf_check
-x86_insn_is_mem_access(const struct x86_emulate_state *state,
-                       const struct x86_emulate_ctxt *ctxt)
-{
-    if ( mode_64bit() && state->not_64bit )
-        return false;
-
-    if ( state->ea.type == OP_MEM )
-    {
-        switch ( ctxt->opcode )
-        {
-        case 0x8d: /* LEA */
-        case X86EMUL_OPC(0x0f, 0x0d): /* PREFETCH */
-        case X86EMUL_OPC(0x0f, 0x18)
-         ... X86EMUL_OPC(0x0f, 0x1f): /* NOP space */
-        case X86EMUL_OPC_66(0x0f, 0x18)
-         ... X86EMUL_OPC_66(0x0f, 0x1f): /* NOP space */
-        case X86EMUL_OPC_F3(0x0f, 0x18)
-         ... X86EMUL_OPC_F3(0x0f, 0x1f): /* NOP space */
-        case X86EMUL_OPC_F2(0x0f, 0x18)
-         ... X86EMUL_OPC_F2(0x0f, 0x1f): /* NOP space */
-        case X86EMUL_OPC(0x0f, 0xb9): /* UD1 */
-        case X86EMUL_OPC(0x0f, 0xff): /* UD0 */
-        case X86EMUL_OPC_EVEX_66(0x0f38, 0xc6): /* V{GATH,SCATT}ERPF*D* */
-        case X86EMUL_OPC_EVEX_66(0x0f38, 0xc7): /* V{GATH,SCATT}ERPF*Q* */
-            return false;
-
-        case X86EMUL_OPC(0x0f, 0x01):
-            return (state->modrm_reg & 7) != 7; /* INVLPG */
-
-        case X86EMUL_OPC(0x0f, 0xae):
-            return (state->modrm_reg & 7) != 7; /* CLFLUSH */
-
-        case X86EMUL_OPC_66(0x0f, 0xae):
-            return (state->modrm_reg & 7) < 6; /* CLWB, CLFLUSHOPT */
-        }
-
-        return true;
-    }
-
-    switch ( ctxt->opcode )
-    {
-    case 0x06 ... 0x07: /* PUSH / POP %es */
-    case 0x0e:          /* PUSH %cs */
-    case 0x16 ... 0x17: /* PUSH / POP %ss */
-    case 0x1e ... 0x1f: /* PUSH / POP %ds */
-    case 0x50 ... 0x5f: /* PUSH / POP reg */
-    case 0x60 ... 0x61: /* PUSHA / POPA */
-    case 0x68: case 0x6a: /* PUSH imm */
-    case 0x6c ... 0x6f: /* INS / OUTS */
-    case 0x8f:          /* POP r/m */
-    case 0x9a:          /* CALL (far, direct) */
-    case 0x9c ... 0x9d: /* PUSHF / POPF */
-    case 0xa4 ... 0xa7: /* MOVS / CMPS */
-    case 0xaa ... 0xaf: /* STOS / LODS / SCAS */
-    case 0xc2 ... 0xc3: /* RET (near) */
-    case 0xc8 ... 0xc9: /* ENTER / LEAVE */
-    case 0xca ... 0xcb: /* RET (far) */
-    case 0xd7:          /* XLAT */
-    case 0xe8:          /* CALL (near, direct) */
-    case X86EMUL_OPC(0x0f, 0xa0):         /* PUSH %fs */
-    case X86EMUL_OPC(0x0f, 0xa1):         /* POP %fs */
-    case X86EMUL_OPC(0x0f, 0xa8):         /* PUSH %gs */
-    case X86EMUL_OPC(0x0f, 0xa9):         /* POP %gs */
-    CASE_SIMD_PACKED_INT_VEX(0x0f, 0xf7): /* MASKMOV{Q,DQU} */
-                                          /* VMASKMOVDQU */
-        return true;
-
-    case 0xff:
-        switch ( state->modrm_reg & 7 )
-        {
-        case 2: /* CALL (near, indirect) */
-        case 6: /* PUSH r/m */
-            return true;
-        }
-        break;
-
-    case X86EMUL_OPC(0x0f, 0x01):
-        /* Cover CLZERO. */
-        return (state->modrm_rm & 7) == 4 && (state->modrm_reg & 7) == 7;
-    }
-
-    return false;
-}
-
-/*
- * This function means to return 'true' for all supported insns with explicit
- * writes to memory.  This means also insns which don't have an explicit
- * memory operand (like PUSH), but it does not mean e.g. segment selector
- * loads, where the (possible) descriptor table write is considered an
- * implicit access.
- */
-bool cf_check
-x86_insn_is_mem_write(const struct x86_emulate_state *state,
-                      const struct x86_emulate_ctxt *ctxt)
-{
-    if ( mode_64bit() && state->not_64bit )
-        return false;
-
-    switch ( state->desc & DstMask )
-    {
-    case DstMem:
-        /* The SrcMem check is to cover {,V}MASKMOV{Q,DQU}. */
-        return state->modrm_mod != 3 || (state->desc & SrcMask) == SrcMem;
-
-    case DstBitBase:
-    case DstImplicit:
-        break;
-
-    default:
-        switch ( ctxt->opcode )
-        {
-        case 0x63:                         /* ARPL */
-            return !mode_64bit();
-
-        case X86EMUL_OPC_66(0x0f38, 0xf8): /* MOVDIR64B */
-        case X86EMUL_OPC_F2(0x0f38, 0xf8): /* ENQCMD */
-        case X86EMUL_OPC_F3(0x0f38, 0xf8): /* ENQCMDS */
-            return true;
-
-        case X86EMUL_OPC_EVEX_F3(0x0f38, 0x10) ...
-             X86EMUL_OPC_EVEX_F3(0x0f38, 0x15): /* VPMOVUS* */
-        case X86EMUL_OPC_EVEX_F3(0x0f38, 0x20) ...
-             X86EMUL_OPC_EVEX_F3(0x0f38, 0x25): /* VPMOVS* */
-        case X86EMUL_OPC_EVEX_F3(0x0f38, 0x30) ...
-             X86EMUL_OPC_EVEX_F3(0x0f38, 0x35): /* VPMOV{D,Q,W}* */
-            return state->modrm_mod != 3;
-        }
-
-        return false;
-    }
-
-    if ( state->modrm_mod == 3 )
-    {
-        switch ( ctxt->opcode )
-        {
-        case 0xff: /* Grp5 */
-            break;
-
-        case X86EMUL_OPC(0x0f, 0x01): /* CLZERO is the odd one. */
-            return (state->modrm_rm & 7) == 4 && (state->modrm_reg & 7) == 7;
-
-        default:
-            return false;
-        }
-    }
-
-    switch ( ctxt->opcode )
-    {
-    case 0x06:                           /* PUSH %es */
-    case 0x0e:                           /* PUSH %cs */
-    case 0x16:                           /* PUSH %ss */
-    case 0x1e:                           /* PUSH %ds */
-    case 0x50 ... 0x57:                  /* PUSH reg */
-    case 0x60:                           /* PUSHA */
-    case 0x68: case 0x6a:                /* PUSH imm */
-    case 0x6c: case 0x6d:                /* INS */
-    case 0x9a:                           /* CALL (far, direct) */
-    case 0x9c:                           /* PUSHF */
-    case 0xa4: case 0xa5:                /* MOVS */
-    case 0xaa: case 0xab:                /* STOS */
-    case 0xc8:                           /* ENTER */
-    case 0xe8:                           /* CALL (near, direct) */
-    case X86EMUL_OPC(0x0f, 0xa0):        /* PUSH %fs */
-    case X86EMUL_OPC(0x0f, 0xa8):        /* PUSH %gs */
-    case X86EMUL_OPC(0x0f, 0xab):        /* BTS */
-    case X86EMUL_OPC(0x0f, 0xb3):        /* BTR */
-    case X86EMUL_OPC(0x0f, 0xbb):        /* BTC */
-        return true;
-
-    case 0xd9:
-        switch ( state->modrm_reg & 7 )
-        {
-        case 2: /* FST m32fp */
-        case 3: /* FSTP m32fp */
-        case 6: /* FNSTENV */
-        case 7: /* FNSTCW */
-            return true;
-        }
-        break;
-
-    case 0xdb:
-        switch ( state->modrm_reg & 7 )
-        {
-        case 1: /* FISTTP m32i */
-        case 2: /* FIST m32i */
-        case 3: /* FISTP m32i */
-        case 7: /* FSTP m80fp */
-            return true;
-        }
-        break;
-
-    case 0xdd:
-        switch ( state->modrm_reg & 7 )
-        {
-        case 1: /* FISTTP m64i */
-        case 2: /* FST m64fp */
-        case 3: /* FSTP m64fp */
-        case 6: /* FNSAVE */
-        case 7: /* FNSTSW */
-            return true;
-        }
-        break;
-
-    case 0xdf:
-        switch ( state->modrm_reg & 7 )
-        {
-        case 1: /* FISTTP m16i */
-        case 2: /* FIST m16i */
-        case 3: /* FISTP m16i */
-        case 6: /* FBSTP */
-        case 7: /* FISTP m64i */
-            return true;
-        }
-        break;
-
-    case 0xff:
-        switch ( state->modrm_reg & 7 )
-        {
-        case 2: /* CALL (near, indirect) */
-        case 3: /* CALL (far, indirect) */
-        case 6: /* PUSH r/m */
-            return true;
-        }
-        break;
-
-    case X86EMUL_OPC(0x0f, 0x01):
-        switch ( state->modrm_reg & 7 )
-        {
-        case 0: /* SGDT */
-        case 1: /* SIDT */
-        case 4: /* SMSW */
-            return true;
-        }
-        break;
-
-    case X86EMUL_OPC(0x0f, 0xae):
-        switch ( state->modrm_reg & 7 )
-        {
-        case 0: /* FXSAVE */
-        /* case 3: STMXCSR - handled above */
-        case 4: /* XSAVE */
-        case 6: /* XSAVEOPT */
-            return true;
-        }
-        break;
-
-    case X86EMUL_OPC(0x0f, 0xba):
-        return (state->modrm_reg & 7) > 4; /* BTS / BTR / BTC */
-
-    case X86EMUL_OPC(0x0f, 0xc7):
-        switch ( state->modrm_reg & 7 )
-        {
-        case 1: /* CMPXCHG{8,16}B */
-        case 4: /* XSAVEC */
-        case 5: /* XSAVES */
-            return true;
-        }
-        break;
-    }
-
-    return false;
-}
-
-bool cf_check
-x86_insn_is_portio(const struct x86_emulate_state *state,
-                   const struct x86_emulate_ctxt *ctxt)
-{
-    switch ( ctxt->opcode )
-    {
-    case 0x6c ... 0x6f: /* INS / OUTS */
-    case 0xe4 ... 0xe7: /* IN / OUT imm8 */
-    case 0xec ... 0xef: /* IN / OUT %dx */
-        return true;
-    }
-
-    return false;
-}
-
-bool cf_check
-x86_insn_is_cr_access(const struct x86_emulate_state *state,
-                      const struct x86_emulate_ctxt *ctxt)
-{
-    switch ( ctxt->opcode )
-    {
-        unsigned int ext;
-
-    case X86EMUL_OPC(0x0f, 0x01):
-        if ( x86_insn_modrm(state, NULL, &ext) >= 0
-             && (ext & 5) == 4 ) /* SMSW / LMSW */
-            return true;
-        break;
-
-    case X86EMUL_OPC(0x0f, 0x06): /* CLTS */
-    case X86EMUL_OPC(0x0f, 0x20): /* MOV from CRn */
-    case X86EMUL_OPC(0x0f, 0x22): /* MOV to CRn */
-        return true;
-    }
-
-    return false;
-}
-
-unsigned long
-x86_insn_immediate(const struct x86_emulate_state *state, unsigned int nr)
-{
-    check_state(state);
-
-    switch ( nr )
-    {
-    case 0:
-        return state->imm1;
-    case 1:
-        return state->imm2;
-    }
-
-    return 0;
-}
-
-unsigned int
-x86_insn_length(const struct x86_emulate_state *state,
-                const struct x86_emulate_ctxt *ctxt)
-{
-    check_state(state);
-
-    return state->ip - ctxt->regs->r(ip);
-}