To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] vmx/hvm: move mov-cr handling functions to generic HVM code
From: Xen patchbot-unstable <patchbot@xxxxxxx>
Date: Mon, 18 Apr 2011 16:25:12 +0100
Delivery-date: Mon, 18 Apr 2011 08:26:38 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir@xxxxxxx>
# Date 1303116432 -3600
# Node ID 1276926e3795b11ef6ac2f59df900d8e0ba9f54b
# Parent  07d832ad23021445bc56fafaeb2843c94d868005
vmx/hvm: move mov-cr handling functions to generic HVM code

Currently, CR access intercepts are handled quite differently in SVM and
VMX. To allow future reuse, move the VMX part into the generic HVM path
and use the exported functions.

Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>
Signed-off-by: Keir Fraser <keir@xxxxxxx>
---
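
[Not part of the patch: a minimal, self-contained sketch of how the new
accessor-style exit-qualification macros on the vmx.h side decode a raw
EXIT_QUALIFICATION value into the access type, CR number and GPR operand
that vmx_cr_access() now passes straight to hvm_mov_to_cr() /
hvm_mov_from_cr(). The sample value is made up for illustration and the
snippet compiles as an ordinary userspace program.]

#include <stdio.h>

/* Accessor macros as introduced on the vmx.h side of this patch. */
#define VMX_CONTROL_REG_ACCESS_NUM(eq)   ((eq) & 0xf)         /* 3:0 - CRn         */
#define VMX_CONTROL_REG_ACCESS_TYPE(eq)  (((eq) >> 4) & 0x3)  /* 5:4 - access type */
#define VMX_CONTROL_REG_ACCESS_GPR(eq)   (((eq) >> 8) & 0xf)  /* GPR operand       */

#define VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR   0
#define VMX_CONTROL_REG_ACCESS_TYPE_MOV_FROM_CR 1

int main(void)
{
    /* Hypothetical qualification: "mov %rbx, %cr4" (type 0, CR 4, GPR 3). */
    unsigned long eq = (3UL << 8) | (0UL << 4) | 4UL;

    printf("type=%lu cr=%lu gpr=%lu\n",
           VMX_CONTROL_REG_ACCESS_TYPE(eq),
           VMX_CONTROL_REG_ACCESS_NUM(eq),
           VMX_CONTROL_REG_ACCESS_GPR(eq));
    /* Prints: type=0 cr=4 gpr=3 */
    return 0;
}

[Note also the return-value convention: the new hvm_mov_to_cr() /
hvm_mov_from_cr() helpers and hvm_set_cr0/3/4() return X86EMUL_* codes,
so vmx_cr_access() now returns X86EMUL_OKAY and the EXIT_REASON_CR_ACCESS
handler advances the guest EIP only on that value, replacing the previous
boolean convention.]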


diff -r 07d832ad2302 -r 1276926e3795 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Mon Apr 18 05:01:19 2011 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Mon Apr 18 09:47:12 2011 +0100
@@ -1409,6 +1409,86 @@
         return hvm_funcs.set_uc_mode(v);
 }
 
+int hvm_mov_to_cr(unsigned int cr, unsigned int gpr)
+{
+    struct vcpu *curr = current;
+    unsigned long val, *reg;
+
+    if ( (reg = get_x86_gpr(guest_cpu_user_regs(), gpr)) == NULL )
+    {
+        gdprintk(XENLOG_ERR, "invalid gpr: %u\n", gpr);
+        goto exit_and_crash;
+    }
+
+    val = *reg;
+    HVMTRACE_LONG_2D(CR_WRITE, cr, TRC_PAR_LONG(val));
+    HVM_DBG_LOG(DBG_LEVEL_1, "CR%u, value = %lx", cr, val);
+
+    switch ( cr )
+    {
+    case 0:
+        return hvm_set_cr0(val);
+
+    case 3:
+        return hvm_set_cr3(val);
+
+    case 4:
+        return hvm_set_cr4(val);
+
+    case 8:
+        vlapic_set_reg(vcpu_vlapic(curr), APIC_TASKPRI, ((val & 0x0f) << 4));
+        break;
+
+    default:
+        gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
+        goto exit_and_crash;
+    }
+
+    return X86EMUL_OKAY;
+
+ exit_and_crash:
+    domain_crash(curr->domain);
+    return X86EMUL_UNHANDLEABLE;
+}
+
+int hvm_mov_from_cr(unsigned int cr, unsigned int gpr)
+{
+    struct vcpu *curr = current;
+    unsigned long val = 0, *reg;
+
+    if ( (reg = get_x86_gpr(guest_cpu_user_regs(), gpr)) == NULL )
+    {
+        gdprintk(XENLOG_ERR, "invalid gpr: %u\n", gpr);
+        goto exit_and_crash;
+    }
+
+    switch ( cr )
+    {
+    case 0:
+    case 2:
+    case 3:
+    case 4:
+        val = curr->arch.hvm_vcpu.guest_cr[cr];
+        break;
+    case 8:
+        val = (vlapic_get_reg(vcpu_vlapic(curr), APIC_TASKPRI) & 0xf0) >> 4;
+        break;
+    default:
+        gdprintk(XENLOG_ERR, "invalid cr: %u\n", cr);
+        goto exit_and_crash;
+    }
+
+    *reg = val;
+    HVMTRACE_LONG_2D(CR_READ, cr, TRC_PAR_LONG(val));
+    HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR%u, value = %lx", cr, val);
+
+    return X86EMUL_OKAY;
+
+ exit_and_crash:
+    domain_crash(curr->domain);
+    return X86EMUL_UNHANDLEABLE;
+}
+
 int hvm_set_cr0(unsigned long value)
 {
     struct vcpu *v = current;
diff -r 07d832ad2302 -r 1276926e3795 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Mon Apr 18 05:01:19 2011 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Mon Apr 18 09:47:12 2011 +0100
@@ -1554,182 +1554,42 @@
         vpid_sync_vcpu_gva(curr, vaddr);
 }
 
-#define CASE_SET_REG(REG, reg)      \
-    case VMX_CONTROL_REG_ACCESS_GPR_ ## REG: regs->reg = value; break
-#define CASE_GET_REG(REG, reg)      \
-    case VMX_CONTROL_REG_ACCESS_GPR_ ## REG: value = regs->reg; break
+static int vmx_cr_access(unsigned long exit_qualification)
+{
+    struct vcpu *curr = current;
 
-#define CASE_EXTEND_SET_REG         \
-    CASE_EXTEND_REG(S)
-#define CASE_EXTEND_GET_REG         \
-    CASE_EXTEND_REG(G)
-
-#ifdef __i386__
-#define CASE_EXTEND_REG(T)
-#else
-#define CASE_EXTEND_REG(T)          \
-    CASE_ ## T ## ET_REG(R8, r8);   \
-    CASE_ ## T ## ET_REG(R9, r9);   \
-    CASE_ ## T ## ET_REG(R10, r10); \
-    CASE_ ## T ## ET_REG(R11, r11); \
-    CASE_ ## T ## ET_REG(R12, r12); \
-    CASE_ ## T ## ET_REG(R13, r13); \
-    CASE_ ## T ## ET_REG(R14, r14); \
-    CASE_ ## T ## ET_REG(R15, r15)
-#endif
-
-static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
-{
-    unsigned long value;
-    struct vcpu *v = current;
-    struct vlapic *vlapic = vcpu_vlapic(v);
-    int rc = 0;
-    unsigned long old;
-
-    switch ( gp )
+    switch ( VMX_CONTROL_REG_ACCESS_TYPE(exit_qualification) )
     {
-    CASE_GET_REG(EAX, eax);
-    CASE_GET_REG(ECX, ecx);
-    CASE_GET_REG(EDX, edx);
-    CASE_GET_REG(EBX, ebx);
-    CASE_GET_REG(EBP, ebp);
-    CASE_GET_REG(ESI, esi);
-    CASE_GET_REG(EDI, edi);
-    CASE_GET_REG(ESP, esp);
-    CASE_EXTEND_GET_REG;
-    default:
-        gdprintk(XENLOG_ERR, "invalid gp: %d\n", gp);
-        goto exit_and_crash;
+    case VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR: {
+        unsigned long gp = VMX_CONTROL_REG_ACCESS_GPR(exit_qualification);
+        unsigned long cr = VMX_CONTROL_REG_ACCESS_NUM(exit_qualification);
+        return hvm_mov_to_cr(cr, gp);
     }
-
-    HVMTRACE_LONG_2D(CR_WRITE, cr, TRC_PAR_LONG(value));
-
-    HVM_DBG_LOG(DBG_LEVEL_1, "CR%d, value = %lx", cr, value);
-
-    switch ( cr )
-    {
-    case 0:
-        old = v->arch.hvm_vcpu.guest_cr[0];
-        rc = !hvm_set_cr0(value);
-        if (rc)
-            hvm_memory_event_cr0(value, old);
-        return rc;
-
-    case 3:
-        old = v->arch.hvm_vcpu.guest_cr[3];
-        rc = !hvm_set_cr3(value);
-        if (rc)
-            hvm_memory_event_cr3(value, old);        
-        return rc;
-
-    case 4:
-        old = v->arch.hvm_vcpu.guest_cr[4];
-        rc = !hvm_set_cr4(value);
-        if (rc)
-            hvm_memory_event_cr4(value, old);
-        return rc; 
-
-    case 8:
-        vlapic_set_reg(vlapic, APIC_TASKPRI, ((value & 0x0F) << 4));
-        break;
-
-    default:
-        gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
-        goto exit_and_crash;
+    case VMX_CONTROL_REG_ACCESS_TYPE_MOV_FROM_CR: {
+        unsigned long gp = VMX_CONTROL_REG_ACCESS_GPR(exit_qualification);
+        unsigned long cr = VMX_CONTROL_REG_ACCESS_NUM(exit_qualification);
+        return hvm_mov_from_cr(cr, gp);
     }
-
-    return 1;
-
- exit_and_crash:
-    domain_crash(v->domain);
-    return 0;
-}
-
-/*
- * Read from control registers. CR0 and CR4 are read from the shadow.
- */
-static void mov_from_cr(int cr, int gp, struct cpu_user_regs *regs)
-{
-    unsigned long value = 0;
-    struct vcpu *v = current;
-    struct vlapic *vlapic = vcpu_vlapic(v);
-
-    switch ( cr )
-    {
-    case 3:
-        value = (unsigned long)v->arch.hvm_vcpu.guest_cr[3];
-        break;
-    case 8:
-        value = (unsigned long)vlapic_get_reg(vlapic, APIC_TASKPRI);
-        value = (value & 0xF0) >> 4;
-        break;
-    default:
-        gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
-        domain_crash(v->domain);
-        break;
-    }
-
-    switch ( gp ) {
-    CASE_SET_REG(EAX, eax);
-    CASE_SET_REG(ECX, ecx);
-    CASE_SET_REG(EDX, edx);
-    CASE_SET_REG(EBX, ebx);
-    CASE_SET_REG(EBP, ebp);
-    CASE_SET_REG(ESI, esi);
-    CASE_SET_REG(EDI, edi);
-    CASE_SET_REG(ESP, esp);
-    CASE_EXTEND_SET_REG;
-    default:
-        printk("invalid gp: %d\n", gp);
-        domain_crash(v->domain);
-        break;
-    }
-
-    HVMTRACE_LONG_2D(CR_READ, cr, TRC_PAR_LONG(value));
-
-    HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR%d, value = %lx", cr, value);
-}
-
-static int vmx_cr_access(unsigned long exit_qualification,
-                         struct cpu_user_regs *regs)
-{
-    unsigned int gp, cr;
-    unsigned long value;
-    struct vcpu *v = current;
-
-    switch ( exit_qualification & VMX_CONTROL_REG_ACCESS_TYPE )
-    {
-    case VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR:
-        gp = exit_qualification & VMX_CONTROL_REG_ACCESS_GPR;
-        cr = exit_qualification & VMX_CONTROL_REG_ACCESS_NUM;
-        return mov_to_cr(gp, cr, regs);
-    case VMX_CONTROL_REG_ACCESS_TYPE_MOV_FROM_CR:
-        gp = exit_qualification & VMX_CONTROL_REG_ACCESS_GPR;
-        cr = exit_qualification & VMX_CONTROL_REG_ACCESS_NUM;
-        mov_from_cr(cr, gp, regs);
-        break;
-    case VMX_CONTROL_REG_ACCESS_TYPE_CLTS: 
-    {
-        unsigned long old = v->arch.hvm_vcpu.guest_cr[0];
-        v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS;
-        vmx_update_guest_cr(v, 0);
-
-        hvm_memory_event_cr0(v->arch.hvm_vcpu.guest_cr[0], old);
-
+    case VMX_CONTROL_REG_ACCESS_TYPE_CLTS: {
+        unsigned long old = curr->arch.hvm_vcpu.guest_cr[0];
+        curr->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS;
+        vmx_update_guest_cr(curr, 0);
+        hvm_memory_event_cr0(curr->arch.hvm_vcpu.guest_cr[0], old);
         HVMTRACE_0D(CLTS);
         break;
     }
-    case VMX_CONTROL_REG_ACCESS_TYPE_LMSW:
-        value = v->arch.hvm_vcpu.guest_cr[0];
+    case VMX_CONTROL_REG_ACCESS_TYPE_LMSW: {
+        unsigned long value = curr->arch.hvm_vcpu.guest_cr[0];
         /* LMSW can: (1) set bits 0-3; (2) clear bits 1-3. */
         value = (value & ~0xe) | ((exit_qualification >> 16) & 0xf);
         HVMTRACE_LONG_1D(LMSW, value);
-        return !hvm_set_cr0(value);
+        return hvm_set_cr0(value);
+    }
     default:
         BUG();
     }
 
-    return 1;
+    return X86EMUL_OKAY;
 }
 
 static const struct lbr_info {
@@ -2534,7 +2394,7 @@
     case EXIT_REASON_CR_ACCESS:
     {
         exit_qualification = __vmread(EXIT_QUALIFICATION);
-        if ( vmx_cr_access(exit_qualification, regs) )
+        if ( vmx_cr_access(exit_qualification) == X86EMUL_OKAY )
             update_guest_eip(); /* Safe: MOV Cn, LMSW, CLTS */
         break;
     }
diff -r 07d832ad2302 -r 1276926e3795 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c      Mon Apr 18 05:01:19 2011 +0100
+++ b/xen/arch/x86/traps.c      Mon Apr 18 09:47:12 2011 +0100
@@ -368,6 +368,36 @@
     vcpu_unpause(v);
 }
 
+unsigned long *get_x86_gpr(struct cpu_user_regs *regs, unsigned int modrm_reg)
+{
+    void *p;
+
+    switch ( modrm_reg )
+    {
+    case  0: p = &regs->eax; break;
+    case  1: p = &regs->ecx; break;
+    case  2: p = &regs->edx; break;
+    case  3: p = &regs->ebx; break;
+    case  4: p = &regs->esp; break;
+    case  5: p = &regs->ebp; break;
+    case  6: p = &regs->esi; break;
+    case  7: p = &regs->edi; break;
+#if defined(__x86_64__)
+    case  8: p = &regs->r8;  break;
+    case  9: p = &regs->r9;  break;
+    case 10: p = &regs->r10; break;
+    case 11: p = &regs->r11; break;
+    case 12: p = &regs->r12; break;
+    case 13: p = &regs->r13; break;
+    case 14: p = &regs->r14; break;
+    case 15: p = &regs->r15; break;
+#endif
+    default: p = NULL; break;
+    }
+
+    return p;
+}
+
 static char *trapstr(int trapnr)
 {
     static char *strings[] = { 
diff -r 07d832ad2302 -r 1276926e3795 xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Mon Apr 18 05:01:19 2011 +0100
+++ b/xen/include/asm-x86/hvm/support.h Mon Apr 18 09:47:12 2011 +0100
@@ -137,5 +137,7 @@
 int hvm_set_cr4(unsigned long value);
 int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content);
 int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content);
+int hvm_mov_to_cr(unsigned int cr, unsigned int gpr);
+int hvm_mov_from_cr(unsigned int cr, unsigned int gpr);
 
 #endif /* __ASM_X86_HVM_SUPPORT_H__ */
diff -r 07d832ad2302 -r 1276926e3795 xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h Mon Apr 18 05:01:19 2011 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h Mon Apr 18 09:47:12 2011 +0100
@@ -144,31 +144,15 @@
  * Exit Qualifications for MOV for Control Register Access
  */
  /* 3:0 - control register number (CRn) */
-#define VMX_CONTROL_REG_ACCESS_NUM      0xf
+#define VMX_CONTROL_REG_ACCESS_NUM(eq)  ((eq) & 0xf)
  /* 5:4 - access type (CR write, CR read, CLTS, LMSW) */
-#define VMX_CONTROL_REG_ACCESS_TYPE     0x30
+#define VMX_CONTROL_REG_ACCESS_TYPE(eq) (((eq) >> 4) & 0x3)
+# define VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR   0
+# define VMX_CONTROL_REG_ACCESS_TYPE_MOV_FROM_CR 1
+# define VMX_CONTROL_REG_ACCESS_TYPE_CLTS        2
+# define VMX_CONTROL_REG_ACCESS_TYPE_LMSW        3
  /* 10:8 - general purpose register operand */
-#define VMX_CONTROL_REG_ACCESS_GPR      0xf00
-#define VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR   (0 << 4)
-#define VMX_CONTROL_REG_ACCESS_TYPE_MOV_FROM_CR (1 << 4)
-#define VMX_CONTROL_REG_ACCESS_TYPE_CLTS        (2 << 4)
-#define VMX_CONTROL_REG_ACCESS_TYPE_LMSW        (3 << 4)
-#define VMX_CONTROL_REG_ACCESS_GPR_EAX  (0 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_ECX  (1 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_EDX  (2 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_EBX  (3 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_ESP  (4 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_EBP  (5 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_ESI  (6 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_EDI  (7 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R8   (8 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R9   (9 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R10  (10 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R11  (11 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R12  (12 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R13  (13 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R14  (14 << 8)
-#define VMX_CONTROL_REG_ACCESS_GPR_R15  (15 << 8)
+#define VMX_CONTROL_REG_ACCESS_GPR(eq)  (((eq) >> 8) & 0xf)
 
 /*
  * Access Rights
diff -r 07d832ad2302 -r 1276926e3795 xen/include/asm-x86/processor.h
--- a/xen/include/asm-x86/processor.h   Mon Apr 18 05:01:19 2011 +0100
+++ b/xen/include/asm-x86/processor.h   Mon Apr 18 09:47:12 2011 +0100
@@ -589,6 +589,8 @@
 int microcode_update(XEN_GUEST_HANDLE(const_void), unsigned long len);
 int microcode_resume_cpu(int cpu);
 
+unsigned long *get_x86_gpr(struct cpu_user_regs *regs, unsigned int modrm_reg);
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_X86_PROCESSOR_H */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
