[Xen-devel] [PATCH 4/5] x86/shadow: Drop all emulation for PV vcpus



Emulation is only performed for paging_mode_refcounts() domains, which in
practice means HVM domains only.

Drop the PV emulation code.  As it always set addr_size and sp_size to
BITS_PER_LONG, it can't have worked correctly for PV guests running in a
different mode to Xen.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
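
(Note, not for the commit message.)  The addr_size/sp_size point above refers
to the removed hunk in shadow_init_emulation(), which unconditionally did:

    sh_ctxt->ctxt.addr_size = sh_ctxt->ctxt.sp_size = BITS_PER_LONG;

i.e. 64 when running on 64bit Xen.  For illustration only (not something
being proposed here), a mode-aware PV initialisation would have needed to
look more like the following, using the existing is_pv_32bit_vcpu()
predicate:

    if ( is_pv_vcpu(v) )
    {
        /* A 32bit PV guest needs 32bit address/stack sizes, not Xen's. */
        sh_ctxt->ctxt.addr_size = sh_ctxt->ctxt.sp_size =
            is_pv_32bit_vcpu(v) ? 32 : BITS_PER_LONG;
        return &pv_shadow_emulator_ops;
    }

No such handling ever existed, hence the "can't have worked correctly"
observation above.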
---
 xen/arch/x86/mm/shadow/common.c | 111 +++++++---------------------------------
 xen/arch/x86/mm/shadow/multi.c  |  21 ++------
 2 files changed, 22 insertions(+), 110 deletions(-)
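
(Also not for the commit message.)  As background for the
paging_mode_refcounts() observation, the gate in the fault path has roughly
the following shape; this is an illustrative sketch rather than a quote of
sh_page_fault():

    /*
     * Emulating the faulting write is only attempted when Xen refcounts
     * the domain's pagetables.  HVM shadow guests always run with
     * refcounts enabled; PV guests under shadow paging (e.g. for
     * log-dirty) do not, so they cannot reach the emulator hooks being
     * removed below.
     */
    if ( !paging_mode_refcounts(d) )
        return 0; /* hypothetical early-out, for illustration only */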

diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 84a87f3..2525a57 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -318,75 +318,6 @@ static const struct x86_emulate_ops hvm_shadow_emulator_ops = {
     .cpuid      = hvmemul_cpuid,
 };
 
-static int
-pv_emulate_read(enum x86_segment seg,
-                unsigned long offset,
-                void *p_data,
-                unsigned int bytes,
-                struct x86_emulate_ctxt *ctxt)
-{
-    unsigned int rc;
-
-    if ( !is_x86_user_segment(seg) )
-        return X86EMUL_UNHANDLEABLE;
-
-    if ( (rc = copy_from_user(p_data, (void *)offset, bytes)) != 0 )
-    {
-        x86_emul_pagefault(0, offset + bytes - rc, ctxt); /* Read fault. */
-        return X86EMUL_EXCEPTION;
-    }
-
-    return X86EMUL_OKAY;
-}
-
-static int
-pv_emulate_write(enum x86_segment seg,
-                 unsigned long offset,
-                 void *p_data,
-                 unsigned int bytes,
-                 struct x86_emulate_ctxt *ctxt)
-{
-    struct sh_emulate_ctxt *sh_ctxt =
-        container_of(ctxt, struct sh_emulate_ctxt, ctxt);
-    struct vcpu *v = current;
-    if ( !is_x86_user_segment(seg) )
-        return X86EMUL_UNHANDLEABLE;
-    return v->arch.paging.mode->shadow.x86_emulate_write(
-        v, offset, p_data, bytes, sh_ctxt);
-}
-
-static int
-pv_emulate_cmpxchg(enum x86_segment seg,
-                   unsigned long offset,
-                   void *p_old,
-                   void *p_new,
-                   unsigned int bytes,
-                   struct x86_emulate_ctxt *ctxt)
-{
-    struct sh_emulate_ctxt *sh_ctxt =
-        container_of(ctxt, struct sh_emulate_ctxt, ctxt);
-    unsigned long old, new;
-    struct vcpu *v = current;
-
-    if ( !is_x86_user_segment(seg) || bytes > sizeof(long) )
-        return X86EMUL_UNHANDLEABLE;
-
-    old = new = 0;
-    memcpy(&old, p_old, bytes);
-    memcpy(&new, p_new, bytes);
-
-    return v->arch.paging.mode->shadow.x86_emulate_cmpxchg(
-               v, offset, old, new, bytes, sh_ctxt);
-}
-
-static const struct x86_emulate_ops pv_shadow_emulator_ops = {
-    .read       = pv_emulate_read,
-    .insn_fetch = pv_emulate_read,
-    .write      = pv_emulate_write,
-    .cmpxchg    = pv_emulate_cmpxchg,
-    .cpuid      = pv_emul_cpuid,
-};
-
 const struct x86_emulate_ops *shadow_init_emulation(
     struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs)
 {
@@ -394,17 +325,13 @@ const struct x86_emulate_ops *shadow_init_emulation(
     struct vcpu *v = current;
     unsigned long addr;
 
+    ASSERT(has_hvm_container_vcpu(v));
+
     memset(sh_ctxt, 0, sizeof(*sh_ctxt));
 
     sh_ctxt->ctxt.regs = regs;
     sh_ctxt->ctxt.swint_emulate = x86_swint_emulate_none;
 
-    if ( is_pv_vcpu(v) )
-    {
-        sh_ctxt->ctxt.addr_size = sh_ctxt->ctxt.sp_size = BITS_PER_LONG;
-        return &pv_shadow_emulator_ops;
-    }
-
     /* Segment cache initialisation. Primed with CS. */
     creg = hvm_get_seg_reg(x86_seg_cs, sh_ctxt);
 
@@ -441,24 +368,24 @@ void shadow_continue_emulation(struct sh_emulate_ctxt *sh_ctxt,
     struct vcpu *v = current;
     unsigned long addr, diff;
 
-    /* We don't refetch the segment bases, because we don't emulate
-     * writes to segment registers */
+    ASSERT(has_hvm_container_vcpu(v));
 
-    if ( is_hvm_vcpu(v) )
-    {
-        diff = regs->eip - sh_ctxt->insn_buf_eip;
-        if ( diff > sh_ctxt->insn_buf_bytes )
-        {
-            /* Prefetch more bytes. */
-            sh_ctxt->insn_buf_bytes =
-                (!hvm_translate_linear_addr(
-                    x86_seg_cs, regs->eip, sizeof(sh_ctxt->insn_buf),
-                    hvm_access_insn_fetch, sh_ctxt, &addr) &&
-                 !hvm_fetch_from_guest_linear(
-                     sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0, NULL))
-                ? sizeof(sh_ctxt->insn_buf) : 0;
-            sh_ctxt->insn_buf_eip = regs->eip;
-        }
+    /*
+     * We don't refetch the segment bases, because we don't emulate
+     * writes to segment registers
+     */
+    diff = regs->eip - sh_ctxt->insn_buf_eip;
+    if ( diff > sh_ctxt->insn_buf_bytes )
+    {
+        /* Prefetch more bytes. */
+        sh_ctxt->insn_buf_bytes =
+            (!hvm_translate_linear_addr(
+                x86_seg_cs, regs->eip, sizeof(sh_ctxt->insn_buf),
+                hvm_access_insn_fetch, sh_ctxt, &addr) &&
+             !hvm_fetch_from_guest_linear(
+                 sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0, NULL))
+            ? sizeof(sh_ctxt->insn_buf) : 0;
+        sh_ctxt->insn_buf_eip = regs->eip;
     }
 }
 
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 67c98b9..713f23d 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -3382,12 +3382,7 @@ static int sh_page_fault(struct vcpu *v,
               (((emul_ctxt.ctxt.event.vector == TRAP_gp_fault) ||
                 (emul_ctxt.ctxt.event.vector == TRAP_stack_error)) &&
                emul_ctxt.ctxt.event.error_code == 0)) )
-        {
-            if ( has_hvm_container_domain(d) )
-                hvm_inject_event(&emul_ctxt.ctxt.event);
-            else
-                pv_inject_event(&emul_ctxt.ctxt.event);
-        }
+            hvm_inject_event(&emul_ctxt.ctxt.event);
         else
         {
             SHADOW_PRINTK(
@@ -3447,12 +3442,7 @@ static int sh_page_fault(struct vcpu *v,
 #endif
 
     if ( emul_ctxt.ctxt.retire.singlestep )
-    {
-        if ( has_hvm_container_domain(d) )
-            hvm_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
-        else
-            pv_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
-    }
+        hvm_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
 
 #if GUEST_PAGING_LEVELS == 3 /* PAE guest */
     /*
@@ -3493,12 +3483,7 @@ static int sh_page_fault(struct vcpu *v,
                 TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_EMULATION_LAST_FAILED);
 
                 if ( emul_ctxt.ctxt.retire.singlestep )
-                {
-                    if ( has_hvm_container_domain(d) )
-                        hvm_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
-                    else
-                        pv_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
-                }
+                    hvm_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
 
                 break; /* Don't emulate again if we failed! */
             }
-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 

