
[Xen-devel] [PATCH v2 1/2] x86/HVM: drop hvm_fetch_from_guest_linear()



It can easily be expressed through hvm_copy_from_guest_linear() by having
callers OR PFEC_insn_fetch into the walk flags (see the sketch below), and
in two cases this even simplifies callers.

Suggested-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
v2: Make sure this compiles standalone. Slightly adjust change in
    hvm_ud_intercept().
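
For reference only (not part of the patch): the removed wrapper was just the
generic linear copy with PFEC_insn_fetch OR-ed into the walk flags, so callers
now pass that flag themselves. A minimal sketch of the equivalence, using a
hypothetical helper name purely for illustration:

    /* Illustrative only - what hvm_fetch_from_guest_linear() amounted to. */
    static inline enum hvm_translation_result
    fetch_from_guest_linear_equiv(void *buf, unsigned long addr, int size,
                                  uint32_t pfec, pagefault_info_t *pfinfo)
    {
        /*
         * hvm_copy_from_guest_linear() already ORs in PFEC_page_present,
         * so call sites only need to add PFEC_insn_fetch.
         */
        return hvm_copy_from_guest_linear(buf, addr, size,
                                          pfec | PFEC_insn_fetch, pfinfo);
    }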

--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1060,6 +1060,8 @@ static int __hvmemul_read(
         pfec |= PFEC_implicit;
     else if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 )
         pfec |= PFEC_user_mode;
+    if ( access_type == hvm_access_insn_fetch )
+        pfec |= PFEC_insn_fetch;
 
     rc = hvmemul_virtual_to_linear(
         seg, offset, bytes, &reps, access_type, hvmemul_ctxt, &addr);
@@ -1071,9 +1073,7 @@ static int __hvmemul_read(
          (vio->mmio_gla == (addr & PAGE_MASK)) )
         return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
 
-    rc = ((access_type == hvm_access_insn_fetch) ?
-          hvm_fetch_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo) :
-          hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo));
+    rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
 
     switch ( rc )
     {
@@ -2512,9 +2512,10 @@ void hvm_emulate_init_per_insn(
                                         hvm_access_insn_fetch,
                                         &hvmemul_ctxt->seg_reg[x86_seg_cs],
                                         &addr) &&
-             hvm_fetch_from_guest_linear(hvmemul_ctxt->insn_buf, addr,
-                                         sizeof(hvmemul_ctxt->insn_buf),
-                                         pfec, NULL) == HVMTRANS_okay) ?
+             hvm_copy_from_guest_linear(hvmemul_ctxt->insn_buf, addr,
+                                        sizeof(hvmemul_ctxt->insn_buf),
+                                        pfec | PFEC_insn_fetch,
+                                        NULL) == HVMTRANS_okay) ?
             sizeof(hvmemul_ctxt->insn_buf) : 0;
     }
     else
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3287,15 +3287,6 @@ enum hvm_translation_result hvm_copy_fro
                       PFEC_page_present | pfec, pfinfo);
 }
 
-enum hvm_translation_result hvm_fetch_from_guest_linear(
-    void *buf, unsigned long addr, int size, uint32_t pfec,
-    pagefault_info_t *pfinfo)
-{
-    return __hvm_copy(buf, addr, size, current,
-                      HVMCOPY_from_guest | HVMCOPY_linear,
-                      PFEC_page_present | PFEC_insn_fetch | pfec, pfinfo);
-}
-
 unsigned long copy_to_user_hvm(void *to, const void *from, unsigned int len)
 {
     int rc;
@@ -3741,16 +3732,16 @@ void hvm_ud_intercept(struct cpu_user_re
     if ( opt_hvm_fep )
     {
         const struct segment_register *cs = &ctxt.seg_reg[x86_seg_cs];
-        uint32_t walk = (ctxt.seg_reg[x86_seg_ss].dpl == 3)
-            ? PFEC_user_mode : 0;
+        uint32_t walk = ((ctxt.seg_reg[x86_seg_ss].dpl == 3)
+                         ? PFEC_user_mode : 0) | PFEC_insn_fetch;
         unsigned long addr;
         char sig[5]; /* ud2; .ascii "xen" */
 
         if ( hvm_virtual_to_linear_addr(x86_seg_cs, cs, regs->rip,
                                         sizeof(sig), hvm_access_insn_fetch,
                                         cs, &addr) &&
-             (hvm_fetch_from_guest_linear(sig, addr, sizeof(sig),
-                                          walk, NULL) == HVMTRANS_okay) &&
+             (hvm_copy_from_guest_linear(sig, addr, sizeof(sig),
+                                         walk, NULL) == HVMTRANS_okay) &&
              (memcmp(sig, "\xf\xbxen", sizeof(sig)) == 0) )
         {
             regs->rip += sizeof(sig);
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -164,8 +164,9 @@ const struct x86_emulate_ops *shadow_ini
         (!hvm_translate_virtual_addr(
             x86_seg_cs, regs->rip, sizeof(sh_ctxt->insn_buf),
             hvm_access_insn_fetch, sh_ctxt, &addr) &&
-         !hvm_fetch_from_guest_linear(
-             sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0, NULL))
+         !hvm_copy_from_guest_linear(
+             sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf),
+             PFEC_insn_fetch, NULL))
         ? sizeof(sh_ctxt->insn_buf) : 0;
 
     return &hvm_shadow_emulator_ops;
@@ -198,8 +199,9 @@ void shadow_continue_emulation(struct sh
             (!hvm_translate_virtual_addr(
                 x86_seg_cs, regs->rip, sizeof(sh_ctxt->insn_buf),
                 hvm_access_insn_fetch, sh_ctxt, &addr) &&
-             !hvm_fetch_from_guest_linear(
-                 sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0, NULL))
+             !hvm_copy_from_guest_linear(
+                 sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf),
+                 PFEC_insn_fetch, NULL))
             ? sizeof(sh_ctxt->insn_buf) : 0;
         sh_ctxt->insn_buf_eip = regs->rip;
     }
--- a/xen/arch/x86/mm/shadow/hvm.c
+++ b/xen/arch/x86/mm/shadow/hvm.c
@@ -122,10 +122,10 @@ hvm_read(enum x86_segment seg,
     if ( rc || !bytes )
         return rc;
 
-    if ( access_type == hvm_access_insn_fetch )
-        rc = hvm_fetch_from_guest_linear(p_data, addr, bytes, 0, &pfinfo);
-    else
-        rc = hvm_copy_from_guest_linear(p_data, addr, bytes, 0, &pfinfo);
+    rc = hvm_copy_from_guest_linear(p_data, addr, bytes,
+                                    (access_type == hvm_access_insn_fetch
+                                     ? PFEC_insn_fetch : 0),
+                                    &pfinfo);
 
     switch ( rc )
     {
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -100,9 +100,6 @@ enum hvm_translation_result hvm_copy_to_
 enum hvm_translation_result hvm_copy_from_guest_linear(
     void *buf, unsigned long addr, int size, uint32_t pfec,
     pagefault_info_t *pfinfo);
-enum hvm_translation_result hvm_fetch_from_guest_linear(
-    void *buf, unsigned long addr, int size, uint32_t pfec,
-    pagefault_info_t *pfinfo);
 
 /*
  * Get a reference on the page under an HVM physical or linear address.  If



