[Xen-devel] [PATCH] x86/HVM: correct hvmemul_map_linear_addr() for multi-page case

The function does two translations in one go for a single guest access.
Any failure of the first translation step (guest linear -> guest
physical), resulting in #PF, ought to take precedence over any failure
of the second step (guest physical -> host physical). Bail out of the
loop early only when translation produces HVMTRANS_bad_linear_to_gfn;
otherwise record the most relevant of the possibly multiple errors
encountered. (The choice of ZERO_BLOCK_PTR as sentinel is arbitrary.)

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
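
---
Note (illustration, not part of the patch): update_map_err() yields an
error precedence in which X86EMUL_OKAY (the discard-write case) is
weakest, X86EMUL_RETRY gives way to any harder error, and both NULL
(bad_gfn_to_mfn, i.e. MMIO) and X86EMUL_UNHANDLEABLE are sticky, the
first one encountered being retained. A minimal, self-contained sketch
of that ordering, using plain enum tokens in place of Xen's
ERR_PTR()/ZERO_BLOCK_PTR encodings (the enum names and the test
harness are made up for demonstration):

#include <assert.h>

enum outcome { NONE, OKAY, RETRY, MMIO, UNHANDLEABLE };

static enum outcome update_err(enum outcome err, enum outcome new)
{
    if ( err == NONE || err == OKAY )
        return new;        /* first real error replaces "none"/OKAY */

    if ( new == OKAY )
        return err;        /* OKAY never overrides a real error */

    if ( err == RETRY )
        return new;        /* RETRY gives way to any harder error */

    return err;            /* MMIO/UNHANDLEABLE stick; first one wins */
}

int main(void)
{
    assert(update_err(NONE, RETRY) == RETRY);
    assert(update_err(RETRY, MMIO) == MMIO);        /* RETRY overridden */
    assert(update_err(MMIO, RETRY) == MMIO);        /* ...not vice versa */
    assert(update_err(UNHANDLEABLE, OKAY) == UNHANDLEABLE);
    assert(update_err(MMIO, UNHANDLEABLE) == MMIO); /* first of two wins */
    return 0;
}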

--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -531,6 +531,20 @@ static int hvmemul_do_mmio_addr(paddr_t
     return hvmemul_do_io_addr(1, mmio_gpa, reps, size, dir, df, ram_gpa);
 }
 
+static void *update_map_err(void *err, void *new)
+{
+    if ( err == ZERO_BLOCK_PTR || err == ERR_PTR(~X86EMUL_OKAY) )
+        return new;
+
+    if ( new == ERR_PTR(~X86EMUL_OKAY) )
+        return err;
+
+    if ( err == ERR_PTR(~X86EMUL_RETRY) )
+        return new;
+
+    return err;
+}
+
 /*
  * Map the frame(s) covering an individual linear access, for writeable
  * access.  May return NULL for MMIO, or ERR_PTR(~X86EMUL_*) for other errors
@@ -544,7 +558,7 @@ static void *hvmemul_map_linear_addr(
     struct hvm_emulate_ctxt *hvmemul_ctxt)
 {
     struct vcpu *curr = current;
-    void *err, *mapping;
+    void *err = ZERO_BLOCK_PTR, *mapping;
     unsigned int nr_frames = ((linear + bytes - !!bytes) >> PAGE_SHIFT) -
         (linear >> PAGE_SHIFT) + 1;
     unsigned int i;
@@ -600,27 +614,28 @@ static void *hvmemul_map_linear_addr(
             goto out;
 
         case HVMTRANS_bad_gfn_to_mfn:
-            err = NULL;
-            goto out;
+            err = update_map_err(err, NULL);
+            continue;
 
         case HVMTRANS_gfn_paged_out:
         case HVMTRANS_gfn_shared:
-            err = ERR_PTR(~X86EMUL_RETRY);
-            goto out;
+            err = update_map_err(err, ERR_PTR(~X86EMUL_RETRY));
+            continue;
 
         default:
-            goto unhandleable;
+            err = update_map_err(err, ERR_PTR(~X86EMUL_UNHANDLEABLE));
+            continue;
         }
 
         *mfn++ = page_to_mfn(page);
 
         if ( p2m_is_discard_write(p2mt) )
-        {
-            err = ERR_PTR(~X86EMUL_OKAY);
-            goto out;
-        }
+            err = update_map_err(err, ERR_PTR(~X86EMUL_OKAY));
     }
 
+    if ( err != ZERO_BLOCK_PTR )
+        goto out;
+
     /* Entire access within a single frame? */
     if ( nr_frames == 1 )
         mapping = map_domain_page(hvmemul_ctxt->mfn[0]);
@@ -639,6 +654,7 @@ static void *hvmemul_map_linear_addr(
     return mapping + (linear & ~PAGE_MASK);
 
  unhandleable:
+    ASSERT(err == ZERO_BLOCK_PTR);
     err = ERR_PTR(~X86EMUL_UNHANDLEABLE);
 
  out:
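
For context (not part of the patch), callers decode the tri-state
result (NULL for MMIO, ERR_PTR(~X86EMUL_*) for other errors, a valid
pointer on success) along the lines of the write path below; a sketch
with abridged arguments, where the exact helper signatures are
assumptions:

    void *mapping = hvmemul_map_linear_addr(linear, bytes, pfec,
                                            hvmemul_ctxt);

    if ( IS_ERR(mapping) )
        return ~PTR_ERR(mapping); /* recover X86EMUL_* from ERR_PTR(~rc) */

    if ( !mapping )
        /* NULL means MMIO: take the per-chunk MMIO emulation path. */
        return hvmemul_linear_mmio_write(linear, bytes, p_data, pfec,
                                         hvmemul_ctxt, 0);

    memcpy(mapping, p_data, bytes);
    hvmemul_unmap_linear_addr(mapping, linear, bytes, hvmemul_ctxt);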




