
[Xen-devel] [PATCH v2 16/17] x86/hvm: remove multiple open coded 'chunking' loops



...in hvmemul_read/write()

Add hvmemul_phys_mmio_access() and hvmemul_linear_mmio_access() functions
to reduce code duplication.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Cc: Keir Fraser <keir@xxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 xen/arch/x86/hvm/emulate.c |  244 +++++++++++++++++++++++++-------------------
 1 file changed, 140 insertions(+), 104 deletions(-)
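
For illustration only (not part of the patch): a minimal, standalone sketch of
the chunk-size selection that hvmemul_phys_mmio_access() performs. generic_fls()
and show_chunks() below are hypothetical stand-ins for Xen's fls() and the real
emulation path; the printf merely shows how a request is split into power-of-2
chunks no larger than sizeof(long). A second sketch, covering the page-by-page
splitting done by hvmemul_linear_mmio_access(), follows the end of the diff.

/* Illustrative only: mirrors the chunking done by hvmemul_phys_mmio_access(). */
#include <stdio.h>

/* Hypothetical stand-in for Xen's fls(): 1-based index of the highest set bit. */
static unsigned int generic_fls(unsigned int x)
{
    unsigned int r = 0;

    while ( x )
    {
        x >>= 1;
        r++;
    }

    return r;
}

/* size is assumed non-zero, as it is for the real caller. */
static void show_chunks(unsigned int size)
{
    /* Largest power of 2 not exceeding size, capped at sizeof(long). */
    unsigned int chunk = 1u << (generic_fls(size) - 1);

    if ( chunk > sizeof(long) )
        chunk = sizeof(long);

    while ( size != 0 )
    {
        printf("chunk of %u bytes\n", chunk);
        size -= chunk;

        /* Next lowest power of 2 that still fits the remainder. */
        while ( chunk > size )
            chunk >>= 1;
    }
}

int main(void)
{
    show_chunks(6); /* a 6-byte operand splits into a 4-byte and a 2-byte access */

    return 0;
}

Built natively (e.g. gcc -Wall), show_chunks(6) prints a 4-byte chunk followed
by a 2-byte chunk on a 64-bit build, which is the split the helper issues to
hvmemul_do_mmio_buffer().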

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 0d748e7..467f2da 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -619,6 +619,127 @@ static int hvmemul_virtual_to_linear(
     return X86EMUL_EXCEPTION;
 }
 
+static int hvmemul_phys_mmio_access(paddr_t gpa,
+                                    unsigned int size,
+                                    uint8_t dir,
+                                    uint8_t *buffer,
+                                    unsigned int *off)
+{
+    unsigned long one_rep = 1;
+    unsigned int chunk;
+    int rc = 0;
+
+    /* Accesses must fall within a page */
+    if ( (gpa & (PAGE_SIZE - 1)) + size > PAGE_SIZE )
+        return X86EMUL_UNHANDLEABLE;
+
+    /*
+     * hvmemul_do_io() cannot handle non-power-of-2 accesses or accesses
+     * larger than sizeof(long), so start with the largest power of 2 not
+     * exceeding size, capped at sizeof(long), as the 'chunk' size.
+     */
+    chunk = 1 << (fls(size) - 1);
+    if ( chunk > sizeof (long) )
+        chunk = sizeof (long);
+
+    while ( size != 0 )
+    {
+        rc = hvmemul_do_mmio_buffer(gpa, &one_rep, chunk, dir, 0,
+                                    &buffer[*off]);
+        if ( rc != X86EMUL_OKAY )
+            break;
+
+        /* Advance to the next chunk */
+        gpa += chunk;
+        *off += chunk;
+        size -= chunk;
+
+        /*
+         * If the chunk now exceeds the remaining size, choose the next
+         * lowest power of 2 that will fit.
+         */
+        while ( chunk > size )
+            chunk >>= 1;
+    }
+
+    return rc;
+}
+
+static int hvmemul_phys_mmio_read(paddr_t gpa,
+                                  unsigned int size,
+                                  uint8_t *buffer,
+                                  unsigned int *off)
+{
+    return hvmemul_phys_mmio_access(gpa, size, IOREQ_READ, buffer,
+                                    off);
+}
+
+static int hvmemul_phys_mmio_write(paddr_t gpa,
+                                   unsigned int size,
+                                   uint8_t *buffer,
+                                   unsigned int *off)
+{
+    return hvmemul_phys_mmio_access(gpa, size, IOREQ_WRITE, buffer,
+                                    off);
+}
+
+static int hvmemul_linear_mmio_access(unsigned long mmio_addr,
+                                      unsigned int size,
+                                      uint8_t dir,
+                                      uint8_t *buffer,
+                                      uint32_t pfec,
+                                      struct hvm_emulate_ctxt *hvmemul_ctxt)
+{
+    unsigned long page_off = mmio_addr & (PAGE_SIZE - 1);
+    unsigned int chunk, buffer_off = 0;
+    paddr_t gpa;
+    unsigned long one_rep = 1;
+    int rc;
+
+    chunk = min_t(unsigned int, size, PAGE_SIZE - page_off);
+    rc = hvmemul_linear_to_phys(mmio_addr, &gpa, chunk,
+                                &one_rep, pfec, hvmemul_ctxt);
+    while ( rc == X86EMUL_OKAY )
+    {
+        rc = hvmemul_phys_mmio_access(gpa, chunk, dir, buffer,
+                                      &buffer_off);
+        if ( rc != X86EMUL_OKAY )
+            break;
+
+        mmio_addr += chunk;
+        size -= chunk;
+
+        if ( size == 0 )
+            break;
+
+        chunk = min_t(unsigned int, size, PAGE_SIZE);
+        rc = hvmemul_linear_to_phys(mmio_addr, &gpa, chunk,
+                                    &one_rep, pfec, hvmemul_ctxt);
+    }
+
+    return rc;
+}
+
+static int hvmemul_linear_mmio_read(unsigned long mmio_addr,
+                                    unsigned int size,
+                                    uint8_t *buffer,
+                                    uint32_t pfec,
+                                    struct hvm_emulate_ctxt *hvmemul_ctxt)
+{
+    return hvmemul_linear_mmio_access(mmio_addr, size, IOREQ_READ, buffer,
+                                      pfec, hvmemul_ctxt);
+}
+
+static int hvmemul_linear_mmio_write(unsigned long mmio_addr,
+                                     unsigned int size,
+                                     uint8_t *buffer,
+                                     uint32_t pfec,
+                                     struct hvm_emulate_ctxt *hvmemul_ctxt)
+{
+    return hvmemul_linear_mmio_access(mmio_addr, size, IOREQ_WRITE, buffer,
+                                      pfec, hvmemul_ctxt);
+}
+
 static int __hvmemul_read(
     enum x86_segment seg,
     unsigned long offset,
@@ -628,52 +749,28 @@ static int __hvmemul_read(
     struct hvm_emulate_ctxt *hvmemul_ctxt)
 {
     struct vcpu *curr = current;
-    unsigned long addr, reps = 1;
-    unsigned int off, chunk = min(bytes, 1U << LONG_BYTEORDER);
+    unsigned long addr, one_rep = 1;
     uint32_t pfec = PFEC_page_present;
     struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
     paddr_t gpa;
     int rc;
 
     rc = hvmemul_virtual_to_linear(
-        seg, offset, bytes, &reps, access_type, hvmemul_ctxt, &addr);
+        seg, offset, bytes, &one_rep, access_type, hvmemul_ctxt, &addr);
     if ( rc != X86EMUL_OKAY )
         return rc;
-    off = addr & (PAGE_SIZE - 1);
-    /*
-     * We only need to handle sizes actual instruction operands can have. All
-     * such sizes are either powers of 2 or the sum of two powers of 2. Thus
-     * picking as initial chunk size the largest power of 2 not greater than
-     * the total size will always result in only power-of-2 size requests
-     * issued to hvmemul_do_mmio() (hvmemul_do_io() rejects non-powers-of-2).
-     */
-    while ( chunk & (chunk - 1) )
-        chunk &= chunk - 1;
-    if ( off + bytes > PAGE_SIZE )
-        while ( off & (chunk - 1) )
-            chunk >>= 1;
 
     if ( ((access_type != hvm_access_insn_fetch
            ? vio->mmio_access.read_access
            : vio->mmio_access.insn_fetch)) &&
          (vio->mmio_gva == (addr & PAGE_MASK)) )
     {
-        gpa = (((paddr_t)vio->mmio_gpfn << PAGE_SHIFT) | off);
-        while ( (off + chunk) <= PAGE_SIZE )
-        {
-            rc = hvmemul_do_mmio_buffer(gpa, &reps, chunk, IOREQ_READ, 0,
-                                        p_data);
-            if ( rc != X86EMUL_OKAY || bytes == chunk )
-                return rc;
-            off += chunk;
-            gpa += chunk;
-            p_data += chunk;
-            bytes -= chunk;
-            if ( bytes < chunk )
-                chunk = bytes;
-        }
+        unsigned int off = 0;
 
-        return X86EMUL_UNHANDLEABLE;
+        gpa = (((paddr_t)vio->mmio_gpfn << PAGE_SHIFT) |
+               (addr & (PAGE_SIZE - 1)));
+
+        return hvmemul_phys_mmio_read(gpa, bytes, p_data, &off);
     }
 
     if ( (seg != x86_seg_none) &&
@@ -693,30 +790,9 @@ static int __hvmemul_read(
     case HVMCOPY_bad_gfn_to_mfn:
         if ( access_type == hvm_access_insn_fetch )
             return X86EMUL_UNHANDLEABLE;
-        rc = hvmemul_linear_to_phys(addr, &gpa, chunk, &reps, pfec,
-                                    hvmemul_ctxt);
-        while ( rc == X86EMUL_OKAY )
-        {
-            rc = hvmemul_do_mmio_buffer(gpa, &reps, chunk, IOREQ_READ, 0,
-                                        p_data);
-            if ( rc != X86EMUL_OKAY || bytes == chunk )
-                break;
-            addr += chunk;
-            off += chunk;
-            p_data += chunk;
-            bytes -= chunk;
-            if ( bytes < chunk )
-                chunk = bytes;
-            if ( off < PAGE_SIZE )
-                gpa += chunk;
-            else
-            {
-                rc = hvmemul_linear_to_phys(addr, &gpa, chunk, &reps, pfec,
-                                            hvmemul_ctxt);
-                off = 0;
-            }
-        }
-        return rc;
+
+        return hvmemul_linear_mmio_read(addr, bytes, p_data,
+                                        pfec, hvmemul_ctxt);
     case HVMCOPY_gfn_paged_out:
     case HVMCOPY_gfn_shared:
         return X86EMUL_RETRY;
@@ -781,44 +857,26 @@ static int hvmemul_write(
     struct hvm_emulate_ctxt *hvmemul_ctxt =
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
     struct vcpu *curr = current;
-    unsigned long addr, reps = 1;
-    unsigned int off, chunk = min(bytes, 1U << LONG_BYTEORDER);
+    unsigned long addr, one_rep = 1;
     uint32_t pfec = PFEC_page_present | PFEC_write_access;
     struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
     paddr_t gpa;
     int rc;
 
     rc = hvmemul_virtual_to_linear(
-        seg, offset, bytes, &reps, hvm_access_write, hvmemul_ctxt, &addr);
+        seg, offset, bytes, &one_rep, hvm_access_write, hvmemul_ctxt, &addr);
     if ( rc != X86EMUL_OKAY )
         return rc;
-    off = addr & (PAGE_SIZE - 1);
-    /* See the respective comment in __hvmemul_read(). */
-    while ( chunk & (chunk - 1) )
-        chunk &= chunk - 1;
-    if ( off + bytes > PAGE_SIZE )
-        while ( off & (chunk - 1) )
-            chunk >>= 1;
 
     if ( vio->mmio_access.write_access &&
          (vio->mmio_gva == (addr & PAGE_MASK)) )
     {
-        gpa = (((paddr_t)vio->mmio_gpfn << PAGE_SHIFT) | off);
-        while ( (off + chunk) <= PAGE_SIZE )
-        {
-            rc = hvmemul_do_mmio_buffer(gpa, &reps, chunk, IOREQ_WRITE, 0,
-                                        p_data);
-            if ( rc != X86EMUL_OKAY || bytes == chunk )
-                return rc;
-            off += chunk;
-            gpa += chunk;
-            p_data += chunk;
-            bytes -= chunk;
-            if ( bytes < chunk )
-                chunk = bytes;
-        }
+        unsigned int off = 0;
 
-        return X86EMUL_UNHANDLEABLE;
+        gpa = (((paddr_t)vio->mmio_gpfn << PAGE_SHIFT) |
+               (addr & (PAGE_SIZE - 1)));
+
+        return hvmemul_phys_mmio_write(gpa, bytes, p_data, &off);
     }
 
     if ( (seg != x86_seg_none) &&
@@ -834,30 +892,8 @@ static int hvmemul_write(
     case HVMCOPY_bad_gva_to_gfn:
         return X86EMUL_EXCEPTION;
     case HVMCOPY_bad_gfn_to_mfn:
-        rc = hvmemul_linear_to_phys(addr, &gpa, chunk, &reps, pfec,
-                                    hvmemul_ctxt);
-        while ( rc == X86EMUL_OKAY )
-        {
-            rc = hvmemul_do_mmio_buffer(gpa, &reps, chunk, IOREQ_WRITE, 0,
-                                        p_data);
-            if ( rc != X86EMUL_OKAY || bytes == chunk )
-                break;
-            addr += chunk;
-            off += chunk;
-            p_data += chunk;
-            bytes -= chunk;
-            if ( bytes < chunk )
-                chunk = bytes;
-            if ( off < PAGE_SIZE )
-                gpa += chunk;
-            else
-            {
-                rc = hvmemul_linear_to_phys(addr, &gpa, chunk, &reps, pfec,
-                                            hvmemul_ctxt);
-                off = 0;
-            }
-        }
-        return rc;
+        return hvmemul_linear_mmio_write(addr, bytes, p_data,
+                                         pfec, hvmemul_ctxt);
     case HVMCOPY_gfn_paged_out:
     case HVMCOPY_gfn_shared:
         return X86EMUL_RETRY;
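
And, as mentioned above, an equally illustrative sketch of the page-by-page
splitting performed by hvmemul_linear_mmio_access(). split_at_page_boundaries()
is a hypothetical helper that keeps only the splitting arithmetic; the
linear-to-physical translation and the MMIO access itself are elided:

/*
 * Illustrative only: the page-by-page splitting performed by
 * hvmemul_linear_mmio_access().  Translation and the MMIO access
 * themselves are elided; only the splitting arithmetic is shown.
 */
#include <stdio.h>

#define PAGE_SIZE 4096u

static void split_at_page_boundaries(unsigned long addr, unsigned int size)
{
    /* The first chunk runs at most to the end of the current page. */
    unsigned int chunk = PAGE_SIZE - (addr & (PAGE_SIZE - 1));

    if ( chunk > size )
        chunk = size;

    while ( size != 0 )
    {
        printf("access at %#lx, %u bytes\n", addr, chunk);

        addr += chunk;
        size -= chunk;

        /* Subsequent chunks start page-aligned, so up to a full page fits. */
        chunk = size < PAGE_SIZE ? size : PAGE_SIZE;
    }
}

int main(void)
{
    /* An 8-byte access crossing a page boundary -> 3 bytes, then 5 bytes. */
    split_at_page_boundaries(0x10ffdUL, 8);

    return 0;
}

An 8-byte access starting 3 bytes before a page boundary comes out as a 3-byte
access followed by a 5-byte access, matching the min_t() calculations in the
patch above.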
-- 
1.7.10.4

