[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 17/17] x86/hvm: track large memory mapped accesses by linear address



The code in hvmemul_do_io() that tracks large reads or writes, to avoid
re-issuing component I/O, is defeated by accesses that cross a page boundary
because it uses physical rather than linear addresses. The code is also only
relevant to memory mapped I/O to or from a buffer.

This patch re-factors the code and moves it into
hvmemul_linear_mmio_access() where it is relevant and where it has
access to linear addresses.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Cc: Keir Fraser <keir@xxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 xen/arch/x86/hvm/emulate.c     |   82 +++++++++++++---------------------------
 xen/include/asm-x86/hvm/vcpu.h |   16 ++++----
 2 files changed, 36 insertions(+), 62 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 8b9b7f2..288d69d 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -112,28 +112,7 @@ static int hvmemul_do_io(
         return X86EMUL_UNHANDLEABLE;
     }
 
-    if ( is_mmio && !data_is_addr )
-    {
-        /* Part of a multi-cycle read or write? */
-        if ( dir == IOREQ_WRITE )
-        {
-            paddr_t pa = vio->mmio_large_write_pa;
-            unsigned int bytes = vio->mmio_large_write_bytes;
-            if ( (addr >= pa) && ((addr + size) <= (pa + bytes)) )
-                return X86EMUL_OKAY;
-        }
-        else
-        {
-            paddr_t pa = vio->mmio_large_read_pa;
-            unsigned int bytes = vio->mmio_large_read_bytes;
-            if ( (addr >= pa) && ((addr + size) <= (pa + bytes)) )
-            {
-                memcpy(p_data, &vio->mmio_large_read[addr - pa],
-                       size);
-                return X86EMUL_OKAY;
-            }
-        }
-    }
+    vio = &curr->arch.hvm_vcpu.hvm_io;
 
     switch ( vio->io_req.state )
     {
@@ -161,34 +140,6 @@ static int hvmemul_do_io(
                 memcpy((void *)data, &p.data, size);
         }
 
-        if ( is_mmio && !data_is_addr )
-        {
-            /* Part of a multi-cycle read or write? */
-            if ( dir == IOREQ_WRITE )
-            {
-                paddr_t pa = vio->mmio_large_write_pa;
-                unsigned int bytes = vio->mmio_large_write_bytes;
-                if ( bytes == 0 )
-                    pa = vio->mmio_large_write_pa = addr;
-                if ( addr == (pa + bytes) )
-                    vio->mmio_large_write_bytes += size;
-            }
-            else
-            {
-                paddr_t pa = vio->mmio_large_read_pa;
-                unsigned int bytes = vio->mmio_large_read_bytes;
-                if ( bytes == 0 )
-                    pa = vio->mmio_large_read_pa = addr;
-                if ( (addr == (pa + bytes)) &&
-                     ((bytes + size) <= sizeof(vio->mmio_large_read)) )
-                {
-                    memcpy(&vio->mmio_large_read[addr - pa], (void *)data,
-                           size);
-                    vio->mmio_large_read_bytes += size;
-                }
-            }
-        }
-
         *reps = p.count;
         return X86EMUL_OKAY;
     default:
@@ -666,8 +617,9 @@ static int hvmemul_linear_mmio_access(unsigned long mmio_addr,
                                       uint32_t pfec,
                                       struct hvm_emulate_ctxt *hvmemul_ctxt)
 {
+    struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
     unsigned long page_off = mmio_addr & (PAGE_SIZE - 1);
-    unsigned int chunk;
+    unsigned int chunk, buffer_off = 0;
     paddr_t gpa;
     unsigned long one_rep = 1;
     int rc;
@@ -677,13 +629,33 @@ static int hvmemul_linear_mmio_access(unsigned long mmio_addr,
                                 &one_rep, pfec, hvmemul_ctxt);
     while ( rc == X86EMUL_OKAY )
     {
-        rc = hvmemul_phys_mmio_access(gpa, chunk, dir, buffer);
-        if ( rc != X86EMUL_OKAY )
-            break;
+        /* Have we already done this chunk? */
+        if ( (buffer_off + chunk) <= vio->mmio_cache[dir].size )
+        {
+            if ( dir == IOREQ_READ )
+                memcpy(buffer,
+                       &vio->mmio_cache[IOREQ_READ].buffer[buffer_off],
+                       chunk);
+            else
+                ASSERT(memcmp(buffer,
+                              &vio->mmio_cache[IOREQ_WRITE].buffer[buffer_off],
+                              chunk) == 0);
+        }
+        else
+        {
+            rc = hvmemul_phys_mmio_access(gpa, chunk, dir, buffer);
+            if ( rc != X86EMUL_OKAY )
+                break;
+
+            /* Note that we have now done this chunk */
+            memcpy(&vio->mmio_cache[dir].buffer[buffer_off], buffer, chunk);
+            vio->mmio_cache[dir].size += chunk;
+        }
 
         mmio_addr += chunk;
         ASSERT((mmio_addr & (PAGE_SIZE - 1)) == 0);
         buffer += chunk;
+        buffer_off += chunk;
         size -= chunk;
 
         if ( size == 0 )
@@ -1645,7 +1617,7 @@ static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
         rc = X86EMUL_RETRY;
     if ( rc != X86EMUL_RETRY )
     {
-        vio->mmio_large_read_bytes = vio->mmio_large_write_bytes = 0;
+        memset(&vio->mmio_cache, 0, sizeof(vio->mmio_cache));
         vio->mmio_insn_bytes = 0;
     }
     else
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 2a1da4b..83f536b 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -56,13 +56,15 @@ struct hvm_vcpu_io {
     unsigned long       mmio_gva;
     unsigned long       mmio_gpfn;
 
-    /* We may read up to m256 as a number of device-model transactions. */
-    paddr_t mmio_large_read_pa;
-    uint8_t mmio_large_read[32];
-    unsigned int mmio_large_read_bytes;
-    /* We may write up to m256 as a number of device-model transactions. */
-    unsigned int mmio_large_write_bytes;
-    paddr_t mmio_large_write_pa;
+    /*
+     * We may read or write up to m256 as a number of device-model
+     * transactions.
+     */
+    struct {
+        unsigned long size;
+        uint8_t buffer[32];
+    } mmio_cache[2]; /* Indexed by ioreq type */
+
     /* For retries we shouldn't re-fetch the instruction. */
     unsigned int mmio_insn_bytes;
     unsigned char mmio_insn[16];
-- 
1.7.10.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.