[Xen-devel] [PATCH] improve the mapcache

To: xen-devel <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH] improve the mapcache
From: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
Date: Fri, 27 Mar 2009 16:58:20 +0000
Delivery-date: Fri, 27 Mar 2009 10:06:48 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Thunderbird 2.0.0.14 (X11/20080505)

Hi all,
this patch improves qemu-xen's mapcache so that it can guarantee that
certain mappings last until they are explicitly unmapped.
This capability is needed to implement cpu_physical_memory_map and
cpu_physical_memory_unmap, which are part of the new DMA API.
The patch also provides implementations of these two functions,
replacing the bounce-buffer-based versions that we are currently using.

Current DMA throughput: ~10MB/s
DMA throughput with this patch: ~27MB/s

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>

---
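
The intended calling pattern for the new API is sketched below.  This is
illustration only and not part of the patch: dma_to_guest() is a
hypothetical helper made up for the example, error handling is minimal,
and the exact include lines may differ inside the qemu-xen tree.

/* Sketch: copy a buffer into guest memory via the new DMA API.
 * Assumes it is built inside the qemu-xen tree, where
 * target_phys_addr_t and cpu_physical_memory_map/unmap are declared. */
#include <string.h>

static int dma_to_guest(target_phys_addr_t addr, const uint8_t *src,
                        target_phys_addr_t len)
{
    while (len > 0) {
        target_phys_addr_t l = len;
        /* The map may shrink l: a single mapping never crosses an
         * MCACHE_BUCKET_SIZE boundary, so callers must loop. */
        uint8_t *p = cpu_physical_memory_map(addr, &l, 1 /* is_write */);
        if (p == NULL)
            return -1;
        memcpy(p, src, l);
        /* Releases the lock taken by qemu_map_cache(addr, 1). */
        cpu_physical_memory_unmap(p, l, 1, l);
        addr += l;
        src  += l;
        len  -= l;
    }
    return 0;
}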

diff --git a/hw/xen_machine_fv.c b/hw/xen_machine_fv.c
index 9bca50a..f0b7952 100644
--- a/hw/xen_machine_fv.c
+++ b/hw/xen_machine_fv.c
@@ -34,16 +34,6 @@
 
 #if defined(MAPCACHE)
 
-#if defined(__i386__) 
-#define MAX_MCACHE_SIZE    0x40000000 /* 1GB max for x86 */
-#define MCACHE_BUCKET_SHIFT 16
-#elif defined(__x86_64__)
-#define MAX_MCACHE_SIZE    0x1000000000 /* 64GB max for x86_64 */
-#define MCACHE_BUCKET_SHIFT 20
-#endif
-
-#define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT)
-
 #define BITS_PER_LONG (sizeof(long)*8)
 #define BITS_TO_LONGS(bits) \
     (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
@@ -56,10 +46,19 @@ struct map_cache {
     unsigned long paddr_index;
     uint8_t      *vaddr_base;
     DECLARE_BITMAP(valid_mapping, MCACHE_BUCKET_SIZE>>XC_PAGE_SHIFT);
+    uint8_t lock;
+    struct map_cache *next;
+};
+
+struct map_cache_rev {
+    uint8_t      *vaddr_req;
+    unsigned long paddr_index;
+    TAILQ_ENTRY(map_cache_rev) next;
 };
 
 static struct map_cache *mapcache_entry;
 static unsigned long nr_buckets;
+TAILQ_HEAD(map_cache_head, map_cache_rev) locked_entries = TAILQ_HEAD_INITIALIZER(locked_entries);
 
 /* For most cases (>99.9%), the page address is the same. */
 static unsigned long last_address_index = ~0UL;
@@ -129,20 +128,29 @@ static void qemu_remap_bucket(struct map_cache *entry,
     }
 }
 
-uint8_t *qemu_map_cache(target_phys_addr_t phys_addr)
+uint8_t *qemu_map_cache(target_phys_addr_t phys_addr, uint8_t lock)
 {
-    struct map_cache *entry;
+    struct map_cache *entry, *pentry = NULL;
     unsigned long address_index  = phys_addr >> MCACHE_BUCKET_SHIFT;
     unsigned long address_offset = phys_addr & (MCACHE_BUCKET_SIZE-1);
 
-    if (address_index == last_address_index)
+    if (address_index == last_address_index && !lock)
         return last_address_vaddr + address_offset;
 
     entry = &mapcache_entry[address_index % nr_buckets];
 
-    if (entry->vaddr_base == NULL || entry->paddr_index != address_index ||
-        !test_bit(address_offset>>XC_PAGE_SHIFT, entry->valid_mapping))
+    while (entry && entry->lock && entry->paddr_index != address_index && entry->vaddr_base) {
+        pentry = entry;
+        entry = entry->next;
+    }
+    if (!entry) {
+        entry = qemu_mallocz(sizeof(struct map_cache));
+        pentry->next = entry;
         qemu_remap_bucket(entry, address_index);
+    } else if (!entry->lock) {
+        if (!entry->vaddr_base || entry->paddr_index != address_index || !test_bit(address_offset>>XC_PAGE_SHIFT, entry->valid_mapping))
+            qemu_remap_bucket(entry, address_index);
+    }
 
     if (!test_bit(address_offset>>XC_PAGE_SHIFT, entry->valid_mapping)) {
         last_address_index = ~0UL;
@@ -151,13 +159,79 @@ uint8_t *qemu_map_cache(target_phys_addr_t phys_addr)
 
     last_address_index = address_index;
     last_address_vaddr = entry->vaddr_base;
+    if (lock) {
+        struct map_cache_rev *reventry = qemu_mallocz(sizeof(struct map_cache_rev));
+        entry->lock++;
+        reventry->vaddr_req = last_address_vaddr + address_offset;
+        reventry->paddr_index = last_address_index;
+        TAILQ_INSERT_TAIL(&locked_entries, reventry, next);
+    }
 
     return last_address_vaddr + address_offset;
 }
 
+void qemu_invalidate_entry(uint8_t *buffer)
+{
+    struct map_cache *entry = NULL, *next;
+    struct map_cache_rev *reventry;
+    unsigned long paddr_index;
+    int found = 0;
+    
+    if (last_address_vaddr == buffer) {
+        last_address_index =  ~0UL;
+        last_address_vaddr = NULL;
+    }
+
+    TAILQ_FOREACH(reventry, &locked_entries, next) {
+        if (reventry->vaddr_req == buffer) {
+            paddr_index = reventry->paddr_index;
+            found = 1;
+            break;
+        }
+    }
+    if (!found) {
+        fprintf(stderr, "qemu_invalidate_entry: could not find %p\n", buffer);
+        TAILQ_FOREACH(reventry, &locked_entries, next) {
+            fprintf(stderr, "   %lx -> %p is present\n", reventry->paddr_index, reventry->vaddr_req);
+        }
+        return;
+    }
+    TAILQ_REMOVE(&locked_entries, reventry, next);
+    qemu_free(reventry);
+
+    next = &mapcache_entry[paddr_index % nr_buckets];
+    if (next->paddr_index == paddr_index) {
+        next->lock--;
+        return;
+    }
+
+    while (next != NULL && next->paddr_index != paddr_index) {
+        entry = next;
+        next = next->next;
+    }
+    if (!next) {
+        fprintf(logfile, "Trying to unmap address %p that is not in the mapcache!\n", buffer);
+        return;
+    }
+    
+    entry->next = next->next;
+    if (munmap(next->vaddr_base, MCACHE_BUCKET_SIZE) != 0) {
+        fprintf(logfile, "unmap fails %d\n", errno);
+        exit(-1);
+    }
+    qemu_free(next);
+}
+
 void qemu_invalidate_map_cache(void)
 {
     unsigned long i;
+    struct map_cache_rev *reventry;
+
+    qemu_aio_flush();
+
+    TAILQ_FOREACH(reventry, &locked_entries, next) {
+        fprintf(stderr, "There should be no locked mappings at this time, but 
%lx -> %p is present\n", reventry->paddr_index, reventry->vaddr_req);
+    }
 
     mapcache_lock();
 
diff --git a/i386-dm/exec-dm.c b/i386-dm/exec-dm.c
index 99a32b2..a509fc5 100644
--- a/i386-dm/exec-dm.c
+++ b/i386-dm/exec-dm.c
@@ -435,7 +435,7 @@ void unregister_iomem(target_phys_addr_t start)
 
 
 #if defined(__i386__) || defined(__x86_64__)
-#define phys_ram_addr(x) (qemu_map_cache(x))
+#define phys_ram_addr(x) (qemu_map_cache(x, 0))
 #elif defined(__ia64__)
 #define phys_ram_addr(x) (((x) < ram_size) ? (phys_ram_base + (x)) : NULL)
 #endif
@@ -712,78 +712,6 @@ void cpu_physical_memory_write_rom(target_phys_addr_t addr,
 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size) { }
 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size) { }
 
-#ifdef DIRECT_MMAP
-void *cpu_physical_memory_map(target_phys_addr_t addr,
-                              target_phys_addr_t *plen,
-                              int is_write) {
-    xen_pfn_t first, last, count, i;
-    target_phys_addr_t offset;
-    void *vaddr;
-
-    if (!*plen)
-        return NULL;
-
-    first = addr >> XC_PAGE_SHIFT;
-    last = (addr + *plen - 1) >> XC_PAGE_SHIFT;
-    count = last - first + 1;
-    offset = addr & (XC_PAGE_SIZE-1);
-
-    xen_pfn_t pfns[count];
-
-fprintf(stderr,"cpu_physical_memory_map tpa=%lx *plen=%lx"
-        "  first=%lx last=%lx count=%lx offset=%lx ",
-        (unsigned long)addr,
-        (unsigned long)*plen,
-        (unsigned long)first,
-        (unsigned long)last,
-        (unsigned long)count,
-        (unsigned long)offset);
-        
-    for (i = 0; i < count; i++)
-        pfns[i] = first + i;
-
-    vaddr = xc_map_foreign_batch(xc_handle, domid,
-                                 is_write ? PROT_WRITE : PROT_READ,
-                                 pfns, count);
-fprintf(stderr," => vaddr=%p\n", vaddr);
-
-    if (!vaddr)
-        perror("cpu_physical_memory_map: map failed");
-
-    return vaddr;
-}
-
-void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
-                               int is_write, target_phys_addr_t access_len) {
-    uintptr_t start, end;
-    int ret;
-
-    if (!len) return;
-
-    start = (uintptr_t)buffer & ~((uintptr_t)XC_PAGE_SIZE - 1);
-    end = ((uintptr_t)(buffer + len - 1) | ((uintptr_t)XC_PAGE_SIZE - 1)) + 1;
-    
-fprintf(stderr,"cpu_physical_memory_unmap buffer=%p len=%lx"
-        "  start=%lx end=%lx  XC_PAGE_SIZE-1=%lx\n",
-        buffer,
-        (unsigned long)len,
-        (unsigned long)start,
-        (unsigned long)end,
-        (unsigned long)((uintptr_t)XC_PAGE_SIZE - 1)
-        );
-
-    ret = munmap((void*)start, end - start);
-    if (ret)
-        perror("cpu_physical_memory_unmap: munmap failed");
-}
-
-void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque)) {
-    return 0;
-}
-void cpu_unregister_map_client(void *cookie) {
-}
-#endif /*DIRECT_MMAP*/
-
 /* stub out various functions for Xen DM */
 void dump_exec_info(FILE *f,
                     int (*cpu_fprintf)(FILE *f, const char *fmt, ...)) {
@@ -859,44 +787,10 @@ void *cpu_physical_memory_map(target_phys_addr_t addr,
                               target_phys_addr_t *plen,
                               int is_write)
 {
-    target_phys_addr_t len = *plen;
-    target_phys_addr_t done = 0;
-    int l;
-    uint8_t *ret = NULL;
-    uint8_t *ptr;
-    target_phys_addr_t page;
-    PhysPageDesc *p;
-    unsigned long addr1;
-
-    while (len > 0) {
-        page = addr & TARGET_PAGE_MASK;
-        l = (page + TARGET_PAGE_SIZE) - addr;
-        if (l > len)
-            l = len;
-
-        if (done || bounce.buffer) {
-            break;
-        }
-       bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
-        bounce.addr = addr;
-        bounce.len = l;
-        if (!is_write) {
-            cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
-        }
-        ptr = bounce.buffer;
-
-        if (!done) {
-            ret = ptr;
-        } else if (ret + done != ptr) {
-            break;
-        }
-
-        len -= l;
-        addr += l;
-        done += l;
-    }
-    *plen = done;
-    return ret;
+    unsigned long l = MCACHE_BUCKET_SIZE - (addr & (MCACHE_BUCKET_SIZE-1));
+    if ((*plen) > l)
+        *plen = l;
+    return qemu_map_cache(addr, 1);
 }
 
 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
@@ -906,11 +800,5 @@ void *cpu_physical_memory_map(target_phys_addr_t addr,
 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                                int is_write, target_phys_addr_t access_len)
 {
-    assert(buffer == bounce.buffer);
-    if (is_write) {
-        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
-    }
-    qemu_free(bounce.buffer);
-    bounce.buffer = NULL;
-    cpu_notify_map_clients();
+    qemu_invalidate_entry(buffer);
 }
diff --git a/qemu-xen.h b/qemu-xen.h
index ec4cd94..95a6651 100644
--- a/qemu-xen.h
+++ b/qemu-xen.h
@@ -9,7 +9,19 @@ extern int vga_ram_size;
 
 #if (defined(__i386__) || defined(__x86_64__)) && !defined(QEMU_TOOL)
 #define MAPCACHE
-uint8_t *qemu_map_cache(target_phys_addr_t phys_addr);
+
+#if defined(__i386__) 
+#define MAX_MCACHE_SIZE    0x40000000 /* 1GB max for x86 */
+#define MCACHE_BUCKET_SHIFT 16
+#elif defined(__x86_64__)
+#define MAX_MCACHE_SIZE    0x1000000000 /* 64GB max for x86_64 */
+#define MCACHE_BUCKET_SHIFT 20
+#endif
+
+#define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT)
+
+uint8_t *qemu_map_cache(target_phys_addr_t phys_addr, uint8_t lock);
+void     qemu_invalidate_entry(uint8_t *buffer);
 void     qemu_invalidate_map_cache(void);
 #else 
 #define qemu_invalidate_map_cache() ((void)0)

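As a postscript, for anyone following the mapcache arithmetic: below is a
tiny standalone sketch (illustration only, not part of the patch) of how
qemu_map_cache() splits a guest-physical address into a bucket index and
an offset, using the x86_64 constants that the patch moves into qemu-xen.h.

#include <stdio.h>
#include <stdint.h>

#define MCACHE_BUCKET_SHIFT 20                   /* x86_64 value */
#define MCACHE_BUCKET_SIZE  (1UL << MCACHE_BUCKET_SHIFT)

int main(void)
{
    uint64_t phys_addr = 0x12345678;
    unsigned long address_index  = phys_addr >> MCACHE_BUCKET_SHIFT;
    unsigned long address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1);

    /* Prints: bucket 0x123, offset 0x45678 (1MB buckets on x86_64). */
    printf("bucket 0x%lx, offset 0x%lx\n", address_index, address_offset);
    return 0;
}
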
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
