xen-devel

[Xen-devel] [PATCH v2 5/5] xen: mapcache performance improvements

From: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>

Use qemu_invalidate_entry in cpu_physical_memory_unmap.
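
Concretely, the Xen branch of cpu_physical_memory_unmap collapses from a
per-page loop into a single call; a sketch of the result, matching the
last hunk below:

    if (xen_mapcache_enabled()) {
        /* one call invalidates the whole mapping backing the buffer */
        qemu_invalidate_entry(buffer);
    }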

Do not lock mapcache entries in qemu_get_ram_ptr if the address falls in
the ramblock with offset == 0. We don't need to do that because the
callers of qemu_get_ram_ptr either map an entire block (one other than
the main ramblock), or map only until the end of a page to implement a
single read or write in the main ramblock.
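
As a sketch, the two caller patterns in the Xen branch of
qemu_get_ram_ptr then look like this, matching the first two hunks
below:

    if (block->offset == 0) {
        /* main ramblock: map only until the end of the page, unlocked */
        return qemu_map_cache(addr, 0, 0);
    } else if (block->host == NULL) {
        /* any other block: map it in full once and keep it locked */
        block->host = qemu_map_cache(block->offset, block->length, 1);
    }
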
If we don't lock mapcache entries in qemu_get_ram_ptr, we no longer need
to call qemu_invalidate_entry in qemu_put_ram_ptr, because we are left
with only the few long-lived block mappings requested by devices.
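
With that, the Xen-specific invalidation in qemu_put_ram_ptr goes away
and only the tracepoint remains (see the third hunk below); a sketch of
the resulting function:

    void qemu_put_ram_ptr(void *addr)
    {
        /* no per-page invalidation: unlocked mapcache entries can be
         * recycled by the mapcache itself */
        trace_qemu_put_ram_ptr(addr);
    }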

Also move the call to qemu_ram_addr_from_mapcache to the beginning of
qemu_ram_addr_from_host.
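
Roughly, qemu_ram_addr_from_host then starts with the mapcache lookup
and only walks ram_list when the mapcache is not in use; a sketch of the
new shape (the loop is omitted here):

    int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
    {
        if (xen_mapcache_enabled()) {
            /* under Xen every guest RAM pointer comes from the mapcache */
            *ram_addr = qemu_ram_addr_from_mapcache(ptr);
            return 0;
        }
        /* non-Xen path: the ram_list walk is unchanged (omitted in this
         * sketch); it returns -1 when no block matches */
        return -1;
    }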

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
---
 exec.c |   28 ++++++++++------------------
 1 files changed, 10 insertions(+), 18 deletions(-)

diff --git a/exec.c b/exec.c
index ff9c174..aebb23b 100644
--- a/exec.c
+++ b/exec.c
@@ -3065,9 +3065,10 @@ void *qemu_get_ram_ptr(ram_addr_t addr)
             if (xen_mapcache_enabled()) {
                 /* We need to check if the requested address is in the RAM
                  * because we don't want to map the entire memory in QEMU.
+                 * In that case just map until the end of the page.
                  */
                 if (block->offset == 0) {
-                    return qemu_map_cache(addr, 0, 1);
+                    return qemu_map_cache(addr, 0, 0);
                 } else if (block->host == NULL) {
                     block->host = qemu_map_cache(block->offset, block->length, 1);
                 }
@@ -3094,9 +3095,10 @@ void *qemu_safe_ram_ptr(ram_addr_t addr)
             if (xen_mapcache_enabled()) {
                 /* We need to check if the requested address is in the RAM
                  * because we don't want to map the entire memory in QEMU.
+                 * In that case just map until the end of the page.
                  */
                 if (block->offset == 0) {
-                    return qemu_map_cache(addr, 0, 1);
+                    return qemu_map_cache(addr, 0, 0);
                 } else if (block->host == NULL) {
                     block->host = qemu_map_cache(block->offset, block->length, 1);
                 }
@@ -3139,10 +3141,6 @@ void *qemu_ram_ptr_length(target_phys_addr_t addr, target_phys_addr_t *size)
 void qemu_put_ram_ptr(void *addr)
 {
     trace_qemu_put_ram_ptr(addr);
-
-    if (xen_mapcache_enabled()) {
-            qemu_invalidate_entry(block->host);
-    }
 }
 
 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
@@ -3150,6 +3148,11 @@ int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
     RAMBlock *block;
     uint8_t *host = ptr;
 
+    if (xen_mapcache_enabled()) {
+        *ram_addr = qemu_ram_addr_from_mapcache(ptr);
+        return 0;
+    }
+
     QLIST_FOREACH(block, &ram_list.blocks, next) {
         /* This case append when the block is not mapped. */
         if (block->host == NULL) {
@@ -3161,11 +3164,6 @@ int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
         }
     }
 
-    if (xen_mapcache_enabled()) {
-        *ram_addr = qemu_ram_addr_from_mapcache(ptr);
-        return 0;
-    }
-
     return -1;
 }
 
@@ -4066,13 +4064,7 @@ void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
             }
         }
         if (xen_mapcache_enabled()) {
-            uint8_t *buffer1 = buffer;
-            uint8_t *end_buffer = buffer + len;
-
-            while (buffer1 < end_buffer) {
-                qemu_put_ram_ptr(buffer1);
-                buffer1 += TARGET_PAGE_SIZE;
-            }
+            qemu_invalidate_entry(buffer);
         }
         return;
     }
-- 
1.7.2.3


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel