
[Xen-devel] Re: [Qemu-devel] [PATCH V9 12/16] Introduce qemu_ram_ptr_unlock.



On 01/25/2011 08:29 AM, anthony.perard@xxxxxxxxxx wrote:
From: Anthony PERARD <anthony.perard@xxxxxxxxxx>

This function unlocks a ram_ptr given by qemu_get_ram_ptr. After a call
to qemu_ram_ptr_unlock, the pointer may be unmapped from QEMU when used
with Xen.

Signed-off-by: Anthony PERARD <anthony.perard@xxxxxxxxxx>
---
  cpu-common.h   |    1 +
  exec.c         |   10 ++++++++++
  xen-mapcache.c |   34 ++++++++++++++++++++++++++++++++++
  3 files changed, 45 insertions(+), 0 deletions(-)
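
For context, the intended calling pattern pairs each qemu_get_ram_ptr() with a
qemu_ram_ptr_unlock() once the host pointer is no longer needed, as the exec.c
hunks below do. A minimal sketch of such a caller; the helper name and its
arguments are illustrative and not part of the patch:

#include <string.h>        /* memcpy */
#include "cpu-common.h"    /* qemu_get_ram_ptr(), qemu_ram_ptr_unlock() */

/* Hypothetical caller, for illustration only (not part of the patch).
 * On Xen, the pointer returned by qemu_get_ram_ptr() keeps the backing
 * mapcache entry locked; qemu_ram_ptr_unlock() releases that lock so
 * the mapping may be unmapped again. */
static void copy_from_guest(ram_addr_t addr, uint8_t *dst, size_t len)
{
    void *ptr = qemu_get_ram_ptr(addr);   /* locks the mapping on Xen */

    memcpy(dst, ptr, len);

    qemu_ram_ptr_unlock(ptr);             /* mapping may now be dropped */
}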

diff --git a/cpu-common.h b/cpu-common.h
index 6d4a898..8fa6d80 100644
--- a/cpu-common.h
+++ b/cpu-common.h
@@ -55,6 +55,7 @@ void *qemu_get_ram_ptr(ram_addr_t addr);
  /* Same but slower, to use for migration, where the order of
   * RAMBlocks must not change. */
  void *qemu_safe_ram_ptr(ram_addr_t addr);
+void qemu_ram_ptr_unlock(void *addr);
  /* This should not be used by devices.  */
  int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr);
  ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
diff --git a/exec.c b/exec.c
index 3b137dc..8acf2a9 100644
--- a/exec.c
+++ b/exec.c
@@ -2977,6 +2977,13 @@ void *qemu_safe_ram_ptr(ram_addr_t addr)
      return NULL;
  }

+void qemu_ram_ptr_unlock(void *addr)
+{
+    if (xen_mapcache_enabled()) {
+        qemu_map_cache_unlock(addr);
+    }
+}
+

qemu_put_ram_ptr() would be a better name.

Regards,

Anthony Liguori

  int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
  {
      RAMBlock *block;
@@ -3692,6 +3699,7 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                      cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                  }
+                qemu_ram_ptr_unlock(ptr);
              }
          } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
@@ -3722,6 +3730,7 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                  memcpy(buf, ptr, l);
+                qemu_ram_ptr_unlock(ptr);
              }
          }
          len -= l;
@@ -3762,6 +3771,7 @@ void cpu_physical_memory_write_rom(target_phys_addr_t addr,
              /* ROM/RAM case */
              ptr = qemu_get_ram_ptr(addr1);
              memcpy(ptr, buf, l);
+            qemu_ram_ptr_unlock(ptr);
          }
          len -= l;
          buf += l;
diff --git a/xen-mapcache.c b/xen-mapcache.c
index 3e1cca9..23a23f9 100644
--- a/xen-mapcache.c
+++ b/xen-mapcache.c
@@ -187,6 +187,40 @@ uint8_t *qemu_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size, u
      return mapcache->last_address_vaddr + address_offset;
  }

+void qemu_map_cache_unlock(void *buffer)
+{
+    MapCacheEntry *entry = NULL, *pentry = NULL;
+    MapCacheRev *reventry;
+    target_phys_addr_t paddr_index;
+    int found = 0;
+
+    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
+        if (reventry->vaddr_req == buffer) {
+            paddr_index = reventry->paddr_index;
+            found = 1;
+            break;
+        }
+    }
+    if (!found) {
+        return;
+    }
+    QTAILQ_REMOVE(&mapcache->locked_entries, reventry, next);
+    qemu_free(reventry);
+
+    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
+    while (entry && entry->paddr_index != paddr_index) {
+        pentry = entry;
+        entry = entry->next;
+    }
+    if (!entry) {
+        return;
+    }
+    entry->lock--;
+    if (entry->lock > 0) {
+        entry->lock--;
+    }
+}
+
  ram_addr_t qemu_ram_addr_from_mapcache(void *ptr)
  {
      MapCacheRev *reventry;
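
The lookup path in qemu_map_cache_unlock() above goes through two structures:
the buffer address is first resolved to a paddr_index via the list of locked
entries (the reverse map), and that index then selects a hash bucket whose
chain is walked to the entry whose lock count is dropped. Below is a
simplified, self-contained model of that path; it is not the QEMU code itself:
the structures are stripped down, QTAILQ is replaced by a hand-rolled singly
linked list, and the lock count is simply decremented once.

/* Simplified model of the unlock path, for illustration only. */
#include <stdio.h>
#include <stdlib.h>

#define NR_BUCKETS 4

typedef struct Entry {              /* stands in for MapCacheEntry */
    unsigned long paddr_index;
    int lock;
    struct Entry *next;             /* bucket chain */
} Entry;

typedef struct Rev {                /* stands in for MapCacheRev */
    void *vaddr_req;                /* pointer handed back to the caller */
    unsigned long paddr_index;      /* which mapping it belongs to */
    struct Rev *next;
} Rev;

static Entry buckets[NR_BUCKETS];
static Rev *locked_entries;

static void map_cache_unlock(void *buffer)
{
    Rev **prev = &locked_entries;
    Rev *rev = locked_entries;
    unsigned long paddr_index = 0;
    int found = 0;

    /* 1. Reverse map: which paddr_index was this buffer handed out for? */
    for (; rev != NULL; prev = &rev->next, rev = rev->next) {
        if (rev->vaddr_req == buffer) {
            paddr_index = rev->paddr_index;
            found = 1;
            break;
        }
    }
    if (!found) {
        return;                     /* buffer was never locked */
    }
    *prev = rev->next;              /* unlink and free the reverse entry */
    free(rev);

    /* 2. Hash bucket plus chain walk to the matching entry. */
    Entry *entry = &buckets[paddr_index % NR_BUCKETS];
    while (entry != NULL && entry->paddr_index != paddr_index) {
        entry = entry->next;
    }
    if (entry != NULL && entry->lock > 0) {
        entry->lock--;              /* one fewer user of this mapping */
    }
}

int main(void)
{
    static char mapping[16];        /* pretend this is the mapped region */

    /* One cached mapping for paddr_index 7, locked once. */
    buckets[7 % NR_BUCKETS].paddr_index = 7;
    buckets[7 % NR_BUCKETS].lock = 1;

    Rev *rev = malloc(sizeof(*rev));
    rev->vaddr_req = mapping;
    rev->paddr_index = 7;
    rev->next = locked_entries;
    locked_entries = rev;

    map_cache_unlock(mapping);
    printf("lock count after unlock: %d\n", buckets[7 % NR_BUCKETS].lock);
    return 0;
}

The reverse map is what lets a caller hand back only the virtual address it
was given, without having to remember the guest-physical address it came from.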


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 

