xen-ia64-devel

[Xen-ia64-devel] [PATCH 6/7] libxc: make xc_ia64_copy_memmap aware of sequence lock

To: xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-ia64-devel] [PATCH 6/7] libxc: make xc_ia64_copy_memmap aware of sequence lock
From: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
Date: Mon, 29 Sep 2008 21:50:02 +0900
Cc: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
Delivery-date: Mon, 29 Sep 2008 05:46:21 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-ia64-devel-request@lists.xensource.com?subject=help>
List-id: Discussion of the ia64 port of Xen <xen-ia64-devel.lists.xensource.com>
List-post: <mailto:xen-ia64-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-ia64-devel>, <mailto:xen-ia64-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-ia64-devel>, <mailto:xen-ia64-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-ia64-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Mutt/1.5.18 (2008-05-17)
[IA64] libxc: make xc_ia64_copy_memmap aware of sequence lock.

A guest domain's memory map may be updated concurrently, so it is
protected by a sequence lock.
This patch makes xc_ia64_copy_memmap() aware of that sequence lock.

Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
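
For context, the writer side of this protocol lives in the hypervisor and is
not part of this libxc patch. The fragment below is only a sketch of the
seqlock convention the new readers assume (the counter is odd while an update
is in flight, even otherwise, and changes across any completed update); the
helper names and the wmb() stand-in are hypothetical, only
shared_info->arch.memmap_sequence is taken from the patch.

#define wmb()   __sync_synchronize()  /* stand-in write barrier for this sketch */

/* Bracket every memmap update so that a reader which observes an odd
 * sequence number, or a number that changed while it copied, retries. */
static void memmap_update_begin(volatile unsigned long *sequence)
{
    (*sequence)++;          /* odd: update in progress */
    wmb();                  /* order the bump before the memmap stores */
}

static void memmap_update_end(volatile unsigned long *sequence)
{
    wmb();                  /* order the memmap stores before the bump */
    (*sequence)++;          /* even again: update complete */
}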

diff -r af0969e7e408 tools/libxc/ia64/xc_ia64_stubs.c
--- a/tools/libxc/ia64/xc_ia64_stubs.c  Mon Sep 29 21:11:25 2008 +0900
+++ b/tools/libxc/ia64/xc_ia64_stubs.c  Mon Sep 29 21:12:53 2008 +0900
@@ -60,46 +60,120 @@
             ? -1 : domctl.u.getdomaininfo.max_pages);
 }
 
-int
-xc_ia64_copy_memmap(int xc_handle, uint32_t domid, shared_info_t *live_shinfo,
-                    xen_ia64_memmap_info_t **memmap_info_p,
-                    unsigned long *memmap_info_num_pages_p)
+/*
+ * Copy memmap info from guest domain pages into local buffers.
+ * The memmap info can be updated concurrently, so
+ *  - copy it into local buffers and use the local copy in order to
+ *    avoid seeing an inconsistent state.
+ *  - it is protected by the seqlock shared_info->arch.memmap_sequence.
+ *  - d->arch.convmem_end (which can be obtained via XENMEM_maximum_gpfn)
+ *    is also updated concurrently. Its value only ever increases.
+ */
+/*#define cpu_relax()     ia64_hint(ia64_hint_pause)*/
+#define cpu_relax()     sched_yield()
+#define unlikely(x)     __builtin_expect((x),0)
+
+static unsigned long
+xc_ia64_mi_read_seqbegin(const shared_info_t *live_shinfo)
 {
-    unsigned int memmap_info_num_pages;
+    unsigned long ret;
+
+ repeat:
+    ret = live_shinfo->arch.memmap_sequence;
+    xen_rmb();
+    if (unlikely(ret & 1)) {
+        cpu_relax();
+        goto repeat;
+    }
+
+    return ret;
+}
+
+static int
+xc_ia64_mi_read_seqretry(const shared_info_t *live_shinfo, unsigned long start)
+{
+    xen_rmb();
+
+    return (live_shinfo->arch.memmap_sequence != start);
+}
+
+/* copy before use in case someone is updating it */
+static int
+__xc_ia64_copy_memmap(int xc_handle, uint32_t domid,
+                      shared_info_t *live_shinfo,
+                      xen_ia64_memmap_info_t **memmap_info,
+                      unsigned long *memmap_info_num_pages)
+{
     unsigned long memmap_info_pfn;
     unsigned long memmap_size;
+    xen_ia64_memmap_info_t *memmap_info_live;
 
-    xen_ia64_memmap_info_t *memmap_info_live;
-    xen_ia64_memmap_info_t *memmap_info;
-
-    /* copy before use in case someone updating them */
-    memmap_info_num_pages = live_shinfo->arch.memmap_info_num_pages;
+    *memmap_info_num_pages = live_shinfo->arch.memmap_info_num_pages;
     memmap_info_pfn = live_shinfo->arch.memmap_info_pfn;
-    if (memmap_info_num_pages == 0 || memmap_info_pfn == 0) {
+    if (*memmap_info_num_pages == 0 || memmap_info_pfn == 0) {
         ERROR("memmap_info_num_pages 0x%x memmap_info_pfn 0x%lx",
-              memmap_info_num_pages, memmap_info_pfn);
+              *memmap_info_num_pages, memmap_info_pfn);
         return -1;
     }
 
-    memmap_size = memmap_info_num_pages << PAGE_SHIFT;
+    memmap_size = *memmap_info_num_pages << PAGE_SHIFT;
     memmap_info_live = xc_map_foreign_range(xc_handle, domid, memmap_size,
                                             PROT_READ, memmap_info_pfn);
     if (memmap_info_live == NULL) {
         PERROR("Could not map memmap info.");
         return -1;
     }
-    memmap_info = malloc(memmap_size);
-    if (memmap_info == NULL) {
+    *memmap_info = malloc(memmap_size);
+    if (*memmap_info == NULL) {
         munmap(memmap_info_live, memmap_size);
         return -1;
     }
-    memcpy(memmap_info, memmap_info_live, memmap_size); /* copy before use */
+    memcpy(*memmap_info, memmap_info_live, memmap_size); /* copy before use */
     munmap(memmap_info_live, memmap_size);
+    return 0;
+}
+
+int
+xc_ia64_copy_memmap(int xc_handle, uint32_t domid, shared_info_t *live_shinfo,
+                    xen_ia64_memmap_info_t **memmap_info_p,
+                    unsigned long *memmap_info_num_pages_p)
+{
+    long gpfn_max_prev;
+    long gpfn_max_post;
+    unsigned long seq;
+
+    unsigned long memmap_info_num_pages;
+    xen_ia64_memmap_info_t *memmap_info;
+
+    gpfn_max_prev = xc_memory_op(xc_handle, XENMEM_maximum_gpfn, &domid);
+    if (gpfn_max_prev < 0)
+        return -1;
+
+ again:
+    do {
+        seq = xc_ia64_mi_read_seqbegin(live_shinfo);
+        if (__xc_ia64_copy_memmap(xc_handle, domid, live_shinfo,
+                                 &memmap_info, &memmap_info_num_pages)) {
+            return -1;
+        }
+    } while (xc_ia64_mi_read_seqretry(live_shinfo, seq));
+
+    gpfn_max_post = xc_memory_op(xc_handle, XENMEM_maximum_gpfn, &domid);
+    if (gpfn_max_post < 0) {
+        free(memmap_info);
+        return -1;
+    }
+    if (gpfn_max_post > gpfn_max_prev) {
+        free(memmap_info);
+        gpfn_max_prev = gpfn_max_post;
+        goto again;
+    }
 
     /* reject unknown memmap */
     if (memmap_info->efi_memdesc_size != sizeof(efi_memory_desc_t) ||
         (memmap_info->efi_memmap_size / memmap_info->efi_memdesc_size) == 0 ||
-        memmap_info->efi_memmap_size > memmap_size - sizeof(memmap_info) ||
+        memmap_info->efi_memmap_size >
+        (memmap_info_num_pages << PAGE_SHIFT) - sizeof(memmap_info) ||
         memmap_info->efi_memdesc_version != EFI_MEMORY_DESCRIPTOR_VERSION) {
         PERROR("unknown memmap header. defaulting to compat mode.");
         free(memmap_info);
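
Below is a hypothetical caller, for illustration only and not part of the
patch. It assumes live_shinfo has already been mapped (e.g. with
xc_map_foreign_range(), as done elsewhere in xc_ia64_stubs.c), that the usual
headers of that file are included, and that xen_ia64_memmap_info_t carries
its EFI descriptors in a memdesc[] payload per the public arch-ia64.h
layout; md->phys_addr and md->num_pages are assumed from the standard EFI
descriptor layout.

static int
print_guest_memmap(int xc_handle, uint32_t domid, shared_info_t *live_shinfo)
{
    xen_ia64_memmap_info_t *memmap_info;
    unsigned long memmap_info_num_pages;
    char *p;
    char *end;

    if (xc_ia64_copy_memmap(xc_handle, domid, live_shinfo,
                            &memmap_info, &memmap_info_num_pages))
        return -1;

    /* Walk the EFI memory descriptors embedded in the copied memmap. */
    p = (char *)memmap_info->memdesc;
    end = p + memmap_info->efi_memmap_size;
    for (; p < end; p += memmap_info->efi_memdesc_size) {
        efi_memory_desc_t *md = (efi_memory_desc_t *)p;
        printf("phys 0x%lx pages 0x%lx\n",
               (unsigned long)md->phys_addr, (unsigned long)md->num_pages);
    }

    free(memmap_info);  /* the copy is malloc()ed by xc_ia64_copy_memmap() */
    return 0;
}

The point the patch guarantees is that the buffer handed back here is
internally consistent even if the guest was updating its memory map at the
same time.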

Attachment: memmap-info-tools-stack-seqlock.patch
Description: Text Data

_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ia64-devel