[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v5 1/4] xen: move steal_page and donate_page to common code



Only code movement, except for small changes to the warnings printed in
case of an error in donate_page and steal_page.

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
CC: keir@xxxxxxx
CC: JBeulich@xxxxxxxx

Changes in v5:
- move donate_page to common code;
- update commit message.

Changes in v4:
- move steal_page to common code.
---
 xen/arch/arm/mm.c        |   12 ------
 xen/arch/x86/mm.c        |   85 ----------------------------------------------
 xen/common/memory.c      |   85 ++++++++++++++++++++++++++++++++++++++++++++++
 xen/include/asm-arm/mm.h |    5 ---
 xen/include/asm-x86/mm.h |    5 ---
 xen/include/xen/mm.h     |    3 ++
 6 files changed, 88 insertions(+), 107 deletions(-)

diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index f301e65..89b9d27 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -742,18 +742,6 @@ void arch_dump_shared_mem_info(void)
 {
 }
 
-int donate_page(struct domain *d, struct page_info *page, unsigned int memflags)
-{
-    ASSERT(0);
-    return -ENOSYS;
-}
-
-int steal_page(
-    struct domain *d, struct page_info *page, unsigned int memflags)
-{
-    return -1;
-}
-
 int page_is_ram_type(unsigned long mfn, unsigned long mem_type)
 {
     ASSERT(0);
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index e7f0e13..a512046 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4124,91 +4124,6 @@ int replace_grant_host_mapping(
     return rc;
 }
 
-int donate_page(
-    struct domain *d, struct page_info *page, unsigned int memflags)
-{
-    spin_lock(&d->page_alloc_lock);
-
-    if ( is_xen_heap_page(page) || (page_get_owner(page) != NULL) )
-        goto fail;
-
-    if ( d->is_dying )
-        goto fail;
-
-    if ( page->count_info & ~(PGC_allocated | 1) )
-        goto fail;
-
-    if ( !(memflags & MEMF_no_refcount) )
-    {
-        if ( d->tot_pages >= d->max_pages )
-            goto fail;
-        domain_adjust_tot_pages(d, 1);
-    }
-
-    page->count_info = PGC_allocated | 1;
-    page_set_owner(page, d);
-    page_list_add_tail(page,&d->page_list);
-
-    spin_unlock(&d->page_alloc_lock);
-    return 0;
-
- fail:
-    spin_unlock(&d->page_alloc_lock);
-    MEM_LOG("Bad donate %p: ed=%p(%u), sd=%p, caf=%08lx, taf=%" PRtype_info,
-            (void *)page_to_mfn(page), d, d->domain_id,
-            page_get_owner(page), page->count_info, page->u.inuse.type_info);
-    return -1;
-}
-
-int steal_page(
-    struct domain *d, struct page_info *page, unsigned int memflags)
-{
-    unsigned long x, y;
-    bool_t drop_dom_ref = 0;
-
-    spin_lock(&d->page_alloc_lock);
-
-    if ( is_xen_heap_page(page) || (page_get_owner(page) != d) )
-        goto fail;
-
-    /*
-     * We require there is just one reference (PGC_allocated). We temporarily
-     * drop this reference now so that we can safely swizzle the owner.
-     */
-    y = page->count_info;
-    do {
-        x = y;
-        if ( (x & (PGC_count_mask|PGC_allocated)) != (1 | PGC_allocated) )
-            goto fail;
-        y = cmpxchg(&page->count_info, x, x & ~PGC_count_mask);
-    } while ( y != x );
-
-    /* Swizzle the owner then reinstate the PGC_allocated reference. */
-    page_set_owner(page, NULL);
-    y = page->count_info;
-    do {
-        x = y;
-        BUG_ON((x & (PGC_count_mask|PGC_allocated)) != PGC_allocated);
-    } while ( (y = cmpxchg(&page->count_info, x, x | 1)) != x );
-
-    /* Unlink from original owner. */
-    if ( !(memflags & MEMF_no_refcount) && !domain_adjust_tot_pages(d, -1) )
-        drop_dom_ref = 1;
-    page_list_del(page, &d->page_list);
-
-    spin_unlock(&d->page_alloc_lock);
-    if ( unlikely(drop_dom_ref) )
-        put_domain(d);
-    return 0;
-
- fail:
-    spin_unlock(&d->page_alloc_lock);
-    MEM_LOG("Bad page %p: ed=%p(%u), sd=%p, caf=%08lx, taf=%" PRtype_info,
-            (void *)page_to_mfn(page), d, d->domain_id,
-            page_get_owner(page), page->count_info, page->u.inuse.type_info);
-    return -1;
-}
-
 static int __do_update_va_mapping(
     unsigned long va, u64 val64, unsigned long flags, struct domain *pg_owner)
 {
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 50b740f..4b2f311 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -230,6 +230,91 @@ int guest_remove_page(struct domain *d, unsigned long gmfn)
     return 1;
 }
 
+int donate_page(
+    struct domain *d, struct page_info *page, unsigned int memflags)
+{
+    spin_lock(&d->page_alloc_lock);
+
+    if ( is_xen_heap_page(page) || (page_get_owner(page) != NULL) )
+        goto fail;
+
+    if ( d->is_dying )
+        goto fail;
+
+    if ( page->count_info & ~(PGC_allocated | 1) )
+        goto fail;
+
+    if ( !(memflags & MEMF_no_refcount) )
+    {
+        if ( d->tot_pages >= d->max_pages )
+            goto fail;
+        domain_adjust_tot_pages(d, 1);
+    }
+
+    page->count_info = PGC_allocated | 1;
+    page_set_owner(page, d);
+    page_list_add_tail(page,&d->page_list);
+
+    spin_unlock(&d->page_alloc_lock);
+    return 0;
+
+ fail:
+    spin_unlock(&d->page_alloc_lock);
+    gdprintk(XENLOG_WARNING, "Bad donate %p: ed=%p(%u), sd=%p, caf=%08lx, taf=%016lx",
+            (void *)page_to_mfn(page), d, d->domain_id,
+            page_get_owner(page), page->count_info, page->u.inuse.type_info);
+    return -1;
+}
+
+int steal_page(
+    struct domain *d, struct page_info *page, unsigned int memflags)
+{
+    unsigned long x, y;
+    bool_t drop_dom_ref = 0;
+
+    spin_lock(&d->page_alloc_lock);
+
+    if ( is_xen_heap_page(page) || (page_get_owner(page) != d) )
+        goto fail;
+
+    /*
+     * We require there is just one reference (PGC_allocated). We temporarily
+     * drop this reference now so that we can safely swizzle the owner.
+     */
+    y = page->count_info;
+    do {
+        x = y;
+        if ( (x & (PGC_count_mask|PGC_allocated)) != (1 | PGC_allocated) )
+            goto fail;
+        y = cmpxchg(&page->count_info, x, x & ~PGC_count_mask);
+    } while ( y != x );
+
+    /* Swizzle the owner then reinstate the PGC_allocated reference. */
+    page_set_owner(page, NULL);
+    y = page->count_info;
+    do {
+        x = y;
+        BUG_ON((x & (PGC_count_mask|PGC_allocated)) != PGC_allocated);
+    } while ( (y = cmpxchg(&page->count_info, x, x | 1)) != x );
+
+    /* Unlink from original owner. */
+    if ( !(memflags & MEMF_no_refcount) && !domain_adjust_tot_pages(d, -1) )
+        drop_dom_ref = 1;
+    page_list_del(page, &d->page_list);
+
+    spin_unlock(&d->page_alloc_lock);
+    if ( unlikely(drop_dom_ref) )
+        put_domain(d);
+    return 0;
+
+ fail:
+    spin_unlock(&d->page_alloc_lock);
+    printk("Bad page %p: ed=%p(%u), sd=%p, caf=%08lx, taf=%lx\n",
+            (void *)page_to_mfn(page), d, d->domain_id,
+            page_get_owner(page), page->count_info, page->u.inuse.type_info);
+    return -1;
+}
+
 static void decrease_reservation(struct memop_args *a)
 {
     unsigned long i, j;
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index 5e7c5a3..0b0a457 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -297,11 +297,6 @@ static inline int relinquish_shared_pages(struct domain *d)
 /* Arch-specific portion of memory_op hypercall. */
 long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg);
 
-int steal_page(
-    struct domain *d, struct page_info *page, unsigned int memflags);
-int donate_page(
-    struct domain *d, struct page_info *page, unsigned int memflags);
-
 #define domain_set_alloc_bitsize(d) ((void)0)
 #define domain_clamp_alloc_bitsize(d, b) (b)
 
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 213fc9c..e5254ee 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -566,11 +566,6 @@ long subarch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg);
 int compat_arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void));
 int compat_subarch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void));
 
-int steal_page(
-    struct domain *d, struct page_info *page, unsigned int memflags);
-int donate_page(
-    struct domain *d, struct page_info *page, unsigned int memflags);
-
 int map_ldt_shadow_page(unsigned int);
 
 #define NIL(type) ((type *)-sizeof(type))
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 4f5795c..1dc859b 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -60,6 +60,9 @@ void destroy_xen_mappings(unsigned long v, unsigned long e);
 unsigned long domain_adjust_tot_pages(struct domain *d, long pages);
 int domain_set_outstanding_pages(struct domain *d, unsigned long pages);
 void get_outstanding_claims(uint64_t *free_pages, uint64_t *outstanding_pages);
+int steal_page(struct domain *d, struct page_info *page, unsigned int memflags);
+int donate_page(struct domain *d, struct page_info *page, unsigned int memflags);
+
 
 /* Domain suballocator. These functions are *not* interrupt-safe.*/
 void init_domheap_pages(paddr_t ps, paddr_t pe);
-- 
1.7.2.5


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.