[IA64] No need for cmpxchg on page_info structure.
Updates and checks on count_info and page owner can safely be
non-atomic.
This is the ia64 counterpart of 19088:055c589f4791.
Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
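
For illustration only (commentary, not part of the patch): below is a minimal
C11 sketch of the idea, using simplified stand-in types and names rather than
the real page_info/pickle_domptr machinery. Once the owner lives in its own
word and is only read or written while holding the owner's page_alloc_lock, a
plain check and a plain store suffice for the owner; only the 32-bit reference
count still needs cmpxchg, so the 64-bit cmpxchg spanning the adjacent
{count_info, _domain} pair can be dropped.

    /* Illustrative sketch only -- NOT Xen code.  Names, layout and the
     * meaning of count values are simplified assumptions. */
    #include <stdint.h>
    #include <stdatomic.h>

    struct domain;                        /* opaque here */

    /* Old style: count (low 32 bits) and pickled owner (high 32 bits) were
     * updated together, which required a 64-bit cmpxchg over the pair. */
    struct page_old {
        uint64_t count_and_owner;
    };

    /* New style: 32-bit count plus a separate owner field.  The owner is
     * only touched under the owner domain's page_alloc_lock. */
    struct page_new {
        _Atomic uint32_t count_info;
        struct domain   *owner;           /* serialized by page_alloc_lock */
    };

    /* Rough shape of the new steal_page() sequence; the caller is assumed
     * to hold d's page_alloc_lock.  The real code also preserves flag bits
     * such as PGC_allocated inside count_info, which this sketch omits. */
    static int steal_sketch(struct page_new *pg, struct domain *d)
    {
        uint32_t y;

        if (pg->owner != d)               /* plain check is enough under lock */
            return -1;

        y = atomic_load(&pg->count_info);
        do {
            if (y != 1)                   /* expect exactly one reference */
                return -1;
        } while (!atomic_compare_exchange_weak(&pg->count_info, &y, 0));

        pg->owner = NULL;                 /* non-atomic owner update */
        atomic_store(&pg->count_info, 1); /* reinstate the single reference */
        return 0;
    }

Because count_info in the real structure also carries flag bits that other
CPUs may change concurrently, the actual patch reinstates the reference with
a second cmpxchg loop (and a BUG_ON) rather than the plain store shown above.
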
diff --git a/xen/arch/ia64/xen/mm.c b/xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c
+++ b/xen/arch/ia64/xen/mm.c
@@ -183,6 +183,9 @@
#include <asm/event.h>
#include <asm/debugger.h>
+
+#define MEM_LOG(_f, _a...) gdprintk(XENLOG_WARNING, _f "\n", ## _a)
+
static void domain_page_flush_and_put(struct domain* d, unsigned long mpaddr,
volatile pte_t* ptep, pte_t old_pte,
struct page_info* page);
@@ -2745,8 +2748,7 @@ steal_page(struct domain *d, struct page
#if 0 /* if big endian */
# error "implement big endian version of steal_page()"
#endif
- u32 _d, _nd;
- u64 x, nx, y;
+ u32 x, y;
if (page_get_owner(page) != d) {
gdprintk(XENLOG_INFO, "%s d 0x%p owner 0x%p\n",
@@ -2794,61 +2796,64 @@ steal_page(struct domain *d, struct page
}
spin_lock(&d->page_alloc_lock);
+ /* check again */
+ if (is_xen_heap_page(page) || page_get_owner(page) != d) {
+ goto fail;
+ }
/*
- * The tricky bit: atomically release ownership while there is just one
- * benign reference to the page (PGC_allocated). If that reference
- * disappears then the deallocation routine will safely spin.
+ * We require there is just one reference (PGC_allocated). We temporarily
+ * drop this reference now so that we can safely swizzle the owner.
*/
- _d = pickle_domptr(d);
- y = *((u64*)&page->count_info);
+ y = page->count_info;
do {
x = y;
- nx = x & 0xffffffff;
// page->count_info: untouched
- // page->u.inused._domain = 0;
- _nd = x >> 32;
if (unlikely(((x & (PGC_count_mask | PGC_allocated)) !=
- (1 | PGC_allocated))) ||
- unlikely(_nd != _d)) {
- struct domain* nd = unpickle_domptr(_nd);
+ (1 | PGC_allocated)))) {
+ struct domain* nd = page_get_owner(page);
if (nd == NULL) {
gdprintk(XENLOG_INFO, "gnttab_transfer: "
- "Bad page %p: ed=%p(%u) 0x%x, "
- "sd=%p 0x%x,"
- " caf=%016lx, taf=%" PRtype_info
+ "Bad page %p: ed=%p(%u), "
+ "sd=%p,"
+ " caf=%016x, taf=%" PRtype_info
" memflags 0x%x\n",
(void *) page_to_mfn(page),
- d, d->domain_id, _d,
- nd, _nd,
+ d, d->domain_id,
+ nd,
x,
page->u.inuse.type_info,
memflags);
} else {
gdprintk(XENLOG_WARNING, "gnttab_transfer: "
- "Bad page %p: ed=%p(%u) 0x%x, "
- "sd=%p(%u) 0x%x,"
- " caf=%016lx, taf=%" PRtype_info
+ "Bad page %p: ed=%p(%u), "
+ "sd=%p(%u),"
+ " caf=%016x, taf=%" PRtype_info
" memflags 0x%x\n",
(void *) page_to_mfn(page),
- d, d->domain_id, _d,
- nd, nd->domain_id, _nd,
+ d, d->domain_id,
+ nd, nd->domain_id,
x,
page->u.inuse.type_info,
memflags);
}
- spin_unlock(&d->page_alloc_lock);
- return -1;
+ goto fail;
}
- y = cmpxchg((u64*)&page->count_info, x, nx);
+ y = cmpxchg(&page->count_info, x, x & ~PGC_count_mask);
} while (unlikely(y != x));
- /*
- * Unlink from 'd'. At least one reference remains (now anonymous), so
- * noone else is spinning to try to delete this page from 'd'.
- */
+ /* Swizzle the owner then reinstate the PGC_allocated reference. */
+ page_set_owner(page, NULL);
+ y = page->count_info;
+ do {
+ x = y;
+ BUG_ON((x & (PGC_count_mask | PGC_allocated)) != PGC_allocated);
+ y = cmpxchg(&page->count_info, x, x | 1);
+ } while (unlikely(y != x));
+
+ /* Unlink from original owner. */
if ( !(memflags & MEMF_no_refcount) )
d->tot_pages--;
list_del(&page->list);
@@ -2856,6 +2861,13 @@ steal_page(struct domain *d, struct page
spin_unlock(&d->page_alloc_lock);
perfc_incr(steal_page);
return 0;
+
+ fail:
+ spin_unlock(&d->page_alloc_lock);
+ MEM_LOG("Bad page %p: ed=%p(%u), sd=%p, caf=%08x, taf=%" PRtype_info,
+ (void *)page_to_mfn(page), d, d->domain_id,
+ page_get_owner(page), page->count_info, page->u.inuse.type_info);
+ return -1;
}
static void
@@ -3043,14 +3055,6 @@ void domain_cache_flush (struct domain *
//printk ("domain_cache_flush: %d %d pages\n", d->domain_id, nbr_page);
}
-#ifdef VERBOSE
-#define MEM_LOG(_f, _a...) \
- printk("DOM%u: (file=mm.c, line=%d) " _f "\n", \
- current->domain->domain_id , __LINE__ , ## _a )
-#else
-#define MEM_LOG(_f, _a...) ((void)0)
-#endif
-
static void free_page_type(struct page_info *page, u32 type)
{
}
diff --git a/xen/include/asm-ia64/mm.h b/xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h
+++ b/xen/include/asm-ia64/mm.h
@@ -167,24 +167,28 @@ static inline void put_page(struct page_
static inline int get_page(struct page_info *page,
struct domain *domain)
{
- u64 x, nx, y = *((u64*)&page->count_info);
- u32 _domain = pickle_domptr(domain);
+ u32 x, y = page->count_info;
do {
- x = y;
- nx = x + 1;
- if (unlikely((x & PGC_count_mask) == 0) || /* Not allocated? */
- unlikely((nx & PGC_count_mask) == 0) || /* Count overflow? */
- unlikely((x >> 32) != _domain)) { /* Wrong owner? */
+ x = y;
+ if (unlikely((x & PGC_count_mask) == 0) || /* Not allocated? */
+ unlikely(((x + 1) & PGC_count_mask) == 0) ) {/* Count overflow? */
+ goto fail;
+ }
+ y = cmpxchg_acq(&page->count_info, x, x + 1);
+ } while (unlikely(y != x));
- gdprintk(XENLOG_INFO, "Error pfn %lx: rd=%p, od=%p, caf=%016lx,
taf=%"
- PRtype_info "\n", page_to_mfn(page), domain,
- unpickle_domptr(x >> 32), x, page->u.inuse.type_info);
- return 0;
- }
- }
- while(unlikely((y = cmpxchg_acq((u64*)&page->count_info, x, nx)) != x));
- return 1;
+ if (likely(page_get_owner(page) == domain))
+ return 1;
+
+ put_page(page);
+fail:
+ /* if (!domain->is_dying) */ /* XXX: header inclusion hell */
+ gdprintk(XENLOG_INFO,
+ "Error pfn %lx: rd=%p, od=%p, caf=%016x, taf=%" PRtype_info "\n",
+ page_to_mfn(page), domain,
+ page_get_owner(page), y, page->u.inuse.type_info);
+ return 0;
}
int is_iomem_page(unsigned long mfn);