# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1260355496 0
# Node ID 18342df0f9dcf3a0a5a7ce05ea0218cf94335157
# Parent 2c6a04fdf8fb00a26aec125952a710de6cc8439c
tmem: reclaim minimal memory proactively
When a single domain is using most or all of tmem memory
for ephemeral pages belonging to the same object, e.g.
when copying a single huge file larger than ephemeral
memory, long lists are traversed looking for a page to
evict that does not belong to that object (pages in the
object currently being inserted into are locked and so
cannot be evicted). This is essentially a livelock.
Avoid this by proactively ensuring there is a margin
of available memory (1MB) before locks are taken on
the object.
Signed-off-by: Dan Magenheimer <dan.magenheimer@xxxxxxxxxx>
---
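
[Note, not part of the patch: the following is a small standalone sketch of
the pattern the patch introduces, for illustration only. It keeps evicting
ephemeral pages until roughly 1MB is free, but gives up after a bounded
number of failed evictions so the check cannot spin when nothing is
evictable. tmem_ensure_avail_pages(), tmh_free_mb() and tmem_evict() are the
names used by the patch; the stub counters, the PAGE_SHIFT value and the
main() harness here are invented for the sketch.]

/* Standalone sketch, not the Xen implementation: the stubs below stand in
 * for the hypervisor's tmh_free_mb() and tmem_evict(). */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12                           /* assumption: 4 KiB pages */

static unsigned long free_pages;                /* stub: free-page counter */
static unsigned long evictable_pages = 1000;    /* stub: evictable ephemeral pages */

static unsigned long tmh_free_mb(void)
{
    return free_pages >> (20 - PAGE_SHIFT);     /* 256 pages == 1 MiB */
}

static bool tmem_evict(void)
{
    if ( evictable_pages == 0 )
        return false;                           /* nothing left to evict */
    evictable_pages--;
    free_pages++;
    return true;
}

/* Same shape as the function added by the patch: runs BEFORE any object
 * locks are taken, so eviction never has to skip over locked pages. */
static void tmem_ensure_avail_pages(void)
{
    int failed_evict = 10;

    while ( !tmh_free_mb() )
    {
        if ( tmem_evict() )
            continue;                           /* keep going until 1MB is free */
        else if ( failed_evict-- <= 0 )
            break;                              /* bounded: do not spin forever */
    }
}

int main(void)
{
    tmem_ensure_avail_pages();                  /* in the patch: called before do_tmem_put() */
    printf("free pages after ensure: %lu\n", free_pages);  /* prints 256 */
    return 0;
}
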
xen/common/tmem.c | 20 ++++++++++++++++++++
xen/include/xen/tmem_xen.h | 5 +++++
2 files changed, 25 insertions(+)
diff -r 2c6a04fdf8fb -r 18342df0f9dc xen/common/tmem.c
--- a/xen/common/tmem.c Wed Dec 09 10:44:11 2009 +0000
+++ b/xen/common/tmem.c Wed Dec 09 10:44:56 2009 +0000
@@ -1091,6 +1091,24 @@ static unsigned long tmem_relinquish_npa
     if ( avail_pages )
         tmh_release_avail_pages_to_host();
     return avail_pages;
+}
+
+/* Under certain conditions (e.g. if each client is putting pages for exactly
+ * one object), once locks are held, freeing up memory may
+ * result in livelocks and very long "put" times, so we try to ensure there
+ * is a minimum amount of memory (1MB) available BEFORE any data structure
+ * locks are held */
+static inline void tmem_ensure_avail_pages(void)
+{
+    int failed_evict = 10;
+
+    while ( !tmh_free_mb() )
+    {
+        if ( tmem_evict() )
+            continue;
+        else if ( failed_evict-- <= 0 )
+            break;
+    }
 }
 
 /************ TMEM CORE OPERATIONS ************************************/
@@ -2315,10 +2333,12 @@ EXPORT long do_tmem_op(tmem_cli_op_t uop
                               op.u.new.uuid[0], op.u.new.uuid[1]);
         break;
     case TMEM_NEW_PAGE:
+        tmem_ensure_avail_pages();
         rc = do_tmem_put(pool, op.u.gen.object,
                          op.u.gen.index, op.u.gen.cmfn, 0, 0, 0, NULL);
         break;
     case TMEM_PUT_PAGE:
+        tmem_ensure_avail_pages();
         rc = do_tmem_put(pool, op.u.gen.object,
                          op.u.gen.index, op.u.gen.cmfn, 0, 0, PAGE_SIZE, NULL);
         if (rc == 1) succ_put = 1;
diff -r 2c6a04fdf8fb -r 18342df0f9dc xen/include/xen/tmem_xen.h
--- a/xen/include/xen/tmem_xen.h Wed Dec 09 10:44:11 2009 +0000
+++ b/xen/include/xen/tmem_xen.h Wed Dec 09 10:44:56 2009 +0000
@@ -250,6 +250,11 @@ static inline unsigned long tmh_freeable
 {
     return (tmh_avail_pages() + _atomic_read(freeable_page_count)) >>
             (20 - PAGE_SHIFT);
+}
+
+static inline unsigned long tmh_free_mb(void)
+{
+    return (tmh_avail_pages() + total_free_pages()) >> (20 - PAGE_SHIFT);
 }
 
 /*
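
[Note on the conversion in tmh_free_mb(), not part of the patch: with 4 KiB
pages (PAGE_SHIFT == 12, assumed here as on x86), the right shift by
(20 - PAGE_SHIFT) == 8 turns a page count into whole mebibytes, so the
function returns nonzero only once at least 256 free pages (1MB) are
available, which is exactly the margin the message describes. A tiny check
of the arithmetic:]

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumption: 4 KiB pages, as on x86 */

int main(void)
{
    unsigned long pages;

    pages = 255;
    printf("%lu pages -> %lu MB\n", pages, pages >> (20 - PAGE_SHIFT)); /* 255 pages -> 0 MB */
    pages = 256;
    printf("%lu pages -> %lu MB\n", pages, pages >> (20 - PAGE_SHIFT)); /* 256 pages -> 1 MB */
    return 0;
}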