Expose tmem "freeable" memory for use by management tools.
Management tools looking for a machine with available
memory often look at free_memory to determine if there
is enough physical memory to house a new or migrating
guest. Since tmem absorbs much or all free memory,
and since "ephemeral" tmem memory can be synchronously
freed, management tools need more data -- not only how
much memory is "free" but also how much memory is
"freeable" by tmem if tmem is told (via an already
existing tmem hypercall) to relinquish freeable memory.
This patch provides that extra piece of data (in MB).
Signed-off-by: Dan Magenheimer <dan.magenheimer@xxxxxxxxxx>
tools/python/xen/lowlevel/xc/xc.c | 2 ++
tools/python/xen/xend/XendAPI.py | 9 +++++++++
tools/python/xen/xend/XendConstants.py | 1 +
tools/python/xen/xend/XendNode.py | 10 ++++++++++
tools/python/xen/xend/server/XMLRPCServer.py | 2 +-
tools/python/xen/xm/main.py | 8 ++++++++
xen/common/tmem.c | 11 +++++++----
xen/common/tmem_xen.c | 2 ++
xen/include/public/tmem.h | 3 +--
xen/include/xen/tmem_xen.h | 26 +++++++++++++++++++++++++-
10 files changed, 66 insertions(+), 8 deletions(-)
===============
diff -r b9cdcf502aa3 tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Thu Aug 06 11:14:48 2009 +0100
+++ b/tools/python/xen/lowlevel/xc/xc.c Fri Aug 07 14:32:39 2009 -0600
@@ -1544,6 +1544,8 @@ static PyObject *pyxc_tmem_control(XcObj
case TMEMC_LIST:
return Py_BuildValue("s", buffer);
case TMEMC_FLUSH:
+ return Py_BuildValue("i", rc);
+ case TMEMC_QUERY_FREEABLE_MB:
return Py_BuildValue("i", rc);
case TMEMC_THAW:
case TMEMC_FREEZE:
diff -r b9cdcf502aa3 tools/python/xen/xend/XendAPI.py
--- a/tools/python/xen/xend/XendAPI.py Thu Aug 06 11:14:48 2009 +0100
+++ b/tools/python/xen/xend/XendAPI.py Fri Aug 07 14:32:39 2009 -0600
@@ -934,6 +934,7 @@ class XendAPI(object):
('tmem_set_weight', None),
('tmem_set_cap', None),
('tmem_set_compress', None),
+ ('tmem_query_freeable_mb', None),
('tmem_shared_auth', None)]
host_funcs = [('get_by_name_label', None),
@@ -1133,6 +1134,14 @@ class XendAPI(object):
except Exception, e:
return xen_api_error(e)
return xen_api_success_void()
+
+ def host_tmem_query_freeable_mb(self, _, host_ref):
+ node = XendNode.instance()
+ try:
+ pages = node.tmem_query_freeable_mb()
+ except Exception, e:
+ return xen_api_error(e)
+ return xen_api_success(pages is None and -1 or pages)
def host_tmem_shared_auth(self, _, host_ref, cli_id, uuid_str, auth):
node = XendNode.instance()
diff -r b9cdcf502aa3 tools/python/xen/xend/XendConstants.py
--- a/tools/python/xen/xend/XendConstants.py Thu Aug 06 11:14:48 2009 +0100
+++ b/tools/python/xen/xend/XendConstants.py Fri Aug 07 14:32:39 2009 -0600
@@ -162,4 +162,5 @@ TMEMC_SET_WEIGHT = 5
TMEMC_SET_WEIGHT = 5
TMEMC_SET_CAP = 6
TMEMC_SET_COMPRESS = 7
+TMEMC_QUERY_FREEABLE_MB = 8
diff -r b9cdcf502aa3 tools/python/xen/xend/XendNode.py
--- a/tools/python/xen/xend/XendNode.py Thu Aug 06 11:14:48 2009 +0100
+++ b/tools/python/xen/xend/XendNode.py Fri Aug 07 14:32:39 2009 -0600
@@ -1012,6 +1012,16 @@ class XendNode:
buf = ''
return self.xc.tmem_control(pool_id, subop, cli_id, arg1, arg2, arg3,
buf)
+ def tmem_query_freeable_mb(self):
+ pool_id = -1
+ cli_id = -1
+ subop = TMEMC_QUERY_FREEABLE_MB
+ arg1 = 0
+ arg2 = 0
+ arg3 = 0
+ buf = ''
+ return self.xc.tmem_control(pool_id, subop, cli_id, arg1, arg2, arg3,
+ buf)
+
def tmem_shared_auth(self, cli_id, uuid_str, auth):
return self.xc.tmem_auth(cli_id, uuid_str, auth)
diff -r b9cdcf502aa3 tools/python/xen/xend/server/XMLRPCServer.py
--- a/tools/python/xen/xend/server/XMLRPCServer.py Thu Aug 06 11:14:48 2009 +0100
+++ b/tools/python/xen/xend/server/XMLRPCServer.py Fri Aug 07 14:32:39 2009 -0600
@@ -203,7 +203,7 @@ class XMLRPCServer:
'tmem_list', 'tmem_freeze', 'tmem_thaw',
'tmem_flush', 'tmem_destroy', 'tmem_set_weight',
'tmem_set_cap', 'tmem_set_compress',
- 'tmem_shared_auth'],
+ 'tmem_query_freeable_mb', 'tmem_shared_auth'],
'node'),
(XendDmesg, ['info', 'clear'], 'node.dmesg')]:
inst = type.instance()
diff -r b9cdcf502aa3 tools/python/xen/xm/main.py
--- a/tools/python/xen/xm/main.py Thu Aug 06 11:14:48 2009 +0100
+++ b/tools/python/xen/xm/main.py Fri Aug 07 14:32:39 2009 -0600
@@ -207,6 +207,7 @@ SUBCOMMAND_HELP = {
'tmem-set' : ('[<Domain>|-a|--all] [weight=<weight>] [cap=<cap>] '
'[compress=<compress>]',
'Change tmem settings.'),
+ 'tmem-freeable' : ('', 'Print amount of freeable tmem memory, in MB.'),
'tmem-shared-auth' : ('[<Domain>|-a|--all] [--uuid=<uuid>] [--auth=<0|1>]', 'De/authenticate shared tmem pool.'),
# security
@@ -3136,6 +3137,12 @@ def xm_tmem_set(args):
if compress is not None:
server.xend.node.tmem_set_compress(domid, compress)
+def xm_tmem_freeable_mb(args):
+ if serverType == SERVER_XEN_API:
+ print server.xenapi.host.tmem_query_freeable_mb()
+ else:
+ print server.xend.node.tmem_query_freeable_mb()
+
def xm_tmem_shared_auth(args):
try:
(options, params) = getopt.gnu_getopt(args, 'au:A:', ['all','uuid=','auth='])
@@ -3258,6 +3265,7 @@ commands = {
"tmem-destroy": xm_tmem_destroy,
"tmem-list": xm_tmem_list,
"tmem-set": xm_tmem_set,
+ "tmem-freeable": xm_tmem_freeable_mb,
"tmem-shared-auth": xm_tmem_shared_auth,
}
diff -r b9cdcf502aa3 xen/common/tmem.c
--- a/xen/common/tmem.c Thu Aug 06 11:14:48 2009 +0100
+++ b/xen/common/tmem.c Fri Aug 07 14:32:39 2009 -0600
@@ -752,7 +752,7 @@ static pool_t * pool_alloc(void)
pool_t *pool;
int i;
- if ( (pool = tmem_malloc(pool_t,NULL)) == NULL )
+ if ( (pool = tmh_alloc_infra(sizeof(pool_t),__alignof__(pool_t))) == NULL )
return NULL;
for (i = 0; i < OBJ_HASH_BUCKETS; i++)
pool->obj_rb_root[i] = RB_ROOT;
@@ -780,7 +780,7 @@ static NOINLINE void pool_free(pool_t *p
INVERT_SENTINEL(pool,POOL);
pool->client = NULL;
list_del(&pool->pool_list);
- tmem_free(pool,sizeof(pool_t),NULL);
+ tmh_free_infra(pool);
}
/* register new_client as a user of this shared pool and return new
@@ -898,7 +898,7 @@ static void pool_flush(pool_t *pool, cli
static client_t *client_create(cli_id_t cli_id)
{
- client_t *client = tmem_malloc(client_t,NULL);
+ client_t *client = tmh_alloc_infra(sizeof(client_t),__alignof__(client_t));
int i;
printk("tmem: initializing tmem capability for %s=%d...",cli_id_str,cli_id);
@@ -912,7 +912,7 @@ static client_t *client_create(cli_id_t
{
printk("failed... can't allocate host-dependent part of client\n");
if ( client )
- tmem_free(client,sizeof(client_t),NULL);
+ tmh_free_infra(client);
return NULL;
}
tmh_set_client_from_id(client,cli_id);
@@ -2150,6 +2150,9 @@ static NOINLINE int do_tmem_control(stru
case TMEMC_SET_COMPRESS:
ret = tmemc_set_var(op->u.ctrl.cli_id,subop,op->u.ctrl.arg1);
break;
+ case TMEMC_QUERY_FREEABLE_MB:
+ ret = tmh_freeable_mb();
+ break;
case TMEMC_SAVE_BEGIN:
case TMEMC_RESTORE_BEGIN:
case TMEMC_SAVE_GET_VERSION:
diff -r b9cdcf502aa3 xen/common/tmem_xen.c
--- a/xen/common/tmem_xen.c Thu Aug 06 11:14:48 2009 +0100
+++ b/xen/common/tmem_xen.c Fri Aug 07 14:32:39 2009 -0600
@@ -25,6 +25,8 @@ boolean_param("tmem_shared_auth", opt_tm
EXPORT int opt_tmem_lock = 0;
integer_param("tmem_lock", opt_tmem_lock);
+
+EXPORT atomic_t freeable_page_count = ATOMIC_INIT(0);
#ifdef COMPARE_COPY_PAGE_SSE2
DECL_CYC_COUNTER(pg_copy1);
diff -r b9cdcf502aa3 xen/include/public/tmem.h
--- a/xen/include/public/tmem.h Thu Aug 06 11:14:48 2009 +0100
+++ b/xen/include/public/tmem.h Fri Aug 07 14:32:39 2009 -0600
@@ -55,8 +55,7 @@
#define TMEMC_SET_WEIGHT 5
#define TMEMC_SET_CAP 6
#define TMEMC_SET_COMPRESS 7
-#define TMEMC_SHARED_POOL_AUTH 8
-#define TMEMC_SHARED_POOL_DEAUTH 9
+#define TMEMC_QUERY_FREEABLE_MB 8
#define TMEMC_SAVE_BEGIN 10
#define TMEMC_SAVE_GET_VERSION 11
#define TMEMC_SAVE_GET_MAXPOOLS 12
diff -r b9cdcf502aa3 xen/include/xen/tmem_xen.h
--- a/xen/include/xen/tmem_xen.h Thu Aug 06 11:14:48 2009 +0100
+++ b/xen/include/xen/tmem_xen.h Fri Aug 07 14:32:39 2009 -0600
@@ -35,6 +35,7 @@ extern struct page_list_head tmh_page_li
extern struct page_list_head tmh_page_list;
extern spinlock_t tmh_page_list_lock;
extern unsigned long tmh_page_list_pages;
+extern atomic_t freeable_page_count;
extern spinlock_t tmem_lock;
extern spinlock_t tmem_spinlock;
@@ -102,7 +103,7 @@ static inline unsigned long tmh_avail_pa
}
/*
- * Ephemeral memory allocation for persistent data
+ * Memory allocation for persistent data
*/
static inline bool_t domain_fully_allocated(struct domain *d)
@@ -228,6 +229,8 @@ static inline struct page_info *tmh_allo
if ( pi == NULL && !no_heap )
pi = alloc_domheap_pages(0,0,MEMF_tmem);
ASSERT((pi == NULL) || IS_VALID_PAGE(pi));
+ if ( pi != NULL )
+ atomic_inc(&freeable_page_count);
return pi;
}
@@ -235,11 +238,32 @@ static inline void tmh_free_page(struct
{
ASSERT(IS_VALID_PAGE(pi));
tmh_page_list_put(pi);
+ atomic_dec(&freeable_page_count);
}
static inline unsigned int tmem_subpage_maxsize(void)
{
return tmh_mempool_maxalloc;
+}
+
+static inline unsigned long tmh_freeable_mb(void)
+{
+ return (tmh_avail_pages() + _atomic_read(freeable_page_count)) >>
+ (20 - PAGE_SHIFT);
+}
+
+/*
+ * Memory allocation for "infrastructure" data
+ */
+
+static inline void *tmh_alloc_infra(size_t size, size_t align)
+{
+ return _xmalloc(size,align);
+}
+
+static inline void tmh_free_infra(void *p)
+{
+ return xfree(p);
}
#define tmh_lock_all opt_tmem_lock
tmem-freeable-090807.patch
Description: Binary data
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
|