[Xen-devel] [PATCH 3 of 9] libxl: adds a few more memory operations

To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH 3 of 9] libxl: adds a few more memory operations
From: stefano.stabellini@xxxxxxxxxxxxx
Date: Fri, 27 Aug 2010 14:53:21 +0100
Cc: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
In-reply-to: <alpine.DEB.2.00.1008271440231.2545@kaball-desktop>
References: <alpine.DEB.2.00.1008271440231.2545@kaball-desktop>
libxl_get_free_memory_slack: calculate the amount of memory that should
be left free in the system and write it to xenstore.

libxl_domain_need_memory: calculate how much memory a domain needs in
order to be built and started correctly.

libxl_get_free_memory: calculate the total free memory in the system.

libxl_wait_for_free_memory: wait for a certain amount of memory to
become free in the system.

libxl_wait_for_memory_target: wait for a domain to reach its memory
target.

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
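
As a usage sketch for review only (not part of this patch): the helper below shows how a toolstack caller might tie these functions together before building a domain. The function name freemem_for_domain and the retry count of 3 are invented for illustration; the libxl calls are the ones declared in libxl.h by this series, and an already-initialised libxl_ctx is assumed.

#include "libxl.h"

/* Hypothetical helper, not part of this patch: balloon dom0 down until
 * enough memory is free to build the new domain, retrying a few times. */
static int freemem_for_domain(libxl_ctx *ctx,
                              libxl_domain_build_info *b_info,
                              libxl_device_model_info *dm_info)
{
    uint32_t need_memkb, free_memkb;
    int rc, retries = 3;

    rc = libxl_domain_need_memory(ctx, b_info, dm_info, &need_memkb);
    if (rc < 0)
        return rc;

    do {
        rc = libxl_get_free_memory(ctx, &free_memkb);
        if (rc < 0)
            return rc;
        if (free_memkb >= need_memkb)
            return 0;

        /* ask dom0 to release the shortfall (negative relative target) */
        rc = libxl_set_relative_memory_target(ctx, 0,
                -(int32_t)(need_memkb - free_memkb), 0);
        if (rc < 0)
            return rc;

        /* wait up to 10s for the freed memory to become available ... */
        rc = libxl_wait_for_free_memory(ctx, 0, need_memkb, 10);
        if (rc == 0)
            return 0;
        if (rc != ERROR_NOMEM)
            return rc;

        /* ... otherwise wait for dom0 to reach its new target and retry */
        rc = libxl_wait_for_memory_target(ctx, 0, 1);
        if (rc < 0)
            return rc;
    } while (--retries > 0);

    return ERROR_NOMEM;
}

Note that libxl_get_free_memory reports (free_pages + scrub_pages) * 4 minus the freemem-slack value that fill_dom0_memory_info writes to xenstore (total host memory in KB minus dom0's target), so a caller like the sketch above never hands out the slack reserved for dom0.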

diff -r a59fa4d54851 tools/libxl/libxl.c
--- a/tools/libxl/libxl.c       Wed Aug 25 21:01:37 2010 +0100
+++ b/tools/libxl/libxl.c       Thu Aug 26 19:06:04 2010 +0100
@@ -2755,9 +2755,11 @@ static int fill_dom0_memory_info(libxl_g
 {
     int rc;
     libxl_dominfo info;
+    libxl_physinfo physinfo;
     char *target = NULL, *endptr = NULL;
     char *target_path = "/local/domain/0/memory/target";
     char *max_path = "/local/domain/0/memory/static-max";
+    char *free_mem_slack_path = "/local/domain/0/memory/freemem-slack";
     xs_transaction_t t;
     libxl_ctx *ctx = libxl_gc_owner(gc);
 
@@ -2780,9 +2782,13 @@ retry_transaction:
     rc = libxl_domain_info(ctx, &info, 0);
     if (rc < 0)
         return rc;
+    rc = libxl_get_physinfo(ctx, &physinfo);
+    if (rc < 0)
+        return rc;
 
     libxl_xs_write(gc, t, target_path, "%"PRIu32, (uint32_t) info.target_memkb);
     libxl_xs_write(gc, t, max_path, "%"PRIu32, (uint32_t) info.max_memkb);
+    libxl_xs_write(gc, t, free_mem_slack_path, "%"PRIu32, (uint32_t) ((physinfo.total_pages * 4) - info.target_memkb));
 
     *target_memkb = (uint32_t) info.target_memkb;
     rc = 0;
@@ -2796,6 +2802,33 @@ out:
     return rc;
 }
 
+int libxl_get_free_memory_slack(libxl_ctx *ctx, uint32_t *free_mem_slack)
+{
+    int rc;
+    libxl_gc gc = LIBXL_INIT_GC(ctx);
+    char *free_mem_slack_path = "/local/domain/0/memory/freemem-slack";
+    char *free_mem_slack_s, *endptr;
+    uint32_t target_memkb;
+
+retry:
+    free_mem_slack_s = libxl_xs_read(&gc, XBT_NULL, free_mem_slack_path);
+    if (!free_mem_slack_s) {
+        rc = fill_dom0_memory_info(&gc, &target_memkb);
+        if (rc < 0)
+            return rc;
+        goto retry;
+    } else {
+        *free_mem_slack = strtoul(free_mem_slack_s, &endptr, 10);
+        if (*endptr != '\0') {
+            XL_LOG_ERRNO(ctx, XL_LOG_ERROR,
+                    "invalid free_mem_slack %s from %s\n", free_mem_slack_s, free_mem_slack_path);
+            return ERROR_FAIL;
+        }
+    }
+    libxl_free_all(&gc);
+    return 0;
+}
+
 int libxl_set_memory_target(libxl_ctx *ctx, uint32_t domid, uint32_t target_memkb, int enforce)
 {
     libxl_gc gc = LIBXL_INIT_GC(ctx);
@@ -2994,6 +3027,90 @@ out:
     return rc;
 }
 
+int libxl_domain_need_memory(libxl_ctx *ctx, libxl_domain_build_info *b_info,
+        libxl_device_model_info *dm_info, uint32_t *need_memkb)
+{
+    *need_memkb = b_info->target_memkb;
+    if (b_info->hvm) {
+        *need_memkb += b_info->shadow_memkb + LIBXL_HVM_EXTRA_MEMORY;
+        if (strstr(dm_info->device_model, "stubdom-dm"))
+            *need_memkb += 32 * 1024;
+    } else
+        *need_memkb += LIBXL_PV_EXTRA_MEMORY;
+    if (*need_memkb % (2 * 1024))
+        *need_memkb += (2 * 1024) - (*need_memkb % (2 * 1024));
+    return 0;
+}
+
+int libxl_get_free_memory(libxl_ctx *ctx, uint32_t *memkb)
+{
+    int rc;
+    libxl_physinfo info;
+    uint32_t freemem_slack;
+
+    rc = libxl_get_physinfo(ctx, &info);
+    if (rc < 0)
+        return rc;
+    rc = libxl_get_free_memory_slack(ctx, &freemem_slack);
+    if (rc < 0)
+        return rc;
+
+    if ((info.free_pages + info.scrub_pages) * 4 > freemem_slack)
+        *memkb = (info.free_pages + info.scrub_pages) * 4 - freemem_slack;
+    else
+        *memkb = 0;
+    return 0;
+}
+
+int libxl_wait_for_free_memory(libxl_ctx *ctx, uint32_t domid, uint32_t memory_kb, int wait_secs)
+{
+    int rc = 0;
+    libxl_physinfo info;
+    uint32_t freemem_slack;
+
+    rc = libxl_get_free_memory_slack(ctx, &freemem_slack);
+    if (rc < 0)
+        return rc;
+    while (wait_secs > 0) {
+        rc = libxl_get_physinfo(ctx, &info);
+        if (rc < 0)
+            return rc;
+        if (info.free_pages * 4 >= freemem_slack + memory_kb)
+            return 0;
+        wait_secs--;
+        sleep(1);
+    }
+    return ERROR_NOMEM;
+}
+
+int libxl_wait_for_memory_target(libxl_ctx *ctx, uint32_t domid, int wait_secs)
+{
+    int rc = 0;
+    uint32_t target_memkb = 0;
+    libxl_dominfo info;
+
+    do {
+        wait_secs--;
+        sleep(1);
+
+        rc = libxl_get_memory_target(ctx, domid, &target_memkb);
+        if (rc < 0)
+            goto out;
+
+        rc = libxl_domain_info(ctx, &info, domid);
+        if (rc < 0)
+            return rc;
+    } while (wait_secs > 0 && info.target_memkb > target_memkb);
+
+    if (info.target_memkb <= target_memkb)
+        rc = 0;
+    else
+        rc = ERROR_FAIL;
+
+out:
+    return rc;
+}
+
 int libxl_button_press(libxl_ctx *ctx, uint32_t domid, libxl_button button)
 {
     int rc = -1;
diff -r a59fa4d54851 tools/libxl/libxl.h
--- a/tools/libxl/libxl.h       Wed Aug 25 21:01:37 2010 +0100
+++ b/tools/libxl/libxl.h       Thu Aug 26 19:06:04 2010 +0100
@@ -324,6 +324,12 @@ int libxl_domain_setmaxmem(libxl_ctx *ct
 int libxl_set_memory_target(libxl_ctx *ctx, uint32_t domid, uint32_t target_memkb, int enforce);
 int libxl_set_relative_memory_target(libxl_ctx *ctx, uint32_t domid, int32_t relative_target_memkb, int enforce);
 int libxl_get_memory_target(libxl_ctx *ctx, uint32_t domid, uint32_t *out_target);
+int libxl_domain_need_memory(libxl_ctx *ctx, libxl_domain_build_info *b_info,
+        libxl_device_model_info *dm_info, uint32_t *need_memkb);
+int libxl_get_free_memory(libxl_ctx *ctx, uint32_t *memkb);
+int libxl_wait_for_free_memory(libxl_ctx *ctx, uint32_t domid, uint32_t memory_kb, int wait_secs);
+int libxl_wait_for_memory_target(libxl_ctx *ctx, uint32_t domid, int wait_secs);
+int libxl_get_free_memory_slack(libxl_ctx *ctx, uint32_t *free_mem_slack);
 
 int libxl_vncviewer_exec(libxl_ctx *ctx, uint32_t domid, int autopass);
 int libxl_console_exec(libxl_ctx *ctx, uint32_t domid, int cons_num, libxl_console_constype type);
diff -r a59fa4d54851 tools/libxl/libxl_internal.h
--- a/tools/libxl/libxl_internal.h      Wed Aug 25 21:01:37 2010 +0100
+++ b/tools/libxl/libxl_internal.h      Thu Aug 26 19:06:04 2010 +0100
@@ -41,6 +41,8 @@
 #define LIBXL_XENCONSOLE_LIMIT 1048576
 #define LIBXL_XENCONSOLE_PROTOCOL "vt100"
 #define LIBXL_MAXMEM_CONSTANT 1024
+#define LIBXL_PV_EXTRA_MEMORY 1024
+#define LIBXL_HVM_EXTRA_MEMORY 2048
 #define QEMU_SIGNATURE "QemuDeviceModelRecord"
 
 #define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
