
To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH 10 of 18] libxc: add xc_maximum_ram_page to wrap XENMEM_maximum_ram_page
From: Ian Campbell <ian.campbell@xxxxxxxxxx>
Date: Tue, 12 Oct 2010 15:16:28 +0100
Cc: Ian Campbell <ian.campbell@xxxxxxxxxx>
Delivery-date: Tue, 12 Oct 2010 07:32:15 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <patchbomb.1286892978@xxxxxxxxxxxxxxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Ian Campbell <ian.campbell@xxxxxxxxxx>
# Date 1286892402 -3600
# Node ID 4a56557e18e05fdba2f8bb9f477fb33760c3814b
# Parent  e2e86e7d7af71f12956af780bd23cc53134920e5
libxc: add xc_maximum_ram_page to wrap XENMEM_maximum_ram_page

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>

diff -r e2e86e7d7af7 -r 4a56557e18e0 tools/libxc/xc_offline_page.c
--- a/tools/libxc/xc_offline_page.c     Tue Oct 12 15:06:42 2010 +0100
+++ b/tools/libxc/xc_offline_page.c     Tue Oct 12 15:06:42 2010 +0100
@@ -271,7 +271,7 @@ static int init_mem_info(xc_interface *x
 
     dinfo->p2m_size = minfo->p2m_size;
 
-    minfo->max_mfn = xc_memory_op(xch, XENMEM_maximum_ram_page, NULL);
+    minfo->max_mfn = xc_maximum_ram_page(xch);
     if ( !(minfo->m2p_table =
         xc_map_m2p(xch, minfo->max_mfn, PROT_READ, NULL)) )
     {
diff -r e2e86e7d7af7 -r 4a56557e18e0 tools/libxc/xc_private.c
--- a/tools/libxc/xc_private.c  Tue Oct 12 15:06:42 2010 +0100
+++ b/tools/libxc/xc_private.c  Tue Oct 12 15:06:42 2010 +0100
@@ -533,6 +533,10 @@ int xc_memory_op(xc_interface *xch,
     return ret;
 }
 
+long xc_maximum_ram_page(xc_interface *xch)
+{
+    return xc_memory_op(xch, XENMEM_maximum_ram_page, NULL);
+}
 
 long long xc_domain_get_cpu_usage( xc_interface *xch, domid_t domid, int vcpu )
 {
diff -r e2e86e7d7af7 -r 4a56557e18e0 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h     Tue Oct 12 15:06:42 2010 +0100
+++ b/tools/libxc/xenctrl.h     Tue Oct 12 15:06:42 2010 +0100
@@ -982,6 +982,9 @@ int xc_mmuext_op(xc_interface *xch, stru
 int xc_mmuext_op(xc_interface *xch, struct mmuext_op *op, unsigned int nr_ops,
                  domid_t dom);
 
+/* System wide memory properties */
+long xc_maximum_ram_page(xc_interface *xch);
+
 int xc_memory_op(xc_interface *xch, int cmd, void *arg);
 
 
diff -r e2e86e7d7af7 -r 4a56557e18e0 tools/libxc/xg_save_restore.h
--- a/tools/libxc/xg_save_restore.h     Tue Oct 12 15:06:42 2010 +0100
+++ b/tools/libxc/xg_save_restore.h     Tue Oct 12 15:06:42 2010 +0100
@@ -179,7 +179,7 @@ static inline int get_platform_info(xc_i
     if (xc_version(xch, XENVER_capabilities, &xen_caps) != 0)
         return 0;
 
-    *max_mfn = xc_memory_op(xch, XENMEM_maximum_ram_page, NULL);
+    *max_mfn = xc_maximum_ram_page(xch);
 
     *hvirt_start = xen_params.virt_start;
 

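For anyone wanting to try the new call outside of the page-offlining and save/restore code, here is a minimal sketch of a caller (not part of this patch; it assumes the xc_interface_open()/xc_interface_close() handle API of this libxc version, and trims error handling):

/* Usage sketch: query the host's highest RAM page via the new wrapper. */
#include <stdio.h>
#include <xenctrl.h>

int main(void)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    long max_mfn;

    if ( !xch )
        return 1;

    /* Equivalent to xc_memory_op(xch, XENMEM_maximum_ram_page, NULL). */
    max_mfn = xc_maximum_ram_page(xch);
    printf("XENMEM_maximum_ram_page: %ld\n", max_mfn);

    xc_interface_close(xch);
    return 0;
}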
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
