To: xen-devel@xxxxxxxxxxxxxxxxxxx, Keir Fraser <Keir.Fraser@xxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH/RFC] memory_map + set_memmap_limit hypercall/domctl
From: Glauber de Oliveira Costa <gcosta@xxxxxxxxxx>
Date: Mon, 27 Nov 2006 16:56:55 -0200
Delivery-date: Mon, 27 Nov 2006 10:57:10 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Mutt/1.5.11

Here's a new version of the memory-map-related hypercall/domctl patch.

As previously requested by Keir, I'm adding a new hypercall
(set_memmap_limit), whose purpose is to inform the hypervisor of the
maximum value a domain's physical memory limit can grow to.
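
For clarity, here's a rough sketch (not part of the patch; the 256MB
figure and the helper name set_limit_example are made up) of how a
management tool could drive this through the xc_domain_set_memmap_limit()
wrapper added below, and of what the guest should then see from
XENMEM_memory_map:

    /* Sketch only -- error handling trimmed, values invented. */
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>
    #include "xenctrl.h"

    int set_limit_example(int xc_handle, uint32_t domid)
    {
        unsigned int map_limitkb = 256 * 1024;   /* cap the map at 256MB */

        if ( xc_domain_set_memmap_limit(xc_handle, domid, map_limitkb) != 0 )
        {
            fprintf(stderr, "set_memmap_limit failed: %s\n", strerror(errno));
            return -1;
        }

        /* From now on the guest's XENMEM_memory_map call reports a single
         * E820_RAM entry covering [0, 256MB + 8MB slack). */
        return 0;
    }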

I consider it close to a final version, but I'd still like to receive
comments on some specific points (besides any others you see fit to
raise, of course ;-) ):

* I'm assuming that a negative (int) representation of max_pages will
always mean it's unlimited for this guest, and that we don't want to
rely on it when establishing our phys map. Keir, can that be
guaranteed?

* I'm assuming that the domctl to set the physical mapping cannot be
called more than once, and I return EINVAL in that case; see the sketch
below. Is that reasonable?
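
To illustrate the second point, this is the behaviour I'd expect a
caller to see (sketch only; the 512MB/1GB values are arbitrary, and I'm
assuming do_domctl reports the failure as -1 with errno set, as the
other libxc wrappers do):

    #include <stdint.h>
    #include "xenctrl.h"

    /* Sketch: the second call hits the "already set" check and fails. */
    int set_limit_twice(int xc_handle, uint32_t domid)
    {
        int rc;

        rc = xc_domain_set_memmap_limit(xc_handle, domid, 512 * 1024);
        /* Expected: rc == 0, memmap_limit established at 512MB. */

        rc = xc_domain_set_memmap_limit(xc_handle, domid, 1024 * 1024);
        /* Expected: rc != 0 with errno == EINVAL -- the limit is
         * write-once in this version. */

        return rc;
    }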

Thanks!

-- 
Glauber de Oliveira Costa
Red Hat Inc.
"Free as in Freedom"
--- ./xen/arch/x86/mm.c.orig    2006-11-24 09:15:25.000000000 -0500
+++ ./xen/arch/x86/mm.c 2006-11-27 14:52:55.000000000 -0500
@@ -2974,7 +2974,45 @@ long arch_memory_op(int op, XEN_GUEST_HA
 
     case XENMEM_memory_map:
     {
-        return -ENOSYS;
+        struct xen_memory_map memmap;
+        struct domain *d;
+        XEN_GUEST_HANDLE(e820entry_t) buffer;
+        struct e820entry map;
+
+
+        d = current->domain;
+
+        if ( copy_from_guest(&memmap, arg, 1) )
+            return -EFAULT;
+
+        buffer = guest_handle_cast(memmap.buffer, e820entry_t);
+        if ( unlikely(guest_handle_is_null(buffer)) ) 
+            return -EFAULT;
+
+        memmap.nr_entries = 1;
+
+        /* If we were not supplied with proper information, try to use the
+         * current max_pages value as an upper bound. If it's zero or
+         * unlimited, tot_pages is all that is left for us. */
+        if ( d->memmap_limit )
+            map.size = d->memmap_limit;
+        else if ( (int)d->max_pages > 0 )
+            map.size = (u64)d->max_pages << PAGE_SHIFT;
+        else
+            map.size = (u64)d->tot_pages << PAGE_SHIFT;
+
+        /* 8MB slack (to balance backend allocations). */
+        map.size += 8 << 20;
+        map.addr = 0ULL;
+        map.type = E820_RAM;
+
+        if ( copy_to_guest(arg, &memmap, 1) )
+            return -EFAULT;
+
+        if ( copy_to_guest(buffer, &map, 1) )
+            return -EFAULT;
+
+        return 0;
     }
 
     case XENMEM_machine_memory_map:
--- ./xen/include/xen/sched.h.orig      2006-11-24 09:15:33.000000000 -0500
+++ ./xen/include/xen/sched.h   2006-11-27 13:59:33.000000000 -0500
@@ -112,6 +112,7 @@ struct domain
     struct list_head xenpage_list;    /* linked list, of size xenheap_pages */
     unsigned int     tot_pages;       /* number of pages currently possesed */
     unsigned int     max_pages;       /* maximum value for tot_pages        */
+    unsigned int     memmap_limit;    /* upper bound for our memory map     */
     unsigned int     xenheap_pages;   /* # pages allocated from Xen heap    */
 
     /* Scheduling. */
--- ./xen/include/public/domctl.h.orig  2006-11-24 09:16:17.000000000 -0500
+++ ./xen/include/public/domctl.h       2006-11-27 13:55:18.000000000 -0500
@@ -348,6 +348,14 @@ struct xen_domctl_settimeoffset {
 typedef struct xen_domctl_settimeoffset xen_domctl_settimeoffset_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_settimeoffset_t);
 
+#define XEN_DOMCTL_memmap_limit       26
+struct xen_domctl_memmap_limit {
+    /* IN variables. */
+    uint64_t map_limitkb;
+};
+typedef struct xen_domctl_memmap_limit xen_domctl_memmap_limit_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_memmap_limit_t);
+
 struct xen_domctl {
     uint32_t cmd;
     uint32_t interface_version; /* XEN_DOMCTL_INTERFACE_VERSION */
@@ -373,9 +381,11 @@ struct xen_domctl {
         struct xen_domctl_hypercall_init    hypercall_init;
         struct xen_domctl_arch_setup        arch_setup;
         struct xen_domctl_settimeoffset     settimeoffset;
-        uint8_t                             pad[128];
+        struct xen_domctl_memmap_limit      memmap_limit;
+        uint8_t                             pad[120];
     } u;
 };
+
 typedef struct xen_domctl xen_domctl_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_t);
 
--- ./xen/common/domctl.c.orig  2006-11-24 09:16:03.000000000 -0500
+++ ./xen/common/domctl.c       2006-11-27 13:54:27.000000000 -0500
@@ -546,6 +546,31 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
     }
     break;
 
+    case XEN_DOMCTL_memmap_limit:
+    {
+        struct domain *d;
+        unsigned long new_limit; 
+
+        ret = -ESRCH;
+        d = find_domain_by_id(op->domain);
+        if ( d == NULL )
+            break;
+
+        ret = -EINVAL;
+        new_limit = op->u.memmap_limit.map_limitkb << 10; 
+
+        /* No point in either calling it more than once or in setting
+         * the physical map limit to zero. */
+        if ( (d->memmap_limit == 0) && (new_limit != 0) )
+        {
+            d->memmap_limit = new_limit;
+            ret = 0;
+        }
+
+        put_domain(d);
+    }
+    break;
+
     case XEN_DOMCTL_setdomainhandle:
     {
         struct domain *d;
--- ./tools/libxc/xenctrl.h.orig        2006-11-24 09:23:07.000000000 -0500
+++ ./tools/libxc/xenctrl.h     2006-11-27 13:53:09.000000000 -0500
@@ -407,6 +407,10 @@ int xc_domain_setmaxmem(int xc_handle,
                         uint32_t domid,
                         unsigned int max_memkb);
 
+int xc_domain_set_memmap_limit(int xc_handle,
+                            uint32_t domid,
+                            unsigned int map_limitkb);
+
 int xc_domain_set_time_offset(int xc_handle,
                               uint32_t domid,
                               int32_t time_offset_seconds);
--- ./tools/libxc/xc_domain.c.orig      2006-11-24 09:18:08.000000000 -0500
+++ ./tools/libxc/xc_domain.c   2006-11-27 13:57:25.000000000 -0500
@@ -313,6 +313,17 @@ int xc_domain_setmaxmem(int xc_handle,
     return do_domctl(xc_handle, &domctl);
 }
 
+int xc_domain_set_memmap_limit(int xc_handle,
+                            uint32_t domid,
+                            unsigned int map_limitkb)
+{
+    DECLARE_DOMCTL;
+    domctl.cmd = XEN_DOMCTL_memmap_limit;
+    domctl.domain = (domid_t)domid;
+    domctl.u.memmap_limit.map_limitkb = map_limitkb;
+    return do_domctl(xc_handle, &domctl);
+}
+
 int xc_domain_set_time_offset(int xc_handle,
                               uint32_t domid,
                               int32_t time_offset_seconds)
--- ./tools/python/xen/lowlevel/xc/xc.c.orig    2006-11-27 12:54:49.000000000 -0500
+++ ./tools/python/xen/lowlevel/xc/xc.c 2006-11-27 14:05:25.000000000 -0500
@@ -705,6 +705,21 @@ static PyObject *pyxc_domain_setmaxmem(X
     return zero;
 }
 
+static PyObject *pyxc_domain_set_memmap_limit(XcObject *self, PyObject *args)
+{
+    uint32_t dom;
+    unsigned int maplimit_kb;
+
+    if (!PyArg_ParseTuple(args, "ii", &dom, &maplimit_kb))
+        return NULL;
+
+    if (xc_domain_set_memmap_limit(self->xc_handle, dom, maplimit_kb) != 0)
+        return PyErr_SetFromErrno(xc_error);
+
+    Py_INCREF(zero);
+    return zero;
+}
+
 static PyObject *pyxc_domain_memory_increase_reservation(XcObject *self,
                                                          PyObject *args,
                                                          PyObject *kwds)
@@ -1082,6 +1097,14 @@ static PyMethodDef pyxc_methods[] = {
       " maxmem_kb [int]: .\n"
       "Returns: [int] 0 on success; -1 on error.\n" },
 
+    { "domain_set_memmap_limit", 
+      (PyCFunction)pyxc_domain_set_memmap_limit, 
+      METH_VARARGS, "\n"
+      "Set a domain's physical memory mappping limit\n"
+      " dom [int]: Identifier of domain.\n"
+      " map_limitkb [int]: .\n"
+      "Returns: [int] 0 on success; -1 on error.\n" },
+
     { "domain_memory_increase_reservation", 
       (PyCFunction)pyxc_domain_memory_increase_reservation, 
       METH_VARARGS | METH_KEYWORDS, "\n"
--- ./tools/python/xen/xend/XendDomainInfo.py.orig      2006-11-27 12:57:15.000000000 -0500
+++ ./tools/python/xen/xend/XendDomainInfo.py   2006-11-27 13:58:47.000000000 -0500
@@ -1330,6 +1330,9 @@ class XendDomainInfo:
 
             # set memory limit
             xc.domain_setmaxmem(self.domid, maxmem)
+
+            # set physical mapping limit
+            xc.domain_set_memmap_limit(self.domid, maxmem)
 
             # Make sure there's enough RAM available for the domain
             balloon.free(memory + shadow)
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel