WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/

[Xen-changelog] [xen-unstable] [TOOLS] Paravirt guests have their memory allocated in the

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] [TOOLS] Paravirt guests have their memory allocated in the
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 02 Nov 2006 22:10:17 +0000
Delivery-date: Thu, 02 Nov 2006 21:45:18 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID e6fdb32b786c3e57b9c641274607603897b97334
# Parent  70687bcb82dd0b2d5813e6125c95ff908e25c94d
[TOOLS] Paravirt guests have their memory allocated in the
libxenguest builder function.

Again, PPC and IA64 will need to do some fixing up.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 tools/libxc/xc_linux_build.c            |   96 ++++++++++----------------------
 tools/libxc/xenguest.h                  |   32 ++++++----
 tools/python/xen/lowlevel/xc/xc.c       |    9 +--
 tools/python/xen/xend/XendDomainInfo.py |   11 ---
 tools/python/xen/xend/image.py          |   24 +++++---
 5 files changed, 72 insertions(+), 100 deletions(-)
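
The effect of the patch, in caller terms: xend no longer pre-reserves the guest's memory (via xc.domain_memory_increase_reservation) before invoking the builder; instead, xc_linux_build() and xc_linux_build_mem() gain a mem_mb parameter and the libxenguest builder allocates the memory itself with xc_domain_memory_populate_physmap(). Below is a minimal caller sketch against the post-patch prototype from the xenguest.h hunk; the domain id, event-channel ports, and file names are hypothetical placeholders, not values taken from the patch.

    /* Illustrative sketch only: build a PV Linux guest with the updated
     * interface, where the builder allocates mem_mb MiB of guest memory. */
    #include <stdint.h>
    #include <xenctrl.h>
    #include <xenguest.h>

    int build_pv_guest(void)
    {
        int xc_handle = xc_interface_open();     /* privileged control interface */
        uint32_t domid = 1;                      /* hypothetical, already-created domain */
        unsigned int mem_mb = 256;               /* new parameter: guest memory in MiB */
        unsigned long store_mfn = 0, console_mfn = 0;
        int rc;

        if ( xc_handle < 0 )
            return -1;

        rc = xc_linux_build(xc_handle, domid, mem_mb,
                            "/boot/vmlinuz-2.6-xenU",  /* hypothetical kernel image */
                            NULL,                      /* no ramdisk */
                            "root=/dev/sda1 ro",       /* kernel command line */
                            NULL,                      /* default feature set */
                            0,                         /* flags */
                            7, &store_mfn,             /* hypothetical store port */
                            8, &console_mfn);          /* hypothetical console port */

        xc_interface_close(xc_handle);
        return rc;
    }

On the xend side, image.py now derives mem_mb from getRequiredInitialReservation() / 1024 and passes it as the memsize keyword to xc.linux_build, as the Python hunks below show.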

diff -r 70687bcb82dd -r e6fdb32b786c tools/libxc/xc_linux_build.c
--- a/tools/libxc/xc_linux_build.c      Thu Nov 02 07:46:06 2006 +0000
+++ b/tools/libxc/xc_linux_build.c      Thu Nov 02 11:11:28 2006 +0000
@@ -23,12 +23,6 @@
 #elif defined(__x86_64__)
 #define L3_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
 #define L4_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
-#endif
-
-#ifdef __ia64__
-#define get_tot_pages xc_get_max_pages
-#else
-#define get_tot_pages xc_get_tot_pages
 #endif
 
 #define round_pgup(_p)    (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
@@ -674,7 +668,6 @@ static int setup_guest(int xc_handle,
     int hypercall_page_defined;
     start_info_t *start_info;
     shared_info_t *shared_info;
-    xc_mmu_t *mmu = NULL;
     const char *p;
     DECLARE_DOMCTL;
     int rc;
@@ -716,7 +709,7 @@ static int setup_guest(int xc_handle,
         goto error_out;
     }
 
-    if (!compat_check(xc_handle, &dsi))
+    if ( !compat_check(xc_handle, &dsi) )
         goto error_out;
 
     /* Parse and validate kernel features. */
@@ -751,9 +744,13 @@ static int setup_guest(int xc_handle,
         goto error_out;
     }
 
-    if ( xc_get_pfn_list(xc_handle, dom, page_array, nr_pages) != nr_pages )
-    {
-        PERROR("Could not get the page frame list");
+    for ( i = 0; i < nr_pages; i++ )
+        page_array[i] = i;
+
+    if ( xc_domain_memory_populate_physmap(xc_handle, dom, nr_pages,
+                                           0, 0, page_array) )
+    {
+        PERROR("Could not allocate memory for PV guest.\n");
         goto error_out;
     }
 
@@ -885,9 +882,8 @@ static int setup_guest(int xc_handle,
         goto error_out;
     }
 
-    /* setup page tables */
 #if defined(__i386__)
-    if (dsi.pae_kernel != PAEKERN_no)
+    if ( dsi.pae_kernel != PAEKERN_no )
         rc = setup_pg_tables_pae(xc_handle, dom, ctxt,
                                  dsi.v_start, v_end,
                                  page_array, vpt_start, vpt_end,
@@ -904,16 +900,16 @@ static int setup_guest(int xc_handle,
                             page_array, vpt_start, vpt_end,
                             shadow_mode_enabled);
 #endif
-    if (0 != rc)
-        goto error_out;
-
-#if defined(__i386__)
+    if ( rc != 0 )
+        goto error_out;
+
     /*
      * Pin down l2tab addr as page dir page - causes hypervisor to provide
      * correct protection for the page
      */
     if ( !shadow_mode_enabled )
     {
+#if defined(__i386__)
         if ( dsi.pae_kernel != PAEKERN_no )
         {
             if ( pin_table(xc_handle, MMUEXT_PIN_L3_TABLE,
@@ -926,40 +922,24 @@ static int setup_guest(int xc_handle,
                            xen_cr3_to_pfn(ctxt->ctrlreg[3]), dom) )
                 goto error_out;
         }
-    }
+#elif defined(__x86_64__)
+        /*
+         * Pin down l4tab addr as page dir page - causes hypervisor to  provide
+         * correct protection for the page
+         */
+        if ( pin_table(xc_handle, MMUEXT_PIN_L4_TABLE,
+                       xen_cr3_to_pfn(ctxt->ctrlreg[3]), dom) )
+            goto error_out;
 #endif
-
-#if defined(__x86_64__)
-    /*
-     * Pin down l4tab addr as page dir page - causes hypervisor to  provide
-     * correct protection for the page
-     */
-    if ( pin_table(xc_handle, MMUEXT_PIN_L4_TABLE,
-                   xen_cr3_to_pfn(ctxt->ctrlreg[3]), dom) )
-        goto error_out;
-#endif
-
-    if ( (mmu = xc_init_mmu_updates(xc_handle, dom)) == NULL )
-        goto error_out;
-
-    /* Write the phys->machine and machine->phys table entries. */
+    }
+
+    /* Write the phys->machine table entries (machine->phys already done). */
     physmap_pfn = (vphysmap_start - dsi.v_start) >> PAGE_SHIFT;
     physmap = physmap_e = xc_map_foreign_range(
         xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
         page_array[physmap_pfn++]);
-
     for ( count = 0; count < nr_pages; count++ )
     {
-        if ( xc_add_mmu_update(
-            xc_handle, mmu,
-            ((uint64_t)page_array[count] << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
-            count) )
-        {
-            DPRINTF("m2p update failure p=%lx m=%"PRIx64"\n",
-                    count, (uint64_t)page_array[count]);
-            munmap(physmap, PAGE_SIZE);
-            goto error_out;
-        }
         *physmap_e++ = page_array[count];
         if ( ((unsigned long)physmap_e & (PAGE_SIZE-1)) == 0 )
         {
@@ -970,10 +950,6 @@ static int setup_guest(int xc_handle,
         }
     }
     munmap(physmap, PAGE_SIZE);
-
-    /* Send the page update requests down to the hypervisor. */
-    if ( xc_finish_mmu_updates(xc_handle, mmu) )
-        goto error_out;
 
     if ( shadow_mode_enabled )
     {
@@ -1081,10 +1057,6 @@ static int setup_guest(int xc_handle,
 
     munmap(shared_info, PAGE_SIZE);
 
-    /* Send the page update requests down to the hypervisor. */
-    if ( xc_finish_mmu_updates(xc_handle, mmu) )
-        goto error_out;
-
     hypercall_page = xen_elfnote_numeric(&dsi, XEN_ELFNOTE_HYPERCALL_PAGE,
                                          &hypercall_page_defined);
     if ( hypercall_page_defined )
@@ -1100,7 +1072,6 @@ static int setup_guest(int xc_handle,
             goto error_out;
     }
 
-    free(mmu);
     free(page_array);
 
     *pvsi = vstartinfo_start;
@@ -1110,7 +1081,6 @@ static int setup_guest(int xc_handle,
     return 0;
 
  error_out:
-    free(mmu);
     free(page_array);
     return -1;
 }
@@ -1118,6 +1088,7 @@ static int setup_guest(int xc_handle,
 
 static int xc_linux_build_internal(int xc_handle,
                                    uint32_t domid,
+                                   unsigned int mem_mb,
                                    char *image,
                                    unsigned long image_size,
                                    struct initrd_info *initrd,
@@ -1132,8 +1103,7 @@ static int xc_linux_build_internal(int x
     struct xen_domctl launch_domctl;
     DECLARE_DOMCTL;
     int rc, i;
-    vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt;
-    unsigned long nr_pages;
+    struct vcpu_guest_context st_ctxt, *ctxt = &st_ctxt;
     unsigned long vstartinfo_start, vkern_entry, vstack_start;
     uint32_t      features_bitmap[XENFEAT_NR_SUBMAPS] = { 0, };
 
@@ -1144,12 +1114,6 @@ static int xc_linux_build_internal(int x
             PERROR("Failed to parse configured features\n");
             goto error_out;
         }
-    }
-
-    if ( (nr_pages = get_tot_pages(xc_handle, domid)) < 0 )
-    {
-        PERROR("Could not find total pages for domain");
-        goto error_out;
     }
 
 #ifdef VALGRIND
@@ -1175,7 +1139,7 @@ static int xc_linux_build_internal(int x
 
     if ( setup_guest(xc_handle, domid, image, image_size,
                      initrd,
-                     nr_pages,
+                     mem_mb << (20 - PAGE_SHIFT),
                      &vstartinfo_start, &vkern_entry,
                      &vstack_start, ctxt, cmdline,
                      domctl.u.getdomaininfo.shared_info_frame,
@@ -1271,6 +1235,7 @@ static int xc_linux_build_internal(int x
 
 int xc_linux_build_mem(int xc_handle,
                        uint32_t domid,
+                       unsigned int mem_mb,
                        const char *image_buffer,
                        unsigned long image_size,
                        const char *initrd,
@@ -1319,7 +1284,7 @@ int xc_linux_build_mem(int xc_handle,
         }
     }
 
-    sts = xc_linux_build_internal(xc_handle, domid, img_buf, img_len,
+    sts = xc_linux_build_internal(xc_handle, domid, mem_mb, img_buf, img_len,
                                   &initrd_info, cmdline, features, flags,
                                   store_evtchn, store_mfn,
                                   console_evtchn, console_mfn);
@@ -1339,6 +1304,7 @@ int xc_linux_build_mem(int xc_handle,
 
 int xc_linux_build(int xc_handle,
                    uint32_t domid,
+                   unsigned int mem_mb,
                    const char *image_name,
                    const char *initrd_name,
                    const char *cmdline,
@@ -1375,7 +1341,7 @@ int xc_linux_build(int xc_handle,
         }
     }
 
-    sts = xc_linux_build_internal(xc_handle, domid, image, image_size,
+    sts = xc_linux_build_internal(xc_handle, domid, mem_mb, image, image_size,
                                   &initrd_info, cmdline, features, flags,
                                   store_evtchn, store_mfn,
                                   console_evtchn, console_mfn);
diff -r 70687bcb82dd -r e6fdb32b786c tools/libxc/xenguest.h
--- a/tools/libxc/xenguest.h    Thu Nov 02 07:46:06 2006 +0000
+++ b/tools/libxc/xenguest.h    Thu Nov 02 11:11:28 2006 +0000
@@ -48,8 +48,9 @@ int xc_linux_restore(int xc_handle, int 
  *
  * @parm xc_handle a handle to an open hypervisor interface
  * @parm domid the id of the domain
- * @param image_name name of the kernel image file
- * @param ramdisk_name name of the ramdisk image file
+ * @parm mem_mb memory size in megabytes
+ * @parm image_name name of the kernel image file
+ * @parm ramdisk_name name of the ramdisk image file
  * @parm cmdline command line string
  * @parm flags domain creation flags
  * @parm store_evtchn the store event channel for this domain to use
@@ -60,6 +61,7 @@ int xc_linux_restore(int xc_handle, int 
  */
 int xc_linux_build(int xc_handle,
                    uint32_t domid,
+                   unsigned int mem_mb,
                    const char *image_name,
                    const char *ramdisk_name,
                    const char *cmdline,
@@ -74,22 +76,24 @@ int xc_linux_build(int xc_handle,
  * This function will create a domain for a paravirtualized Linux
  * using buffers for kernel and initrd
  *
- * @param xc_handle a handle to an open hypervisor interface
- * @param domid the id of the domain
- * @param image_buffer buffer containing kernel image
- * @param image_size size of the kernel image buffer
- * @param initrd_buffer name of the ramdisk image file
- * @param initrd_size size of the ramdisk buffer
- * @param cmdline command line string
- * @param flags domain creation flags
- * @param store_evtchn the store event channel for this domain to use
- * @param store_mfn returned with the mfn of the store page
- * @param console_evtchn the console event channel for this domain to use
- * @param conole_mfn returned with the mfn of the console page
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm domid the id of the domain
+ * @parm mem_mb memory size in megabytes
+ * @parm image_buffer buffer containing kernel image
+ * @parm image_size size of the kernel image buffer
+ * @parm initrd_buffer name of the ramdisk image file
+ * @parm initrd_size size of the ramdisk buffer
+ * @parm cmdline command line string
+ * @parm flags domain creation flags
+ * @parm store_evtchn the store event channel for this domain to use
+ * @parm store_mfn returned with the mfn of the store page
+ * @parm console_evtchn the console event channel for this domain to use
+ * @parm conole_mfn returned with the mfn of the console page
  * @return 0 on success, -1 on failure
  */
 int xc_linux_build_mem(int xc_handle,
                        uint32_t domid,
+                       unsigned int mem_mb,
                        const char *image_buffer,
                        unsigned long image_size,
                        const char *initrd_buffer,
diff -r 70687bcb82dd -r e6fdb32b786c tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Thu Nov 02 07:46:06 2006 +0000
+++ b/tools/python/xen/lowlevel/xc/xc.c Thu Nov 02 11:11:28 2006 +0000
@@ -334,24 +334,25 @@ static PyObject *pyxc_linux_build(XcObje
     char *image, *ramdisk = NULL, *cmdline = "", *features = NULL;
     int flags = 0;
     int store_evtchn, console_evtchn;
+    unsigned int mem_mb;
     unsigned long store_mfn = 0;
     unsigned long console_mfn = 0;
 
-    static char *kwd_list[] = { "domid", "store_evtchn",
+    static char *kwd_list[] = { "domid", "store_evtchn", "memsize",
                                 "console_evtchn", "image",
                                 /* optional */
                                 "ramdisk", "cmdline", "flags",
                                 "features", NULL };
 
-    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iiis|ssis", kwd_list,
-                                      &dom, &store_evtchn,
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iiiis|ssis", kwd_list,
+                                      &dom, &store_evtchn, &mem_mb,
                                       &console_evtchn, &image,
                                       /* optional */
                                       &ramdisk, &cmdline, &flags,
                                       &features) )
         return NULL;
 
-    if ( xc_linux_build(self->xc_handle, dom, image,
+    if ( xc_linux_build(self->xc_handle, dom, mem_mb, image,
                         ramdisk, cmdline, features, flags,
                         store_evtchn, &store_mfn,
                         console_evtchn, &console_mfn) != 0 ) {
diff -r 70687bcb82dd -r e6fdb32b786c tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py   Thu Nov 02 07:46:06 2006 +0000
+++ b/tools/python/xen/xend/XendDomainInfo.py   Thu Nov 02 11:11:28 2006 +0000
@@ -1270,10 +1270,7 @@ class XendDomainInfo:
 
             # Use architecture- and image-specific calculations to determine
             # the various headrooms necessary, given the raw configured
-            # values.
-            # reservation, maxmem, memory, and shadow are all in KiB.
-            reservation = self.image.getRequiredInitialReservation(
-                self.info['memory'] * 1024)
+            # values. maxmem, memory, and shadow are all in KiB.
             maxmem = self.image.getRequiredAvailableMemory(
                 self.info['maxmem'] * 1024)
             memory = self.image.getRequiredAvailableMemory(
@@ -1295,12 +1292,6 @@ class XendDomainInfo:
             # Set up the shadow memory
             shadow_cur = xc.shadow_mem_control(self.domid, shadow / 1024)
             self.info['shadow_memory'] = shadow_cur
-
-            # Initial memory reservation
-            if not (self._infoIsSet('image') and
-                    sxp.name(self.info['image']) == "hvm"):
-                xc.domain_memory_increase_reservation(
-                    self.domid, reservation, 0, 0)
 
             self._createChannels()
 
diff -r 70687bcb82dd -r e6fdb32b786c tools/python/xen/xend/image.py
--- a/tools/python/xen/xend/image.py    Thu Nov 02 07:46:06 2006 +0000
+++ b/tools/python/xen/xend/image.py    Thu Nov 02 11:11:28 2006 +0000
@@ -152,13 +152,13 @@ class ImageHandler:
         necessary."""
         return mem_kb
 
-    def getRequiredInitialReservation(self, mem_kb):
+    def getRequiredInitialReservation(self):
         """@param mem_kb The configured memory, in KiB.
         @return The corresponding required amount of memory to be free, also
         in KiB. This is normally the same as getRequiredAvailableMemory, but
         architecture- or image-specific code may override this to
         add headroom where necessary."""
-        return self.getRequiredAvailableMemory(mem_kb)
+        return self.getRequiredAvailableMemory(self.vm.getMemoryTarget())
 
     def getRequiredShadowMemory(self, shadow_mem_kb, maxmem_kb):
         """@param shadow_mem_kb The configured shadow memory, in KiB.
@@ -189,7 +189,10 @@ class LinuxImageHandler(ImageHandler):
         store_evtchn = self.vm.getStorePort()
         console_evtchn = self.vm.getConsolePort()
 
+        mem_mb = self.getRequiredInitialReservation() / 1024
+
         log.debug("domid          = %d", self.vm.getDomid())
+        log.debug("memsize        = %d", mem_mb)
         log.debug("image          = %s", self.kernel)
         log.debug("store_evtchn   = %d", store_evtchn)
         log.debug("console_evtchn = %d", console_evtchn)
@@ -199,6 +202,7 @@ class LinuxImageHandler(ImageHandler):
         log.debug("features       = %s", self.vm.getFeatures())
 
         return xc.linux_build(domid          = self.vm.getDomid(),
+                              memsize        = mem_mb,
                               image          = self.kernel,
                               store_evtchn   = store_evtchn,
                               console_evtchn = console_evtchn,
@@ -218,7 +222,10 @@ class PPC_LinuxImageHandler(LinuxImageHa
         store_evtchn = self.vm.getStorePort()
         console_evtchn = self.vm.getConsolePort()
 
+        mem_mb = self.getRequiredInitialReservation() / 1024
+
         log.debug("domid          = %d", self.vm.getDomid())
+        log.debug("memsize        = %d", mem_mb)
         log.debug("image          = %s", self.kernel)
         log.debug("store_evtchn   = %d", store_evtchn)
         log.debug("console_evtchn = %d", console_evtchn)
@@ -230,6 +237,7 @@ class PPC_LinuxImageHandler(LinuxImageHa
         devtree = FlatDeviceTree.build(self)
 
         return xc.linux_build(domid          = self.vm.getDomid(),
+                              memsize        = mem_mb,
                               image          = self.kernel,
                               store_evtchn   = store_evtchn,
                               console_evtchn = console_evtchn,
@@ -272,10 +280,12 @@ class HVMImageHandler(ImageHandler):
     def buildDomain(self):
         store_evtchn = self.vm.getStorePort()
 
+        mem_mb = self.getRequiredInitialReservation() / 1024
+
         log.debug("domid          = %d", self.vm.getDomid())
         log.debug("image          = %s", self.kernel)
         log.debug("store_evtchn   = %d", store_evtchn)
-        log.debug("memsize        = %d", self.vm.getMemoryTarget() / 1024)
+        log.debug("memsize        = %d", mem_mb)
         log.debug("vcpus          = %d", self.vm.getVCpuCount())
         log.debug("pae            = %d", self.pae)
         log.debug("acpi           = %d", self.acpi)
@@ -286,7 +296,7 @@ class HVMImageHandler(ImageHandler):
         return xc.hvm_build(domid          = self.vm.getDomid(),
                             image          = self.kernel,
                             store_evtchn   = store_evtchn,
-                            memsize        = self.vm.getMemoryTarget() / 1024,
+                            memsize        = mem_mb,
                             vcpus          = self.vm.getVCpuCount(),
                             pae            = self.pae,
                             acpi           = self.acpi,
@@ -401,7 +411,7 @@ class HVMImageHandler(ImageHandler):
         #todo: Error handling
         args = [self.device_model]
         args = args + ([ "-d",  "%d" % self.vm.getDomid(),
-                  "-m", "%s" % (self.vm.getMemoryTarget() / 1024)])
+                  "-m", "%s" % (self.getRequiredInitialReservation() / 1024)])
         args = args + self.dmargs
         env = dict(os.environ)
         if self.display:
@@ -480,8 +490,8 @@ class X86_HVM_ImageHandler(HVMImageHandl
         # Add 8 MiB overhead for QEMU's video RAM.
         return mem_kb + 8192
 
-    def getRequiredInitialReservation(self, mem_kb):
-        return mem_kb
+    def getRequiredInitialReservation(self):
+        return self.vm.getMemoryTarget()
 
     def getRequiredShadowMemory(self, shadow_mem_kb, maxmem_kb):
         # 256 pages (1MB) per vcpu,
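
For readability, the core of the change in setup_guest() can be read out of the hunks above as roughly the following. This is an abbreviated reconstruction, not verbatim post-patch code; the surrounding function, page_array allocation, and error paths are omitted.

    /* Abbreviated sketch of the new allocation path in setup_guest(),
     * reconstructed from the xc_linux_build.c diff above. */
    unsigned long nr_pages = mem_mb << (20 - PAGE_SHIFT);  /* MiB -> pages */
    unsigned long i;

    /* Ask the hypervisor to allocate the guest's memory: pfn i is backed
     * by a freshly allocated machine frame. */
    for ( i = 0; i < nr_pages; i++ )
        page_array[i] = i;

    if ( xc_domain_memory_populate_physmap(xc_handle, dom, nr_pages,
                                           0, 0, page_array) )
    {
        PERROR("Could not allocate memory for PV guest.\n");
        goto error_out;
    }

Because populate_physmap establishes the machine-to-phys mapping as it allocates, the old xc_init_mmu_updates()/xc_add_mmu_update() machinery for MMU_MACHPHYS_UPDATE is dropped, and the builder now only fills in the guest's phys-to-machine table.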

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
