
[Xen-ia64-devel] [RFC 1/3] hvm-stub for ia64: tools



diff -r 092232fa1fbd tools/libxc/ia64/Makefile
--- a/tools/libxc/ia64/Makefile Thu Nov 22 03:34:09 2007 +0100
+++ b/tools/libxc/ia64/Makefile Thu Nov 22 03:31:56 2007 +0100
@@ -1,6 +1,7 @@ CTRL_SRCS-y += ia64/xc_ia64_stubs.c
 CTRL_SRCS-y += ia64/xc_ia64_stubs.c
 
 GUEST_SRCS-y += ia64/xc_ia64_hvm_build.c
+GUEST_SRCS-y += ia64/xc_ia64_hvmstub_build.c
 GUEST_SRCS-y += ia64/xc_ia64_linux_save.c
 GUEST_SRCS-y += ia64/xc_ia64_linux_restore.c
 
diff -r 092232fa1fbd tools/libxc/ia64/xc_ia64_hvmstub_build.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/libxc/ia64/xc_ia64_hvmstub_build.c  Thu Nov 22 03:31:56 2007 +0100
@@ -0,0 +1,239 @@
+#include <asm/kregs.h>
+#include "xg_private.h"
+#include "xenguest.h"
+#include "xc_private.h"
+#include "xc_elf.h"
+#include "xc_efi.h"
+#include <stdlib.h>
+#include <assert.h>
+#include <zlib.h>
+#include "xen/arch-ia64.h"
+#include <xen/hvm/ioreq.h>
+#include <xen/hvm/params.h>
+
+static int
+xc_ia64_copy_to_domain_pages(int xc_handle, uint32_t domid, void* src_page,
+                             unsigned long dst_pfn, int nr_pages)
+{
+    // N.B. src_page must be page aligned.
+    int i;
+
+    for (i = 0; i < nr_pages; i++) {
+        if (xc_copy_to_domain_page(xc_handle, domid, dst_pfn + i,
+                                   src_page + (i << PAGE_SHIFT)))
+            return -1;
+    }
+
+    return 0;
+}
+
+#define GFW_PAGES (GFW_SIZE >> PAGE_SHIFT)
+
+/*
+ * In this function we allocate memory and build the P2M/M2P tables for a VTI
+ * guest.  First, a discontiguous pfn list is initialized: normal memory
+ * begins at 0, while GFW memory and five other special pages sit at the
+ * addresses defined in xen/include/public/arch-ia64.h.  Then
+ * xc_domain_memory_populate_physmap() is called once per region, which makes
+ * it convenient to allocate discontiguous regions of different sizes.
+ */
+static int
+setup_guest(int xc_handle, uint32_t dom, unsigned long mem_mb,
+            char *image, unsigned long image_size,
+            unsigned long *store_mfn, unsigned long *console_mfn)
+{
+    xen_pfn_t *pfn_list;
+    void *buffer_page;
+    unsigned long dom_memsize = mem_mb << 20;    /* in bytes */
+    unsigned long nr_pages = mem_mb << (20 - PAGE_SHIFT);
+    unsigned long nr_special_pages;
+    int rc;
+    long i;
+    DECLARE_DOMCTL;
+
+    if ((image_size > 12 * MEM_M) || (image_size & (PAGE_SIZE - 1))) {
+        PERROR("Guest firmware size is incorrect [%ld]?", image_size);
+        return -1;
+    }
+
+    pfn_list = malloc(nr_pages * sizeof(xen_pfn_t));
+    if (pfn_list == NULL) {
+        PERROR("Could not allocate memory.\n");
+        return -1;
+    }
+
+    // Allocate pfns for normal memory.
+    for (i = 0; i < (dom_memsize >> PAGE_SHIFT); i++)
+        pfn_list[i] = i;
+
+    // Memory above 3G is shifted up by 1G: 3G ~ 4G is reserved for MMIO, GFW, etc.
+    for (i = (MMIO_START >> PAGE_SHIFT); i < (dom_memsize >> PAGE_SHIFT); i++)
+        pfn_list[i] += ((1 * MEM_G) >> PAGE_SHIFT);
+
+    // Allocate memory for VTI guest.
+    rc = xc_domain_memory_populate_physmap(xc_handle, dom, nr_pages,
+                                           0, 0, &pfn_list[0]);
+    if (rc != 0) {
+        PERROR("Could not allocate normal memory for Vti guest.\n");
+        goto error_out;
+    }
+
+    // We allocate additional pfns for the GFW and the other five special
+    // pages, so pfn_list is not contiguous.  Because of this we must keep
+    // supporting the old xc_ia64_get_pfn_list() interface.
+    for (i = 0; i < GFW_PAGES; i++)
+        pfn_list[i] = (GFW_START >> PAGE_SHIFT) + i;
+
+    rc = xc_domain_memory_populate_physmap(xc_handle, dom, GFW_PAGES,
+                                           0, 0, &pfn_list[0]);
+    if (rc != 0) {
+        PERROR("Could not allocate GFW memory for Vti guest.\n");
+        goto error_out;
+    }
+
+#define CONSOLE_PAGE_START IO_PAGE_START
+
+    nr_special_pages = 0;
+    pfn_list[nr_special_pages] = CONSOLE_PAGE_START >> PAGE_SHIFT;
+    nr_special_pages++;
+    pfn_list[nr_special_pages] = STORE_PAGE_START >> PAGE_SHIFT;
+    nr_special_pages++;
+
+    *console_mfn = pfn_list[0];
+    *store_mfn = pfn_list[1];
+
+    rc = xc_domain_memory_populate_physmap(xc_handle, dom, nr_special_pages,
+                                           0, 0, &pfn_list[0]);
+    if (rc != 0) {
+        PERROR("Could not allocate console/store/startinfo page.\n");
+        goto error_out;
+    }
+
+    domctl.u.arch_setup.flags = XEN_DOMAINSETUP_hvmstub_guest;
+    domctl.u.arch_setup.bp = 0;
+    domctl.u.arch_setup.maxmem = 0;
+    domctl.cmd = XEN_DOMCTL_arch_setup;
+    domctl.domain = (domid_t)dom;
+    if (xc_domctl(xc_handle, &domctl))
+        goto error_out;
+
+    // Load the guest firmware.
+    if (xc_ia64_copy_to_domain_pages(xc_handle, dom, image,
+                            (GFW_START + GFW_SIZE - image_size) >> PAGE_SHIFT,
+                            image_size >> PAGE_SHIFT)) {
+        PERROR("Could not load guest firmware into domain");
+        goto error_out;
+    }
+
+
+    // Map and clear the special pages (console and xenstore).
+    buffer_page = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
+                                       PROT_READ | PROT_WRITE,
+                                       pfn_list[0]);
+    if (buffer_page == 0)
+        goto error_out;
+    memset(buffer_page, 0, PAGE_SIZE);
+    munmap(buffer_page, PAGE_SIZE);
+
+    buffer_page = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
+                                       PROT_READ | PROT_WRITE,
+                                       pfn_list[1]);
+    if (buffer_page == 0)
+        goto error_out;
+    memset(buffer_page, 0, PAGE_SIZE);
+    munmap(buffer_page, PAGE_SIZE);
+
+    free(pfn_list);
+    return 0;
+
+error_out:
+    free(pfn_list);
+    return -1;
+}
+
+int
+xc_hvmstub_build(int xc_handle,
+                 uint32_t domid,
+                 unsigned int mem_mb,
+                 const char *image_name,
+                 unsigned int store_evtchn,
+                 unsigned long *store_mfn,
+                 unsigned int console_evtchn,
+                 unsigned long *console_mfn)
+{
+    struct xen_domctl launch_domctl;
+    int rc;
+    vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt;
+    char *image = NULL;
+    unsigned long image_size;
+    DECLARE_DOMCTL;
+
+    fprintf (stderr, "Using firmware: %s\n", image_name);
+
+    image = xc_read_image(image_name, &image_size);
+    if (image == NULL) {
+        PERROR("Could not read guest firmware image %s", image_name);
+        goto error_out;
+    }
+
+    domctl.cmd = XEN_DOMCTL_getdomaininfo;
+    domctl.domain = (domid_t)domid;
+    if (xc_domctl(xc_handle, &domctl) < 0) {
+        PERROR("Could not get info on domain");
+        goto error_out;
+    }
+
+    if (setup_guest(xc_handle, domid, (unsigned long)mem_mb, image,
+                    image_size, store_mfn, console_mfn) < 0) {
+        ERROR("Error constructing guest OS");
+        goto error_out;
+    }
+
+    free(image);
+
+    if (lock_pages(&st_ctxt, sizeof(st_ctxt))) {
+        PERROR("Unable to lock_pages ctxt");
+        return 1;
+    }
+
+    memset(ctxt, 0, sizeof(*ctxt));
+
+    /*ctxt->regs.ip = 0x80000000ffffffb0UL; */
+    ctxt->regs.ip = 0x80000000ff800000;
+    ctxt->regs.ar.fpsr = xc_ia64_fpsr_default();
+    ctxt->regs.cr.isr = 1UL << 63;
+    ctxt->regs.psr = IA64_PSR_AC | IA64_PSR_BN;
+    ctxt->regs.cr.dcr = 0;
+    ctxt->regs.cr.pta = 15 << 2;
+
+    /* Boot parameters.  */
+    ctxt->regs.r[8] = (*store_mfn << PAGE_SHIFT) | store_evtchn;
+    ctxt->regs.r[9] = (*console_mfn << PAGE_SHIFT) | console_evtchn;
+    ctxt->regs.r[10] = mem_mb;
+    ctxt->regs.r[11] = domctl.u.getdomaininfo.max_vcpu_id + 1;
+
+    memset(&launch_domctl, 0, sizeof(launch_domctl));
+
+    launch_domctl.domain = (domid_t)domid;
+    launch_domctl.u.vcpucontext.vcpu = 0;
+    set_xen_guest_handle(launch_domctl.u.vcpucontext.ctxt, ctxt);
+
+    launch_domctl.cmd = XEN_DOMCTL_setvcpucontext;
+    rc = do_domctl(xc_handle, &launch_domctl);
+    unlock_pages(&st_ctxt, sizeof(st_ctxt));
+    return rc;
+
+error_out:
+    free(image);
+    return -1;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r 092232fa1fbd tools/libxc/xenguest.h
--- a/tools/libxc/xenguest.h    Thu Nov 22 03:34:09 2007 +0100
+++ b/tools/libxc/xenguest.h    Thu Nov 22 03:31:56 2007 +0100
@@ -136,6 +136,15 @@ int xc_hvm_build_mem(int xc_handle,
                      const char *image_buffer,
                      unsigned long image_size);
 
+int xc_hvmstub_build(int xc_handle,
+                     uint32_t domid,
+                     unsigned int mem_mb,
+                     const char *image_name,
+                     unsigned int store_evtchn,
+                     unsigned long *store_mfn,
+                     unsigned int console_evtchn,
+                     unsigned long *console_mfn);
+
 /* PowerPC specific. */
 int xc_prose_build(int xc_handle,
                    uint32_t domid,
diff -r 092232fa1fbd tools/libxc/xg_private.c
--- a/tools/libxc/xg_private.c  Thu Nov 22 03:34:09 2007 +0100
+++ b/tools/libxc/xg_private.c  Thu Nov 22 03:31:56 2007 +0100
@@ -197,6 +197,21 @@ __attribute__((weak))
     errno = ENOSYS;
     return -1;
 }
+
+__attribute__((weak))
+int xc_hvmstub_build(int xc_handle,
+                     uint32_t domid,
+                     unsigned int mem_mb,
+                     const char *image_name,
+                     unsigned int store_evtchn,
+                     unsigned long *store_mfn,
+                     unsigned int console_evtchn,
+                     unsigned long *console_mfn)
+{
+    errno = ENOSYS;
+    return -1;
+}
+
 
 /*
  * Local variables:
diff -r 092232fa1fbd tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Thu Nov 22 03:34:09 2007 +0100
+++ b/tools/python/xen/lowlevel/xc/xc.c Thu Nov 22 03:31:56 2007 +0100
@@ -653,6 +653,39 @@ static PyObject *pyxc_hvm_build(XcObject
     return Py_BuildValue("{}");
 }
 
+static PyObject *pyxc_hvmstub_build(XcObject *self,
+                                    PyObject *args,
+                                    PyObject *kwds)
+{
+    uint32_t dom;
+    char *image;
+    int store_evtchn, console_evtchn;
+    unsigned int mem_mb;
+    char *features = NULL;
+    unsigned long store_mfn = 0;
+    unsigned long console_mfn = 0;
+
+    static char *kwd_list[] = { "domid", "store_evtchn", "memsize",
+                                "console_evtchn", "image",
+                                "features", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iiiis|s", kwd_list,
+                                      &dom, &store_evtchn, &mem_mb,
+                                      &console_evtchn, &image,
+                                      /* optional */
+                                      &features) )
+        return NULL;
+
+    if ( xc_hvmstub_build(self->xc_handle, dom, mem_mb, image,
+                          store_evtchn, &store_mfn,
+                          console_evtchn, &console_mfn) != 0 )
+        return pyxc_error_to_exception();
+
+    return Py_BuildValue("{s:i,s:i}",
+                         "store_mfn", store_mfn,
+                         "console_mfn", console_mfn);
+}
+
 static PyObject *pyxc_evtchn_alloc_unbound(XcObject *self,
                                            PyObject *args,
                                            PyObject *kwds)
@@ -1392,6 +1425,14 @@ static PyMethodDef pyxc_methods[] = {
       " dom     [int]:      Identifier of domain to build into.\n"
       " image   [str]:      Name of HVM loader image file.\n"
       " vcpus   [int, 1]:   Number of Virtual CPUS in domain.\n\n"
+      "Returns: [int] 0 on success; -1 on error.\n" },
+
+    { "hvmstub_build", 
+      (PyCFunction)pyxc_hvmstub_build, 
+      METH_VARARGS | METH_KEYWORDS, "\n"
+      "Build a new HVMStub guest OS.\n"
+      " dom     [int]:      Identifier of domain to build into.\n"
+      " image   [str]:      Name of HVM loader image file.\n"
       "Returns: [int] 0 on success; -1 on error.\n" },
 
     { "hvm_get_param", 
diff -r 092232fa1fbd tools/python/xen/xend/image.py
--- a/tools/python/xen/xend/image.py    Thu Nov 22 03:34:09 2007 +0100
+++ b/tools/python/xen/xend/image.py    Thu Nov 22 03:31:56 2007 +0100
@@ -570,6 +570,45 @@ class IA64_Linux_ImageHandler(LinuxImage
         self.vhpt = int(vmConfig['platform'].get('vhpt',  0))
 
 
+class IA64_HVMStub_ImageHandler(ImageHandler):
+
+    ostype = "hvmstub"
+
+    def getRequiredAvailableMemory(self, mem_kb):
+        page_kb = 16
+        # ROM size for guest firmware, io page, xenstore page
+        # buffer io page, buffer pio page and memmap info page
+        extra_pages = 1024 + 5
+        return mem_kb + extra_pages * page_kb
+
+    def getRequiredInitialReservation(self):
+        return self.vm.getMemoryTarget()
+
+    def getRequiredShadowMemory(self, shadow_mem_kb, maxmem_kb):
+        # Explicit shadow memory is not a concept on ia64.
+        return 0
+
+    def buildDomain(self):
+        store_evtchn = self.vm.getStorePort()
+        console_evtchn = self.vm.getConsolePort()
+
+        mem_mb = self.getRequiredInitialReservation() / 1024
+
+        log.debug("domid          = %d", self.vm.getDomid())
+        log.debug("memsize        = %d", mem_mb)
+        log.debug("image          = %s", self.kernel)
+        log.debug("store_evtchn   = %d", store_evtchn)
+        log.debug("console_evtchn = %d", console_evtchn)
+        log.debug("features       = %s", self.vm.getFeatures())
+
+        return xc.hvmstub_build(domid          = self.vm.getDomid(),
+                                memsize        = mem_mb,
+                                image          = self.kernel,
+                                store_evtchn   = store_evtchn,
+                                console_evtchn = console_evtchn,
+                                features       = self.vm.getFeatures())
+
+
 class X86_HVM_ImageHandler(HVMImageHandler):
 
     def configure(self, vmConfig):
@@ -616,6 +655,7 @@ _handlers = {
     "ia64": {
         "linux": IA64_Linux_ImageHandler,
         "hvm": IA64_HVM_ImageHandler,
+        "hvmstub": IA64_HVMStub_ImageHandler,
     },
     "x86": {
         "linux": X86_Linux_ImageHandler,

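For reference, a minimal sketch of how the new python binding could be driven directly
from the interpreter (illustrative only: the domain id, event-channel ports and firmware
path below are placeholders; in practice xend's IA64_HVMStub_ImageHandler.buildDomain()
shown above supplies the real values from the domain configuration):

    import xen.lowlevel.xc

    xc = xen.lowlevel.xc.xc()

    # Placeholder values; a real caller gets these from the domain config.
    rc = xc.hvmstub_build(domid          = 1,
                          memsize        = 256,                    # MB
                          image          = "/path/to/hvmstub.fw",  # hypothetical path
                          store_evtchn   = 1,
                          console_evtchn = 2)

    # pyxc_hvmstub_build() returns the xenstore and console frame numbers.
    print rc["store_mfn"], rc["console_mfn"]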
_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ia64-devel


 

