
[Xen-devel] [PATCH v9 6/9] libxc/xen: introduce a start info structure for HVMlite guests



This structure contains the physical address of the command line as well as
the physical address of the list of loaded modules. The physical address of
the structure itself is passed to the guest in the %ebx register at boot time.
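
For illustration only (not part of this patch), a guest's early boot code
could consume the structure roughly as follows. parse_start_info(),
use_cmdline() and use_module() are hypothetical names, and the sketch
assumes the physical addresses in the structure are directly accessible
at this point:

#include <stdint.h>

#define HVM_START_MAGIC_VALUE 0x336ec578

struct hvm_start_info {
    uint32_t magic;
    uint32_t flags;
    uint32_t cmdline_paddr;
    uint32_t nr_modules;
    uint32_t modlist_paddr;
};

struct hvm_modlist_entry {
    uint32_t paddr;
    uint32_t size;
};

/* Hypothetical consumers, not defined here. */
extern void use_cmdline(const char *cmdline);
extern void use_module(uint32_t paddr, uint32_t size);

/* ebx holds the value of the %ebx register at guest entry. */
void parse_start_info(uint32_t ebx)
{
    const struct hvm_start_info *si =
        (const struct hvm_start_info *)(uintptr_t)ebx;
    uint32_t i;

    if ( si->magic != HVM_START_MAGIC_VALUE )
        return;                 /* Not started via this entry point. */

    /* Nothing is loaded at physical address 0, so 0 means "not present". */
    if ( si->cmdline_paddr )
        use_cmdline((const char *)(uintptr_t)si->cmdline_paddr);

    if ( si->modlist_paddr && si->nr_modules )
    {
        const struct hvm_modlist_entry *mod =
            (const struct hvm_modlist_entry *)(uintptr_t)si->modlist_paddr;

        for ( i = 0; i < si->nr_modules; i++ )
            use_module(mod[i].paddr, mod[i].size);
    }
}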

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Acked-by: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
Cc: Ian Campbell <ian.campbell@xxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
Changes since v8:
 - Slightly reword comment.

Changes since v7:
 - Add a comment to clarify that nothing will be loaded at physical address
   0.
 - Add Andrew Cooper's Reviewed-by.
 - Add Wei Liu's Ack.

Changes since v6:
 - Add a check to make sure the start info data is placed below 4GB.
 - Make sure byte addresses are treated as uintptr_t.
 - Fix single-line comment.

Changes since v5:
 - Change some of the calculations performed to get the total size of the
   start_info region.
 - Replace the mention of HVMlite in a comment with PVH.
 - Don't use 64-bit integers in hvm_modlist_entry.
---
 tools/libxc/xc_dom_x86.c | 68 +++++++++++++++++++++++++++++++++++++++++++++++-
 xen/include/public/xen.h | 23 ++++++++++++++++
 2 files changed, 90 insertions(+), 1 deletion(-)

diff --git a/tools/libxc/xc_dom_x86.c b/tools/libxc/xc_dom_x86.c
index 341cf6e..dae75a5 100644
--- a/tools/libxc/xc_dom_x86.c
+++ b/tools/libxc/xc_dom_x86.c
@@ -623,7 +623,70 @@ static int alloc_magic_pages_hvm(struct xc_dom_image *dom)
     xc_hvm_param_set(xch, domid, HVM_PARAM_SHARING_RING_PFN,
                      special_pfn(SPECIALPAGE_SHARING));
 
-    if ( dom->device_model )
+    if ( !dom->device_model )
+    {
+        struct xc_dom_seg seg;
+        struct hvm_start_info *start_info;
+        char *cmdline;
+        struct hvm_modlist_entry *modlist;
+        void *start_page;
+        size_t cmdline_size = 0;
+        size_t start_info_size = sizeof(*start_info);
+
+        if ( dom->cmdline )
+        {
+            cmdline_size = ROUNDUP(strlen(dom->cmdline) + 1, 8);
+            start_info_size += cmdline_size;
+
+        }
+        if ( dom->ramdisk_blob )
+            start_info_size += sizeof(*modlist); /* Limited to one module. */
+
+        rc = xc_dom_alloc_segment(dom, &seg, "HVMlite start info", 0,
+                                  start_info_size);
+        if ( rc != 0 )
+        {
+            DOMPRINTF("Unable to reserve memory for the start info");
+            goto out;
+        }
+
+        start_page = xc_map_foreign_range(xch, domid, start_info_size,
+                                          PROT_READ | PROT_WRITE,
+                                          seg.pfn);
+        if ( start_page == NULL )
+        {
+            DOMPRINTF("Unable to map HVM start info page");
+            goto error_out;
+        }
+
+        start_info = start_page;
+        cmdline = start_page + sizeof(*start_info);
+        modlist = start_page + sizeof(*start_info) + cmdline_size;
+
+        if ( dom->cmdline )
+        {
+            strncpy(cmdline, dom->cmdline, MAX_GUEST_CMDLINE);
+            cmdline[MAX_GUEST_CMDLINE - 1] = '\0';
+            start_info->cmdline_paddr = (seg.pfn << PAGE_SHIFT) +
+                                ((uintptr_t)cmdline - (uintptr_t)start_info);
+        }
+
+        if ( dom->ramdisk_blob )
+        {
+            modlist[0].paddr = dom->ramdisk_seg.vstart - dom->parms.virt_base;
+            modlist[0].size = dom->ramdisk_seg.vend - dom->ramdisk_seg.vstart;
+            start_info->modlist_paddr = (seg.pfn << PAGE_SHIFT) +
+                                ((uintptr_t)modlist - (uintptr_t)start_info);
+            start_info->nr_modules = 1;
+        }
+
+        start_info->magic = HVM_START_MAGIC_VALUE;
+
+        munmap(start_page, start_info_size);
+
+        dom->start_info_pfn = seg.pfn;
+    }
+    else
     {
         /*
          * Allocate and clear additional ioreq server pages. The default
@@ -1022,6 +1085,9 @@ static int vcpu_hvm(struct xc_dom_image *dom)
     /* Set the IP. */
     bsp_ctx.cpu.rip = dom->parms.phys_entry;
 
+    if ( dom->start_info_pfn )
+        bsp_ctx.cpu.rbx = dom->start_info_pfn << PAGE_SHIFT;
+
     /* Set the end descriptor. */
     bsp_ctx.end_d.typecode = HVM_SAVE_CODE(END);
     bsp_ctx.end_d.instance = 0;
diff --git a/xen/include/public/xen.h b/xen/include/public/xen.h
index ff5547e..7b629b1 100644
--- a/xen/include/public/xen.h
+++ b/xen/include/public/xen.h
@@ -784,6 +784,29 @@ struct start_info {
 };
 typedef struct start_info start_info_t;
 
+/*
+ * Start of day structure passed to PVH guests in %ebx.
+ *
+ * NOTE: nothing will be loaded at physical address 0, so
+ * a 0 value in any of the address fields should be treated
+ * as not present.
+ */
+struct hvm_start_info {
+#define HVM_START_MAGIC_VALUE 0x336ec578
+    uint32_t magic;             /* Contains the magic value 0x336ec578       */
+                                /* ("xEn3" with the 0x80 bit of the "E" set).*/
+    uint32_t flags;             /* SIF_xxx flags.                            */
+    uint32_t cmdline_paddr;     /* Physical address of the command line.     */
+    uint32_t nr_modules;        /* Number of modules passed to the kernel.   */
+    uint32_t modlist_paddr;     /* Physical address of an array of           */
+                                /* hvm_modlist_entry.                        */
+};
+
+struct hvm_modlist_entry {
+    uint32_t paddr;             /* Physical address of the module.           */
+    uint32_t size;              /* Size of the module in bytes.              */
+};
+
 /* New console union for dom0 introduced in 0x00030203. */
 #if __XEN_INTERFACE_VERSION__ < 0x00030203
 #define console_mfn    console.domU.mfn
-- 
1.9.5 (Apple Git-50.3)
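
As an aside (not part of the patch), the guest-physical addresses stored in
cmdline_paddr and modlist_paddr above are simply the segment's base address
(seg.pfn << PAGE_SHIFT) plus each object's offset within the locally mapped
segment. A minimal standalone sketch of that arithmetic, using made-up values
for seg.pfn and the offsets:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Guest-physical address of 'obj': segment base plus the object's offset
 * inside the locally mapped copy of the segment. */
static uint64_t guest_paddr(uint64_t seg_pfn, const void *mapping,
                            const void *obj)
{
    return (seg_pfn << PAGE_SHIFT) +
           ((uintptr_t)obj - (uintptr_t)mapping);
}

int main(void)
{
    uint64_t seg_pfn = 0x1000;          /* example value for seg.pfn */
    char mapping[4096];                 /* stands in for the mapped segment */

    /* Layout used by the patch: start_info first, then the (padded)
     * command line, then the module list.  The offsets are examples. */
    const void *start_info = mapping;
    const void *cmdline    = mapping + 20;      /* 20 == sizeof(hvm_start_info) */
    const void *modlist    = mapping + 20 + 32; /* after a padded command line */

    printf("start_info %#" PRIx64 " cmdline %#" PRIx64 " modlist %#" PRIx64 "\n",
           guest_paddr(seg_pfn, mapping, start_info),
           guest_paddr(seg_pfn, mapping, cmdline),
           guest_paddr(seg_pfn, mapping, modlist));
    return 0;
}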

