
[Xen-devel] [PATCH v2 4/4] x86/shim: only mark special pages as RAM in pvshim mode



When running Xen as a regular guest (i.e. not as a PV shim) it's not
necessary to mark such pages as RAM, because they won't be assigned to
the initial domain's memory map.

While there, move the functions to the PV shim-specific file and rename
them accordingly.

No functional change expected.

Reported-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: "Roger Pau Monné" <roger.pau@xxxxxxxxxx>
Cc: George Dunlap <George.Dunlap@xxxxxxxxxxxxx>
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Julien Grall <julien.grall@xxxxxxx>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Tim Deegan <tim@xxxxxxx>
---
 xen/arch/x86/e820.c             |  4 +--
 xen/arch/x86/guest/xen.c        | 42 --------------------------------
 xen/arch/x86/pv/shim.c          | 43 +++++++++++++++++++++++++++++++++
 xen/common/page_alloc.c         |  4 +--
 xen/include/asm-x86/guest/xen.h | 13 ----------
 xen/include/asm-x86/pv/shim.h   | 12 +++++++++
 6 files changed, 59 insertions(+), 59 deletions(-)

diff --git a/xen/arch/x86/e820.c b/xen/arch/x86/e820.c
index 2407367c57..32a4e9a57e 100644
--- a/xen/arch/x86/e820.c
+++ b/xen/arch/x86/e820.c
@@ -756,8 +756,8 @@ unsigned long __init init_e820(const char *str, struct e820map *raw)
 
     machine_specific_memory_setup(raw);
 
-    if ( xen_guest )
-        hypervisor_fixup_e820(&e820);
+    if ( pv_shim )
+        pv_shim_fixup_e820(&e820);
 
     printk("%s RAM map:\n", str);
     print_e820_memory_map(e820.map, e820.nr_map);
diff --git a/xen/arch/x86/guest/xen.c b/xen/arch/x86/guest/xen.c
index 8cee880adc..7b7a5badab 100644
--- a/xen/arch/x86/guest/xen.c
+++ b/xen/arch/x86/guest/xen.c
@@ -40,7 +40,6 @@ bool __read_mostly xen_guest;
 static __read_mostly uint32_t xen_cpuid_base;
 extern char hypercall_page[];
 static struct rangeset *mem;
-static struct platform_bad_page __initdata reserved_pages[2];
 
 DEFINE_PER_CPU(unsigned int, vcpu_id);
 
@@ -302,47 +301,6 @@ int hypervisor_free_unused_page(mfn_t mfn)
     return rangeset_remove_range(mem, mfn_x(mfn), mfn_x(mfn));
 }
 
-static void __init mark_pfn_as_ram(struct e820map *e820, uint64_t pfn)
-{
-    if ( !e820_add_range(e820, pfn << PAGE_SHIFT,
-                         (pfn << PAGE_SHIFT) + PAGE_SIZE, E820_RAM) )
-        if ( !e820_change_range_type(e820, pfn << PAGE_SHIFT,
-                                     (pfn << PAGE_SHIFT) + PAGE_SIZE,
-                                     E820_RESERVED, E820_RAM) )
-            panic("Unable to add/change memory type of pfn %#lx to RAM\n", pfn);
-}
-
-void __init hypervisor_fixup_e820(struct e820map *e820)
-{
-    uint64_t pfn = 0;
-    unsigned int i = 0;
-    long rc;
-
-    ASSERT(xen_guest);
-
-#define MARK_PARAM_RAM(p) ({                    \
-    rc = xen_hypercall_hvm_get_param(p, &pfn);  \
-    if ( rc )                                   \
-        panic("Unable to get " #p "\n");        \
-    mark_pfn_as_ram(e820, pfn);                 \
-    ASSERT(i < ARRAY_SIZE(reserved_pages));     \
-    reserved_pages[i++].mfn = pfn;              \
-})
-    MARK_PARAM_RAM(HVM_PARAM_STORE_PFN);
-    if ( !pv_console )
-        MARK_PARAM_RAM(HVM_PARAM_CONSOLE_PFN);
-#undef MARK_PARAM_RAM
-}
-
-const struct platform_bad_page *__init hypervisor_reserved_pages(unsigned int *size)
-{
-    ASSERT(xen_guest);
-
-    *size = ARRAY_SIZE(reserved_pages);
-
-    return reserved_pages;
-}
-
 uint32_t hypervisor_cpuid_base(void)
 {
     return xen_cpuid_base;
diff --git a/xen/arch/x86/pv/shim.c b/xen/arch/x86/pv/shim.c
index cdc72f787d..551bc5c8d8 100644
--- a/xen/arch/x86/pv/shim.c
+++ b/xen/arch/x86/pv/shim.c
@@ -54,6 +54,8 @@ static DEFINE_SPINLOCK(grant_lock);
 static PAGE_LIST_HEAD(balloon);
 static DEFINE_SPINLOCK(balloon_lock);
 
+static struct platform_bad_page __initdata reserved_pages[2];
+
 static long pv_shim_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg);
 static long pv_shim_grant_table_op(unsigned int cmd,
                                    XEN_GUEST_HANDLE_PARAM(void) uop,
@@ -113,6 +115,47 @@ uint64_t pv_shim_mem(uint64_t avail)
     return shim_nrpages;
 }
 
+static void __init mark_pfn_as_ram(struct e820map *e820, uint64_t pfn)
+{
+    if ( !e820_add_range(e820, pfn << PAGE_SHIFT,
+                         (pfn << PAGE_SHIFT) + PAGE_SIZE, E820_RAM) )
+        if ( !e820_change_range_type(e820, pfn << PAGE_SHIFT,
+                                     (pfn << PAGE_SHIFT) + PAGE_SIZE,
+                                     E820_RESERVED, E820_RAM) )
+            panic("Unable to add/change memory type of pfn %#lx to RAM\n", pfn);
+}
+
+void pv_shim_fixup_e820(struct e820map *e820)
+{
+    uint64_t pfn = 0;
+    unsigned int i = 0;
+    long rc;
+
+    ASSERT(xen_guest);
+
+#define MARK_PARAM_RAM(p) ({                    \
+    rc = xen_hypercall_hvm_get_param(p, &pfn);  \
+    if ( rc )                                   \
+        panic("Unable to get " #p "\n");        \
+    mark_pfn_as_ram(e820, pfn);                 \
+    ASSERT(i < ARRAY_SIZE(reserved_pages));     \
+    reserved_pages[i++].mfn = pfn;              \
+})
+    MARK_PARAM_RAM(HVM_PARAM_STORE_PFN);
+    if ( !pv_console )
+        MARK_PARAM_RAM(HVM_PARAM_CONSOLE_PFN);
+#undef MARK_PARAM_RAM
+}
+
+const struct platform_bad_page *__init pv_shim_reserved_pages(unsigned int *size)
+{
+    ASSERT(xen_guest);
+
+    *size = ARRAY_SIZE(reserved_pages);
+
+    return reserved_pages;
+}
+
 #define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER| \
                  _PAGE_GUEST_KERNEL)
 #define COMPAT_L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index e591601f9c..37a52aaa0d 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -348,9 +348,9 @@ void __init init_boot_pages(paddr_t ps, paddr_t pe)
         }
     }
 
-    if ( xen_guest )
+    if ( pv_shim )
     {
-        badpage = hypervisor_reserved_pages(&array_size);
+        badpage = pv_shim_reserved_pages(&array_size);
         if ( badpage )
         {
             for ( i = 0; i < array_size; i++ )
diff --git a/xen/include/asm-x86/guest/xen.h b/xen/include/asm-x86/guest/xen.h
index 6f15e24b6b..7e04e4a7ab 100644
--- a/xen/include/asm-x86/guest/xen.h
+++ b/xen/include/asm-x86/guest/xen.h
@@ -36,8 +36,6 @@ void hypervisor_setup(void);
 void hypervisor_ap_setup(void);
 int hypervisor_alloc_unused_page(mfn_t *mfn);
 int hypervisor_free_unused_page(mfn_t mfn);
-void hypervisor_fixup_e820(struct e820map *e820);
-const struct platform_bad_page *hypervisor_reserved_pages(unsigned int *size);
 uint32_t hypervisor_cpuid_base(void);
 void hypervisor_resume(void);
 
@@ -60,17 +58,6 @@ static inline void hypervisor_ap_setup(void)
     ASSERT_UNREACHABLE();
 }
 
-static inline void hypervisor_fixup_e820(struct e820map *e820)
-{
-    ASSERT_UNREACHABLE();
-}
-
-static inline const struct platform_bad_page *hypervisor_reserved_pages(unsigned int *size)
-{
-    ASSERT_UNREACHABLE();
-    return NULL;
-}
-
 #endif /* CONFIG_XEN_GUEST */
 #endif /* __X86_GUEST_XEN_H__ */
 
diff --git a/xen/include/asm-x86/pv/shim.h b/xen/include/asm-x86/pv/shim.h
index fb739772df..b8818dfde7 100644
--- a/xen/include/asm-x86/pv/shim.h
+++ b/xen/include/asm-x86/pv/shim.h
@@ -43,6 +43,8 @@ void pv_shim_online_memory(unsigned int nr, unsigned int order);
 void pv_shim_offline_memory(unsigned int nr, unsigned int order);
 domid_t get_initial_domain_id(void);
 uint64_t pv_shim_mem(uint64_t avail);
+void pv_shim_fixup_e820(struct e820map *e820);
+const struct platform_bad_page *pv_shim_reserved_pages(unsigned int *size);
 
 #else
 
@@ -91,6 +93,16 @@ static inline uint64_t pv_shim_mem(uint64_t avail)
     ASSERT_UNREACHABLE();
     return 0;
 }
+static inline void pv_shim_fixup_e820(struct e820map *e820)
+{
+    ASSERT_UNREACHABLE();
+}
+static inline const struct platform_bad_page
+*pv_shim_reserved_pages(unsigned int *s)
+{
+    ASSERT_UNREACHABLE();
+    return NULL;
+}
 
 #endif
 
-- 
2.17.2 (Apple Git-113)

