
[PATCH v5 4/7] xen/arm: static memory initialization


  • To: <xen-devel@xxxxxxxxxxxxxxxxxxxx>, <sstabellini@xxxxxxxxxx>, <julien@xxxxxxx>
  • From: Penny Zheng <penny.zheng@xxxxxxx>
  • Date: Tue, 24 Aug 2021 09:50:42 +0000
  • Cc: <Bertrand.Marquis@xxxxxxx>, <Penny.Zheng@xxxxxxx>, <Wei.Chen@xxxxxxx>, <nd@xxxxxxx>
  • Delivery-date: Tue, 24 Aug 2021 09:51:41 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
  • Nodisclaimer: true

This patch introduces static memory initialization during system boot-up.

The new function init_staticmem_pages is responsible for static memory
initialization: it walks the reserved memory banks whose xen_domain flag is
set and hands each bank's pages to free_staticmem_pages.

The helper free_staticmem_pages is the static-memory equivalent of
free_heap_pages: it frees nr_mfns pages of static memory by marking each page
free, without returning the pages to the heap allocator's free lists.
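
As an illustration (not part of the patch; the bank base and size below are
made-up example values), a page-aligned reserved bank is turned into the
arguments of free_staticmem_pages roughly as follows, mirroring what
init_staticmem_pages does:

    /* Illustrative sketch only; the bank base/size values are invented. */
    paddr_t start = 0x30000000;                 /* example bank base */
    paddr_t size  = 0x20000000;                 /* example bank size */

    mfn_t bank_start = _mfn(PFN_UP(start));     /* first frame: 0x30000 */
    unsigned long bank_pages = PFN_DOWN(size);  /* 0x20000 frames */

    /* Frames 0x30000..0x4ffff are marked free as static memory and are
     * never handed to the heap or boot allocator. */
    free_staticmem_pages(mfn_to_page(bank_start), bank_pages, false);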

This commit also introduces the new Kconfig option CONFIG_STATIC_MEMORY,
gated behind UNSUPPORTED, to wrap all static-allocation-related code.

Asynchronous scrubbing for pages of static memory is left as a TODO; for now,
pages are scrubbed synchronously when need_scrub is set.

Signed-off-by: Penny Zheng <penny.zheng@xxxxxxx>
---
v5 changes:
- make CONFIG_STATIC_MEMORY user-selectable and gated by UNSUPPORTED.
- wrap all static-allocation-related code with CONFIG_STATIC_MEMORY,
  even in Arm-specific files.
- make bank_start/bank_end of type mfn_t, and rename bank_size to
  bank_pages.
---
 xen/arch/arm/setup.c    | 31 +++++++++++++++++++++++++++++++
 xen/common/Kconfig      | 17 +++++++++++++++++
 xen/common/page_alloc.c | 22 +++++++++++++++++++++-
 xen/include/xen/mm.h    |  6 ++++++
 4 files changed, 75 insertions(+), 1 deletion(-)

diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
index 63a908e325..44aca9f1b9 100644
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -609,6 +609,29 @@ static void __init init_pdx(void)
     }
 }
 
+#ifdef CONFIG_STATIC_MEMORY
+/* Static memory initialization */
+static void __init init_staticmem_pages(void)
+{
+    unsigned int bank;
+
+    for ( bank = 0 ; bank < bootinfo.reserved_mem.nr_banks; bank++ )
+    {
+        if ( bootinfo.reserved_mem.bank[bank].xen_domain )
+        {
+            mfn_t bank_start = _mfn(PFN_UP(bootinfo.reserved_mem.bank[bank].start));
+            unsigned long bank_pages = PFN_DOWN(bootinfo.reserved_mem.bank[bank].size);
+            mfn_t bank_end = mfn_add(bank_start, bank_pages);
+
+            if ( mfn_x(bank_end) <= mfn_x(bank_start) )
+                return;
+
+            free_staticmem_pages(mfn_to_page(bank_start), bank_pages, false);
+        }
+    }
+}
+#endif
+
 #ifdef CONFIG_ARM_32
 static void __init setup_mm(void)
 {
@@ -736,6 +759,10 @@ static void __init setup_mm(void)
     /* Add xenheap memory that was not already added to the boot allocator. */
     init_xenheap_pages(mfn_to_maddr(xenheap_mfn_start),
                        mfn_to_maddr(xenheap_mfn_end));
+
+#ifdef CONFIG_STATIC_MEMORY
+    init_staticmem_pages();
+#endif
 }
 #else /* CONFIG_ARM_64 */
 static void __init setup_mm(void)
@@ -789,6 +816,10 @@ static void __init setup_mm(void)
 
     setup_frametable_mappings(ram_start, ram_end);
     max_page = PFN_DOWN(ram_end);
+
+#ifdef CONFIG_STATIC_MEMORY
+    init_staticmem_pages();
+#endif
 }
 #endif
 
diff --git a/xen/common/Kconfig b/xen/common/Kconfig
index 0ddd18e11a..514a2c9022 100644
--- a/xen/common/Kconfig
+++ b/xen/common/Kconfig
@@ -67,6 +67,23 @@ config MEM_ACCESS
 config NEEDS_LIBELF
        bool
 
+config STATIC_MEMORY
+       bool "Static Allocation Support (UNSUPPORTED)" if UNSUPPORTED
+       depends on ARM
+       ---help---
+         Static Allocation refers to a system or sub-system (domains) for
+         which memory areas are pre-defined by configuration, using physical
+         address ranges.
+
+         This pre-defined memory, called static memory, is reserved from RAM
+         during system boot-up and is never handed to the heap allocator or
+         the boot allocator for any use.
+
+         When enabled, memory can be statically allocated to a domain using
+         the property "xen,static-mem" defined in the domain configuration.
+
+         If unsure, say N.
+
 menu "Speculative hardening"
 
 config SPECULATIVE_HARDEN_ARRAY
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index a3ee5eca9e..2b4591bc56 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -1519,7 +1519,6 @@ static void free_heap_pages(
     spin_unlock(&heap_lock);
 }
 
-
 /*
  * Following rules applied for page offline:
  * Once a page is broken, it can't be assigned anymore
@@ -2604,6 +2603,27 @@ struct domain *get_pg_owner(domid_t domid)
     return pg_owner;
 }
 
+#ifdef CONFIG_STATIC_MEMORY
+/* Equivalent of free_heap_pages to free nr_mfns pages of static memory. */
+void __init free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
+                                 bool need_scrub)
+{
+    mfn_t mfn = page_to_mfn(pg);
+    unsigned long i;
+
+    for ( i = 0; i < nr_mfns; i++ )
+    {
+        mark_page_free(&pg[i], mfn_add(mfn, i));
+
+        if ( need_scrub )
+        {
+            /* TODO: asynchronous scrubbing for pages of static memory. */
+            scrub_one_page(&pg[i]);
+        }
+    }
+}
+#endif
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 667f9dac83..8e8fb5a615 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -85,6 +85,12 @@ bool scrub_free_pages(void);
 } while ( false )
 #define FREE_XENHEAP_PAGE(p) FREE_XENHEAP_PAGES(p, 0)
 
+#ifdef CONFIG_STATIC_MEMORY
+/* These functions are for static memory */
+void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
+                          bool need_scrub);
+#endif
+
 /* Map machine page range in Xen virtual address space. */
 int map_pages_to_xen(
     unsigned long virt,
-- 
2.25.1