
[PATCH 4/9] xen/arm: static memory initialization


  • To: <xen-devel@xxxxxxxxxxxxxxxxxxxx>, <sstabellini@xxxxxxxxxx>, <julien@xxxxxxx>, <jbeulich@xxxxxxxx>
  • From: Penny Zheng <penny.zheng@xxxxxxx>
  • Date: Mon, 7 Jun 2021 02:43:13 +0000
  • Cc: <Bertrand.Marquis@xxxxxxx>, <Penny.Zheng@xxxxxxx>, <Wei.Chen@xxxxxxx>
  • Delivery-date: Mon, 07 Jun 2021 02:44:07 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

This patch introduces static memory initialization during system RAM boot-up.

The new function init_staticmem_pages is responsible for static memory initialization.

The helper free_staticmem_pages is the equivalent of free_heap_pages, freeing
nr_mfns pages of static memory.
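
As an illustration, a caller with one static bank hands it back roughly as
follows (a minimal sketch mirroring the boot-time call added to setup.c below;
free_one_static_bank is a hypothetical name, not part of this patch):

    /* Sketch: free one static-memory bank as free, reserved pages, no scrubbing.
     * Mirrors the call made from init_staticmem_pages() in the setup.c hunk below. */
    static void __init free_one_static_bank(paddr_t bank_start, paddr_t bank_end)
    {
        paddr_t start = round_pgup(bank_start);
        paddr_t end   = round_pgdown(bank_end);

        if ( end > start )
            free_staticmem_pages(maddr_to_page(start),
                                 (end - start) >> PAGE_SHIFT,
                                 false /* need_scrub */);
    }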

This commit also defines a new helper, free_page, to extract the code common to
free_heap_pages and free_staticmem_pages, such as following the same cache/TLB
coherency policy.

For each page, free_staticmem_pages performs the following extra initialization
steps (condensed in the sketch after this list):
1. Change the page state from inuse to free and set PGC_reserved.
2. Scrub the page synchronously, if needed.
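
Condensed from the page_alloc.c hunks below, the per-page work reduces to
roughly the following (a sketch only; pg, i, need_scrub and the PGC_* handling
are as in the patch):

    /* Only pages currently in use are expected here. */
    BUG_ON((pg[i].count_info & PGC_state) != PGC_state_inuse);

    /* 1. Move the page from inuse to free and mark it reserved. */
    pg[i].count_info = PGC_state_free | PGC_reserved;

    /* 2. Shared tail with free_heap_pages() via free_page(): TLB-flush
     *    bookkeeping and owner clearing, plus a synchronous scrub for
     *    reserved pages (asynchronous scrubbing is left as a TODO). */
    free_page(&pg[i], need_scrub);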

Signed-off-by: Penny Zheng <penny.zheng@xxxxxxx>
---
changes v2:
- rename to nr_mfns
- extract common code from free_heap_pages and free_staticmem_pages
- remove dead code in other archs, including moving some to the arm-specific file
and putting some under CONFIG_ARM
- mark free_staticmem_pages __init
---
 xen/arch/arm/setup.c    | 27 ++++++++++++++
 xen/common/page_alloc.c | 78 +++++++++++++++++++++++++++++++++--------
 xen/include/xen/mm.h    |  6 ++++
 3 files changed, 97 insertions(+), 14 deletions(-)

diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
index 00aad1c194..daafea0abb 100644
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -611,6 +611,30 @@ static void __init init_pdx(void)
     }
 }
 
+/* Static memory initialization */
+static void __init init_staticmem_pages(void)
+{
+    int bank;
+
+    /*
+     * TODO: Consider the NUMA support scenario.
+     */
+    for ( bank = 0 ; bank < bootinfo.static_mem.nr_banks; bank++ )
+    {
+        paddr_t bank_start = bootinfo.static_mem.bank[bank].start;
+        paddr_t bank_size = bootinfo.static_mem.bank[bank].size;
+        paddr_t bank_end = bank_start + bank_size;
+
+        bank_start = round_pgup(bank_start);
+        bank_end = round_pgdown(bank_end);
+        if ( bank_end <= bank_start )
+            continue;
+
+        free_staticmem_pages(maddr_to_page(bank_start),
+                            (bank_end - bank_start) >> PAGE_SHIFT, false);
+    }
+}
+
 #ifdef CONFIG_ARM_32
 static void __init setup_mm(void)
 {
@@ -872,6 +896,9 @@ void __init start_xen(unsigned long boot_phys_offset,
     cmdline_parse(cmdline);
 
     setup_mm();
+    /* Initialize static memory, if any. */
+    if ( bootinfo.static_mem.nr_banks > 0 )
+        init_staticmem_pages();
 
     /* Parse the ACPI tables for possible boot-time configuration */
     acpi_boot_table_init();
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 958ba0cd92..8c00262c04 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -1376,6 +1376,37 @@ bool scrub_free_pages(void)
     return node_to_scrub(false) != NUMA_NO_NODE;
 }
 
+static void free_page(struct page_info *pg, bool need_scrub)
+{
+    mfn_t mfn = page_to_mfn(pg);
+
+    /* If a page has no owner it will need no safety TLB flush. */
+    pg->u.free.need_tlbflush = (page_get_owner(pg) != NULL);
+    if ( pg->u.free.need_tlbflush )
+        page_set_tlbflush_timestamp(pg);
+
+    /* This page is not a guest frame any more. */
+    page_set_owner(pg, NULL); /* set_gpfn_from_mfn snoops pg owner */
+    set_gpfn_from_mfn(mfn_x(mfn), INVALID_M2P_ENTRY);
+
+#ifdef CONFIG_ARM
+    if ( pg->count_info & PGC_reserved )
+    {
+        /* TODO: asynchronous scrubbing. */
+        if ( need_scrub )
+            scrub_one_page(pg);
+        return;
+    }
+#endif
+    if ( need_scrub )
+    {
+        pg->count_info |= PGC_need_scrub;
+        poison_one_page(pg);
+    }
+
+    return;
+}
+
 /* Free 2^@order set of pages. */
 static void free_heap_pages(
     struct page_info *pg, unsigned int order, bool need_scrub)
@@ -1425,20 +1456,7 @@ static void free_heap_pages(
             BUG();
         }
 
-        /* If a page has no owner it will need no safety TLB flush. */
-        pg[i].u.free.need_tlbflush = (page_get_owner(&pg[i]) != NULL);
-        if ( pg[i].u.free.need_tlbflush )
-            page_set_tlbflush_timestamp(&pg[i]);
-
-        /* This page is not a guest frame any more. */
-        page_set_owner(&pg[i], NULL); /* set_gpfn_from_mfn snoops pg owner */
-        set_gpfn_from_mfn(mfn_x(mfn) + i, INVALID_M2P_ENTRY);
-
-        if ( need_scrub )
-        {
-            pg[i].count_info |= PGC_need_scrub;
-            poison_one_page(&pg[i]);
-        }
+        free_page(&pg[i], need_scrub);
     }
 
     avail[node][zone] += 1 << order;
@@ -1512,6 +1530,38 @@ static void free_heap_pages(
     spin_unlock(&heap_lock);
 }
 
+#ifdef CONFIG_STATIC_ALLOCATION
+/* Equivalent of free_heap_pages to free nr_mfns pages of static memory. */
+void __init free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
+                                 bool need_scrub)
+{
+    mfn_t mfn = page_to_mfn(pg);
+    unsigned long i;
+
+    for ( i = 0; i < nr_mfns; i++ )
+    {
+        switch ( pg[i].count_info & PGC_state )
+        {
+        case PGC_state_inuse:
+            BUG_ON(pg[i].count_info & PGC_broken);
+            /* Mark it free and reserved. */
+            pg[i].count_info = PGC_state_free | PGC_reserved;
+            break;
+
+        default:
+            printk(XENLOG_ERR
+                   "Page state shall be only in PGC_state_inuse. "
+                   "pg[%lu] MFN %"PRI_mfn" count_info=%#lx 
tlbflush_timestamp=%#x.\n",
+                   i, mfn_x(mfn) + i,
+                   pg[i].count_info,
+                   pg[i].tlbflush_timestamp);
+            BUG();
+        }
+
+        free_page(&pg[i], need_scrub);
+    }
+}
+#endif
 
 /*
  * Following rules applied for page offline:
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 667f9dac83..df25e55966 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -85,6 +85,12 @@ bool scrub_free_pages(void);
 } while ( false )
 #define FREE_XENHEAP_PAGE(p) FREE_XENHEAP_PAGES(p, 0)
 
+#ifdef CONFIG_ARM
+/* Static Allocation */
+void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
+                          bool need_scrub);
+#endif
+
 /* Map machine page range in Xen virtual address space. */
 int map_pages_to_xen(
     unsigned long virt,
-- 
2.25.1




 

