
[PATCH v2 6/6] xen/arm: retrieve reserved pages on populate_physmap


  • To: <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Penny Zheng <Penny.Zheng@xxxxxxx>
  • Date: Mon, 18 Apr 2022 20:22:51 +0800
  • Cc: <wei.chen@xxxxxxx>, Penny Zheng <Penny.Zheng@xxxxxxx>, Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, George Dunlap <george.dunlap@xxxxxxxxxx>, "Jan Beulich" <jbeulich@xxxxxxxx>, Julien Grall <julien@xxxxxxx>, "Stefano Stabellini" <sstabellini@xxxxxxxxxx>, Wei Liu <wl@xxxxxxx>, Penny Zheng <penny.zheng@xxxxxxx>
  • Delivery-date: Mon, 18 Apr 2022 12:24:25 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

When a static domain populates memory through populate_physmap at runtime,
it shall retrieve reserved pages from resv_page_list rather than allocating
from the heap, so that guest RAM is still restricted to the statically
configured memory regions. This commit introduces a new helper,
acquire_reserved_page, to make that work.

Signed-off-by: Penny Zheng <penny.zheng@xxxxxxx>
---
v2 changes:
- introduce acquire_reserved_page to retrieve reserved pages from
resv_page_list
- forbid non-zero-order requests in populate_physmap
- let is_domain_static return ((void)(d), false) on x86
---
 xen/common/memory.c     | 29 +++++++++++++++++++++++++++++
 xen/common/page_alloc.c | 28 ++++++++++++++++++++++++++++
 xen/include/xen/mm.h    |  1 +
 3 files changed, 58 insertions(+)

diff --git a/xen/common/memory.c b/xen/common/memory.c
index 69b0cd1e50..f7729dfa5c 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -35,6 +35,10 @@
 #include <asm/guest.h>
 #endif
 
+#ifndef is_domain_static
+#define is_domain_static(d) ((void)(d), false)
+#endif
+
 struct memop_args {
     /* INPUT */
     struct domain *domain;     /* Domain to be affected. */
@@ -245,6 +249,31 @@ static void populate_physmap(struct memop_args *a)
 
                 mfn = _mfn(gpfn);
             }
+#ifdef CONFIG_STATIC_MEMORY
+            else if ( is_domain_static(d) )
+            {
+                /*
+                 * No easy way to guarantee the retrieved pages are contiguous,
+                 * so forbid non-zero-order requests here.
+                 */
+                if ( a->extent_order != 0 )
+                {
+                    gdprintk(XENLOG_INFO,
+                             "Could not allocate non-zero-order pages for static %pd.\n",
+                             d);
+                    goto out;
+                }
+
+                mfn = acquire_reserved_page(d, a->memflags);
+                if ( mfn_eq(mfn, INVALID_MFN) )
+                {
+                    gdprintk(XENLOG_INFO,
+                             "%pd: failed to retrieve a reserved page.\n",
+                             d);
+                    goto out;
+                }
+            }
+#endif
             else
             {
                 page = alloc_domheap_pages(d, a->extent_order, a->memflags);
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 8ba38bca9a..759d612eb8 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -2770,6 +2770,34 @@ int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
 
     return 0;
 }
+
+/*
+ * Acquire a page from the reserved page list (resv_page_list) when
+ * populating memory for a static domain at runtime.
+ */
+mfn_t acquire_reserved_page(struct domain *d, unsigned int memflags)
+{
+    struct page_info *page;
+    mfn_t smfn;
+
+    /* Acquire a page from the reserved page list (resv_page_list). */
+    page = page_list_remove_head(&d->resv_page_list);
+    if ( unlikely(!page) )
+    {
+        /* "page" is NULL on this path, so there is no MFN to report. */
+        printk(XENLOG_ERR
+               "%pd: failed to acquire a reserved page: resv_page_list is empty.\n",
+               d);
+        return INVALID_MFN;
+    }
+    d->resv_pages--;
+
+    smfn = page_to_mfn(page);
+
+    if ( acquire_domstatic_pages(d, smfn, 1, memflags) )
+    {
+        /* Put the page back on resv_page_list so it is not leaked. */
+        page_list_add_tail(page, &d->resv_page_list);
+        d->resv_pages++;
+        return INVALID_MFN;
+    }
+
+    return smfn;
+}
 #endif
 
 /*
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 3be754da92..6ef5a6adcd 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -91,6 +91,7 @@ void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
                           bool need_scrub);
 int acquire_domstatic_pages(struct domain *d, mfn_t smfn, unsigned int nr_mfns,
                             unsigned int memflags);
+mfn_t acquire_reserved_page(struct domain *d, unsigned int memflags);
 #endif
 
 /* Map machine page range in Xen virtual address space. */
-- 
2.25.1
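
For readers who want to see the allocation policy in isolation: below is a
minimal, self-contained C model of the path added above, not Xen code. A
static domain's RAM may only come from its reserved page list, so each
request pops one page from that list and fails cleanly once the list is
exhausted; there is no fallback to the heap. The struct page_info,
struct domain and main() driver here are simplified stand-ins for the real
Xen types and page-list API, and the MFN values are made up for illustration.

/*
 * Stand-alone model of the reserved-page path: a static domain's RAM
 * comes only from its resv_page_list, so populate-style requests pop
 * pages from that list and report failure once it is exhausted.
 */
#include <stdio.h>

#define INVALID_MFN  (~0UL)

struct page_info {
    unsigned long mfn;
    struct page_info *next;
};

struct domain {
    struct page_info *resv_page_list;   /* reserved pages, singly linked */
    unsigned int resv_pages;            /* pages remaining on the list   */
};

/* Model of acquire_reserved_page(): pop one page or report failure. */
static unsigned long acquire_reserved_page(struct domain *d)
{
    struct page_info *page = d->resv_page_list;

    if ( !page )
        return INVALID_MFN;

    d->resv_page_list = page->next;
    d->resv_pages--;

    return page->mfn;
}

int main(void)
{
    struct page_info pages[3] = {
        { .mfn = 0x40000 }, { .mfn = 0x40001 }, { .mfn = 0x40002 },
    };
    struct domain d = { .resv_pages = 3 };
    unsigned int i;

    /* Build the reserved page list from the statically configured pages. */
    for ( i = 0; i < 3; i++ )
    {
        pages[i].next = d.resv_page_list;
        d.resv_page_list = &pages[i];
    }

    /* Ask for one page more than is reserved: the last request must fail. */
    for ( i = 0; i < 4; i++ )
    {
        unsigned long mfn = acquire_reserved_page(&d);

        if ( mfn == INVALID_MFN )
            printf("request %u: no reserved pages left\n", i);
        else
            printf("request %u: got mfn %#lx\n", i, mfn);
    }

    return 0;
}

Run as-is, it prints three acquired MFNs followed by a failure for the
fourth request, mirroring the INVALID_MFN error path in populate_physmap
above.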
