
[PATCH 6/9] xen/arm: introduce alloc_staticmem_pages and alloc_domstatic_pages


  • To: <xen-devel@xxxxxxxxxxxxxxxxxxxx>, <sstabellini@xxxxxxxxxx>, <julien@xxxxxxx>, <jbeulich@xxxxxxxx>
  • From: Penny Zheng <penny.zheng@xxxxxxx>
  • Date: Mon, 7 Jun 2021 02:43:15 +0000
  • Cc: <Bertrand.Marquis@xxxxxxx>, <Penny.Zheng@xxxxxxx>, <Wei.Chen@xxxxxxx>
  • Delivery-date: Mon, 07 Jun 2021 02:44:14 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

alloc_staticmem_pages aims to allocate nr_mfns contiguous pages of
static memory. It is the equivalent of alloc_heap_pages for static
memory. For now, it only covers allocating at a specified starting
address.

For each page, it checks that the page is reserved (PGC_reserved)
and free. It also performs the necessary initialization, mostly the
same as in alloc_heap_pages, e.g. following the same cache-coherency
policy and changing the page state to PGC_state_inuse.
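
As a sketch, the invariant described above could be written as the
following helper (illustrative only, not part of this patch; it simply
mirrors the checks done inside alloc_staticmem_pages):

    /*
     * Illustrative only: a free page of static memory must carry
     * PGC_reserved, and all remaining count_info bits must encode
     * the free state (i.e. a zero reference count).
     */
    static bool page_is_free_staticmem(const struct page_info *pg)
    {
        return (pg->count_info & PGC_reserved) &&
               (pg->count_info & ~PGC_reserved) == PGC_state_free;
    }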

alloc_domstatic_pages is the equivalent of alloc_domheap_pages for
static memory: it allocates nr_mfns pages of static memory and
assigns them to one specific domain.

It uses alloc_staticmem_pages to get nr_mfns pages of static memory;
on success, it uses assign_pages_nr to assign those pages to the
specified domain.
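
For illustration, a hypothetical caller could look like the snippet
below (the MFN, page count, and error handling are invented for the
example and are not part of this patch):

    /*
     * Hypothetical usage sketch: allocate 64 contiguous pages of
     * static memory starting at MFN 0x80000 and assign them to
     * domain d.
     */
    struct page_info *pg = alloc_domstatic_pages(d, 64, _mfn(0x80000), 0);

    if ( !pg )
        panic("Could not allocate static memory for %pd\n", d);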

Signed-off-by: Penny Zheng <penny.zheng@xxxxxxx>
---
changes v2:
- use mfn_valid() to do validation
- change pfn-named to mfn-named
- put CONFIG_STATIC_ALLOCATION around to remove dead code
- correct off-by-one indentation
- remove meaningless MEMF_no_owner case
- leave zone concept out of DMA limitation check
---
 xen/common/page_alloc.c | 129 ++++++++++++++++++++++++++++++++++++++++
 xen/include/xen/mm.h    |   2 +
 2 files changed, 131 insertions(+)

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index e244d2e52e..a0eea5f1a4 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -1065,6 +1065,75 @@ static struct page_info *alloc_heap_pages(
     return pg;
 }
 
+#ifdef CONFIG_STATIC_ALLOCATION
+/*
+ * Allocate nr_mfns contiguous pages, starting at #smfn, of static memory.
+ * It is the equivalent of alloc_heap_pages for static memory.
+ */
+static struct page_info *alloc_staticmem_pages(unsigned long nr_mfns,
+                                               mfn_t smfn,
+                                               unsigned int memflags)
+{
+    bool need_tlbflush = false;
+    uint32_t tlbflush_timestamp = 0;
+    unsigned long i;
+    struct page_info *pg;
+
+    /* For now, it only supports allocating at specified address. */
+    if ( !mfn_valid(smfn) || !nr_mfns )
+    {
+        printk(XENLOG_ERR
+               "Invalid %lu static memory starting at %"PRI_mfn"\n",
+               nr_mfns, mfn_x(smfn));
+        return NULL;
+    }
+    pg = mfn_to_page(smfn);
+
+    for ( i = 0; i < nr_mfns; i++ )
+    {
+        /*
+         * Reference count must continuously be zero for free pages
+         * of static memory (PGC_reserved).
+         */
+        ASSERT(pg[i].count_info & PGC_reserved);
+        if ( (pg[i].count_info & ~PGC_reserved) != PGC_state_free )
+        {
+            printk(XENLOG_ERR
+                   "Reference count must continuously be zero for free pages"
+                   "pg[%lu] MFN %"PRI_mfn" c=%#lx t=%#x\n",
+                   i, mfn_x(page_to_mfn(pg + i)),
+                   pg[i].count_info, pg[i].tlbflush_timestamp);
+            BUG();
+        }
+
+        if ( !(memflags & MEMF_no_tlbflush) )
+            accumulate_tlbflush(&need_tlbflush, &pg[i],
+                                &tlbflush_timestamp);
+
+        /*
+         * Preserve flag PGC_reserved and change page state
+         * to PGC_state_inuse.
+         */
+        pg[i].count_info = (pg[i].count_info & PGC_reserved) | PGC_state_inuse;
+        /* Initialise fields which have other uses for free pages. */
+        pg[i].u.inuse.type_info = 0;
+        page_set_owner(&pg[i], NULL);
+
+        /*
+         * Ensure cache and RAM are consistent for platforms where the
+         * guest can control its own visibility of/through the cache.
+         */
+        flush_page_to_ram(mfn_x(page_to_mfn(&pg[i])),
+                          !(memflags & MEMF_no_icache_flush));
+    }
+
+    if ( need_tlbflush )
+        filtered_flush_tlb_mask(tlbflush_timestamp);
+
+    return pg;
+}
+#endif
+
 /* Remove any offlined page in the buddy pointed to by head. */
 static int reserve_offlined_page(struct page_info *head)
 {
@@ -2326,7 +2395,11 @@ int assign_pages_nr(
 
         for ( i = 0; i < nr_pfns; i++ )
         {
+#ifdef CONFIG_STATIC_ALLOCATION
+            ASSERT(!(pg[i].count_info & ~(PGC_extra | PGC_reserved)));
+#else
             ASSERT(!(pg[i].count_info & ~PGC_extra));
+#endif
             if ( pg[i].count_info & PGC_extra )
                 extra_pages++;
         }
@@ -2365,7 +2438,12 @@ int assign_pages_nr(
         page_set_owner(&pg[i], d);
         smp_wmb(); /* Domain pointer must be visible before updating refcnt. */
         pg[i].count_info =
+#ifdef CONFIG_STATIC_ALLOCATION
+            (pg[i].count_info & (PGC_extra | PGC_reserved)) | PGC_allocated | 1;
+#else
             (pg[i].count_info & PGC_extra) | PGC_allocated | 1;
+#endif
+
         page_list_add_tail(&pg[i], page_to_list(d, &pg[i]));
     }
 
@@ -2434,6 +2512,57 @@ struct page_info *alloc_domheap_pages(
     return pg;
 }
 
+#ifdef CONFIG_STATIC_ALLOCATION
+/*
+ * Allocate nr_mfns contiguous pages, starting at #smfn, of static memory,
+ * then assign them to one specific domain #d.
+ * It is the equivalent of alloc_domheap_pages for static memory.
+ */
+struct page_info *alloc_domstatic_pages(
+        struct domain *d, unsigned long nr_mfns, mfn_t smfn,
+        unsigned int memflags)
+{
+    struct page_info *pg = NULL;
+    unsigned long dma_size;
+
+    ASSERT(!in_irq());
+
+    if ( !dma_bitsize )
+        memflags &= ~MEMF_no_dma;
+    else
+    {
+        if ( (dma_bitsize - PAGE_SHIFT) > 0 )
+        {
+            dma_size = 1ul << (dma_bitsize - PAGE_SHIFT);
+            /* Starting address shall meet the DMA limitation. */
+            if ( mfn_x(smfn) < dma_size )
+                return NULL;
+        }
+    }
+
+    pg = alloc_staticmem_pages(nr_mfns, smfn, memflags);
+    if ( !pg )
+        return NULL;
+
+    /* Right now, MEMF_no_owner case is meaningless here. */
+    ASSERT(d);
+    if ( memflags & MEMF_no_refcount )
+    {
+        unsigned long i;
+
+        for ( i = 0; i < nr_mfns; i++ )
+            pg[i].count_info |= PGC_extra;
+    }
+    if ( assign_pages_nr(d, pg, nr_mfns, memflags) )
+    {
+        free_staticmem_pages(pg, nr_mfns, memflags & MEMF_no_scrub);
+        return NULL;
+    }
+
+    return pg;
+}
+#endif
+
 void free_domheap_pages(struct page_info *pg, unsigned int order)
 {
     struct domain *d = page_get_owner(pg);
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 25d970e857..a07bd02923 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -89,6 +89,8 @@ bool scrub_free_pages(void);
 /* Static Allocation */
 void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
                           bool need_scrub);
+struct page_info *alloc_domstatic_pages(struct domain *d,
+        unsigned long nr_mfns, mfn_t smfn, unsigned int memflags);
 #endif
 
 /* Map machine page range in Xen virtual address space. */
-- 
2.25.1