
Re: [PATCH V4 08/10] xen/arm: introduce acquire_staticmem_pages and acquire_domstatic_pages



Hi Penny,

On 28/07/2021 11:27, Penny Zheng wrote:
alloc_staticmem_pages aims to acquire nr_mfns contiguous pages of
static memory, and it is the equivalent of alloc_heap_pages for static
memory. For now it only covers acquiring pre-configured static memory.

For each page, it shall check whether the page is reserved (PGC_reserved)
and free. It shall also perform the necessary initialisation, which is
mostly the same as in alloc_heap_pages, e.g. following the same
cache-coherency policy and turning the page state into PGC_state_inuse.

acquire_domstatic_pages is the equivalent of alloc_domheap_pages for
static memory: it acquires nr_mfns contiguous pages of static memory
and assigns them to one specific domain.

It uses acquire_staticmem_pages to acquire the nr_mfns pre-configured
pages of static memory, then, on success, uses assign_pages to assign
those pages to the specified domain.

Signed-off-by: Penny Zheng <penny.zheng@xxxxxxx>
---
v4 changes:
- move the TLB/cache flush outside of the locked region, considering XSA-364
  and reducing the amount of work happening with the heap_lock held
- remove the MEMF_no_refcount case
- mark acquire_staticmem_pages/acquire_domstatic_pages as __init
---
  xen/common/page_alloc.c | 108 +++++++++++++++++++++++++++++++++++++++-
  xen/include/xen/mm.h    |   3 ++
  2 files changed, 109 insertions(+), 2 deletions(-)

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index e279c6f713..b0edaf12b3 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -151,6 +151,10 @@
  #define p2m_pod_offline_or_broken_replace(pg) BUG_ON(pg != NULL)
  #endif
+#ifndef CONFIG_STATIC_MEMORY
+#define PGC_reserved 0
+#endif
+
  /*
   * Comma-separated list of hexadecimal page numbers containing bad bytes.
   * e.g. 'badpage=0x3f45,0x8a321'.
@@ -1065,6 +1069,73 @@ static struct page_info *alloc_heap_pages(
      return pg;
  }
+#ifdef CONFIG_STATIC_MEMORY

Rather than having multiple #ifdefs in the code, could we bundle all the functions for static allocation in a single place?
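
For instance (just sketching the structure here; the function bodies would stay as in your patch):

    #ifdef CONFIG_STATIC_MEMORY
    /* acquire_staticmem_pages() definition here */
    /* acquire_domstatic_pages() definition here */
    #endif /* CONFIG_STATIC_MEMORY */

That way a single #ifdef/#endif pair covers both helpers (and any future static-memory ones).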

+/*
+ * Acquire nr_mfns contiguous reserved pages, starting at #smfn, of
+ * static memory.
+ */
+static struct page_info * __init acquire_staticmem_pages(unsigned long nr_mfns,
+                                                         mfn_t smfn,
+                                                         unsigned int memflags)

NIT: I find it more intuitive if we pass the start MFN first and then the number of pages, so the arguments read as a range.

If you agree with that, then the caller would also have to be changed.
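
I.e. something like (hypothetical reordered prototype; the declaration and the caller would need updating to match):

    static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
                                                             unsigned long nr_mfns,
                                                             unsigned int memflags)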

+{
+    bool need_tlbflush = false;
+    uint32_t tlbflush_timestamp = 0;
+    unsigned long i;
+    struct page_info *pg;
+
+    /* For now, it only supports pre-configured static memory. */

This comment doesn't seem to match the check below.
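
Maybe something along the lines of:

    /* The start MFN must be valid and the range must not be empty. */

would match the code better.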

+    if ( !mfn_valid(smfn) || !nr_mfns )

This check only guarantees that there will be a page for the first MFN. Shouldn't we also check for the other MFNs?
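
Something like the below, before taking the lock (untested sketch, reusing the existing local 'i'):

    for ( i = 0; i < nr_mfns; i++ )
        if ( !mfn_valid(mfn_add(smfn, i)) )
            return NULL;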

+        return NULL;
+
+    spin_lock(&heap_lock);
+
+    pg = mfn_to_page(smfn);
+
+    for ( i = 0; i < nr_mfns; i++ )
+    {
+        /*
+         * Reference count must continuously be zero for free pages
+         * of static memory(PGC_reserved).
+         */

How about: "The page should be reserved and not yet allocated"?

+        if ( pg[i].count_info != (PGC_state_free | PGC_reserved) )
+        {
+            printk(XENLOG_ERR
+                   "pg[%lu] Static MFN %"PRI_mfn" c=%#lx t=%#x\n",
+                   i, mfn_x(page_to_mfn(pg + i)),
+                   pg[i].count_info, pg[i].tlbflush_timestamp);
+            BUG();

This BUG() can be easily hit by misconfiguring the Device-Tree. I think it would be best if we return an error and revert the changes.
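
A rough sketch of what I mean (untested; the other fields touched in the loop for the earlier pages may need restoring too):

    if ( pg[i].count_info != (PGC_state_free | PGC_reserved) )
    {
        printk(XENLOG_ERR
               "pg[%lu] Static MFN %"PRI_mfn" c=%#lx t=%#x\n",
               i, mfn_x(page_to_mfn(pg + i)),
               pg[i].count_info, pg[i].tlbflush_timestamp);
        /* Put the pages already claimed back in the free state. */
        while ( i-- )
            pg[i].count_info = PGC_state_free | PGC_reserved;
        spin_unlock(&heap_lock);
        return NULL;
    }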

+        }
+
+        if ( !(memflags & MEMF_no_tlbflush) )
+            accumulate_tlbflush(&need_tlbflush, &pg[i],
+                                &tlbflush_timestamp);
+
+        /*
+         * Preserve flag PGC_reserved and change page state
+         * to PGC_state_inuse.
+         */
+        pg[i].count_info = (PGC_reserved | PGC_state_inuse);
+        /* Initialise fields which have other uses for free pages. */
+        pg[i].u.inuse.type_info = 0;
+        page_set_owner(&pg[i], NULL);
+    }
+
+    spin_unlock(&heap_lock);
+
+    if ( need_tlbflush )
+        filtered_flush_tlb_mask(tlbflush_timestamp);
+
+    /*
+     * Ensure cache and RAM are consistent for platforms where the guest
+     * can control its own visibility of/through the cache.
+     */
+    for ( i = 0; i < nr_mfns; i++ )
+        flush_page_to_ram(mfn_x(smfn) + i, !(memflags & MEMF_no_icache_flush));
+
+    return pg;
+}
+#endif
+
  /* Remove any offlined page in the buddy pointed to by head. */
  static int reserve_offlined_page(struct page_info *head)
  {
@@ -2306,7 +2377,7 @@ int assign_pages(
          for ( i = 0; i < nr; i++ )
          {
-            ASSERT(!(pg[i].count_info & ~PGC_extra));
+            ASSERT(!(pg[i].count_info & ~(PGC_extra | PGC_reserved)));
              if ( pg[i].count_info & PGC_extra )
                  extra_pages++;
          }
@@ -2345,7 +2416,8 @@ int assign_pages(
          page_set_owner(&pg[i], d);
          smp_wmb(); /* Domain pointer must be visible before updating refcnt. */
          pg[i].count_info =
-            (pg[i].count_info & PGC_extra) | PGC_allocated | 1;
+            (pg[i].count_info & (PGC_extra | PGC_reserved)) | PGC_allocated | 1;
+
          page_list_add_tail(&pg[i], page_to_list(d, &pg[i]));
      }
@@ -2411,6 +2483,38 @@ struct page_info *alloc_domheap_pages(
      return pg;
  }
+#ifdef CONFIG_STATIC_MEMORY
+/*
+ * Acquire nr_mfns contiguous pages, starting at #smfn, of static memory,
+ * then assign them to one specific domain #d.
+ */
+struct page_info * __init acquire_domstatic_pages(struct domain *d,
+                                                  unsigned long nr_mfns,
+                                                  mfn_t smfn, unsigned int memflags)
+{
+    struct page_info *pg = NULL;
+
+    ASSERT(!in_irq());
+
+    pg = acquire_staticmem_pages(nr_mfns, smfn, memflags);
+    if ( !pg )
+        return NULL;
+
+    /*
+     * MEMF_no_owner/MEMF_no_refcount cases are missing here because
+     * right now, acquired static memory is only for guest RAM.
+     */
+    ASSERT(d);
+    if ( assign_pages(pg, nr_mfns, d, memflags) )
+    {
+        free_staticmem_pages(pg, nr_mfns, memflags & MEMF_no_scrub);
+        return NULL;
+    }
+
+    return pg;
+}
+#endif
+
  void free_domheap_pages(struct page_info *pg, unsigned int order)
  {
      struct domain *d = page_get_owner(pg);
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 2e75cdcbb7..62e8e2ad61 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -89,6 +89,9 @@ bool scrub_free_pages(void);
  /* These functions are for static memory */
  void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
                            bool need_scrub);
+struct page_info *acquire_domstatic_pages(struct domain *d,
+                                          unsigned long nr_mfns, mfn_t smfn,
+                                          unsigned int memflags);
  #endif
  /* Map machine page range in Xen virtual address space. */


Cheers,

--
Julien Grall



 

