
[Xen-devel] [PATCH v2 07/17] xen: mm: manage EPC pages in Xen heaps



EPC is a limited resource reserved by the BIOS, and is reported in the e820
map as reserved memory rather than normal memory. EPC must be managed in 4K
pages, and cannot be accessed outside of enclaves.

Using the existing memory allocation API (i.e. the heaps) allows us to
manage EPC pages efficiently, and may benefit a future EPC ballooning
implementation.

In order to use the existing heap mechanism to manage EPC pages, a
dedicated MEMZONE is required, because EPC pages must not be mixed with
normal pages in one zone. For page_to_zone() to return the proper zone
number, 'PGC_epc' and 'is_epc_page' are introduced, similar to the existing
'PGC_xen_heap' and 'is_xen_heap_page'.
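
For illustration only (this helper is not part of the patch), the new zone
selection behaves roughly like the open-coded function below: EPC pages
always resolve to MEMZONE_EPC, Xen heap pages to MEMZONE_XEN, and all other
pages to an MFN-derived zone:

    /* Illustrative expansion of the new page_to_zone() logic. */
    static unsigned int example_page_to_zone(const struct page_info *pg)
    {
        if ( is_epc_page(pg) )          /* count_info & PGC_epc */
            return MEMZONE_EPC;         /* 0 */
        if ( is_xen_heap_page(pg) )     /* count_info & PGC_xen_heap */
            return MEMZONE_XEN;         /* 1 */
        /* Everything else: zone derived from the MFN's most significant bit. */
        return flsl(page_to_mfn(pg)) ?: MEMZONE_XEN + 1;
    }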

In free_heap_pages(), 'need_scrub' is cleared if the page is found to be an
EPC page, because EPC pages cannot be scrubbed. EPC pages also have no
entries in the M2P table (it is not used for them), so updating the M2P
entry is skipped.

In addition, a 'MEMF_epc' memflag is introduced to tell the allocator to
allocate EPC pages rather than normal memory.
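
As a hypothetical caller-side sketch (no such caller is added by this
patch), allocating a run of EPC pages for a domain would then look like:

    /*
     * Hypothetical example: request 2^order EPC pages for domain d.
     * MEMF_epc routes the request to MEMZONE_EPC and implies
     * MEMF_no_scrub in alloc_domheap_pages().
     */
    struct page_info *pg = alloc_domheap_pages(d, order, MEMF_epc);

    if ( pg == NULL )
        return -ENOMEM;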

Signed-off-by: Boqun Feng <boqun.feng@xxxxxxxxx>
---
 xen/common/page_alloc.c  | 31 +++++++++++++++++++++++++------
 xen/include/asm-arm/mm.h |  2 ++
 xen/include/asm-x86/mm.h |  5 ++++-
 xen/include/xen/mm.h     |  2 ++
 4 files changed, 33 insertions(+), 7 deletions(-)

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 220d7d91c62b..3b9d2c1a534f 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -377,12 +377,14 @@ mfn_t __init alloc_boot_pages(unsigned long nr_pfns, unsigned long pfn_align)
  * BINARY BUDDY ALLOCATOR
  */
 
-#define MEMZONE_XEN 0
+#define MEMZONE_EPC 0
+#define MEMZONE_XEN 1
 #define NR_ZONES    (PADDR_BITS - PAGE_SHIFT + 1)
 
 #define bits_to_zone(b) (((b) < (PAGE_SHIFT + 1)) ? 1 : ((b) - PAGE_SHIFT))
-#define page_to_zone(pg) (is_xen_heap_page(pg) ? MEMZONE_XEN :  \
-                          (flsl(page_to_mfn(pg)) ? : 1))
+#define page_to_zone(pg) (is_epc_page(pg) ? MEMZONE_EPC :  \
+                          is_xen_heap_page(pg) ? MEMZONE_XEN :  \
+                          (flsl(page_to_mfn(pg)) ? : MEMZONE_XEN + 1))
 
 typedef struct page_list_head heap_by_zone_and_order_t[NR_ZONES][MAX_ORDER+1];
 static heap_by_zone_and_order_t *_heap[MAX_NUMNODES];
@@ -921,7 +923,12 @@ static struct page_info *alloc_heap_pages(
     }
 
     node = phys_to_nid(page_to_maddr(pg));
-    zone = page_to_zone(pg);
+
+    if ( memflags & MEMF_epc )
+        zone = MEMZONE_EPC;
+    else
+        zone = page_to_zone(pg);
+
     buddy_order = PFN_ORDER(pg);
 
     first_dirty = pg->u.free.first_dirty;
@@ -1332,10 +1339,14 @@ static void free_heap_pages(
     unsigned long mask, mfn = page_to_mfn(pg);
     unsigned int i, node = phys_to_nid(page_to_maddr(pg)), tainted = 0;
     unsigned int zone = page_to_zone(pg);
+    bool is_epc = false;
 
     ASSERT(order <= MAX_ORDER);
     ASSERT(node >= 0);
 
+    is_epc = is_epc_page(pg);
+    need_scrub = need_scrub && !is_epc;
+
     spin_lock(&heap_lock);
 
     for ( i = 0; i < (1 << order); i++ )
@@ -1364,11 +1375,13 @@ static void free_heap_pages(
         if ( pg[i].u.free.need_tlbflush )
             page_set_tlbflush_timestamp(&pg[i]);
 
-        pg[i].u.free.scrubbable = true;
+        pg[i].u.free.scrubbable = !is_epc;
 
         /* This page is not a guest frame any more. */
         page_set_owner(&pg[i], NULL); /* set_gpfn_from_mfn snoops pg owner */
-        set_gpfn_from_mfn(mfn + i, INVALID_M2P_ENTRY);
+
+        if ( !is_epc )
+            set_gpfn_from_mfn(mfn + i, INVALID_M2P_ENTRY);
 
         if ( need_scrub )
         {
@@ -2232,6 +2245,12 @@ struct page_info *alloc_domheap_pages(
     if ( memflags & MEMF_no_owner )
         memflags |= MEMF_no_refcount;
 
+    /* MEMF_epc implies MEMF_no_scrub */
+    if ((memflags & MEMF_epc) &&
+        !(pg = alloc_heap_pages(MEMZONE_EPC, MEMZONE_EPC, order,
+                                memflags | MEMF_no_scrub, d)))
+        return NULL;
+
     if ( dma_bitsize && ((dma_zone = bits_to_zone(dma_bitsize)) < zone_hi) )
         pg = alloc_heap_pages(dma_zone + 1, zone_hi, order, memflags, d);
 
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index c715e2290510..bca26f027402 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -153,6 +153,8 @@ extern vaddr_t xenheap_virt_start;
     (mfn_valid(_mfn(mfn)) && is_xen_heap_page(__mfn_to_page(mfn)))
 #endif
 
+#define is_epc_page(page)           false
+
 #define page_scrubbable(_p)         true
 
 #define page_mergeable(_p1, _p2)    true
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index b0f0ea0a8b5d..1dedb8099801 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -259,8 +259,10 @@ struct page_info
 #define PGC_state_free    PG_mask(3, 9)
 #define page_state_is(pg, st) (((pg)->count_info&PGC_state) == PGC_state_##st)
 
+#define _PGC_epc          PG_shift(10)
+#define PGC_epc           PG_mask(1, 10)
  /* Count of references to this frame. */
-#define PGC_count_width   PG_shift(9)
+#define PGC_count_width   PG_shift(10)
 #define PGC_count_mask    ((1UL<<PGC_count_width)-1)
 
 /*
@@ -271,6 +273,7 @@ struct page_info
 #define PGC_need_scrub    PGC_allocated
 
 #define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap)
+#define is_epc_page(page) ((page)->count_info & PGC_epc)
 #define is_xen_heap_mfn(mfn) \
     (__mfn_valid(mfn) && is_xen_heap_page(__mfn_to_page(mfn)))
 #define is_xen_fixed_mfn(mfn)                     \
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index e813c07b225c..721a2975c1d4 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -250,6 +250,8 @@ struct npfec {
 #define  MEMF_no_icache_flush (1U<<_MEMF_no_icache_flush)
 #define _MEMF_no_scrub    8
 #define  MEMF_no_scrub    (1U<<_MEMF_no_scrub)
+#define _MEMF_epc         9
+#define  MEMF_epc         (1U<<_MEMF_epc)
 #define _MEMF_node        16
 #define  MEMF_node_mask   ((1U << (8 * sizeof(nodeid_t))) - 1)
 #define  MEMF_node(n)     ((((n) + 1) & MEMF_node_mask) << _MEMF_node)
-- 
2.15.0


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 

