
[Xen-devel] [PATCH v2] x86/dom0: use MEMF_no_scrub during Dom0 construction



Now that idle scrubbing is the default option, all memory is marked as
dirty and alloc_domheap_pages() will do eager scrubbing by default. This
can lead to longer Dom0 construction times and potentially to a watchdog
timeout, especially on older hardware (e.g. Harpertown).

Pass MEMF_no_scrub to avoid the eager scrubbing: there is little point
in scrubbing memory that is handed to the fully privileged Dom0 anyway.
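
For illustration only (not part of the patch): every hunk below boils
down to OR'ing MEMF_no_scrub into the memflags passed to the heap
allocator, i.e. the pattern sketched here. alloc_domheap_pages() and
MEMF_no_scrub are the real interfaces declared in xen/include/xen/mm.h;
the wrapper name is hypothetical.

    /*
     * Hypothetical helper illustrating the pattern applied throughout
     * this patch: memory destined for the trusted Dom0 does not need
     * to be scrubbed before use, so OR MEMF_no_scrub into the caller's
     * memflags to skip the eager scrub in the allocator.
     */
    static struct page_info *__init alloc_dom0_pages(struct domain *d,
                                                     unsigned int order,
                                                     unsigned int memflags)
    {
        return alloc_domheap_pages(d, order, memflags | MEMF_no_scrub);
    }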

Signed-off-by: Sergey Dyasli <sergey.dyasli@xxxxxxxxxx>
---
v2:
- use MEMF_no_scrub in more calls

CC: Jan Beulich <jbeulich@xxxxxxxx>
CC: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
CC: "Roger Pau Monné" <roger.pau@xxxxxxxxxx>
---
 xen/arch/x86/hvm/dom0_build.c |  2 +-
 xen/arch/x86/pv/dom0_build.c  | 23 ++++++++++++-----------
 2 files changed, 13 insertions(+), 12 deletions(-)

diff --git a/xen/arch/x86/hvm/dom0_build.c b/xen/arch/x86/hvm/dom0_build.c
index 3e29cd30b8..12c20a4b66 100644
--- a/xen/arch/x86/hvm/dom0_build.c
+++ b/xen/arch/x86/hvm/dom0_build.c
@@ -101,7 +101,7 @@ static int __init pvh_populate_memory_range(struct domain *d,
         unsigned int range_order = get_order_from_pages(nr_pages + 1);
 
         order = min(range_order ? range_order - 1 : 0, order);
-        page = alloc_domheap_pages(d, order, dom0_memflags);
+        page = alloc_domheap_pages(d, order, dom0_memflags | MEMF_no_scrub);
         if ( page == NULL )
         {
             if ( order == 0 && dom0_memflags )
diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c
index dc3c1e1202..837ef7bca1 100644
--- a/xen/arch/x86/pv/dom0_build.c
+++ b/xen/arch/x86/pv/dom0_build.c
@@ -132,7 +132,7 @@ static __init void setup_pv_physmap(struct domain *d, unsigned long pgtbl_pfn,
         pl4e = l4start + l4_table_offset(vphysmap_start);
         if ( !l4e_get_intpte(*pl4e) )
         {
-            page = alloc_domheap_page(d, 0);
+            page = alloc_domheap_page(d, MEMF_no_scrub);
             if ( !page )
                 break;
 
@@ -160,13 +160,13 @@ static __init void setup_pv_physmap(struct domain *d, unsigned long pgtbl_pfn,
                 vphysmap_end >= vphysmap_start + (1UL << L3_PAGETABLE_SHIFT) &&
                  (page = alloc_domheap_pages(d,
                                              L3_PAGETABLE_SHIFT - PAGE_SHIFT,
-                                             0)) != NULL )
+                                             MEMF_no_scrub)) != NULL )
             {
                 *pl3e = l3e_from_page(page, L1_PROT|_PAGE_DIRTY|_PAGE_PSE);
                 vphysmap_start += 1UL << L3_PAGETABLE_SHIFT;
                 continue;
             }
-            if ( (page = alloc_domheap_page(d, 0)) == NULL )
+            if ( (page = alloc_domheap_page(d, MEMF_no_scrub)) == NULL )
                 break;
 
             /* No mapping, PGC_allocated + page-table page. */
@@ -186,13 +186,13 @@ static __init void setup_pv_physmap(struct domain *d, unsigned long pgtbl_pfn,
                 vphysmap_end >= vphysmap_start + (1UL << L2_PAGETABLE_SHIFT) &&
                  (page = alloc_domheap_pages(d,
                                              L2_PAGETABLE_SHIFT - PAGE_SHIFT,
-                                             0)) != NULL )
+                                             MEMF_no_scrub)) != NULL )
             {
                 *pl2e = l2e_from_page(page, L1_PROT|_PAGE_DIRTY|_PAGE_PSE);
                 vphysmap_start += 1UL << L2_PAGETABLE_SHIFT;
                 continue;
             }
-            if ( (page = alloc_domheap_page(d, 0)) == NULL )
+            if ( (page = alloc_domheap_page(d, MEMF_no_scrub)) == NULL )
                 break;
 
             /* No mapping, PGC_allocated + page-table page. */
@@ -207,7 +207,7 @@ static __init void setup_pv_physmap(struct domain *d, unsigned long pgtbl_pfn,
 
         pl1e += l1_table_offset(vphysmap_start);
         BUG_ON(l1e_get_intpte(*pl1e));
-        page = alloc_domheap_page(d, 0);
+        page = alloc_domheap_page(d, MEMF_no_scrub);
         if ( !page )
             break;
 
@@ -239,7 +239,8 @@ static struct page_info * __init alloc_chunk(struct domain *d,
         order = last_order;
     else if ( max_pages & (max_pages - 1) )
         --order;
-    while ( (page = alloc_domheap_pages(d, order, dom0_memflags)) == NULL )
+    while ( (page = alloc_domheap_pages(d, order, dom0_memflags |
+                                                  MEMF_no_scrub)) == NULL )
         if ( order-- == 0 )
             break;
     if ( page )
@@ -265,7 +266,7 @@ static struct page_info * __init alloc_chunk(struct domain *d,
 
         if ( d->tot_pages + (1 << order) > d->max_pages )
             continue;
-        pg2 = alloc_domheap_pages(d, order, MEMF_exact_node);
+        pg2 = alloc_domheap_pages(d, order, MEMF_exact_node | MEMF_no_scrub);
         if ( pg2 > page )
         {
             free_domheap_pages(page, free_order);
@@ -502,7 +503,7 @@ int __init dom0_construct_pv(struct domain *d,
         vphysmap_start = parms.p2m_base;
         vphysmap_end   = vphysmap_start + nr_pages * sizeof(unsigned long);
     }
-    page = alloc_domheap_pages(d, order, 0);
+    page = alloc_domheap_pages(d, order, MEMF_no_scrub);
     if ( page == NULL )
         panic("Not enough RAM for domain 0 allocation\n");
     alloc_spfn = mfn_x(page_to_mfn(page));
@@ -519,7 +520,7 @@ int __init dom0_construct_pv(struct domain *d,
              ((mfn + count - 1) >> (d->arch.physaddr_bitsize - PAGE_SHIFT)) )
         {
             order = get_order_from_pages(count);
-            page = alloc_domheap_pages(d, order, 0);
+            page = alloc_domheap_pages(d, order, MEMF_no_scrub);
             if ( !page )
                 panic("Not enough RAM for domain 0 initrd\n");
             for ( count = -count; order--; )
@@ -608,7 +609,7 @@ int __init dom0_construct_pv(struct domain *d,
     }
     else
     {
-        page = alloc_domheap_page(d, MEMF_no_owner);
+        page = alloc_domheap_page(d, MEMF_no_owner | MEMF_no_scrub);
         if ( !page )
             panic("Not enough RAM for domain 0 PML4\n");
         page->u.inuse.type_info = PGT_l4_page_table|PGT_validated|1;
-- 
2.17.1



 

