[PATCH 2/2] xen/mm: limit non-scrubbed allocations to a specific order
The current model of falling back to allocating unscrubbed pages and
scrubbing them in place at allocation time risks triggering the watchdog:
Watchdog timer detects that CPU55 is stuck!
----[ Xen-4.17.5-21 x86_64 debug=n Not tainted ]----
CPU: 55
RIP: e008:[<ffff82d040204c4a>] clear_page_sse2+0x1a/0x30
RFLAGS: 0000000000000202 CONTEXT: hypervisor (d0v12)
[...]
Xen call trace:
[<ffff82d040204c4a>] R clear_page_sse2+0x1a/0x30
[<ffff82d04022a121>] S clear_domain_page+0x11/0x20
[<ffff82d04022c170>] S common/page_alloc.c#alloc_heap_pages+0x400/0x5a0
[<ffff82d04022d4a7>] S alloc_domheap_pages+0x67/0x180
[<ffff82d040226f9f>] S common/memory.c#populate_physmap+0x22f/0x3b0
[<ffff82d040228ec8>] S do_memory_op+0x728/0x1970
The maximum allocation order on x86 is limited to 18, which means
possibly allocating and scrubbing 1G worth of memory in 4K chunks.
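For reference, the arithmetic with 4K pages:

    order 18: 2^18 pages * 4 KiB = 1 GiB scrubbed in a single allocation
    order  9: 2^9  pages * 4 KiB = 2 MiB (the MAX_DIRTY_ORDER fallback below)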
Start by limiting dirty allocations to CONFIG_DOMU_MAX_ORDER, which
currently corresponds to 2M chunks. However, such a limitation might cause
fragmentation in HVM p2m population during domain creation. To prevent
that, introduce some extra logic in populate_physmap() that falls back to
preemptive page-scrubbing if the requested allocation cannot be fulfilled
and there's scrubbing work to do. This approach is less fair than the
current one, but allows preemptive page scrubbing in the context of
populate_physmap() in an attempt to avoid unnecessary page-shattering.
Fixes: 74d2e11ccfd2 ("mm: Scrub pages in alloc_heap_pages() if needed")
Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
I'm not particularly happy with this approach, as it doesn't guarantee
progress for the callers. IOW: a caller might do a lot of scrubbing, just
to get its pages stolen by a different concurrent thread doing
allocations. However I'm not sure there's a better solution than resorting
to 2M allocations if there's not enough scrubbed free memory.
I'm having trouble seeing where we could temporarily store page(s)
allocated that need to be scrubbed before being assigned to the domain, in
a way that can be used by continuations, and that would allow Xen to keep
track of them in case the operation is never finished. IOW: we would need
to account for cleanup of such a temporary stash of pages in case the
domain never completes the hypercall, or is destroyed midway.
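Purely for illustration, a rough sketch of what such a temporary stash
might look like; all names here are hypothetical and not part of this
patch:

    /* Hypothetical per-domain stash of allocated-but-unscrubbed pages. */
    struct scrub_stash {
        spinlock_t lock;
        struct page_list_head pages;  /* pending scrub, not yet assigned */
        unsigned int order;           /* order of the interrupted request */
    };

    /*
     * A continuation would scrub a bounded batch from the stash and then
     * either assign the pages to the domain or return -ERESTART. Domain
     * destruction would have to drain any leftover pages back to the heap
     * (e.g. via free_domheap_pages()).
     */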
Otherwise we could add the option to switch back to scrubbing before
returning the pages to the free pool, but that's also problematic: the
current approach aims to scrub pages in the same NUMA node as the CPU
that's doing the scrubbing. If we scrub in the context of the domain
destruction hypercall, there's no attempt to scrub pages in the local NUMA
node.
---
xen/common/memory.c | 12 ++++++++++++
xen/common/page_alloc.c | 37 +++++++++++++++++++++++++++++++++++--
xen/include/xen/mm.h | 9 +++++++++
3 files changed, 56 insertions(+), 2 deletions(-)
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 10becf7c1f4c..28b254e9d280 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -279,6 +279,18 @@ static void populate_physmap(struct memop_args *a)
if ( unlikely(!page) )
{
+ nodeid_t node = MEMF_get_node(a->memflags);
+
+ if ( memory_scrub_pending(node) ||
+ (node != NUMA_NO_NODE &&
+ !(a->memflags & MEMF_exact_node) &&
+ memory_scrub_pending(node = NUMA_NO_NODE)) )
+ {
+ scrub_free_pages(node);
+ a->preempted = 1;
+ goto out;
+ }
+
gdprintk(XENLOG_INFO,
"Could not allocate order=%u extent: id=%d
memflags=%#x (%u of %u)\n",
a->extent_order, d->domain_id, a->memflags,
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 248c44df32b3..d4dabc997c44 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -483,6 +483,20 @@ static heap_by_zone_and_order_t *_heap[MAX_NUMNODES];
static unsigned long node_need_scrub[MAX_NUMNODES];
+bool memory_scrub_pending(nodeid_t node)
+{
+ nodeid_t i;
+
+ if ( node != NUMA_NO_NODE )
+ return node_need_scrub[node];
+
+ for_each_online_node ( i )
+ if ( node_need_scrub[i] )
+ return true;
+
+ return false;
+}
+
static unsigned long *avail[MAX_NUMNODES];
static long total_avail_pages;
@@ -1007,8 +1021,18 @@ static struct page_info *alloc_heap_pages(
}
pg = get_free_buddy(zone_lo, zone_hi, order, memflags, d);
- /* Try getting a dirty buddy if we couldn't get a clean one. */
- if ( !pg && !(memflags & MEMF_no_scrub) )
+ /*
+ * Try getting a dirty buddy if we couldn't get a clean one. Limit the
+ * fallback to orders equal or below MAX_DIRTY_ORDER, as otherwise the
+ * non-preemptive scrubbing could trigger the watchdog.
+ */
+ if ( !pg && !(memflags & MEMF_no_scrub) &&
+ /*
+ * Allow any order unscrubbed allocations during boot time, we
+ * compensate by processing softirqs in the scrubbing loop below once
+ * irqs are enabled.
+ */
+ (order <= MAX_DIRTY_ORDER || system_state < SYS_STATE_active) )
pg = get_free_buddy(zone_lo, zone_hi, order,
memflags | MEMF_no_scrub, d);
if ( !pg )
@@ -1115,7 +1139,16 @@ static struct page_info *alloc_heap_pages(
if ( test_and_clear_bit(_PGC_need_scrub, &pg[i].count_info) )
{
if ( !(memflags & MEMF_no_scrub) )
+ {
scrub_one_page(&pg[i], cold);
+ /*
+ * Use SYS_STATE_smp_boot explicitly; ahead of that state
+ * interrupts are disabled.
+ */
+ if ( system_state == SYS_STATE_smp_boot &&
+ !(dirty_cnt & 0xff) )
+ process_pending_softirqs();
+ }
dirty_cnt++;
}
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 7067c9ec0405..a37476a99f1b 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -92,6 +92,7 @@ void xenheap_max_mfn(unsigned long mfn);
void *alloc_xenheap_pages(unsigned int order, unsigned int memflags);
void free_xenheap_pages(void *v, unsigned int order);
bool scrub_free_pages(nodeid_t node);
+bool memory_scrub_pending(nodeid_t node);
#define alloc_xenheap_page() (alloc_xenheap_pages(0,0))
#define free_xenheap_page(v) (free_xenheap_pages(v,0))
@@ -223,6 +224,14 @@ struct npfec {
#else
#define MAX_ORDER 20 /* 2^20 contiguous pages */
#endif
+
+/* Max order when scrubbing pages at allocation time. */
+#ifdef CONFIG_DOMU_MAX_ORDER
+# define MAX_DIRTY_ORDER CONFIG_DOMU_MAX_ORDER
+#else
+# define MAX_DIRTY_ORDER 9
+#endif
+
mfn_t acquire_reserved_page(struct domain *d, unsigned int memflags);
/* Private domain structs for DOMID_XEN, DOMID_IO, etc. */
--
2.51.0