[PATCH v5 1/4] xen/mm: remove aliasing of PGC_need_scrub over PGC_allocated
Future changes will care about the state of the PGC_need_scrub flag even
when pages have the PGC_allocated flag set, and hence it's no longer possible
to alias both values. Also add PGC_need_scrub to the set of preserved flags,
so it's not dropped by assign_pages().

No functional change intended, albeit the page reference counter on x86 loses
a bit.
Suggested-by: Jan Beulich <jbeulich@xxxxxxxx>
Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
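For illustration only (not part of the patch): a minimal standalone sketch of
the masking assign_pages() applies to count_info, assuming a 64-bit build and
the usual PG_shift()/PG_mask() definitions.  The new_count_info() helper and
the positions of the flags other than PGC_need_scrub are hypothetical
stand-ins; the point is simply that a flag survives assignment only if it is
part of PGC_preserved, which is why PGC_need_scrub has to be added there once
it can coexist with PGC_allocated.

    #include <assert.h>

    #define BITS_PER_LONG   64
    #define PG_shift(idx)   (BITS_PER_LONG - (idx))
    #define PG_mask(x, idx) (x ## UL << PG_shift(idx))

    #define PGC_allocated   PG_mask(1, 1)
    #define PGC_extra       PG_mask(1, 2)   /* position illustrative only */
    #define PGC_need_scrub  PG_mask(1, 5)   /* Arm position from this patch */

    /* Without PGC_need_scrub here, the flag would be dropped on assignment. */
    #define PGC_preserved   (PGC_extra | PGC_need_scrub)

    /* Hypothetical helper mirroring the count_info update on assignment. */
    static unsigned long new_count_info(unsigned long old)
    {
        return (old & PGC_preserved) | PGC_allocated | 1 /* refcount */;
    }

    int main(void)
    {
        unsigned long ci = new_count_info(PGC_need_scrub);

        /* The scrub flag now survives assignment alongside PGC_allocated. */
        assert(ci & PGC_need_scrub);
        assert(ci & PGC_allocated);
        return 0;
    }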
Changes since v4:
- New in this version.
---
The PGC flag space on architectures other than x86 is not compact, so I've
just used a hole in those.
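As a quick cross-check of where the new bit lands on each architecture
(illustration only, assuming a 64-bit build and PG_shift(idx) ==
BITS_PER_LONG - idx; the shift indices are the ones used in the hunks below):

    #include <stdio.h>

    #define BITS_PER_LONG   64
    #define PG_shift(idx)   (BITS_PER_LONG - (idx))

    int main(void)
    {
        /* Shift indices used for _PGC_need_scrub in this patch. */
        printf("x86:   bit %d\n", PG_shift(7));   /* 57 */
        printf("arm:   bit %d\n", PG_shift(5));   /* 59 */
        printf("riscv: bit %d\n", PG_shift(4));   /* 60 */
        printf("ppc:   bit %d\n", PG_shift(3));   /* 61 */
        printf("PGC_allocated: bit %d\n", PG_shift(1)); /* 63: no longer aliased */
        return 0;
    }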
---
xen/arch/arm/include/asm/mm.h | 10 +++-------
xen/arch/ppc/include/asm/mm.h | 10 +++-------
xen/arch/riscv/include/asm/mm.h | 10 +++-------
xen/arch/x86/include/asm/mm.h | 18 +++++++-----------
xen/common/page_alloc.c | 2 +-
5 files changed, 17 insertions(+), 33 deletions(-)
diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h
index ec2d2dc5372a..72a692862420 100644
--- a/xen/arch/arm/include/asm/mm.h
+++ b/xen/arch/arm/include/asm/mm.h
@@ -144,6 +144,9 @@ struct page_info
#define _PGC_colored PG_shift(4)
#define PGC_colored PG_mask(1, 4)
#endif
+/* Page needs to be scrubbed. */
+#define _PGC_need_scrub PG_shift(5)
+#define PGC_need_scrub PG_mask(1, 5)
/* ... */
/* Page is broken? */
#define _PGC_broken PG_shift(7)
@@ -163,13 +166,6 @@ struct page_info
#define PGC_count_width PG_shift(10)
#define PGC_count_mask ((1UL<<PGC_count_width)-1)
-/*
- * Page needs to be scrubbed. Since this bit can only be set on a page that is
- * free (i.e. in PGC_state_free) we can reuse PGC_allocated bit.
- */
-#define _PGC_need_scrub _PGC_allocated
-#define PGC_need_scrub PGC_allocated
-
#if defined(CONFIG_ARM_64) || defined(CONFIG_MPU)
#define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap)
#define is_xen_heap_mfn(mfn) \
diff --git a/xen/arch/ppc/include/asm/mm.h b/xen/arch/ppc/include/asm/mm.h
index 91c405876bd0..402d06bdaa9f 100644
--- a/xen/arch/ppc/include/asm/mm.h
+++ b/xen/arch/ppc/include/asm/mm.h
@@ -57,6 +57,9 @@ static inline struct page_info *virt_to_page(const void *v)
/* Page is Xen heap? */
#define _PGC_xen_heap PG_shift(2)
#define PGC_xen_heap PG_mask(1, 2)
+/* Page needs to be scrubbed. */
+#define _PGC_need_scrub PG_shift(3)
+#define PGC_need_scrub PG_mask(1, 3)
/* Page is broken? */
#define _PGC_broken PG_shift(7)
#define PGC_broken PG_mask(1, 7)
@@ -75,13 +78,6 @@ static inline struct page_info *virt_to_page(const void *v)
#define PGC_count_width PG_shift(10)
#define PGC_count_mask ((1UL<<PGC_count_width)-1)
-/*
- * Page needs to be scrubbed. Since this bit can only be set on a page that is
- * free (i.e. in PGC_state_free) we can reuse PGC_allocated bit.
- */
-#define _PGC_need_scrub _PGC_allocated
-#define PGC_need_scrub PGC_allocated
-
#define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap)
#define is_xen_heap_mfn(mfn) \
(mfn_valid(mfn) && is_xen_heap_page(mfn_to_page(mfn)))
diff --git a/xen/arch/riscv/include/asm/mm.h b/xen/arch/riscv/include/asm/mm.h
index a005d0247a6f..9e28c2495462 100644
--- a/xen/arch/riscv/include/asm/mm.h
+++ b/xen/arch/riscv/include/asm/mm.h
@@ -273,13 +273,6 @@ static inline bool arch_mfns_in_directmap(unsigned long mfn, unsigned long nr)
#define PGT_count_width PG_shift(2)
#define PGT_count_mask ((1UL << PGT_count_width) - 1)
-/*
- * Page needs to be scrubbed. Since this bit can only be set on a page that is
- * free (i.e. in PGC_state_free) we can reuse PGC_allocated bit.
- */
-#define _PGC_need_scrub _PGC_allocated
-#define PGC_need_scrub PGC_allocated
-
/* Cleared when the owning guest 'frees' this page. */
#define _PGC_allocated PG_shift(1)
#define PGC_allocated PG_mask(1, 1)
@@ -293,6 +286,9 @@ static inline bool arch_mfns_in_directmap(unsigned long mfn, unsigned long nr)
#else
#define PGC_static 0
#endif
+/* Page needs to be scrubbed. */
+#define _PGC_need_scrub PG_shift(4)
+#define PGC_need_scrub PG_mask(1, 4)
/* Page is broken? */
#define _PGC_broken PG_shift(7)
#define PGC_broken PG_mask(1, 7)
diff --git a/xen/arch/x86/include/asm/mm.h b/xen/arch/x86/include/asm/mm.h
index 419fa17a4373..06c20ab8de33 100644
--- a/xen/arch/x86/include/asm/mm.h
+++ b/xen/arch/x86/include/asm/mm.h
@@ -83,29 +83,25 @@
#define PGC_state_offlined PG_mask(2, 6)
#define PGC_state_free PG_mask(3, 6)
#define page_state_is(pg, st) (((pg)->count_info&PGC_state) == PGC_state_##st)
+/* Page needs to be scrubbed. */
+#define _PGC_need_scrub PG_shift(7)
+#define PGC_need_scrub PG_mask(1, 7)
#ifdef CONFIG_SHADOW_PAGING
/* Set when a page table page has been shadowed. */
-#define _PGC_shadowed_pt PG_shift(7)
-#define PGC_shadowed_pt PG_mask(1, 7)
+#define _PGC_shadowed_pt PG_shift(8)
+#define PGC_shadowed_pt PG_mask(1, 8)
#else
#define PGC_shadowed_pt 0
#endif
/* Count of references to this frame. */
#if PGC_shadowed_pt
-#define PGC_count_width PG_shift(7)
+#define PGC_count_width PG_shift(8)
#else
-#define PGC_count_width PG_shift(6)
+#define PGC_count_width PG_shift(7)
#endif
#define PGC_count_mask ((1UL<<PGC_count_width)-1)
-/*
- * Page needs to be scrubbed. Since this bit can only be set on a page that is
- * free (i.e. in PGC_state_free) we can reuse PGC_allocated bit.
- */
-#define _PGC_need_scrub _PGC_allocated
-#define PGC_need_scrub PGC_allocated
-
#ifndef CONFIG_BIGMEM
/*
* This definition is solely for the use in struct page_info (and
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 2efc11ce095f..2ee249ac365a 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -170,7 +170,7 @@
/*
* Flags that are preserved in assign_pages() (and only there)
*/
-#define PGC_preserved (PGC_extra | PGC_static | PGC_colored)
+#define PGC_preserved (PGC_extra | PGC_static | PGC_colored | PGC_need_scrub)
#ifndef PGT_TYPE_INFO_INITIALIZER
#define PGT_TYPE_INFO_INITIALIZER 0
--
2.51.0