[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH v2 03/26] xen/x86: consolidate vram tracking support


  • To: <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Penny Zheng <Penny.Zheng@xxxxxxx>
  • Date: Wed, 10 Sep 2025 15:38:04 +0800
  • Arc-authentication-results: i=1; mx.microsoft.com 1; spf=pass (sender ip is 165.204.84.17) smtp.rcpttodomain=lists.xenproject.org smtp.mailfrom=amd.com; dmarc=pass (p=quarantine sp=quarantine pct=100) action=none header.from=amd.com; dkim=none (message not signed); arc=none (0)
  • Arc-message-signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com; s=arcselector10001; h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1; bh=laR1vIy8iDHUWom8KybMGHOL/51jt2m++lMgJI0rWmk=; b=Flo1PdL1k0Zr8Re+0tukVLpCsWjdCUKek4UhJ+yVpX4JigoA0aNxBSNd0LZH33U0IOKFdEejix1QUxHPpp7h1pQuctgk17LzOO5wC7kUuHAbK8kbNjvkVu6PP2DF2zn/fNSnX8YuNYYUN+XJDg8+Z/fv6ncqzOL9eikOfBvC+Td6NYzKmAMaRTdiGqhIbCzbFj3Ki5HxpxXlSjljzaBrzvLE6ESwFpziF+0nOMtn4FoSwNpKBkTB2F4m8BcOVmKVnaRd1kMzA0KK+06TA3n5Czphf8zAyR0BZCdqs/jsQySxHLr8P8yoJjox/WWzJneIIUaqfr21VUL00ic9RLqFZQ==
  • Arc-seal: i=1; a=rsa-sha256; s=arcselector10001; d=microsoft.com; cv=none; b=Mjakr2j0FrYplPnrulqwzwuXuvkjlkBExnLjCcW4ccRE4Ql/L3oFKgBjeokP21Zqib8mTHfbfbWmlSpK9IRjAv8Dq+XfiyB8kkztBg7wGxD1cKfJQIlo3+ZbbHd0XlILD/PIyWsEIxOdKdKbgC3I0kVBv2apBwO5C52c2RRioU18BD8Q7vxHzUczyaSECOgvvF1XjusldkpSJBs6qrmAo0qa+PNu9mQ3kd3ogmLSCKxmq/HPzaqIVT6Wo6oTYz22Y9AYzkKxXClqFkJwAHgmcYP5D7IvMMRtkJkxfYtOWzsMBugtjlF6ZgaHQ2Wlu7hC28MAgCW2UVfbBI06Q3mUrA==
  • Cc: <ray.huang@xxxxxxx>, Penny Zheng <Penny.Zheng@xxxxxxx>, Jan Beulich <jbeulich@xxxxxxxx>, Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>
  • Delivery-date: Wed, 10 Sep 2025 07:39:28 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Flag PG_log_dirty is for paging log dirty support, not vram tracking support.
However, the data structure sh_dirty_vram{} and the function
paging_log_dirty_range(), both designed for vram tracking support, are guarded
by PG_log_dirty. We decouple both from PG_log_dirty, and also move
paging_log_dirty_range(), renamed to p2m_log_dirty_range(), into p2m.c, where
it logically belongs.

Signed-off-by: Penny Zheng <Penny.Zheng@xxxxxxx>
---
v1 -> v2:
- rename paging_log_dirty_range() to p2m_log_dirty_range(), and move it
to p2m.c, where it logically belongs
- remove #ifdef / #endif
- change int to unsigned int
---
 xen/arch/x86/include/asm/p2m.h    |  4 ++++
 xen/arch/x86/include/asm/paging.h | 32 +++++++++++++------------------
 xen/arch/x86/mm/hap/hap.c         |  4 ++--
 xen/arch/x86/mm/p2m.c             | 28 +++++++++++++++++++++++++++
 xen/arch/x86/mm/paging.c          | 32 -------------------------------
 5 files changed, 47 insertions(+), 53 deletions(-)

diff --git a/xen/arch/x86/include/asm/p2m.h b/xen/arch/x86/include/asm/p2m.h
index 3b860e30c3..1856cc396c 100644
--- a/xen/arch/x86/include/asm/p2m.h
+++ b/xen/arch/x86/include/asm/p2m.h
@@ -1110,6 +1110,10 @@ static inline int p2m_entry_modify(struct p2m_domain 
*p2m, p2m_type_t nt,
 
 #endif /* CONFIG_HVM */
 
+/* get the dirty bitmap for a specific range of pfns */
+void p2m_log_dirty_range(struct domain *d, unsigned long begin_pfn,
+                         unsigned long nr, uint8_t *dirty_bitmap);
+
 #endif /* _XEN_ASM_X86_P2M_H */
 
 /*
diff --git a/xen/arch/x86/include/asm/paging.h 
b/xen/arch/x86/include/asm/paging.h
index 768b077ebd..1b0694bb36 100644
--- a/xen/arch/x86/include/asm/paging.h
+++ b/xen/arch/x86/include/asm/paging.h
@@ -133,13 +133,20 @@ struct paging_mode {
     (DIV_ROUND_UP(PADDR_BITS - PAGE_SHIFT - (PAGE_SHIFT + 3), \
                   PAGE_SHIFT - ilog2(sizeof(mfn_t))) + 1)
 
-#if PG_log_dirty
+#ifdef CONFIG_HVM
+/* VRAM dirty tracking support */
+struct sh_dirty_vram {
+    unsigned long begin_pfn;
+    unsigned long end_pfn;
+#ifdef CONFIG_SHADOW_PAGING
+    paddr_t *sl1ma;
+    uint8_t *dirty_bitmap;
+    s_time_t last_dirty;
+#endif
+};
+#endif
 
-/* get the dirty bitmap for a specific range of pfns */
-void paging_log_dirty_range(struct domain *d,
-                            unsigned long begin_pfn,
-                            unsigned long nr,
-                            uint8_t *dirty_bitmap);
+#if PG_log_dirty
 
 /* log dirty initialization */
 void paging_log_dirty_init(struct domain *d, const struct log_dirty_ops *ops);
@@ -171,19 +178,6 @@ bool paging_mfn_is_dirty(const struct domain *d, mfn_t 
gmfn);
 #define L4_LOGDIRTY_IDX(pfn) ((pfn_x(pfn) >> (PAGE_SHIFT + 3 + PAGETABLE_ORDER 
* 2)) & \
                               (LOGDIRTY_NODE_ENTRIES-1))
 
-#ifdef CONFIG_HVM
-/* VRAM dirty tracking support */
-struct sh_dirty_vram {
-    unsigned long begin_pfn;
-    unsigned long end_pfn;
-#ifdef CONFIG_SHADOW_PAGING
-    paddr_t *sl1ma;
-    uint8_t *dirty_bitmap;
-    s_time_t last_dirty;
-#endif
-};
-#endif
-
 #else /* !PG_log_dirty */
 
 static inline void paging_log_dirty_init(struct domain *d,
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index 4aec98109d..2f69ff9c7b 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -42,7 +42,7 @@
  * Create a dirty vram range on demand when some [begin_pfn:begin_pfn+nr] is
  * first encountered.
  * Collect the guest_dirty bitmask, a bit mask of the dirty vram pages, by
- * calling paging_log_dirty_range(), which interrogates each vram
+ * calling p2m_log_dirty_range(), which interrogates each vram
  * page's p2m type looking for pages that have been made writable.
  */
 
@@ -119,7 +119,7 @@ int hap_track_dirty_vram(struct domain *d,
             p2m_flush_hardware_cached_dirty(d);
 
             /* get the bitmap */
-            paging_log_dirty_range(d, begin_pfn, nr_frames, dirty_bitmap);
+            p2m_log_dirty_range(d, begin_pfn, nr_frames, dirty_bitmap);
 
             domain_unpause(d);
         }
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index e802f2e4e6..e2a00a0efd 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -2161,6 +2161,34 @@ int relinquish_p2m_mapping(struct domain *d)
     return rc;
 }
 
+void p2m_log_dirty_range(struct domain *d, unsigned long begin_pfn,
+                         unsigned long nr, uint8_t *dirty_bitmap)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    unsigned int i;
+    unsigned long pfn;
+
+    /*
+     * Set l1e entries of P2M table to be read-only.
+     *
+     * On first write, it page faults, its entry is changed to read-write,
+     * and on retry the write succeeds.
+     *
+     * We populate dirty_bitmap by looking for entries that have been
+     * switched to read-write.
+     */
+
+    p2m_lock(p2m);
+
+    for ( i = 0, pfn = begin_pfn; pfn < begin_pfn + nr; i++, pfn++ )
+        if ( !p2m_change_type_one(d, pfn, p2m_ram_rw, p2m_ram_logdirty) )
+            dirty_bitmap[i >> 3] |= (1 << (i & 7));
+
+    p2m_unlock(p2m);
+
+    guest_flush_tlb_mask(d, d->dirty_cpumask);
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index 3aafb0990b..65455a6867 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -583,38 +583,6 @@ static int paging_log_dirty_op(struct domain *d,
     return rv;
 }
 
-#ifdef CONFIG_HVM
-void paging_log_dirty_range(struct domain *d,
-                           unsigned long begin_pfn,
-                           unsigned long nr,
-                           uint8_t *dirty_bitmap)
-{
-    struct p2m_domain *p2m = p2m_get_hostp2m(d);
-    int i;
-    unsigned long pfn;
-
-    /*
-     * Set l1e entries of P2M table to be read-only.
-     *
-     * On first write, it page faults, its entry is changed to read-write,
-     * and on retry the write succeeds.
-     *
-     * We populate dirty_bitmap by looking for entries that have been
-     * switched to read-write.
-     */
-
-    p2m_lock(p2m);
-
-    for ( i = 0, pfn = begin_pfn; pfn < begin_pfn + nr; i++, pfn++ )
-        if ( !p2m_change_type_one(d, pfn, p2m_ram_rw, p2m_ram_logdirty) )
-            dirty_bitmap[i >> 3] |= (1 << (i & 7));
-
-    p2m_unlock(p2m);
-
-    guest_flush_tlb_mask(d, d->dirty_cpumask);
-}
-#endif
-
 /*
  * Callers must supply log_dirty_ops for the log dirty code to call. This
  * function usually is invoked when paging is enabled. Check shadow_enable()
-- 
2.34.1




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.