[Xen-devel] [PATCH 08/14] xen: Convert is_xen_heap_mfn to use typesafe MFN

No functional changes.
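
For context: Xen's "typesafe" MFN wraps the raw machine frame number in a
one-member struct, so the compiler rejects accidental mixing of MFNs with
GFNs, PFNs or plain integers. A simplified sketch of the pattern (the real
definitions come from TYPE_SAFE() in xen/include/xen/typesafe.h and
xen/include/xen/mm.h, and may differ in detail):

    /* Sketch only: modelled on Xen's TYPE_SAFE(unsigned long, mfn). */
    typedef struct { unsigned long mfn; } mfn_t;

    /* Wrap a raw frame number in the typesafe representation. */
    static inline mfn_t _mfn(unsigned long mfn)
    {
        return (mfn_t){ mfn };
    }

    /* Unwrap back to a raw unsigned long. */
    static inline unsigned long mfn_x(mfn_t mfn)
    {
        return mfn.mfn;
    }

Converting is_xen_heap_mfn() to take mfn_t lets callers that already hold
an mfn_t pass it straight through, dropping the mfn_x()/_mfn() round-trips
visible in the hunks below.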

Signed-off-by: Julien Grall <julien.grall@xxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>

---
    Changes in v2:
        - Fix coding style
        - Merge the declaration for gpfn and old_gpfn
        - Don't open-code mfn_valid
        - Add Jan's reviewed-by
        - Use mfn_add(mfn, -1) (see the sketch after this list)
        - Add Stefano's acked-by for ARM parts
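
    The mfn_add(mfn, -1) change keeps the "one page below the xenheap"
    computation in the typesafe type instead of open-coding arithmetic on
    raw PFNs. A hedged sketch of the helpers involved (modelled on
    xen/include/xen/mm.h; the exact definitions there may differ):

        static inline mfn_t mfn_add(mfn_t mfn, unsigned long n)
        {
            return _mfn(mfn_x(mfn) + n);
        }

        /* maddr_to_mfn(ma) is roughly _mfn(ma >> PAGE_SHIFT). */

    Passing -1 still works because the offset is converted to unsigned
    long, so mfn_add(mfn, -1) computes mfn - 1 by modular arithmetic,
    matching the old paddr_to_pfn(ps)-1 in init_xenheap_pages().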
---
 xen/arch/x86/mm.c              | 10 +++++-----
 xen/arch/x86/mm/p2m.c          |  2 +-
 xen/arch/x86/mm/shadow/multi.c |  2 +-
 xen/common/page_alloc.c        |  4 ++--
 xen/include/asm-arm/mm.h       |  8 ++++----
 xen/include/asm-x86/mm.h       |  2 +-
 6 files changed, 14 insertions(+), 14 deletions(-)
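
The conversion follows one pattern throughout: call sites stop round-tripping
through mfn_x()/_mfn() and hand the typesafe value straight to the helper,
e.g. (taken from the p2m.c hunk below):

    /* Before: unwrap to unsigned long just to satisfy the old prototype. */
    if ( is_xen_heap_mfn(mfn_x(prev_mfn)) )

    /* After: the mfn_t is passed directly. */
    if ( is_xen_heap_mfn(prev_mfn) )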

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 9878453eb0..570e1e0deb 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4462,8 +4462,8 @@ int xenmem_add_to_physmap_one(
     gfn_t gpfn)
 {
     struct page_info *page = NULL;
-    unsigned long gfn = 0; /* gcc ... */
-    unsigned long prev_mfn, old_gpfn;
+    unsigned long gfn = 0 /* gcc ... */, old_gpfn;
+    mfn_t prev_mfn;
     int rc = 0;
     mfn_t mfn = INVALID_MFN;
     p2m_type_t p2mt;
@@ -4512,12 +4512,12 @@ int xenmem_add_to_physmap_one(
     }
 
     /* Remove previously mapped page if it was present. */
-    prev_mfn = mfn_x(get_gfn(d, gfn_x(gpfn), &p2mt));
-    if ( mfn_valid(_mfn(prev_mfn)) )
+    prev_mfn = get_gfn(d, gfn_x(gpfn), &p2mt);
+    if ( mfn_valid(prev_mfn) )
     {
         if ( is_xen_heap_mfn(prev_mfn) )
             /* Xen heap frames are simply unhooked from this phys slot. */
-            rc = guest_physmap_remove_page(d, gpfn, _mfn(prev_mfn), PAGE_ORDER_4K);
+            rc = guest_physmap_remove_page(d, gpfn, prev_mfn, PAGE_ORDER_4K);
         else
             /* Normal domain memory is freed, to avoid leaking memory. */
             rc = guest_remove_page(d, gfn_x(gpfn));
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index b16117dc56..3c98f72dbb 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -2935,7 +2935,7 @@ int p2m_add_foreign(struct domain *tdom, unsigned long fgfn,
     prev_mfn = get_gfn(tdom, gpfn, &p2mt_prev);
     if ( mfn_valid(prev_mfn) )
     {
-        if ( is_xen_heap_mfn(mfn_x(prev_mfn)) )
+        if ( is_xen_heap_mfn(prev_mfn) )
             /* Xen heap frames are simply unhooked from this phys slot */
             rc = guest_physmap_remove_page(tdom, _gfn(gpfn), prev_mfn, 0);
         else
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 8781bdcfe5..35a7b606d0 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -559,7 +559,7 @@ _sh_propagate(struct vcpu *v,
      * caching attributes in the shadows to match what was asked for.
      */
     if ( (level == 1) && is_hvm_domain(d) &&
-         !is_xen_heap_mfn(mfn_x(target_mfn)) )
+         !is_xen_heap_mfn(target_mfn) )
     {
         int type;
 
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index b4067ae202..6061cce24f 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -2121,9 +2121,9 @@ void init_xenheap_pages(paddr_t ps, paddr_t pe)
      * Yuk! Ensure there is a one-page buffer between Xen and Dom zones, to
      * prevent merging of power-of-two blocks across the zone boundary.
      */
-    if ( ps && !is_xen_heap_mfn(paddr_to_pfn(ps)-1) )
+    if ( ps && !is_xen_heap_mfn(mfn_add(maddr_to_mfn(ps), -1)) )
         ps += PAGE_SIZE;
-    if ( !is_xen_heap_mfn(paddr_to_pfn(pe)) )
+    if ( !is_xen_heap_mfn(maddr_to_mfn(pe)) )
         pe -= PAGE_SIZE;
 
     memguard_guard_range(maddr_to_virt(ps), pe - ps);
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index b56018aace..a9c8352b94 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -138,16 +138,16 @@ extern vaddr_t xenheap_virt_start;
 #endif
 
 #ifdef CONFIG_ARM_32
-#define is_xen_heap_page(page) is_xen_heap_mfn(mfn_x(page_to_mfn(page)))
+#define is_xen_heap_page(page) is_xen_heap_mfn(page_to_mfn(page))
 #define is_xen_heap_mfn(mfn) ({                                 \
-    unsigned long mfn_ = (mfn);                                 \
+    unsigned long mfn_ = mfn_x(mfn);                            \
     (mfn_ >= mfn_x(xenheap_mfn_start) &&                        \
      mfn_ < mfn_x(xenheap_mfn_end));                            \
 })
 #else
 #define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap)
 #define is_xen_heap_mfn(mfn) \
-    (mfn_valid(_mfn(mfn)) && is_xen_heap_page(mfn_to_page(_mfn(mfn))))
+    (mfn_valid(mfn) && is_xen_heap_page(mfn_to_page(mfn)))
 #endif
 
 #define is_xen_fixed_mfn(mfn)                                   \
@@ -246,7 +246,7 @@ static inline paddr_t __virt_to_maddr(vaddr_t va)
 #ifdef CONFIG_ARM_32
 static inline void *maddr_to_virt(paddr_t ma)
 {
-    ASSERT(is_xen_heap_mfn(ma >> PAGE_SHIFT));
+    ASSERT(is_xen_heap_mfn(maddr_to_mfn(ma)));
     ma -= mfn_to_maddr(xenheap_mfn_start);
     return (void *)(unsigned long) ma + XENHEAP_VIRT_START;
 }
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index f124f57964..6f76f004ab 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -278,7 +278,7 @@ struct page_info
 
 #define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap)
 #define is_xen_heap_mfn(mfn) \
-    (__mfn_valid(mfn) && is_xen_heap_page(mfn_to_page(_mfn(mfn))))
+    (mfn_valid(mfn) && is_xen_heap_page(mfn_to_page(mfn)))
 #define is_xen_fixed_mfn(mfn)                     \
     (((mfn_to_maddr(mfn)) >= __pa(&_stext)) &&    \
      ((mfn_to_maddr(mfn)) <= __pa(&__2M_rwdata_end)))
-- 
2.11.0

