[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 3/3] x86/mm: Introduce and use l?e_{get, from}_mfn()



This avoids the explicit boxing/unboxing of mfn_t in relevant codepaths.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
---
 xen/arch/x86/debug.c            |  8 ++++----
 xen/arch/x86/domain_page.c      |  2 +-
 xen/arch/x86/mm.c               |  8 ++++----
 xen/arch/x86/mm/hap/hap.c       |  6 +++---
 xen/arch/x86/mm/p2m-pt.c        | 12 ++++++------
 xen/arch/x86/mm/shadow/common.c |  6 +++---
 xen/arch/x86/mm/shadow/multi.c  | 22 +++++++++++-----------
 xen/arch/x86/mm/shadow/types.h  | 16 ++++++++--------
 xen/include/asm-x86/page.h      | 18 +++++++++++++++---
 9 files changed, 55 insertions(+), 43 deletions(-)

diff --git a/xen/arch/x86/debug.c b/xen/arch/x86/debug.c
index 5303532..1c10b84 100644
--- a/xen/arch/x86/debug.c
+++ b/xen/arch/x86/debug.c
@@ -108,7 +108,7 @@ dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
         l4t = map_domain_page(mfn);
         l4e = l4t[l4_table_offset(vaddr)];
         unmap_domain_page(l4t);
-        mfn = _mfn(l4e_get_pfn(l4e));
+        mfn = l4e_get_mfn(l4e);
         DBGP2("l4t:%p l4to:%lx l4e:%lx mfn:%#"PRI_mfn"\n", l4t,
               l4_table_offset(vaddr), l4e, mfn_x(mfn));
         if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) )
@@ -120,7 +120,7 @@ dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
         l3t = map_domain_page(mfn);
         l3e = l3t[l3_table_offset(vaddr)];
         unmap_domain_page(l3t);
-        mfn = _mfn(l3e_get_pfn(l3e));
+        mfn = l3e_get_mfn(l3e);
         DBGP2("l3t:%p l3to:%lx l3e:%lx mfn:%#"PRI_mfn"\n", l3t,
               l3_table_offset(vaddr), l3e, mfn_x(mfn));
         if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) ||
@@ -134,7 +134,7 @@ dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
     l2t = map_domain_page(mfn);
     l2e = l2t[l2_table_offset(vaddr)];
     unmap_domain_page(l2t);
-    mfn = _mfn(l2e_get_pfn(l2e));
+    mfn = l2e_get_mfn(l2e);
     DBGP2("l2t:%p l2to:%lx l2e:%lx mfn:%#"PRI_mfn"\n",
           l2t, l2_table_offset(vaddr), l2e, mfn_x(mfn));
     if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) ||
@@ -146,7 +146,7 @@ dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
     l1t = map_domain_page(mfn);
     l1e = l1t[l1_table_offset(vaddr)];
     unmap_domain_page(l1t);
-    mfn = _mfn(l1e_get_pfn(l1e));
+    mfn = l1e_get_mfn(l1e);
     DBGP2("l1t:%p l1to:%lx l1e:%lx mfn:%#"PRI_mfn"\n", l1t, l1_table_offset(vaddr),
           l1e, mfn_x(mfn));
 
diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c
index 0783c1e..0463e9a 100644
--- a/xen/arch/x86/domain_page.c
+++ b/xen/arch/x86/domain_page.c
@@ -166,7 +166,7 @@ void *map_domain_page(mfn_t mfn)
 
     spin_unlock(&dcache->lock);
 
-    l1e_write(&MAPCACHE_L1ENT(idx), l1e_from_pfn(mfn_x(mfn), __PAGE_HYPERVISOR_RW));
+    l1e_write(&MAPCACHE_L1ENT(idx), l1e_from_mfn(mfn, __PAGE_HYPERVISOR_RW));
 
  out:
     local_irq_restore(flags);
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 1e0ae2f..8780a60 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1156,7 +1156,7 @@ get_page_from_l3e(
     }
 
     rc = get_page_and_type_from_mfn(
-        _mfn(l3e_get_pfn(l3e)), PGT_l2_page_table, d, partial, 1);
+        l3e_get_mfn(l3e), PGT_l2_page_table, d, partial, 1);
     if ( unlikely(rc == -EINVAL) &&
          !is_pv_32bit_domain(d) &&
          get_l3_linear_pagetable(l3e, pfn, d) )
@@ -1189,7 +1189,7 @@ get_page_from_l4e(
     }
 
     rc = get_page_and_type_from_mfn(
-        _mfn(l4e_get_pfn(l4e)), PGT_l3_page_table, d, partial, 1);
+        l4e_get_mfn(l4e), PGT_l3_page_table, d, partial, 1);
     if ( unlikely(rc == -EINVAL) && get_l4_linear_pagetable(l4e, pfn, d) )
         rc = 0;
 
@@ -1548,7 +1548,7 @@ static int alloc_l3_table(struct page_info *page)
                 rc = -EINVAL;
             else
                 rc = get_page_and_type_from_mfn(
-                    _mfn(l3e_get_pfn(pl3e[i])),
+                    l3e_get_mfn(pl3e[i]),
                     PGT_l2_page_table | PGT_pae_xen_l2, d, partial, 1);
         }
         else if ( !is_guest_l3_slot(i) ||
@@ -5181,7 +5181,7 @@ int ptwr_do_page_fault(struct vcpu *v, unsigned long addr,
     /* We are looking only for read-only mappings of p.t. pages. */
     if ( ((l1e_get_flags(pte) & (_PAGE_PRESENT|_PAGE_RW)) != _PAGE_PRESENT) ||
          rangeset_contains_singleton(mmio_ro_ranges, l1e_get_pfn(pte)) ||
-         !get_page_from_mfn(_mfn(l1e_get_pfn(pte)), d) )
+         !get_page_from_mfn(l1e_get_mfn(pte), d) )
         goto bail;
 
     page = l1e_get_page(pte);
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index 027ab8f..15e4877 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -409,7 +409,7 @@ static void hap_install_xen_entries_in_l4(struct vcpu *v, mfn_t l4mfn)
 
     /* Install a linear mapping */
     l4e[l4_table_offset(LINEAR_PT_VIRT_START)] =
-        l4e_from_pfn(mfn_x(l4mfn), __PAGE_HYPERVISOR_RW);
+        l4e_from_mfn(l4mfn, __PAGE_HYPERVISOR_RW);
 
     unmap_domain_page(l4e);
 }
@@ -749,8 +749,8 @@ hap_write_p2m_entry(struct domain *d, unsigned long gfn, l1_pgentry_t *p,
          && !p2m_get_hostp2m(d)->defer_nested_flush ) {
         /* We are replacing a valid entry so we need to flush nested p2ms,
          * unless the only change is an increase in access rights. */
-        mfn_t omfn = _mfn(l1e_get_pfn(*p));
-        mfn_t nmfn = _mfn(l1e_get_pfn(new));
+        mfn_t omfn = l1e_get_mfn(*p);
+        mfn_t nmfn = l1e_get_mfn(new);
         flush_nestedp2m = !( mfn_x(omfn) == mfn_x(nmfn)
             && perms_strictly_increased(old_flags, l1e_get_flags(new)) );
     }
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index f0d8076..ba32d43 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -162,7 +162,7 @@ p2m_free_entry(struct p2m_domain *p2m, l1_pgentry_t *p2m_entry, int page_order)
 
     if ( page_order > PAGE_ORDER_2M )
     {
-        l1_pgentry_t *l3_table = map_domain_page(_mfn(l1e_get_pfn(*p2m_entry)));
+        l1_pgentry_t *l3_table = map_domain_page(l1e_get_mfn(*p2m_entry));
         for ( int i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
             p2m_free_entry(p2m, l3_table + i, page_order - 9);
         unmap_domain_page(l3_table);
@@ -293,7 +293,7 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
         p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 2);
     }
 
-    next = map_domain_page(_mfn(l1e_get_pfn(*p2m_entry)));
+    next = map_domain_page(l1e_get_mfn(*p2m_entry));
     if ( unmap )
         unmap_domain_page(*table);
     *table = next;
@@ -808,7 +808,7 @@ p2m_pt_get_entry(struct p2m_domain *p2m, unsigned long gfn,
             unmap_domain_page(l4e);
             return INVALID_MFN;
         }
-        mfn = _mfn(l4e_get_pfn(*l4e));
+        mfn = l4e_get_mfn(*l4e);
         recalc = needs_recalc(l4, *l4e);
         unmap_domain_page(l4e);
     }
@@ -849,7 +849,7 @@ p2m_pt_get_entry(struct p2m_domain *p2m, unsigned long gfn,
             return (p2m_is_valid(*t)) ? mfn : INVALID_MFN;
         }
 
-        mfn = _mfn(l3e_get_pfn(*l3e));
+        mfn = l3e_get_mfn(*l3e);
         if ( _needs_recalc(flags) )
             recalc = 1;
         unmap_domain_page(l3e);
@@ -888,7 +888,7 @@ p2m_pt_get_entry(struct p2m_domain *p2m, unsigned long gfn,
         return (p2m_is_valid(*t)) ? mfn : INVALID_MFN;
     }
 
-    mfn = _mfn(l2e_get_pfn(*l2e));
+    mfn = l2e_get_mfn(*l2e);
     if ( needs_recalc(l2, *l2e) )
         recalc = 1;
     unmap_domain_page(l2e);
@@ -916,7 +916,7 @@ p2m_pt_get_entry(struct p2m_domain *p2m, unsigned long gfn,
         unmap_domain_page(l1e);
         return INVALID_MFN;
     }
-    mfn = _mfn(l1e_get_pfn(*l1e));
+    mfn = l1e_get_mfn(*l1e);
     *t = p2m_recalc_type(recalc || _needs_recalc(flags), l1t, p2m, gfn);
     unmap_domain_page(l1e);
 
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 268bae4..e8ee6db 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -3460,7 +3460,7 @@ static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn,
     /* If we're removing an MFN from the p2m, remove it from the shadows too */
     if ( level == 1 )
     {
-        mfn_t mfn = _mfn(l1e_get_pfn(*p));
+        mfn_t mfn = l1e_get_mfn(*p);
         p2m_type_t p2mt = p2m_flags_to_type(l1e_get_flags(*p));
         if ( (p2m_is_valid(p2mt) || p2m_is_grant(p2mt)) && mfn_valid(mfn) )
         {
@@ -3478,8 +3478,8 @@ static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn,
     {
         unsigned int i;
         cpumask_t flushmask;
-        mfn_t omfn = _mfn(l1e_get_pfn(*p));
-        mfn_t nmfn = _mfn(l1e_get_pfn(new));
+        mfn_t omfn = l1e_get_mfn(*p);
+        mfn_t nmfn = l1e_get_mfn(new);
         l1_pgentry_t *npte = NULL;
         p2m_type_t p2mt = p2m_flags_to_type(l1e_get_flags(*p));
         if ( p2m_is_valid(p2mt) && mfn_valid(omfn) )
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index f8a8928..f0eabb6 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -1655,12 +1655,12 @@ sh_make_monitor_table(struct vcpu *v)
             m3mfn = shadow_alloc(d, SH_type_monitor_table, 0);
             mfn_to_page(m3mfn)->shadow_flags = 3;
             l4e[shadow_l4_table_offset(SH_LINEAR_PT_VIRT_START)]
-                = l4e_from_pfn(mfn_x(m3mfn), __PAGE_HYPERVISOR_RW);
+                = l4e_from_mfn(m3mfn, __PAGE_HYPERVISOR_RW);
 
             m2mfn = shadow_alloc(d, SH_type_monitor_table, 0);
             mfn_to_page(m2mfn)->shadow_flags = 2;
             l3e = map_domain_page(m3mfn);
-            l3e[0] = l3e_from_pfn(mfn_x(m2mfn), __PAGE_HYPERVISOR_RW);
+            l3e[0] = l3e_from_mfn(m2mfn, __PAGE_HYPERVISOR_RW);
             unmap_domain_page(l3e);
 
             if ( is_pv_32bit_domain(d) )
@@ -1669,12 +1669,12 @@ sh_make_monitor_table(struct vcpu *v)
                  * area into its usual VAs in the monitor tables */
                 m3mfn = shadow_alloc(d, SH_type_monitor_table, 0);
                 mfn_to_page(m3mfn)->shadow_flags = 3;
-                l4e[0] = l4e_from_pfn(mfn_x(m3mfn), __PAGE_HYPERVISOR_RW);
+                l4e[0] = l4e_from_mfn(m3mfn, __PAGE_HYPERVISOR_RW);
 
                 m2mfn = shadow_alloc(d, SH_type_monitor_table, 0);
                 mfn_to_page(m2mfn)->shadow_flags = 2;
                 l3e = map_domain_page(m3mfn);
-                l3e[3] = l3e_from_pfn(mfn_x(m2mfn), _PAGE_PRESENT);
+                l3e[3] = l3e_from_mfn(m2mfn, _PAGE_PRESENT);
                 sh_install_xen_entries_in_l2h(d, m2mfn);
                 unmap_domain_page(l3e);
             }
@@ -2075,10 +2075,10 @@ void sh_destroy_monitor_table(struct vcpu *v, mfn_t mmfn)
         /* Need to destroy the l3 and l2 monitor pages used
          * for the linear map */
         ASSERT(l4e_get_flags(l4e[linear_slot]) & _PAGE_PRESENT);
-        m3mfn = _mfn(l4e_get_pfn(l4e[linear_slot]));
+        m3mfn = l4e_get_mfn(l4e[linear_slot]);
         l3e = map_domain_page(m3mfn);
         ASSERT(l3e_get_flags(l3e[0]) & _PAGE_PRESENT);
-        shadow_free(d, _mfn(l3e_get_pfn(l3e[0])));
+        shadow_free(d, l3e_get_mfn(l3e[0]));
         unmap_domain_page(l3e);
         shadow_free(d, m3mfn);
 
@@ -2087,10 +2087,10 @@ void sh_destroy_monitor_table(struct vcpu *v, mfn_t mmfn)
             /* Need to destroy the l3 and l2 monitor pages that map the
              * Xen VAs at 3GB-4GB */
             ASSERT(l4e_get_flags(l4e[0]) & _PAGE_PRESENT);
-            m3mfn = _mfn(l4e_get_pfn(l4e[0]));
+            m3mfn = l4e_get_mfn(l4e[0]);
             l3e = map_domain_page(m3mfn);
             ASSERT(l3e_get_flags(l3e[3]) & _PAGE_PRESENT);
-            shadow_free(d, _mfn(l3e_get_pfn(l3e[3])));
+            shadow_free(d, l3e_get_mfn(l3e[3]));
             unmap_domain_page(l3e);
             shadow_free(d, m3mfn);
         }
@@ -3886,12 +3886,12 @@ sh_update_linear_entries(struct vcpu *v)
             ml4e = map_domain_page(pagetable_get_mfn(v->arch.monitor_table));
 
             ASSERT(l4e_get_flags(ml4e[linear_slot]) & _PAGE_PRESENT);
-            l3mfn = _mfn(l4e_get_pfn(ml4e[linear_slot]));
+            l3mfn = l4e_get_mfn(ml4e[linear_slot]);
             ml3e = map_domain_page(l3mfn);
             unmap_domain_page(ml4e);
 
             ASSERT(l3e_get_flags(ml3e[0]) & _PAGE_PRESENT);
-            l2mfn = _mfn(l3e_get_pfn(ml3e[0]));
+            l2mfn = l3e_get_mfn(ml3e[0]);
             ml2e = map_domain_page(l2mfn);
             unmap_domain_page(ml3e);
         }
@@ -3903,7 +3903,7 @@ sh_update_linear_entries(struct vcpu *v)
         {
             ml2e[i] =
                 (shadow_l3e_get_flags(sl3e[i]) & _PAGE_PRESENT)
-                ? l2e_from_pfn(mfn_x(shadow_l3e_get_mfn(sl3e[i])),
+                ? l2e_from_mfn(shadow_l3e_get_mfn(sl3e[i]),
                                __PAGE_HYPERVISOR_RW)
                 : l2e_empty();
         }
diff --git a/xen/arch/x86/mm/shadow/types.h b/xen/arch/x86/mm/shadow/types.h
index a717d74..73f38f0 100644
--- a/xen/arch/x86/mm/shadow/types.h
+++ b/xen/arch/x86/mm/shadow/types.h
@@ -75,14 +75,14 @@ static inline paddr_t shadow_l4e_get_paddr(shadow_l4e_t sl4e)
 #endif
 
 static inline mfn_t shadow_l1e_get_mfn(shadow_l1e_t sl1e)
-{ return _mfn(l1e_get_pfn(sl1e)); }
+{ return l1e_get_mfn(sl1e); }
 static inline mfn_t shadow_l2e_get_mfn(shadow_l2e_t sl2e)
-{ return _mfn(l2e_get_pfn(sl2e)); }
+{ return l2e_get_mfn(sl2e); }
 static inline mfn_t shadow_l3e_get_mfn(shadow_l3e_t sl3e)
-{ return _mfn(l3e_get_pfn(sl3e)); }
+{ return l3e_get_mfn(sl3e); }
 #if SHADOW_PAGING_LEVELS >= 4
 static inline mfn_t shadow_l4e_get_mfn(shadow_l4e_t sl4e)
-{ return _mfn(l4e_get_pfn(sl4e)); }
+{ return l4e_get_mfn(sl4e); }
 #endif
 
 static inline u32 shadow_l1e_get_flags(shadow_l1e_t sl1e)
@@ -115,14 +115,14 @@ static inline shadow_l4e_t shadow_l4e_empty(void)
 #endif
 
 static inline shadow_l1e_t shadow_l1e_from_mfn(mfn_t mfn, u32 flags)
-{ return l1e_from_pfn(mfn_x(mfn), flags); }
+{ return l1e_from_mfn(mfn, flags); }
 static inline shadow_l2e_t shadow_l2e_from_mfn(mfn_t mfn, u32 flags)
-{ return l2e_from_pfn(mfn_x(mfn), flags); }
+{ return l2e_from_mfn(mfn, flags); }
 static inline shadow_l3e_t shadow_l3e_from_mfn(mfn_t mfn, u32 flags)
-{ return l3e_from_pfn(mfn_x(mfn), flags); }
+{ return l3e_from_mfn(mfn, flags); }
 #if SHADOW_PAGING_LEVELS >= 4
 static inline shadow_l4e_t shadow_l4e_from_mfn(mfn_t mfn, u32 flags)
-{ return l4e_from_pfn(mfn_x(mfn), flags); }
+{ return l4e_from_mfn(mfn, flags); }
 #endif
 
 #define shadow_l1_table_offset(a) l1_table_offset(a)
diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
index 242903f..8463d71 100644
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -71,6 +71,12 @@
 #define l4e_get_pfn(x)             \
     ((unsigned long)(((x).l4 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))
 
+/* Get mfn mapped by pte (mfn_t). */
+#define l1e_get_mfn(x) _mfn(l1e_get_pfn(x))
+#define l2e_get_mfn(x) _mfn(l2e_get_pfn(x))
+#define l3e_get_mfn(x) _mfn(l3e_get_pfn(x))
+#define l4e_get_mfn(x) _mfn(l4e_get_pfn(x))
+
 /* Get physical address of page mapped by pte (paddr_t). */
 #define l1e_get_paddr(x)           \
     ((paddr_t)(((x).l1 & (PADDR_MASK&PAGE_MASK))))
@@ -114,6 +120,12 @@
 #define l4e_from_pfn(pfn, flags)   \
     ((l4_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
 
+/* Construct a pte from an mfn and access flags. */
+#define l1e_from_mfn(m, f) l1e_from_pfn(mfn_x(m), f)
+#define l2e_from_mfn(m, f) l2e_from_pfn(mfn_x(m), f)
+#define l3e_from_mfn(m, f) l3e_from_pfn(mfn_x(m), f)
+#define l4e_from_mfn(m, f) l4e_from_pfn(mfn_x(m), f)
+
 /* Construct a pte from a physical address and access flags. */
 #ifndef __ASSEMBLY__
 static inline l1_pgentry_t l1e_from_paddr(paddr_t pa, unsigned int flags)
@@ -180,9 +192,9 @@ static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, 
unsigned int flags)
 #define l3e_to_l2e(x)              ((l2_pgentry_t *)__va(l3e_get_paddr(x)))
 #define l4e_to_l3e(x)              ((l3_pgentry_t *)__va(l4e_get_paddr(x)))
 
-#define map_l1t_from_l2e(x)        ((l1_pgentry_t *)map_domain_page(_mfn(l2e_get_pfn(x))))
-#define map_l2t_from_l3e(x)        ((l2_pgentry_t *)map_domain_page(_mfn(l3e_get_pfn(x))))
-#define map_l3t_from_l4e(x)        ((l3_pgentry_t *)map_domain_page(_mfn(l4e_get_pfn(x))))
+#define map_l1t_from_l2e(x)        ((l1_pgentry_t *)map_domain_page(l2e_get_mfn(x)))
+#define map_l2t_from_l3e(x)        ((l2_pgentry_t *)map_domain_page(l3e_get_mfn(x)))
+#define map_l3t_from_l4e(x)        ((l3_pgentry_t *)map_domain_page(l4e_get_mfn(x)))
 
 /* Given a virtual address, get an entry offset into a page table. */
 #define l1_table_offset(a)         \
-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.