
[Xen-devel] [PATCH v4 08/16] xen: Replace _mfn(INVALID_MFN) with INVALID_MFN_T



This patch is a purely mechanical replacement, generated with the following command:

42sh> ack -l "_mfn\(INVALID_MFN\)" | xargs sed -i -e 's/_mfn(INVALID_MFN)/INVALID_MFN_T/g'
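
For reference, a minimal sketch of the typesafe plumbing the new constant
relies on. This is illustrative only: the real definitions come from Xen's
typesafe MFN machinery (the TYPE_SAFE() infrastructure), and INVALID_MFN_T
itself is introduced earlier in this series, so the exact spellings and
location may differ:

    /* Opaque wrapper so raw unsigned longs cannot be mixed up with MFNs. */
    typedef struct { unsigned long mfn; } mfn_t;

    /* Raw sentinel value meaning "no machine frame". */
    #define INVALID_MFN (~0UL)

    static inline mfn_t _mfn(unsigned long m) { return (mfn_t){ m }; }
    static inline unsigned long mfn_x(mfn_t m) { return m.mfn; }

    /*
     * Typed sentinel the call sites below are switched over to; by
     * construction it is equivalent to the open-coded _mfn(INVALID_MFN).
     */
    #define INVALID_MFN_T _mfn(INVALID_MFN)

Using one named constant keeps the sentinel typesafe while avoiding the
repeated _mfn(INVALID_MFN) construction at every call site.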

Signed-off-by: Julien Grall <julien.grall@xxxxxxx>

---
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Jun Nakajima <jun.nakajima@xxxxxxxxx>
Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
Cc: Tim Deegan <tim@xxxxxxx>

    Changes in v4:
        - Patch added
---
 xen/arch/x86/mm/guest_walk.c    |  4 ++--
 xen/arch/x86/mm/hap/hap.c       |  2 +-
 xen/arch/x86/mm/p2m-ept.c       |  2 +-
 xen/arch/x86/mm/p2m-pod.c       | 18 +++++++++---------
 xen/arch/x86/mm/p2m-pt.c        | 16 ++++++++--------
 xen/arch/x86/mm/p2m.c           | 14 +++++++-------
 xen/arch/x86/mm/paging.c        | 12 ++++++------
 xen/arch/x86/mm/shadow/common.c | 32 ++++++++++++++++----------------
 xen/arch/x86/mm/shadow/multi.c  | 32 ++++++++++++++++----------------
 9 files changed, 66 insertions(+), 66 deletions(-)

diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
index e850502..12b57d8 100644
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -281,7 +281,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
         start = _gfn((gfn_x(start) & ~GUEST_L3_GFN_MASK) +
                      ((va >> PAGE_SHIFT) & GUEST_L3_GFN_MASK));
         gw->l1e = guest_l1e_from_gfn(start, flags);
-        gw->l2mfn = gw->l1mfn = _mfn(INVALID_MFN);
+        gw->l2mfn = gw->l1mfn = INVALID_MFN_T;
         goto set_ad;
     }
 
@@ -356,7 +356,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
         start = _gfn((gfn_x(start) & ~GUEST_L2_GFN_MASK) +
                      guest_l1_table_offset(va));
         gw->l1e = guest_l1e_from_gfn(start, flags);
-        gw->l1mfn = _mfn(INVALID_MFN);
+        gw->l1mfn = INVALID_MFN_T;
     } 
     else 
     {
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index 9c2cd49..cd18c73 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -430,7 +430,7 @@ static mfn_t hap_make_monitor_table(struct vcpu *v)
  oom:
     HAP_ERROR("out of memory building monitor pagetable\n");
     domain_crash(d);
-    return _mfn(INVALID_MFN);
+    return INVALID_MFN_T;
 }
 
 static void hap_destroy_monitor_table(struct vcpu* v, mfn_t mmfn)
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 7166c71..25233f2 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -887,7 +887,7 @@ static mfn_t ept_get_entry(struct p2m_domain *p2m,
     int i;
     int ret = 0;
     bool_t recalc = 0;
-    mfn_t mfn = _mfn(INVALID_MFN);
+    mfn_t mfn = INVALID_MFN_T;
     struct ept_data *ept = &p2m->ept;
 
     *t = p2m_mmio_dm;
diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index b7ab169..3cf905b 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -559,7 +559,7 @@ p2m_pod_decrease_reservation(struct domain *d,
     {
         /* All PoD: Mark the whole region invalid and tell caller
          * we're done. */
-        p2m_set_entry(p2m, gpfn, _mfn(INVALID_MFN), order, p2m_invalid,
+        p2m_set_entry(p2m, gpfn, INVALID_MFN_T, order, p2m_invalid,
                       p2m->default_access);
         p2m->pod.entry_count-=(1<<order);
         BUG_ON(p2m->pod.entry_count < 0);
@@ -602,7 +602,7 @@ p2m_pod_decrease_reservation(struct domain *d,
         n = 1UL << cur_order;
         if ( t == p2m_populate_on_demand )
         {
-            p2m_set_entry(p2m, gpfn + i, _mfn(INVALID_MFN), cur_order,
+            p2m_set_entry(p2m, gpfn + i, INVALID_MFN_T, cur_order,
                           p2m_invalid, p2m->default_access);
             p2m->pod.entry_count -= n;
             BUG_ON(p2m->pod.entry_count < 0);
@@ -624,7 +624,7 @@ p2m_pod_decrease_reservation(struct domain *d,
 
             page = mfn_to_page(mfn);
 
-            p2m_set_entry(p2m, gpfn + i, _mfn(INVALID_MFN), cur_order,
+            p2m_set_entry(p2m, gpfn + i, INVALID_MFN_T, cur_order,
                           p2m_invalid, p2m->default_access);
             p2m_tlb_flush_sync(p2m);
             for ( j = 0; j < n; ++j )
@@ -671,7 +671,7 @@ void p2m_pod_dump_data(struct domain *d)
 static int
 p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
 {
-    mfn_t mfn, mfn0 = _mfn(INVALID_MFN);
+    mfn_t mfn, mfn0 = INVALID_MFN_T;
     p2m_type_t type, type0 = 0;
     unsigned long * map = NULL;
     int ret=0, reset = 0;
@@ -754,7 +754,7 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
     }
 
     /* Try to remove the page, restoring old mapping if it fails. */
-    p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_2M,
+    p2m_set_entry(p2m, gfn, INVALID_MFN_T, PAGE_ORDER_2M,
                   p2m_populate_on_demand, p2m->default_access);
     p2m_tlb_flush_sync(p2m);
 
@@ -871,7 +871,7 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
         }
 
         /* Try to remove the page, restoring old mapping if it fails. */
-        p2m_set_entry(p2m, gfns[i], _mfn(INVALID_MFN), PAGE_ORDER_4K,
+        p2m_set_entry(p2m, gfns[i], INVALID_MFN_T, PAGE_ORDER_4K,
                       p2m_populate_on_demand, p2m->default_access);
 
         /* See if the page was successfully unmapped.  (Allow one refcount
@@ -1073,7 +1073,7 @@ p2m_pod_demand_populate(struct p2m_domain *p2m, unsigned long gfn,
          * NOTE: In a fine-grained p2m locking scenario this operation
          * may need to promote its locking from gfn->1g superpage
          */
-        p2m_set_entry(p2m, gfn_aligned, _mfn(INVALID_MFN), PAGE_ORDER_2M,
+        p2m_set_entry(p2m, gfn_aligned, INVALID_MFN_T, PAGE_ORDER_2M,
                       p2m_populate_on_demand, p2m->default_access);
         return 0;
     }
@@ -1157,7 +1157,7 @@ remap_and_retry:
      * need promoting the gfn lock from gfn->2M superpage */
     gfn_aligned = (gfn>>order)<<order;
     for(i=0; i<(1<<order); i++)
-        p2m_set_entry(p2m, gfn_aligned + i, _mfn(INVALID_MFN), PAGE_ORDER_4K,
+        p2m_set_entry(p2m, gfn_aligned + i, INVALID_MFN_T, PAGE_ORDER_4K,
                       p2m_populate_on_demand, p2m->default_access);
     if ( tb_init_done )
     {
@@ -1215,7 +1215,7 @@ guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
     }
 
     /* Now, actually do the two-way mapping */
-    rc = p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), order,
+    rc = p2m_set_entry(p2m, gfn, INVALID_MFN_T, order,
                        p2m_populate_on_demand, p2m->default_access);
     if ( rc == 0 )
     {
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 4980934..93a8f59 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -764,7 +764,7 @@ p2m_pt_get_entry(struct p2m_domain *p2m, unsigned long gfn,
                      p2m->max_mapped_pfn )
                     break;
         }
-        return _mfn(INVALID_MFN);
+        return INVALID_MFN_T;
     }
 
     mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
@@ -777,7 +777,7 @@ p2m_pt_get_entry(struct p2m_domain *p2m, unsigned long gfn,
         if ( (l4e_get_flags(*l4e) & _PAGE_PRESENT) == 0 )
         {
             unmap_domain_page(l4e);
-            return _mfn(INVALID_MFN);
+            return INVALID_MFN_T;
         }
         mfn = _mfn(l4e_get_pfn(*l4e));
         recalc = needs_recalc(l4, *l4e);
@@ -805,7 +805,7 @@ pod_retry_l3:
                     *t = p2m_populate_on_demand;
             }
             unmap_domain_page(l3e);
-            return _mfn(INVALID_MFN);
+            return INVALID_MFN_T;
         }
         if ( flags & _PAGE_PSE )
         {
@@ -817,7 +817,7 @@ pod_retry_l3:
             unmap_domain_page(l3e);
 
             ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t));
-            return (p2m_is_valid(*t)) ? mfn : _mfn(INVALID_MFN);
+            return (p2m_is_valid(*t)) ? mfn : INVALID_MFN_T;
         }
 
         mfn = _mfn(l3e_get_pfn(*l3e));
@@ -846,7 +846,7 @@ pod_retry_l2:
         }
     
         unmap_domain_page(l2e);
-        return _mfn(INVALID_MFN);
+        return INVALID_MFN_T;
     }
     if ( flags & _PAGE_PSE )
     {
@@ -856,7 +856,7 @@ pod_retry_l2:
         unmap_domain_page(l2e);
         
         ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t));
-        return (p2m_is_valid(*t)) ? mfn : _mfn(INVALID_MFN);
+        return (p2m_is_valid(*t)) ? mfn : INVALID_MFN_T;
     }
 
     mfn = _mfn(l2e_get_pfn(*l2e));
@@ -885,14 +885,14 @@ pod_retry_l1:
         }
     
         unmap_domain_page(l1e);
-        return _mfn(INVALID_MFN);
+        return INVALID_MFN_T;
     }
     mfn = _mfn(l1e_get_pfn(*l1e));
     *t = recalc_type(recalc || _needs_recalc(flags), l1t, p2m, gfn);
     unmap_domain_page(l1e);
 
     ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t) || p2m_is_paging(*t));
-    return (p2m_is_valid(*t) || p2m_is_grant(*t)) ? mfn : _mfn(INVALID_MFN);
+    return (p2m_is_valid(*t) || p2m_is_grant(*t)) ? mfn : INVALID_MFN_T;
 }
 
 static void p2m_pt_change_entry_type_global(struct p2m_domain *p2m,
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 6258a5b..f87b197 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -388,7 +388,7 @@ mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn,
     if (unlikely((p2m_is_broken(*t))))
     {
         /* Return invalid_mfn to avoid caller's access */
-        mfn = _mfn(INVALID_MFN);
+        mfn = INVALID_MFN_T;
         if ( q & P2M_ALLOC )
             domain_crash(p2m->domain);
     }
@@ -580,7 +580,7 @@ int p2m_alloc_table(struct p2m_domain *p2m)
 
     /* Initialise physmap tables for slot zero. Other code assumes this. */
     p2m->defer_nested_flush = 1;
-    rc = p2m_set_entry(p2m, 0, _mfn(INVALID_MFN), PAGE_ORDER_4K,
+    rc = p2m_set_entry(p2m, 0, INVALID_MFN_T, PAGE_ORDER_4K,
                        p2m_invalid, p2m->default_access);
     p2m->defer_nested_flush = 0;
     p2m_unlock(p2m);
@@ -670,7 +670,7 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn, unsigned long mfn,
             ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
         }
     }
-    return p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), page_order, p2m_invalid,
+    return p2m_set_entry(p2m, gfn, INVALID_MFN_T, page_order, p2m_invalid,
                          p2m->default_access);
 }
 
@@ -840,7 +840,7 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
     {
         gdprintk(XENLOG_WARNING, "Adding bad mfn to p2m map (%#lx -> %#lx)\n",
                  gfn_x(gfn), mfn_x(mfn));
-        rc = p2m_set_entry(p2m, gfn_x(gfn), _mfn(INVALID_MFN), page_order,
+        rc = p2m_set_entry(p2m, gfn_x(gfn), INVALID_MFN_T, page_order,
                            p2m_invalid, p2m->default_access);
         if ( rc == 0 )
         {
@@ -1117,7 +1117,7 @@ int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
         gdprintk(XENLOG_WARNING,
                  "no mapping between mfn %08lx and gfn %08lx\n",
                  mfn_x(mfn), gfn);
-    rc = p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), order, p2m_invalid,
+    rc = p2m_set_entry(p2m, gfn, INVALID_MFN_T, order, p2m_invalid,
                        p2m->default_access);
 
  out:
@@ -1146,7 +1146,7 @@ int clear_identity_p2m_entry(struct domain *d, unsigned long gfn)
     mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL);
     if ( p2mt == p2m_mmio_direct && mfn_x(mfn) == gfn )
     {
-        ret = p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_4K,
+        ret = p2m_set_entry(p2m, gfn, INVALID_MFN_T, PAGE_ORDER_4K,
                             p2m_invalid, p2m->default_access);
         gfn_unlock(p2m, gfn, 0);
     }
@@ -1316,7 +1316,7 @@ int p2m_mem_paging_evict(struct domain *d, unsigned long gfn)
         put_page(page);
 
     /* Remove mapping from p2m table */
-    ret = p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_4K,
+    ret = p2m_set_entry(p2m, gfn, INVALID_MFN_T, PAGE_ORDER_4K,
                         p2m_ram_paged, a);
 
     /* Clear content before returning the page to Xen */
diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index 8219bb6..e086a23 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -67,7 +67,7 @@ static mfn_t paging_new_log_dirty_page(struct domain *d)
     if ( unlikely(page == NULL) )
     {
         d->arch.paging.log_dirty.failed_allocs++;
-        return _mfn(INVALID_MFN);
+        return INVALID_MFN_T;
     }
 
     d->arch.paging.log_dirty.allocs++;
@@ -95,7 +95,7 @@ static mfn_t paging_new_log_dirty_node(struct domain *d)
         int i;
         mfn_t *node = map_domain_page(mfn);
         for ( i = 0; i < LOGDIRTY_NODE_ENTRIES; i++ )
-            node[i] = _mfn(INVALID_MFN);
+            node[i] = INVALID_MFN_T;
         unmap_domain_page(node);
     }
     return mfn;
@@ -167,7 +167,7 @@ static int paging_free_log_dirty_bitmap(struct domain *d, int rc)
 
             unmap_domain_page(l2);
             paging_free_log_dirty_page(d, l3[i3]);
-            l3[i3] = _mfn(INVALID_MFN);
+            l3[i3] = INVALID_MFN_T;
 
             if ( i3 < LOGDIRTY_NODE_ENTRIES - 1 && hypercall_preempt_check() )
             {
@@ -182,7 +182,7 @@ static int paging_free_log_dirty_bitmap(struct domain *d, int rc)
         if ( rc )
             break;
         paging_free_log_dirty_page(d, l4[i4]);
-        l4[i4] = _mfn(INVALID_MFN);
+        l4[i4] = INVALID_MFN_T;
 
         if ( i4 < LOGDIRTY_NODE_ENTRIES - 1 && hypercall_preempt_check() )
         {
@@ -198,7 +198,7 @@ static int paging_free_log_dirty_bitmap(struct domain *d, int rc)
     if ( !rc )
     {
         paging_free_log_dirty_page(d, d->arch.paging.log_dirty.top);
-        d->arch.paging.log_dirty.top = _mfn(INVALID_MFN);
+        d->arch.paging.log_dirty.top = INVALID_MFN_T;
 
         ASSERT(d->arch.paging.log_dirty.allocs == 0);
         d->arch.paging.log_dirty.failed_allocs = 0;
@@ -660,7 +660,7 @@ int paging_domain_init(struct domain *d, unsigned int domcr_flags)
     /* This must be initialized separately from the rest of the
      * log-dirty init code as that can be called more than once and we
      * don't want to leak any active log-dirty bitmaps */
-    d->arch.paging.log_dirty.top = _mfn(INVALID_MFN);
+    d->arch.paging.log_dirty.top = INVALID_MFN_T;
 
     /*
      * Shadow pagetables are the default, but we will use
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 226e32d..4a32221 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -88,10 +88,10 @@ void shadow_vcpu_init(struct vcpu *v)
 
     for ( i = 0; i < SHADOW_OOS_PAGES; i++ )
     {
-        v->arch.paging.shadow.oos[i] = _mfn(INVALID_MFN);
-        v->arch.paging.shadow.oos_snapshot[i] = _mfn(INVALID_MFN);
+        v->arch.paging.shadow.oos[i] = INVALID_MFN_T;
+        v->arch.paging.shadow.oos_snapshot[i] = INVALID_MFN_T;
         for ( j = 0; j < SHADOW_OOS_FIXUPS; j++ )
-            v->arch.paging.shadow.oos_fixup[i].smfn[j] = _mfn(INVALID_MFN);
+            v->arch.paging.shadow.oos_fixup[i].smfn[j] = INVALID_MFN_T;
     }
 #endif
 
@@ -598,7 +598,7 @@ static inline int oos_fixup_flush_gmfn(struct vcpu *v, mfn_t gmfn,
             sh_remove_write_access_from_sl1p(d, gmfn,
                                              fixup->smfn[i],
                                              fixup->off[i]);
-            fixup->smfn[i] = _mfn(INVALID_MFN);
+            fixup->smfn[i] = INVALID_MFN_T;
         }
     }
 
@@ -757,7 +757,7 @@ static void oos_hash_add(struct vcpu *v, mfn_t gmfn)
     struct oos_fixup fixup = { .next = 0 };
 
     for (i = 0; i < SHADOW_OOS_FIXUPS; i++ )
-        fixup.smfn[i] = _mfn(INVALID_MFN);
+        fixup.smfn[i] = INVALID_MFN_T;
 
     idx = mfn_x(gmfn) % SHADOW_OOS_PAGES;
     oidx = idx;
@@ -807,7 +807,7 @@ static void oos_hash_remove(struct domain *d, mfn_t gmfn)
             idx = (idx + 1) % SHADOW_OOS_PAGES;
         if ( mfn_x(oos[idx]) == mfn_x(gmfn) )
         {
-            oos[idx] = _mfn(INVALID_MFN);
+            oos[idx] = INVALID_MFN_T;
             return;
         }
     }
@@ -838,7 +838,7 @@ mfn_t oos_snapshot_lookup(struct domain *d, mfn_t gmfn)
 
     SHADOW_ERROR("gmfn %lx was OOS but not in hash table\n", mfn_x(gmfn));
     BUG();
-    return _mfn(INVALID_MFN);
+    return INVALID_MFN_T;
 }
 
 /* Pull a single guest page back into sync */
@@ -862,7 +862,7 @@ void sh_resync(struct domain *d, mfn_t gmfn)
         if ( mfn_x(oos[idx]) == mfn_x(gmfn) )
         {
             _sh_resync(v, gmfn, &oos_fixup[idx], oos_snapshot[idx]);
-            oos[idx] = _mfn(INVALID_MFN);
+            oos[idx] = INVALID_MFN_T;
             return;
         }
     }
@@ -914,7 +914,7 @@ void sh_resync_all(struct vcpu *v, int skip, int this, int others)
         {
             /* Write-protect and sync contents */
             _sh_resync(v, oos[idx], &oos_fixup[idx], oos_snapshot[idx]);
-            oos[idx] = _mfn(INVALID_MFN);
+            oos[idx] = INVALID_MFN_T;
         }
 
  resync_others:
@@ -948,7 +948,7 @@ void sh_resync_all(struct vcpu *v, int skip, int this, int others)
             {
                 /* Write-protect and sync contents */
                 _sh_resync(other, oos[idx], &oos_fixup[idx], oos_snapshot[idx]);
-                oos[idx] = _mfn(INVALID_MFN);
+                oos[idx] = INVALID_MFN_T;
             }
         }
     }
@@ -1784,7 +1784,7 @@ void *sh_emulate_map_dest(struct vcpu *v, unsigned long vaddr,
     if ( likely(((vaddr + bytes - 1) & PAGE_MASK) == (vaddr & PAGE_MASK)) )
     {
         /* Whole write fits on a single page. */
-        sh_ctxt->mfn[1] = _mfn(INVALID_MFN);
+        sh_ctxt->mfn[1] = INVALID_MFN_T;
         map = map_domain_page(sh_ctxt->mfn[0]) + (vaddr & ~PAGE_MASK);
     }
     else if ( !is_hvm_domain(d) )
@@ -2086,7 +2086,7 @@ mfn_t shadow_hash_lookup(struct domain *d, unsigned long n, unsigned int t)
     }
 
     perfc_incr(shadow_hash_lookup_miss);
-    return _mfn(INVALID_MFN);
+    return INVALID_MFN_T;
 }
 
 void shadow_hash_insert(struct domain *d, unsigned long n, unsigned int t,
@@ -2910,7 +2910,7 @@ void sh_reset_l3_up_pointers(struct vcpu *v)
     };
     static const unsigned int callback_mask = SHF_L3_64;
 
-    hash_vcpu_foreach(v, callback_mask, callbacks, _mfn(INVALID_MFN));
+    hash_vcpu_foreach(v, callback_mask, callbacks, INVALID_MFN_T);
 }
 
 
@@ -3284,7 +3284,7 @@ void shadow_teardown(struct domain *d, int *preempted)
                 if ( mfn_valid(oos_snapshot[i]) )
                 {
                     shadow_free(d, oos_snapshot[i]);
-                    oos_snapshot[i] = _mfn(INVALID_MFN);
+                    oos_snapshot[i] = INVALID_MFN_T;
                 }
         }
 #endif /* OOS */
@@ -3449,7 +3449,7 @@ static int shadow_one_bit_disable(struct domain *d, u32 mode)
                     if ( mfn_valid(oos_snapshot[i]) )
                     {
                         shadow_free(d, oos_snapshot[i]);
-                        oos_snapshot[i] = _mfn(INVALID_MFN);
+                        oos_snapshot[i] = INVALID_MFN_T;
                     }
             }
 #endif /* OOS */
@@ -3968,7 +3968,7 @@ void shadow_audit_tables(struct vcpu *v)
         }
     }
 
-    hash_vcpu_foreach(v, mask, callbacks, _mfn(INVALID_MFN));
+    hash_vcpu_foreach(v, mask, callbacks, INVALID_MFN_T);
 }
 
 #endif /* Shadow audit */
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index dfe59a2..96d270a 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -177,7 +177,7 @@ sh_walk_guest_tables(struct vcpu *v, unsigned long va, walk_t *gw,
 {
     return guest_walk_tables(v, p2m_get_hostp2m(v->domain), va, gw, pfec,
 #if GUEST_PAGING_LEVELS == 3 /* PAE */
-                             _mfn(INVALID_MFN),
+                             INVALID_MFN_T,
                              v->arch.paging.shadow.gl3e
 #else /* 32 or 64 */
                              pagetable_get_mfn(v->arch.guest_table),
@@ -336,32 +336,32 @@ static void sh_audit_gw(struct vcpu *v, walk_t *gw)
     if ( mfn_valid(gw->l4mfn)
          && mfn_valid((smfn = get_shadow_status(d, gw->l4mfn,
                                                 SH_type_l4_shadow))) )
-        (void) sh_audit_l4_table(v, smfn, _mfn(INVALID_MFN));
+        (void) sh_audit_l4_table(v, smfn, INVALID_MFN_T);
     if ( mfn_valid(gw->l3mfn)
          && mfn_valid((smfn = get_shadow_status(d, gw->l3mfn,
                                                 SH_type_l3_shadow))) )
-        (void) sh_audit_l3_table(v, smfn, _mfn(INVALID_MFN));
+        (void) sh_audit_l3_table(v, smfn, INVALID_MFN_T);
 #endif /* PAE or 64... */
     if ( mfn_valid(gw->l2mfn) )
     {
         if ( mfn_valid((smfn = get_shadow_status(d, gw->l2mfn,
                                                  SH_type_l2_shadow))) )
-            (void) sh_audit_l2_table(v, smfn, _mfn(INVALID_MFN));
+            (void) sh_audit_l2_table(v, smfn, INVALID_MFN_T);
 #if GUEST_PAGING_LEVELS == 3
         if ( mfn_valid((smfn = get_shadow_status(d, gw->l2mfn,
                                                  SH_type_l2h_shadow))) )
-            (void) sh_audit_l2_table(v, smfn, _mfn(INVALID_MFN));
+            (void) sh_audit_l2_table(v, smfn, INVALID_MFN_T);
 #endif
     }
     if ( mfn_valid(gw->l1mfn)
          && mfn_valid((smfn = get_shadow_status(d, gw->l1mfn,
                                                 SH_type_l1_shadow))) )
-        (void) sh_audit_l1_table(v, smfn, _mfn(INVALID_MFN));
+        (void) sh_audit_l1_table(v, smfn, INVALID_MFN_T);
     else if ( (guest_l2e_get_flags(gw->l2e) & _PAGE_PRESENT)
               && (guest_l2e_get_flags(gw->l2e) & _PAGE_PSE)
               && mfn_valid(
               (smfn = get_fl1_shadow_status(d, guest_l2e_get_gfn(gw->l2e)))) )
-        (void) sh_audit_fl1_table(v, smfn, _mfn(INVALID_MFN));
+        (void) sh_audit_fl1_table(v, smfn, INVALID_MFN_T);
 }
 
 #else
@@ -1752,7 +1752,7 @@ static shadow_l2e_t * shadow_get_and_create_l2e(struct vcpu *v,
 {
 #if GUEST_PAGING_LEVELS >= 4 /* 64bit... */
     struct domain *d = v->domain;
-    mfn_t sl3mfn = _mfn(INVALID_MFN);
+    mfn_t sl3mfn = INVALID_MFN_T;
     shadow_l3e_t *sl3e;
     if ( !mfn_valid(gw->l2mfn) ) return NULL; /* No guest page. */
     /* Get the l3e */
@@ -2158,7 +2158,7 @@ static int validate_gl4e(struct vcpu *v, void *new_ge, mfn_t sl4mfn, void *se)
     shadow_l4e_t new_sl4e;
     guest_l4e_t new_gl4e = *(guest_l4e_t *)new_ge;
     shadow_l4e_t *sl4p = se;
-    mfn_t sl3mfn = _mfn(INVALID_MFN);
+    mfn_t sl3mfn = INVALID_MFN_T;
     struct domain *d = v->domain;
     p2m_type_t p2mt;
     int result = 0;
@@ -2217,7 +2217,7 @@ static int validate_gl3e(struct vcpu *v, void *new_ge, mfn_t sl3mfn, void *se)
     shadow_l3e_t new_sl3e;
     guest_l3e_t new_gl3e = *(guest_l3e_t *)new_ge;
     shadow_l3e_t *sl3p = se;
-    mfn_t sl2mfn = _mfn(INVALID_MFN);
+    mfn_t sl2mfn = INVALID_MFN_T;
     p2m_type_t p2mt;
     int result = 0;
 
@@ -2250,7 +2250,7 @@ static int validate_gl2e(struct vcpu *v, void *new_ge, mfn_t sl2mfn, void *se)
     shadow_l2e_t new_sl2e;
     guest_l2e_t new_gl2e = *(guest_l2e_t *)new_ge;
     shadow_l2e_t *sl2p = se;
-    mfn_t sl1mfn = _mfn(INVALID_MFN);
+    mfn_t sl1mfn = INVALID_MFN_T;
     p2m_type_t p2mt;
     int result = 0;
 
@@ -4105,10 +4105,10 @@ sh_update_cr3(struct vcpu *v, int do_locking)
                                            ? SH_type_l2h_shadow
                                            : SH_type_l2_shadow);
                 else
-                    sh_set_toplevel_shadow(v, i, _mfn(INVALID_MFN), 0);
+                    sh_set_toplevel_shadow(v, i, INVALID_MFN_T, 0);
             }
             else
-                sh_set_toplevel_shadow(v, i, _mfn(INVALID_MFN), 0);
+                sh_set_toplevel_shadow(v, i, INVALID_MFN_T, 0);
         }
     }
 #elif GUEST_PAGING_LEVELS == 4
@@ -4531,7 +4531,7 @@ static void sh_pagetable_dying(struct vcpu *v, paddr_t gpa)
 
         if ( fast_path ) {
             if ( pagetable_is_null(v->arch.shadow_table[i]) )
-                smfn = _mfn(INVALID_MFN);
+                smfn = INVALID_MFN_T;
             else
                 smfn = _mfn(pagetable_get_pfn(v->arch.shadow_table[i]));
         }
@@ -4541,7 +4541,7 @@ static void sh_pagetable_dying(struct vcpu *v, paddr_t gpa)
             gmfn = get_gfn_query_unlocked(d, gfn_x(guest_l3e_get_gfn(gl3e[i])),
                                           &p2mt);
             smfn = unlikely(mfn_x(gmfn) == INVALID_MFN)
-                   ? _mfn(INVALID_MFN)
+                   ? INVALID_MFN_T
                    : shadow_hash_lookup(d, mfn_x(gmfn), SH_type_l2_pae_shadow);
         }
 
@@ -4846,7 +4846,7 @@ int sh_audit_fl1_table(struct vcpu *v, mfn_t sl1mfn, mfn_t x)
 {
     guest_l1e_t *gl1e, e;
     shadow_l1e_t *sl1e;
-    mfn_t gl1mfn = _mfn(INVALID_MFN);
+    mfn_t gl1mfn = INVALID_MFN_T;
     int f;
     int done = 0;
 
-- 
1.9.1

