
Re: [Xen-devel] [PATCH 13/14] xen/mm: Convert {s, g}et_gpfn_from_mfn() to use typesafe MFN



On Tue, 7 May 2019, Julien Grall wrote:
> The first parameter of {s,g}et_gpfn_from_mfn() is an MFN, so it can be
> switched to use the typesafe MFN.
> 
> At the same time, replace gpfn with pfn in the helpers, as they all deal
> with PFNs, and turn the macros into static inline functions.
> 
> Note that the return value of the getter and the 2nd parameter of the
> setter have not been converted to a typesafe PFN because that would have
> required more changes than expected.
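
[For context, a minimal, self-contained sketch of the before/after pattern this
patch applies. The mfn_t wrapper, the _mfn()/mfn_x() accessors and the M2P
array are modelled here purely for illustration and are not the real Xen
headers:

    /* Illustrative model only; simplified from Xen's typesafe pattern. */
    typedef struct { unsigned long mfn; } mfn_t;
    static inline mfn_t _mfn(unsigned long m) { return (mfn_t){ .mfn = m }; }
    static inline unsigned long mfn_x(mfn_t m) { return m.mfn; }

    extern unsigned long machine_to_phys_mapping[];  /* stand-in for the M2P */

    /* Before: untyped macro, callers pass a raw unsigned long. */
    #define get_gpfn_from_mfn(mfn) (machine_to_phys_mapping[(mfn)])

    /* After: static inline taking a typesafe MFN, so passing a PFN (or any
     * other unsigned long) by mistake no longer compiles. */
    static inline unsigned long get_pfn_from_mfn(mfn_t mfn)
    {
        return machine_to_phys_mapping[mfn_x(mfn)];
    }

Call sites then change from get_gpfn_from_mfn(mfn_x(mfn)) to
get_pfn_from_mfn(mfn), as in the hunks below.]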
> 
> Signed-off-by: Julien Grall <julien.grall@xxxxxxx>

I only skimmed through the x86 bits.

Acked-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>


> ---
>     Changes in v2:
>         - Patch added
> ---
>  xen/arch/x86/cpu/mcheck/mcaction.c |  2 +-
>  xen/arch/x86/mm.c                  | 14 ++++----
>  xen/arch/x86/mm/mem_sharing.c      | 19 +++++------
>  xen/arch/x86/mm/p2m-pod.c          |  4 +--
>  xen/arch/x86/mm/p2m-pt.c           | 37 ++++++++++-----------
>  xen/arch/x86/mm/p2m.c              | 66 +++++++++++++++++++-------------------
>  xen/arch/x86/mm/paging.c           |  4 +--
>  xen/arch/x86/pv/dom0_build.c       |  6 ++--
>  xen/arch/x86/x86_64/traps.c        | 41 +++++++++++------------
>  xen/common/memory.c                |  2 +-
>  xen/common/page_alloc.c            |  2 +-
>  xen/include/asm-arm/mm.h           |  2 +-
>  xen/include/asm-x86/grant_table.h  |  2 +-
>  xen/include/asm-x86/mm.h           | 16 +++++----
>  xen/include/asm-x86/p2m.h          |  2 +-
>  15 files changed, 113 insertions(+), 106 deletions(-)
> 
> diff --git a/xen/arch/x86/cpu/mcheck/mcaction.c b/xen/arch/x86/cpu/mcheck/mcaction.c
> index 69332fb84d..5e78fb7703 100644
> --- a/xen/arch/x86/cpu/mcheck/mcaction.c
> +++ b/xen/arch/x86/cpu/mcheck/mcaction.c
> @@ -89,7 +89,7 @@ mc_memerr_dhandler(struct mca_binfo *binfo,
>              {
>                  d = get_domain_by_id(bank->mc_domid);
>                  ASSERT(d);
> -                gfn = get_gpfn_from_mfn((bank->mc_addr) >> PAGE_SHIFT);
> +                gfn = get_pfn_from_mfn(maddr_to_mfn(bank->mc_addr));
>  
>                  if ( unmmap_broken_page(d, mfn, gfn) )
>                  {
> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
> index 7d887f2699..60c47582be 100644
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -502,7 +502,7 @@ void share_xen_page_with_guest(struct page_info *page, struct domain *d,
>      if ( page_get_owner(page) == d )
>          return;
>  
> -    set_gpfn_from_mfn(mfn_x(page_to_mfn(page)), INVALID_M2P_ENTRY);
> +    set_pfn_from_mfn(page_to_mfn(page), INVALID_M2P_ENTRY);
>  
>      spin_lock(&d->page_alloc_lock);
>  
> @@ -1077,7 +1077,7 @@ get_page_from_l1e(
>  
>              gdprintk(XENLOG_WARNING, "Error updating mappings for mfn %" PRI_mfn
>                       " (pfn %" PRI_pfn ", from L1 entry %" PRIpte ") for d%d\n",
> -                     mfn, get_gpfn_from_mfn(mfn),
> +                     mfn, get_pfn_from_mfn(_mfn(mfn)),
>                       l1e_get_intpte(l1e), l1e_owner->domain_id);
>              return err;
>          }
> @@ -1088,7 +1088,7 @@ get_page_from_l1e(
>   could_not_pin:
>      gdprintk(XENLOG_WARNING, "Error getting mfn %" PRI_mfn " (pfn %" PRI_pfn
>               ") from L1 entry %" PRIpte " for l1e_owner d%d, pg_owner d%d\n",
> -             mfn, get_gpfn_from_mfn(mfn),
> +             mfn, get_pfn_from_mfn(_mfn(mfn)),
>               l1e_get_intpte(l1e), l1e_owner->domain_id, pg_owner->domain_id);
>      if ( real_pg_owner != NULL )
>          put_page(page);
> @@ -2604,7 +2604,7 @@ static int alloc_page_type(struct page_info *page, unsigned long type,
>                   " (pfn %" PRI_pfn ") for type %" PRtype_info
>                   ": caf=%08lx taf=%" PRtype_info "\n",
>                   mfn_x(page_to_mfn(page)),
> -                 get_gpfn_from_mfn(mfn_x(page_to_mfn(page))),
> +                 get_pfn_from_mfn(page_to_mfn(page)),
>                   type, page->count_info, page->u.inuse.type_info);
>          if ( page != current->arch.old_guest_table )
>              page->u.inuse.type_info = 0;
> @@ -2890,7 +2890,7 @@ static int _get_page_type(struct page_info *page, unsigned long type,
>                       "Bad type (saw %" PRtype_info " != exp %" PRtype_info ") "
>                       "for mfn %" PRI_mfn " (pfn %" PRI_pfn ")\n",
>                       x, type, mfn_x(page_to_mfn(page)),
> -                     get_gpfn_from_mfn(mfn_x(page_to_mfn(page))));
> +                     get_pfn_from_mfn(page_to_mfn(page)));
>              return -EINVAL;
>          }
>          else if ( unlikely(!(x & PGT_validated)) )
> @@ -4002,7 +4002,7 @@ long do_mmu_update(
>                  break;
>              }
>  
> -            set_gpfn_from_mfn(mfn_x(mfn), gpfn);
> +            set_pfn_from_mfn(mfn, gpfn);
>              paging_mark_pfn_dirty(pg_owner, _pfn(gpfn));
>  
>              put_page(page);
> @@ -4529,7 +4529,7 @@ int xenmem_add_to_physmap_one(
>          goto put_both;
>  
>      /* Unmap from old location, if any. */
> -    old_gpfn = get_gpfn_from_mfn(mfn_x(mfn));
> +    old_gpfn = get_pfn_from_mfn(mfn);
>      ASSERT(!SHARED_M2P(old_gpfn));
>      if ( space == XENMAPSPACE_gmfn && old_gpfn != gfn )
>      {
> diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
> index 5ac9d8f54c..af903c11e9 100644
> --- a/xen/arch/x86/mm/mem_sharing.c
> +++ b/xen/arch/x86/mm/mem_sharing.c
> @@ -391,11 +391,12 @@ static inline void mem_sharing_gfn_destroy(struct page_info *page,
>      xfree(gfn_info);
>  }
>  
> -static struct page_info* mem_sharing_lookup(unsigned long mfn)
> +static struct page_info* mem_sharing_lookup(mfn_t mfn)
>  {
> -    if ( mfn_valid(_mfn(mfn)) )
> +    if ( mfn_valid(mfn) )
>      {
> -        struct page_info* page = mfn_to_page(_mfn(mfn));
> +        struct page_info* page = mfn_to_page(mfn);
> +
>          if ( page_get_owner(page) == dom_cow )
>          {
>              /* Count has to be at least two, because we're called
> @@ -404,7 +405,7 @@ static struct page_info* mem_sharing_lookup(unsigned long mfn)
>              unsigned long t = read_atomic(&page->u.inuse.type_info);
>              ASSERT((t & PGT_type_mask) == PGT_shared_page);
>              ASSERT((t & PGT_count_mask) >= 2);
> -            ASSERT(SHARED_M2P(get_gpfn_from_mfn(mfn)));
> +            ASSERT(SHARED_M2P(get_pfn_from_mfn(mfn)));
>              return page;
>          }
>      }
> @@ -464,10 +465,10 @@ static int audit(void)
>          }
>  
>          /* Check the m2p entry */
> -        if ( !SHARED_M2P(get_gpfn_from_mfn(mfn_x(mfn))) )
> +        if ( !SHARED_M2P(get_pfn_from_mfn(mfn)) )
>          {
>             MEM_SHARING_DEBUG("mfn %lx shared, but wrong m2p entry (%lx)!\n",
> -                             mfn_x(mfn), get_gpfn_from_mfn(mfn_x(mfn)));
> +                             mfn_x(mfn), get_pfn_from_mfn(mfn));
>             errors++;
>          }
>  
> @@ -693,7 +694,7 @@ static inline struct page_info *__grab_shared_page(mfn_t mfn)
>      if ( !mem_sharing_page_lock(pg) )
>          return NULL;
>  
> -    if ( mem_sharing_lookup(mfn_x(mfn)) == NULL )
> +    if ( mem_sharing_lookup(mfn) == NULL )
>      {
>          mem_sharing_page_unlock(pg);
>          return NULL;
> @@ -877,7 +878,7 @@ static int nominate_page(struct domain *d, gfn_t gfn,
>      atomic_inc(&nr_shared_mfns);
>  
>      /* Update m2p entry to SHARED_M2P_ENTRY */
> -    set_gpfn_from_mfn(mfn_x(mfn), SHARED_M2P_ENTRY);
> +    set_pfn_from_mfn(mfn, SHARED_M2P_ENTRY);
>  
>      *phandle = page->sharing->handle;
>      audit_add_list(page);
> @@ -1222,7 +1223,7 @@ private_page_found:
>      }
>  
>      /* Update m2p entry */
> -    set_gpfn_from_mfn(mfn_x(page_to_mfn(page)), gfn);
> +    set_pfn_from_mfn(page_to_mfn(page), gfn);
>  
>      /* Now that the gfn<->mfn map is properly established,
>       * marking dirty is feasible */
> diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
> index 4313863066..9e001738f4 100644
> --- a/xen/arch/x86/mm/p2m-pod.c
> +++ b/xen/arch/x86/mm/p2m-pod.c
> @@ -652,7 +652,7 @@ p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order)
>              }
>              p2m_tlb_flush_sync(p2m);
>              for ( j = 0; j < n; ++j )
> -                set_gpfn_from_mfn(mfn_x(mfn), INVALID_M2P_ENTRY);
> +                set_pfn_from_mfn(mfn, INVALID_M2P_ENTRY);
>              p2m_pod_cache_add(p2m, page, cur_order);
>  
>              steal_for_cache =  ( p2m->pod.entry_count > p2m->pod.count );
> @@ -1203,7 +1203,7 @@ p2m_pod_demand_populate(struct p2m_domain *p2m, gfn_t gfn,
>  
>      for( i = 0; i < (1UL << order); i++ )
>      {
> -        set_gpfn_from_mfn(mfn_x(mfn) + i, gfn_x(gfn_aligned) + i);
> +        set_pfn_from_mfn(mfn_add(mfn, i), gfn_x(gfn_aligned) + i);
>          paging_mark_pfn_dirty(d, _pfn(gfn_x(gfn_aligned) + i));
>      }
>  
> diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
> index cafc9f299b..0e85819f9b 100644
> --- a/xen/arch/x86/mm/p2m-pt.c
> +++ b/xen/arch/x86/mm/p2m-pt.c
> @@ -991,7 +991,8 @@ static int p2m_pt_change_entry_type_range(struct p2m_domain *p2m,
>  long p2m_pt_audit_p2m(struct p2m_domain *p2m)
>  {
>      unsigned long entry_count = 0, pmbad = 0;
> -    unsigned long mfn, gfn, m2pfn;
> +    unsigned long gfn, m2pfn;
> +    mfn_t mfn;
>  
>      ASSERT(p2m_locked_by_me(p2m));
>      ASSERT(pod_locked_by_me(p2m));
> @@ -1030,19 +1031,19 @@ long p2m_pt_audit_p2m(struct p2m_domain *p2m)
>                  /* check for 1GB super page */
>                  if ( l3e_get_flags(l3e[i3]) & _PAGE_PSE )
>                  {
> -                    mfn = l3e_get_pfn(l3e[i3]);
> -                    ASSERT(mfn_valid(_mfn(mfn)));
> +                    mfn = l3e_get_mfn(l3e[i3]);
> +                    ASSERT(mfn_valid(mfn));
>                      /* we have to cover 512x512 4K pages */
>                      for ( i2 = 0; 
>                            i2 < (L2_PAGETABLE_ENTRIES * L1_PAGETABLE_ENTRIES);
>                            i2++)
>                      {
> -                        m2pfn = get_gpfn_from_mfn(mfn+i2);
> +                        m2pfn = get_pfn_from_mfn(mfn_add(mfn, i2));
>                          if ( m2pfn != (gfn + i2) )
>                          {
>                              pmbad++;
> -                            P2M_PRINTK("mismatch: gfn %#lx -> mfn %#lx"
> -                                       " -> gfn %#lx\n", gfn+i2, mfn+i2,
> +                            P2M_PRINTK("mismatch: gfn %#lx -> mfn %"PRI_mfn" gfn %#lx\n",
> +                                       gfn + i2, mfn_x(mfn_add(mfn, i2)),
>                                         m2pfn);
>                              BUG();
>                          }
> @@ -1067,17 +1068,17 @@ long p2m_pt_audit_p2m(struct p2m_domain *p2m)
>                      /* check for super page */
>                      if ( l2e_get_flags(l2e[i2]) & _PAGE_PSE )
>                      {
> -                        mfn = l2e_get_pfn(l2e[i2]);
> -                        ASSERT(mfn_valid(_mfn(mfn)));
> +                        mfn = l2e_get_mfn(l2e[i2]);
> +                        ASSERT(mfn_valid(mfn));
>                          for ( i1 = 0; i1 < L1_PAGETABLE_ENTRIES; i1++)
>                          {
> -                            m2pfn = get_gpfn_from_mfn(mfn+i1);
> +                            m2pfn = get_pfn_from_mfn(mfn_add(mfn, i1));
>                              /* Allow shared M2Ps */
>                              if ( (m2pfn != (gfn + i1)) && !SHARED_M2P(m2pfn) )
>                              {
>                                  pmbad++;
> -                                P2M_PRINTK("mismatch: gfn %#lx -> mfn %#lx"
> -                                           " -> gfn %#lx\n", gfn+i1, mfn+i1,
> +                                P2M_PRINTK("mismatch: gfn %#lx -> mfn %"PRI_mfn" -> gfn %#lx\n",
> +                                           gfn + i1, mfn_x(mfn_add(mfn, i1)),
>                                             m2pfn);
>                                  BUG();
>                              }
> @@ -1099,19 +1100,19 @@ long p2m_pt_audit_p2m(struct p2m_domain *p2m)
>                                  entry_count++;
>                              continue;
>                          }
> -                        mfn = l1e_get_pfn(l1e[i1]);
> -                        ASSERT(mfn_valid(_mfn(mfn)));
> -                        m2pfn = get_gpfn_from_mfn(mfn);
> +                        mfn = l1e_get_mfn(l1e[i1]);
> +                        ASSERT(mfn_valid(mfn));
> +                        m2pfn = get_pfn_from_mfn(mfn);
>                          if ( m2pfn != gfn &&
>                               type != p2m_mmio_direct &&
>                               !p2m_is_grant(type) &&
>                               !p2m_is_shared(type) )
>                          {
>                              pmbad++;
> -                            printk("mismatch: gfn %#lx -> mfn %#lx"
> -                                   " -> gfn %#lx\n", gfn, mfn, m2pfn);
> -                            P2M_PRINTK("mismatch: gfn %#lx -> mfn %#lx"
> -                                       " -> gfn %#lx\n", gfn, mfn, m2pfn);
> +                            printk("mismatch: gfn %#lx -> mfn %"PRI_mfn" -> gfn %#lx\n",
> +                                   gfn, mfn_x(mfn), m2pfn);
> +                            P2M_PRINTK("mismatch: gfn %#lx -> mfn %"PRI_mfn" -> gfn %#lx\n",
> +                                       gfn, mfn_x(mfn), m2pfn);
>                              BUG();
>                          }
>                      }
> diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
> index 3c98f72dbb..003ed97521 100644
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -769,7 +769,7 @@ void p2m_final_teardown(struct domain *d)
>  
>  
>  static int
> -p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn_l, unsigned long mfn,
> +p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn_l, mfn_t mfn,
>                  unsigned int page_order)
>  {
>      unsigned long i;
> @@ -783,17 +783,17 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn_l, unsigned long mfn,
>          return 0;
>  
>      ASSERT(gfn_locked_by_me(p2m, gfn));
> -    P2M_DEBUG("removing gfn=%#lx mfn=%#lx\n", gfn_l, mfn);
> +    P2M_DEBUG("removing gfn=%#lx mfn=%"PRI_mfn"\n", gfn_l, mfn_x(mfn));
>  
> -    if ( mfn_valid(_mfn(mfn)) )
> +    if ( mfn_valid(mfn) )
>      {
>          for ( i = 0; i < (1UL << page_order); i++ )
>          {
>              mfn_return = p2m->get_entry(p2m, gfn_add(gfn, i), &t, &a, 0,
>                                          NULL, NULL);
>              if ( !p2m_is_grant(t) && !p2m_is_shared(t) && !p2m_is_foreign(t) )
> -                set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
> -            ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
> +                set_pfn_from_mfn(mfn_add(mfn, i), INVALID_M2P_ENTRY);
> +            ASSERT( !p2m_is_valid(t) || mfn_eq(mfn_add(mfn, i), mfn_return) );
>          }
>      }
>      return p2m_set_entry(p2m, gfn, INVALID_MFN, page_order, p2m_invalid,
> @@ -807,7 +807,7 @@ guest_physmap_remove_page(struct domain *d, gfn_t gfn,
>      struct p2m_domain *p2m = p2m_get_hostp2m(d);
>      int rc;
>      gfn_lock(p2m, gfn, page_order);
> -    rc = p2m_remove_page(p2m, gfn_x(gfn), mfn_x(mfn), page_order);
> +    rc = p2m_remove_page(p2m, gfn_x(gfn), mfn, page_order);
>      gfn_unlock(p2m, gfn, page_order);
>      return rc;
>  }
> @@ -908,7 +908,7 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
>          else if ( p2m_is_ram(ot) && !p2m_is_paged(ot) )
>          {
>              ASSERT(mfn_valid(omfn));
> -            set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY);
> +            set_pfn_from_mfn(omfn, INVALID_M2P_ENTRY);
>          }
>          else if ( ot == p2m_populate_on_demand )
>          {
> @@ -951,7 +951,7 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
>                  P2M_DEBUG("old gfn=%#lx -> mfn %#lx\n",
>                            gfn_x(ogfn) , mfn_x(omfn));
>                  if ( mfn_eq(omfn, mfn_add(mfn, i)) )
> -                    p2m_remove_page(p2m, gfn_x(ogfn), mfn_x(mfn_add(mfn, i)),
> +                    p2m_remove_page(p2m, gfn_x(ogfn), mfn_add(mfn, i),
>                                      0);
>              }
>          }
> @@ -968,8 +968,8 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
>          if ( !p2m_is_grant(t) )
>          {
>              for ( i = 0; i < (1UL << page_order); i++ )
> -                set_gpfn_from_mfn(mfn_x(mfn_add(mfn, i)),
> -                                  gfn_x(gfn_add(gfn, i)));
> +                set_pfn_from_mfn(mfn_add(mfn, i),
> +                                 gfn_x(gfn_add(gfn, i)));
>          }
>      }
>      else
> @@ -1272,7 +1272,7 @@ static int set_typed_p2m_entry(struct domain *d, unsigned long gfn_l,
>          for ( i = 0; i < (1UL << order); ++i )
>          {
>              ASSERT(mfn_valid(mfn_add(omfn, i)));
> -            set_gpfn_from_mfn(mfn_x(omfn) + i, INVALID_M2P_ENTRY);
> +            set_pfn_from_mfn(mfn_add(omfn, i), INVALID_M2P_ENTRY);
>          }
>      }
>  
> @@ -1467,7 +1467,7 @@ int set_shared_p2m_entry(struct domain *d, unsigned long gfn_l, mfn_t mfn)
>      pg_type = read_atomic(&(mfn_to_page(omfn)->u.inuse.type_info));
>      if ( (pg_type & PGT_count_mask) == 0
>           || (pg_type & PGT_type_mask) != PGT_shared_page )
> -        set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY);
> +        set_pfn_from_mfn(omfn, INVALID_M2P_ENTRY);
>  
>      P2M_DEBUG("set shared %lx %lx\n", gfn_l, mfn_x(mfn));
>      rc = p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_shared,
> @@ -1821,7 +1821,7 @@ int p2m_mem_paging_prep(struct domain *d, unsigned long gfn_l, uint64_t buffer)
>      ret = p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
>                          paging_mode_log_dirty(d) ? p2m_ram_logdirty
>                                                   : p2m_ram_rw, a);
> -    set_gpfn_from_mfn(mfn_x(mfn), gfn_l);
> +    set_pfn_from_mfn(mfn, gfn_l);
>  
>      if ( !page_extant )
>          atomic_dec(&d->paged_pages);
> @@ -1872,7 +1872,7 @@ void p2m_mem_paging_resume(struct domain *d, vm_event_response_t *rsp)
>                                     p2m_ram_rw, a);
>  
>              if ( !rc )
> -                set_gpfn_from_mfn(mfn_x(mfn), gfn_x(gfn));
> +                set_pfn_from_mfn(mfn, gfn_x(gfn));
>          }
>          gfn_unlock(p2m, gfn, 0);
>      }
> @@ -2635,7 +2635,7 @@ int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx,
>      if ( gfn_eq(new_gfn, INVALID_GFN) )
>      {
>          if ( mfn_valid(mfn) )
> -            p2m_remove_page(ap2m, gfn_x(old_gfn), mfn_x(mfn), PAGE_ORDER_4K);
> +            p2m_remove_page(ap2m, gfn_x(old_gfn), mfn, PAGE_ORDER_4K);
>          rc = 0;
>          goto out;
>      }
> @@ -2772,8 +2772,8 @@ void audit_p2m(struct domain *d,
>  {
>      struct page_info *page;
>      struct domain *od;
> -    unsigned long mfn, gfn;
> -    mfn_t p2mfn;
> +    unsigned long gfn;
> +    mfn_t p2mfn, mfn;
>      unsigned long orphans_count = 0, mpbad = 0, pmbad = 0;
>      p2m_access_t p2ma;
>      p2m_type_t type;
> @@ -2795,54 +2795,54 @@ void audit_p2m(struct domain *d,
>      spin_lock(&d->page_alloc_lock);
>      page_list_for_each ( page, &d->page_list )
>      {
> -        mfn = mfn_x(page_to_mfn(page));
> +        mfn = page_to_mfn(page);
>  
> -        P2M_PRINTK("auditing guest page, mfn=%#lx\n", mfn);
> +        P2M_PRINTK("auditing guest page, mfn=%"PRI_mfn"\n", mfn_x(mfn));
>  
>          od = page_get_owner(page);
>  
>          if ( od != d )
>          {
> -            P2M_PRINTK("wrong owner %#lx -> %p(%u) != %p(%u)\n",
> -                       mfn, od, (od?od->domain_id:-1), d, d->domain_id);
> +            P2M_PRINTK("wrong owner %"PRI_mfn" -> %p(%u) != %p(%u)\n",
> +                       mfn_x(mfn), od, (od?od->domain_id:-1), d, d->domain_id);
>              continue;
>          }
>  
> -        gfn = get_gpfn_from_mfn(mfn);
> +        gfn = get_pfn_from_mfn(mfn);
>          if ( gfn == INVALID_M2P_ENTRY )
>          {
>              orphans_count++;
> -            P2M_PRINTK("orphaned guest page: mfn=%#lx has invalid gfn\n",
> -                           mfn);
> +            P2M_PRINTK("orphaned guest page: mfn=%"PRI_mfn" has invalid gfn\n",
> +                       mfn_x(mfn));
>              continue;
>          }
>  
>          if ( SHARED_M2P(gfn) )
>          {
> -            P2M_PRINTK("shared mfn (%lx) on domain page list!\n",
> -                    mfn);
> +            P2M_PRINTK("shared mfn (%"PRI_mfn") on domain page list!\n",
> +                       mfn_x(mfn));
>              continue;
>          }
>  
>          p2mfn = get_gfn_type_access(p2m, gfn, &type, &p2ma, 0, NULL);
> -        if ( mfn_x(p2mfn) != mfn )
> +        if ( !mfn_eq(p2mfn, mfn) )
>          {
>              mpbad++;
> -            P2M_PRINTK("map mismatch mfn %#lx -> gfn %#lx -> mfn %#lx"
> +            P2M_PRINTK("map mismatch mfn %"PRI_mfn" -> gfn %#lx -> mfn %"PRI_mfn""
>                         " (-> gfn %#lx)\n",
> -                       mfn, gfn, mfn_x(p2mfn),
> +                       mfn_x(mfn), gfn, mfn_x(p2mfn),
>                         (mfn_valid(p2mfn)
> -                        ? get_gpfn_from_mfn(mfn_x(p2mfn))
> +                        ? get_pfn_from_mfn(p2mfn)
>                          : -1u));
>              /* This m2p entry is stale: the domain has another frame in
>               * this physical slot.  No great disaster, but for neatness,
>               * blow away the m2p entry. */
> -            set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
> +            set_pfn_from_mfn(mfn, INVALID_M2P_ENTRY);
>          }
>          __put_gfn(p2m, gfn);
>  
> -        P2M_PRINTK("OK: mfn=%#lx, gfn=%#lx, p2mfn=%#lx\n",
> -                       mfn, gfn, mfn_x(p2mfn));
> +        P2M_PRINTK("OK: mfn=%"PRI_mfn", gfn=%#lx, p2mfn=%"PRI_mfn"\n",
> +                   mfn_x(mfn), gfn, mfn_x(p2mfn));
>      }
>      spin_unlock(&d->page_alloc_lock);
>  
> diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
> index 9b0f268e74..cef2f90186 100644
> --- a/xen/arch/x86/mm/paging.c
> +++ b/xen/arch/x86/mm/paging.c
> @@ -344,7 +344,7 @@ void paging_mark_dirty(struct domain *d, mfn_t gmfn)
>          return;
>  
>      /* We /really/ mean PFN here, even for non-translated guests. */
> -    pfn = _pfn(get_gpfn_from_mfn(mfn_x(gmfn)));
> +    pfn = _pfn(get_pfn_from_mfn(gmfn));
>  
>      paging_mark_pfn_dirty(d, pfn);
>  }
> @@ -362,7 +362,7 @@ int paging_mfn_is_dirty(struct domain *d, mfn_t gmfn)
>      ASSERT(paging_mode_log_dirty(d));
>  
>      /* We /really/ mean PFN here, even for non-translated guests. */
> -    pfn = _pfn(get_gpfn_from_mfn(mfn_x(gmfn)));
> +    pfn = _pfn(get_pfn_from_mfn(gmfn));
>      /* Invalid pages can't be dirty. */
>      if ( unlikely(!VALID_M2P(pfn_x(pfn))) )
>          return 0;
> diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c
> index cef2d42254..b39ab67092 100644
> --- a/xen/arch/x86/pv/dom0_build.c
> +++ b/xen/arch/x86/pv/dom0_build.c
> @@ -39,7 +39,7 @@ void __init dom0_update_physmap(struct domain *d, unsigned long pfn,
>      else
>          ((unsigned int *)vphysmap_s)[pfn] = mfn;
>  
> -    set_gpfn_from_mfn(mfn, pfn);
> +    set_pfn_from_mfn(_mfn(mfn), pfn);
>  }
>  
>  static __init void mark_pv_pt_pages_rdonly(struct domain *d,
> @@ -798,8 +798,8 @@ int __init dom0_construct_pv(struct domain *d,
>      page_list_for_each ( page, &d->page_list )
>      {
>          mfn = mfn_x(page_to_mfn(page));
> -        BUG_ON(SHARED_M2P(get_gpfn_from_mfn(mfn)));
> -        if ( get_gpfn_from_mfn(mfn) >= count )
> +        BUG_ON(SHARED_M2P(get_pfn_from_mfn(_mfn(mfn))));
> +        if ( get_pfn_from_mfn(_mfn(mfn)) >= count )
>          {
>              BUG_ON(is_pv_32bit_domain(d));
>              if ( !page->u.inuse.type_info &&
> diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c
> index 44af765e3e..f80f2250fe 100644
> --- a/xen/arch/x86/x86_64/traps.c
> +++ b/xen/arch/x86/x86_64/traps.c
> @@ -184,7 +184,8 @@ void vcpu_show_registers(const struct vcpu *v)
>  
>  void show_page_walk(unsigned long addr)
>  {
> -    unsigned long pfn, mfn = read_cr3() >> PAGE_SHIFT;
> +    unsigned long pfn;
> +    mfn_t mfn = maddr_to_mfn(read_cr3());
>      l4_pgentry_t l4e, *l4t;
>      l3_pgentry_t l3e, *l3t;
>      l2_pgentry_t l2e, *l2t;
> @@ -194,52 +195,52 @@ void show_page_walk(unsigned long addr)
>      if ( !is_canonical_address(addr) )
>          return;
>  
> -    l4t = map_domain_page(_mfn(mfn));
> +    l4t = map_domain_page(mfn);
>      l4e = l4t[l4_table_offset(addr)];
>      unmap_domain_page(l4t);
> -    mfn = l4e_get_pfn(l4e);
> -    pfn = mfn_valid(_mfn(mfn)) && machine_to_phys_mapping_valid ?
> -          get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
> +    mfn = l4e_get_mfn(l4e);
> +    pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ?
> +          get_pfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
>      printk(" L4[0x%03lx] = %"PRIpte" %016lx\n",
>             l4_table_offset(addr), l4e_get_intpte(l4e), pfn);
>      if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) ||
> -         !mfn_valid(_mfn(mfn)) )
> +         !mfn_valid(mfn) )
>          return;
>  
> -    l3t = map_domain_page(_mfn(mfn));
> +    l3t = map_domain_page(mfn);
>      l3e = l3t[l3_table_offset(addr)];
>      unmap_domain_page(l3t);
> -    mfn = l3e_get_pfn(l3e);
> -    pfn = mfn_valid(_mfn(mfn)) && machine_to_phys_mapping_valid ?
> -          get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
> +    mfn = l3e_get_mfn(l3e);
> +    pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ?
> +          get_pfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
>      printk(" L3[0x%03lx] = %"PRIpte" %016lx%s\n",
>             l3_table_offset(addr), l3e_get_intpte(l3e), pfn,
>             (l3e_get_flags(l3e) & _PAGE_PSE) ? " (PSE)" : "");
>      if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) ||
>           (l3e_get_flags(l3e) & _PAGE_PSE) ||
> -         !mfn_valid(_mfn(mfn)) )
> +         !mfn_valid(mfn) )
>          return;
>  
> -    l2t = map_domain_page(_mfn(mfn));
> +    l2t = map_domain_page(mfn);
>      l2e = l2t[l2_table_offset(addr)];
>      unmap_domain_page(l2t);
> -    mfn = l2e_get_pfn(l2e);
> -    pfn = mfn_valid(_mfn(mfn)) && machine_to_phys_mapping_valid ?
> -          get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
> +    mfn = l2e_get_mfn(l2e);
> +    pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ?
> +          get_pfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
>      printk(" L2[0x%03lx] = %"PRIpte" %016lx%s\n",
>             l2_table_offset(addr), l2e_get_intpte(l2e), pfn,
>             (l2e_get_flags(l2e) & _PAGE_PSE) ? " (PSE)" : "");
>      if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) ||
>           (l2e_get_flags(l2e) & _PAGE_PSE) ||
> -         !mfn_valid(_mfn(mfn)) )
> +         !mfn_valid(mfn) )
>          return;
>  
> -    l1t = map_domain_page(_mfn(mfn));
> +    l1t = map_domain_page(mfn);
>      l1e = l1t[l1_table_offset(addr)];
>      unmap_domain_page(l1t);
> -    mfn = l1e_get_pfn(l1e);
> -    pfn = mfn_valid(_mfn(mfn)) && machine_to_phys_mapping_valid ?
> -          get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
> +    mfn = l1e_get_mfn(l1e);
> +    pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ?
> +          get_pfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
>      printk(" L1[0x%03lx] = %"PRIpte" %016lx\n",
>             l1_table_offset(addr), l1e_get_intpte(l1e), pfn);
>  }
> diff --git a/xen/common/memory.c b/xen/common/memory.c
> index d6a580da31..eecc9671ff 100644
> --- a/xen/common/memory.c
> +++ b/xen/common/memory.c
> @@ -273,7 +273,7 @@ static void populate_physmap(struct memop_args *a)
>              if ( !paging_mode_translate(d) )
>              {
>                  for ( j = 0; j < (1U << a->extent_order); j++ )
> -                    set_gpfn_from_mfn(mfn_x(mfn_add(mfn, j)), gpfn + j);
> +                    set_pfn_from_mfn(mfn_add(mfn, j), gpfn + j);
>  
>                  /* Inform the domain of the new page's machine address. */ 
>                  if ( unlikely(__copy_mfn_to_guest_offset(a->extent_list, i,
> diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
> index 6061cce24f..a100e03e2e 100644
> --- a/xen/common/page_alloc.c
> +++ b/xen/common/page_alloc.c
> @@ -1424,7 +1424,7 @@ static void free_heap_pages(
>  
>          /* This page is not a guest frame any more. */
>          page_set_owner(&pg[i], NULL); /* set_gpfn_from_mfn snoops pg owner */
> -        set_gpfn_from_mfn(mfn_x(mfn) + i, INVALID_M2P_ENTRY);
> +        set_pfn_from_mfn(mfn_add(mfn, i), INVALID_M2P_ENTRY);
>  
>          if ( need_scrub )
>          {
> diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
> index a9cb98a6c7..3c03be3bca 100644
> --- a/xen/include/asm-arm/mm.h
> +++ b/xen/include/asm-arm/mm.h
> @@ -322,7 +322,7 @@ struct page_info *get_page_from_gva(struct vcpu *v, vaddr_t va,
>  #define SHARED_M2P(_e)           ((_e) == SHARED_M2P_ENTRY)
>  
>  /* We don't have a M2P on Arm */
> -#define set_gpfn_from_mfn(mfn, pfn) do { (void) (mfn), (void)(pfn); } while (0)
> +static inline void set_pfn_from_mfn(mfn_t mfn, unsigned long pfn) {}
>  
>  /* Arch-specific portion of memory_op hypercall. */
>  long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg);
> diff --git a/xen/include/asm-x86/grant_table.h b/xen/include/asm-x86/grant_table.h
> index 661228dd39..d731b9e49f 100644
> --- a/xen/include/asm-x86/grant_table.h
> +++ b/xen/include/asm-x86/grant_table.h
> @@ -41,7 +41,7 @@ static inline int replace_grant_host_mapping(uint64_t addr, mfn_t frame,
>  #define gnttab_get_frame_gfn(gt, st, idx) ({                             \
>      mfn_t mfn_ = (st) ? gnttab_status_mfn(gt, idx)                       \
>                        : gnttab_shared_mfn(gt, idx);                      \
> -    unsigned long gpfn_ = get_gpfn_from_mfn(mfn_x(mfn_));                \
> +    unsigned long gpfn_ = get_pfn_from_mfn(mfn_);                        \
>      VALID_M2P(gpfn_) ? _gfn(gpfn_) : INVALID_GFN;                        \
>  })
>  
> diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
> index 4721725c60..bce60619c3 100644
> --- a/xen/include/asm-x86/mm.h
> +++ b/xen/include/asm-x86/mm.h
> @@ -492,22 +492,26 @@ extern struct domain *dom_xen, *dom_io, *dom_cow;       /* for vmcoreinfo */
>   */
>  extern bool machine_to_phys_mapping_valid;
>  
> -static inline void set_gpfn_from_mfn(unsigned long mfn, unsigned long pfn)
> +static inline void set_pfn_from_mfn(mfn_t mfn, unsigned long pfn)
>  {
> -    struct domain *d = page_get_owner(mfn_to_page(_mfn(mfn)));
> +    const unsigned long mfn_ = mfn_x(mfn);
> +    struct domain *d = page_get_owner(mfn_to_page(mfn));
>      unsigned long entry = (d && (d == dom_cow)) ? SHARED_M2P_ENTRY : pfn;
>  
>      if (!machine_to_phys_mapping_valid)
>          return;
>  
> -    if ( mfn < (RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START) / 4 )
> -        compat_machine_to_phys_mapping[mfn] = entry;
> -    machine_to_phys_mapping[mfn] = entry;
> +    if ( mfn_ < (RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START) / 4 )
> +        compat_machine_to_phys_mapping[mfn_] = entry;
> +    machine_to_phys_mapping[mfn_] = entry;
>  }
>  
>  extern struct rangeset *mmio_ro_ranges;
>  
> -#define get_gpfn_from_mfn(mfn)      (machine_to_phys_mapping[(mfn)])
> +static inline unsigned long get_pfn_from_mfn(mfn_t mfn)
> +{
> +    return machine_to_phys_mapping[mfn_x(mfn)];
> +}
>  
>  #define compat_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
>  #define compat_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))
> diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
> index 0157568be9..07b7ec6db0 100644
> --- a/xen/include/asm-x86/p2m.h
> +++ b/xen/include/asm-x86/p2m.h
> @@ -509,7 +509,7 @@ static inline struct page_info *get_page_from_gfn(
>  static inline gfn_t mfn_to_gfn(const struct domain *d, mfn_t mfn)
>  {
>      if ( paging_mode_translate(d) )
> -        return _gfn(get_gpfn_from_mfn(mfn_x(mfn)));
> +        return _gfn(get_pfn_from_mfn(mfn));
>      else
>          return _gfn(mfn_x(mfn));
>  }
> -- 
> 2.11.0
> 


 

