
Re: [Xen-devel] [PATCH 10/14] x86/mm: Switch {get,put}_gfn() infrastructure to using gfn_t



> -----Original Message-----
> From: Andrew Cooper [mailto:andrew.cooper3@xxxxxxxxxx]
> Sent: 21 November 2018 13:21
> To: Xen-devel <xen-devel@xxxxxxxxxxxxx>
> Cc: Andrew Cooper <Andrew.Cooper3@xxxxxxxxxx>; Jan Beulich
> <JBeulich@xxxxxxxx>; Wei Liu <wei.liu2@xxxxxxxxxx>; Roger Pau Monne
> <roger.pau@xxxxxxxxxx>; Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>; Tamas
> K Lengyel <tamas@xxxxxxxxxxxxx>; George Dunlap <George.Dunlap@xxxxxxxxxx>;
> Tim (Xen.org) <tim@xxxxxxx>; Paul Durrant <Paul.Durrant@xxxxxxxxxx>; Jun
> Nakajima <jun.nakajima@xxxxxxxxx>; Kevin Tian <kevin.tian@xxxxxxxxx>;
> Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>; Suravee Suthikulpanit
> <suravee.suthikulpanit@xxxxxxx>; Brian Woods <brian.woods@xxxxxxx>
> Subject: [PATCH 10/14] x86/mm: Switch {get,put}_gfn() infrastructure to
> using gfn_t
> 
> Seemingly, a majority of users either override the helpers anyway, or have
> a gfn_t in their hands.
> 
> Update the API, and adjust all users to match.
> 
> Doing this highlighted a gaping altp2m security hole in
> vmx_vcpu_update_vmfunc_ve(), which will need addressing now that we can
> discuss the problem and options publicly.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>

Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
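
As an aside for readers following the series: the type safety here comes from
wrapping the raw frame number in a single-member struct, so an unsigned long
can no longer be passed where a gfn_t is expected. A minimal sketch of the
pattern (my own illustration, not Xen's exact TYPE_SAFE macro expansion):

    #include <stdbool.h>

    /* Typed guest frame number: a struct, so implicit conversion fails. */
    typedef struct { unsigned long gfn; } gfn_t;

    /* Wrap a raw frame number into the typed form. */
    static inline gfn_t _gfn(unsigned long g) { return (gfn_t){ g }; }

    /* Unwrap back to a raw integer, e.g. for printing with %lx. */
    static inline unsigned long gfn_x(gfn_t g) { return g.gfn; }

    /* Comparison must be explicit, as the patch does via gfn_eq()/mfn_eq(). */
    static inline bool gfn_eq(gfn_t a, gfn_t b) { return a.gfn == b.gfn; }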

> ---
> CC: Jan Beulich <JBeulich@xxxxxxxx>
> CC: Wei Liu <wei.liu2@xxxxxxxxxx>
> CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
> CC: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
> CC: Tamas K Lengyel <tamas@xxxxxxxxxxxxx>
> CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
> CC: Tim Deegan <tim@xxxxxxx>
> CC: Paul Durrant <paul.durrant@xxxxxxxxxx>
> CC: Jun Nakajima <jun.nakajima@xxxxxxxxx>
> CC: Kevin Tian <kevin.tian@xxxxxxxxx>
> CC: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
> CC: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
> CC: Brian Woods <brian.woods@xxxxxxx>
> ---
>  xen/arch/x86/cpu/mcheck/mcaction.c        |  2 +-
>  xen/arch/x86/cpu/mcheck/mce.c             | 14 +++----
>  xen/arch/x86/cpu/mcheck/vmce.c            |  4 +-
>  xen/arch/x86/cpu/mcheck/vmce.h            |  2 +-
>  xen/arch/x86/debug.c                      |  6 +--
>  xen/arch/x86/domain.c                     | 19 ++++-----
>  xen/arch/x86/domctl.c                     |  8 ++--
>  xen/arch/x86/hvm/dm.c                     | 12 +++---
>  xen/arch/x86/hvm/emulate.c                | 16 ++++----
>  xen/arch/x86/hvm/grant_table.c            |  4 +-
>  xen/arch/x86/hvm/hvm.c                    | 25 ++++++------
>  xen/arch/x86/hvm/mtrr.c                   |  2 +-
>  xen/arch/x86/hvm/svm/svm.c                |  2 +-
>  xen/arch/x86/hvm/vmx/vmx.c                |  7 ++--
>  xen/arch/x86/mm.c                         | 10 ++---
>  xen/arch/x86/mm/hap/hap.c                 |  2 +-
>  xen/arch/x86/mm/hap/nested_hap.c          |  6 +--
>  xen/arch/x86/mm/mem_access.c              |  5 +--
>  xen/arch/x86/mm/mem_sharing.c             | 24 +++++------
>  xen/arch/x86/mm/p2m.c                     | 45 ++++++++++----------
>  xen/arch/x86/mm/shadow/common.c           |  4 +-
>  xen/arch/x86/mm/shadow/multi.c            | 68 +++++++++++++++----------------
>  xen/arch/x86/mm/shadow/types.h            |  4 --
>  xen/common/grant_table.c                  | 10 ++---
>  xen/common/memory.c                       | 24 +++++------
>  xen/drivers/passthrough/amd/iommu_guest.c |  8 ----
>  xen/include/asm-x86/guest_pt.h            |  4 --
>  xen/include/asm-x86/p2m.h                 | 30 +++++++-------
>  28 files changed, 172 insertions(+), 195 deletions(-)
> 
> diff --git a/xen/arch/x86/cpu/mcheck/mcaction.c b/xen/arch/x86/cpu/mcheck/mcaction.c
> index e422674..c8e0cf2 100644
> --- a/xen/arch/x86/cpu/mcheck/mcaction.c
> +++ b/xen/arch/x86/cpu/mcheck/mcaction.c
> @@ -89,7 +89,7 @@ mc_memerr_dhandler(struct mca_binfo *binfo,
>                  ASSERT(d);
>                  gfn = get_gpfn_from_mfn((bank->mc_addr) >> PAGE_SHIFT);
> 
> -                if ( unmmap_broken_page(d, _mfn(mfn), gfn) )
> +                if ( unmmap_broken_page(d, _mfn(mfn), _gfn(gfn)) )
>                  {
>                      printk("Unmap broken memory %lx for DOM%d failed\n",
>                             mfn, d->domain_id);
> diff --git a/xen/arch/x86/cpu/mcheck/mce.c b/xen/arch/x86/cpu/mcheck/mce.c
> index 30cdb06..c96c053 100644
> --- a/xen/arch/x86/cpu/mcheck/mce.c
> +++ b/xen/arch/x86/cpu/mcheck/mce.c
> @@ -1469,9 +1469,6 @@ long do_mca(XEN_GUEST_HANDLE_PARAM(xen_mc_t) u_xen_mc)
>              struct domain *d;
>              struct mcinfo_msr *msr;
>              unsigned int i;
> -            paddr_t gaddr;
> -            unsigned long gfn, mfn;
> -            p2m_type_t t;
> 
>              domid = (mc_msrinject->mcinj_domid == DOMID_SELF) ?
>                      current->domain->domain_id : mc_msrinject->mcinj_domid;
> @@ -1489,11 +1486,12 @@ long do_mca(XEN_GUEST_HANDLE_PARAM(xen_mc_t) u_xen_mc)
>                    i < mc_msrinject->mcinj_count;
>                    i++, msr++ )
>              {
> -                gaddr = msr->value;
> -                gfn = PFN_DOWN(gaddr);
> -                mfn = mfn_x(get_gfn(d, gfn, &t));
> +                p2m_type_t t;
> +                paddr_t gaddr = msr->value;
> +                gfn_t gfn = _gfn(PFN_DOWN(gaddr));
> +                mfn_t mfn = get_gfn(d, gfn, &t);
> 
> -                if ( mfn == mfn_x(INVALID_MFN) )
> +                if ( mfn_eq(mfn, INVALID_MFN) )
>                  {
>                      put_gfn(d, gfn);
>                      put_domain(d);
> @@ -1501,7 +1499,7 @@ long do_mca(XEN_GUEST_HANDLE_PARAM(xen_mc_t) u_xen_mc)
>                                       -EINVAL, gfn, domid);
>                  }
> 
> -                msr->value = pfn_to_paddr(mfn) | (gaddr & (PAGE_SIZE - 1));
> +                msr->value = mfn_to_maddr(mfn) | (gaddr & (PAGE_SIZE - 1));
> 
>                  put_gfn(d, gfn);
>              }
> diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
> index f15835e..e257e94 100644
> --- a/xen/arch/x86/cpu/mcheck/vmce.c
> +++ b/xen/arch/x86/cpu/mcheck/vmce.c
> @@ -512,7 +512,7 @@ int fill_vmsr_data(struct mcinfo_bank *mc_bank, struct domain *d,
>   * XXX following situation missed:
>   * PoD, Foreign mapped, Granted, Shared
>   */
> -int unmmap_broken_page(struct domain *d, mfn_t mfn, unsigned long gfn)
> +int unmmap_broken_page(struct domain *d, mfn_t mfn, gfn_t gfn)
>  {
>      mfn_t r_mfn;
>      p2m_type_t pt;
> @@ -533,7 +533,7 @@ int unmmap_broken_page(struct domain *d, mfn_t mfn, unsigned long gfn)
>      if ( p2m_to_mask(pt) & P2M_UNMAP_TYPES)
>      {
>          ASSERT(mfn_eq(r_mfn, mfn));
> -        rc = p2m_change_type_one(d, gfn, pt, p2m_ram_broken);
> +        rc = p2m_change_type_one(d, gfn_x(gfn), pt, p2m_ram_broken);
>      }
>      put_gfn(d, gfn);
> 
> diff --git a/xen/arch/x86/cpu/mcheck/vmce.h b/xen/arch/x86/cpu/mcheck/vmce.h
> index 2797e00..a37f3be 100644
> --- a/xen/arch/x86/cpu/mcheck/vmce.h
> +++ b/xen/arch/x86/cpu/mcheck/vmce.h
> @@ -9,7 +9,7 @@ int vmce_init(struct cpuinfo_x86 *c);
>          && hardware_domain->vcpu[0] \
>          && guest_enabled_event(hardware_domain->vcpu[0], VIRQ_MCA))
> 
> -int unmmap_broken_page(struct domain *d, mfn_t mfn, unsigned long gfn);
> +int unmmap_broken_page(struct domain *d, mfn_t mfn, gfn_t gfn);
> 
>  int vmce_intel_rdmsr(const struct vcpu *, uint32_t msr, uint64_t *val);
>  int vmce_intel_wrmsr(struct vcpu *, uint32_t msr, uint64_t val);
> diff --git a/xen/arch/x86/debug.c b/xen/arch/x86/debug.c
> index a500df0..7c2dc8c 100644
> --- a/xen/arch/x86/debug.c
> +++ b/xen/arch/x86/debug.c
> @@ -58,7 +58,7 @@ dbg_hvm_va2mfn(dbgva_t vaddr, struct domain *dp, int toaddr, gfn_t *gfn)
>          return INVALID_MFN;
>      }
> 
> -    mfn = get_gfn(dp, gfn_x(*gfn), &gfntype);
> +    mfn = get_gfn(dp, *gfn, &gfntype);
>      if ( p2m_is_readonly(gfntype) && toaddr )
>      {
>          DBGP2("kdb:p2m_is_readonly: gfntype:%x\n", gfntype);
> @@ -70,7 +70,7 @@ dbg_hvm_va2mfn(dbgva_t vaddr, struct domain *dp, int toaddr, gfn_t *gfn)
> 
>      if ( mfn_eq(mfn, INVALID_MFN) )
>      {
> -        put_gfn(dp, gfn_x(*gfn));
> +        put_gfn(dp, *gfn);
>          *gfn = INVALID_GFN;
>      }
> 
> @@ -189,7 +189,7 @@ static unsigned int dbg_rw_guest_mem(struct domain *dp, void * __user gaddr,
> 
>          unmap_domain_page(va);
>          if ( !gfn_eq(gfn, INVALID_GFN) )
> -            put_gfn(dp, gfn_x(gfn));
> +            put_gfn(dp, gfn);
> 
>          addr += pagecnt;
>          buf += pagecnt;
> diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
> index b4d5948..327c961 100644
> --- a/xen/arch/x86/domain.c
> +++ b/xen/arch/x86/domain.c
> @@ -677,7 +677,7 @@ int arch_domain_soft_reset(struct domain *d)
>      int ret = 0;
>      struct domain *owner;
>      mfn_t mfn;
> -    unsigned long gfn;
> +    gfn_t gfn;
>      p2m_type_t p2mt;
>      unsigned int i;
> 
> @@ -711,19 +711,19 @@ int arch_domain_soft_reset(struct domain *d)
>      ASSERT( owner == d );
> 
>      mfn = page_to_mfn(page);
> -    gfn = mfn_to_gmfn(d, mfn_x(mfn));
> +    gfn = _gfn(mfn_to_gmfn(d, mfn_x(mfn)));
> 
>      /*
>      * gfn == INVALID_GFN indicates that the shared_info page was never mapped
>       * to the domain's address space and there is nothing to replace.
>       */
> -    if ( gfn == gfn_x(INVALID_GFN) )
> +    if ( gfn_eq(gfn, INVALID_GFN) )
>          goto exit_put_page;
> 
>      if ( !mfn_eq(get_gfn_query(d, gfn, &p2mt), mfn) )
>      {
>          printk(XENLOG_G_ERR "Failed to get Dom%d's shared_info GFN (%lx)\n",
> -               d->domain_id, gfn);
> +               d->domain_id, gfn_x(gfn));
>          ret = -EINVAL;
>          goto exit_put_gfn;
>      }
> @@ -732,26 +732,25 @@ int arch_domain_soft_reset(struct domain *d)
>      if ( !new_page )
>      {
>          printk(XENLOG_G_ERR "Failed to alloc a page to replace"
> -               " Dom%d's shared_info frame %lx\n", d->domain_id, gfn);
> +               " Dom%d's shared_info frame %lx\n", d->domain_id, gfn_x(gfn));
>          ret = -ENOMEM;
>          goto exit_put_gfn;
>      }
> 
> -    ret = guest_physmap_remove_page(d, _gfn(gfn), mfn, PAGE_ORDER_4K);
> +    ret = guest_physmap_remove_page(d, gfn, mfn, PAGE_ORDER_4K);
>      if ( ret )
>      {
>          printk(XENLOG_G_ERR "Failed to remove Dom%d's shared_info frame %lx\n",
> -               d->domain_id, gfn);
> +               d->domain_id, gfn_x(gfn));
>          free_domheap_page(new_page);
>          goto exit_put_gfn;
>      }
> 
> -    ret = guest_physmap_add_page(d, _gfn(gfn), page_to_mfn(new_page),
> -                                 PAGE_ORDER_4K);
> +    ret = guest_physmap_add_page(d, gfn, page_to_mfn(new_page), PAGE_ORDER_4K);
>      if ( ret )
>      {
>          printk(XENLOG_G_ERR "Failed to add a page to replace"
> -               " Dom%d's shared_info frame %lx\n", d->domain_id, gfn);
> +               " Dom%d's shared_info frame %lx\n", d->domain_id, gfn_x(gfn));
>          free_domheap_page(new_page);
>      }
>   exit_put_gfn:
> diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
> index aa8ad19..694b4d5 100644
> --- a/xen/arch/x86/domctl.c
> +++ b/xen/arch/x86/domctl.c
> @@ -1253,15 +1253,15 @@ long arch_do_domctl(
>      case XEN_DOMCTL_set_broken_page_p2m:
>      {
>          p2m_type_t pt;
> -        unsigned long pfn = domctl->u.set_broken_page_p2m.pfn;
> -        mfn_t mfn = get_gfn_query(d, pfn, &pt);
> +        gfn_t gfn = _gfn(domctl->u.set_broken_page_p2m.pfn);
> +        mfn_t mfn = get_gfn_query(d, gfn, &pt);
> 
>          if ( unlikely(!mfn_valid(mfn)) || unlikely(!p2m_is_ram(pt)) )
>              ret = -EINVAL;
>          else
> -            ret = p2m_change_type_one(d, pfn, pt, p2m_ram_broken);
> +            ret = p2m_change_type_one(d, gfn_x(gfn), pt, p2m_ram_broken);
> 
> -        put_gfn(d, pfn);
> +        put_gfn(d, gfn);
>          break;
>      }
> 
> diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
> index d6d0e8b..9938f4b 100644
> --- a/xen/arch/x86/hvm/dm.c
> +++ b/xen/arch/x86/hvm/dm.c
> @@ -269,14 +269,14 @@ static int set_mem_type(struct domain *d,
> 
>      while ( iter < data->nr )
>      {
> -        unsigned long pfn = data->first_pfn + iter;
> +        gfn_t gfn = _gfn(data->first_pfn + iter);
>          p2m_type_t t;
> 
> -        get_gfn_unshare(d, pfn, &t);
> +        get_gfn_unshare(d, gfn, &t);
>          if ( p2m_is_paging(t) )
>          {
> -            put_gfn(d, pfn);
> -            p2m_mem_paging_populate(d, pfn);
> +            put_gfn(d, gfn);
> +            p2m_mem_paging_populate(d, gfn_x(gfn));
>              return -EAGAIN;
>          }
> 
> @@ -285,9 +285,9 @@ static int set_mem_type(struct domain *d,
>          else if ( !allow_p2m_type_change(t, memtype[mem_type]) )
>              rc = -EINVAL;
>          else
> -            rc = p2m_change_type_one(d, pfn, t, memtype[mem_type]);
> +            rc = p2m_change_type_one(d, gfn_x(gfn), t, memtype[mem_type]);
> 
> -        put_gfn(d, pfn);
> +        put_gfn(d, gfn);
> 
>          if ( rc )
>              break;
> diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
> index 2d02ef1..1335b2c 100644
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -255,13 +255,13 @@ static int hvmemul_do_io(
>           * so the device model side needs to check the incoming ioreq event.
>           */
>          struct hvm_ioreq_server *s = NULL;
> -        p2m_type_t p2mt = p2m_invalid;
> 
>          if ( is_mmio )
>          {
> -            unsigned long gmfn = paddr_to_pfn(addr);
> +            p2m_type_t p2mt = p2m_invalid;
> +            gfn_t gfn = gaddr_to_gfn(addr);
> 
> -            get_gfn_query_unlocked(currd, gmfn, &p2mt);
> +            get_gfn_query_unlocked(currd, gfn, &p2mt);
> 
>              if ( p2mt == p2m_ioreq_server )
>              {
> @@ -1590,7 +1590,7 @@ static int hvmemul_rep_ins(
>      if ( rc != X86EMUL_OKAY )
>          return rc;
> 
> -    (void) get_gfn_query_unlocked(current->domain, gpa >> PAGE_SHIFT, &p2mt);
> +    get_gfn_query_unlocked(current->domain, gaddr_to_gfn(gpa), &p2mt);
>      if ( p2mt == p2m_mmio_direct || p2mt == p2m_mmio_dm )
>          return X86EMUL_UNHANDLEABLE;
> 
> @@ -1671,7 +1671,7 @@ static int hvmemul_rep_outs(
>      if ( rc != X86EMUL_OKAY )
>          return rc;
> 
> -    (void) get_gfn_query_unlocked(current->domain, gpa >> PAGE_SHIFT, &p2mt);
> +    get_gfn_query_unlocked(current->domain, gaddr_to_gfn(gpa), &p2mt);
>      if ( p2mt == p2m_mmio_direct || p2mt == p2m_mmio_dm )
>          return X86EMUL_UNHANDLEABLE;
> 
> @@ -1750,8 +1750,8 @@ static int hvmemul_rep_movs(
>      }
> 
>      /* Check for MMIO ops */
> -    (void) get_gfn_query_unlocked(current->domain, sgpa >> PAGE_SHIFT, &sp2mt);
> -    (void) get_gfn_query_unlocked(current->domain, dgpa >> PAGE_SHIFT, &dp2mt);
> +    get_gfn_query_unlocked(current->domain, gaddr_to_gfn(sgpa), &sp2mt);
> +    get_gfn_query_unlocked(current->domain, gaddr_to_gfn(dgpa), &dp2mt);
> 
>      if ( sp2mt == p2m_mmio_direct || dp2mt == p2m_mmio_direct ||
>           (sp2mt == p2m_mmio_dm && dp2mt == p2m_mmio_dm) )
> @@ -1878,7 +1878,7 @@ static int hvmemul_rep_stos(
>      }
> 
>      /* Check for MMIO op */
> -    (void)get_gfn_query_unlocked(current->domain, gpa >> PAGE_SHIFT, &p2mt);
> +    get_gfn_query_unlocked(current->domain, gaddr_to_gfn(gpa), &p2mt);
> 
>      switch ( p2mt )
>      {
> diff --git a/xen/arch/x86/hvm/grant_table.c b/xen/arch/x86/hvm/grant_table.c
> index ecd7d07..04a3106 100644
> --- a/xen/arch/x86/hvm/grant_table.c
> +++ b/xen/arch/x86/hvm/grant_table.c
> @@ -51,7 +51,7 @@ int create_grant_p2m_mapping(uint64_t addr, mfn_t frame,
>  int replace_grant_p2m_mapping(uint64_t addr, mfn_t frame,
>                                uint64_t new_addr, unsigned int flags)
>  {
> -    unsigned long gfn = (unsigned long)(addr >> PAGE_SHIFT);
> +    gfn_t gfn = gaddr_to_gfn(addr);
>      p2m_type_t type;
>      mfn_t old_mfn;
>      struct domain *d = current->domain;
> @@ -68,7 +68,7 @@ int replace_grant_p2m_mapping(uint64_t addr, mfn_t frame,
>                   type, mfn_x(old_mfn), mfn_x(frame));
>          return GNTST_general_error;
>      }
> -    if ( guest_physmap_remove_page(d, _gfn(gfn), frame, PAGE_ORDER_4K) )
> +    if ( guest_physmap_remove_page(d, gfn, frame, PAGE_ORDER_4K) )
>      {
>          put_gfn(d, gfn);
>          return GNTST_general_error;
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index db60f23..987c26a 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -1679,7 +1679,7 @@ void hvm_inject_event(const struct x86_event *event)
>  int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
>                                struct npfec npfec)
>  {
> -    unsigned long gfn = gpa >> PAGE_SHIFT;
> +    gfn_t gfn = gaddr_to_gfn(gpa);
>      p2m_type_t p2mt;
>      p2m_access_t p2ma;
>      mfn_t mfn;
> @@ -1729,7 +1729,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
>              return 1;
>          case NESTEDHVM_PAGEFAULT_L0_ERROR:
>              /* gpa is now translated to l1 guest address, update gfn. */
> -            gfn = gpa >> PAGE_SHIFT;
> +            gfn = gaddr_to_gfn(gpa);
>              break;
>          }
>      }
> @@ -1817,7 +1817,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
>              {
>                  bool_t sve;
> 
> -                p2m->get_entry(p2m, _gfn(gfn), &p2mt, &p2ma, 0, NULL, &sve);
> +                p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL, &sve);
> 
>                  if ( !sve && altp2m_vcpu_emulate_ve(curr) )
>                  {
> @@ -1862,7 +1862,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
>      {
>          ASSERT(p2m_is_hostp2m(p2m));
>          sharing_enomem =
> -            (mem_sharing_unshare_page(currd, gfn, 0) < 0);
> +            (mem_sharing_unshare_page(currd, gfn_x(gfn), 0) < 0);
>          rc = 1;
>          goto out_put_gfn;
>      }
> @@ -1878,7 +1878,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
>           */
>          if ( npfec.write_access )
>          {
> -            paging_mark_pfn_dirty(currd, _pfn(gfn));
> +            paging_mark_pfn_dirty(currd, _pfn(gfn_x(gfn)));
>              /*
>               * If p2m is really an altp2m, unlock it before changing the type,
>               * as p2m_altp2m_propagate_change() needs to acquire the
> @@ -1886,7 +1886,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
>               */
>              if ( p2m != hostp2m )
>                  __put_gfn(p2m, gfn);
> -            p2m_change_type_one(currd, gfn, p2m_ram_logdirty, p2m_ram_rw);
> +            p2m_change_type_one(currd, gfn_x(gfn), p2m_ram_logdirty, p2m_ram_rw);
>              __put_gfn(hostp2m, gfn);
> 
>              goto out;
> @@ -1916,16 +1916,16 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
>       * sleep on event ring wait queues, and we must not hold
>       * locks in such circumstance */
>      if ( paged )
> -        p2m_mem_paging_populate(currd, gfn);
> +        p2m_mem_paging_populate(currd, gfn_x(gfn));
>      if ( sharing_enomem )
>      {
>          int rv;
> 
> -        if ( (rv = mem_sharing_notify_enomem(currd, gfn, true)) < 0 )
> +        if ( (rv = mem_sharing_notify_enomem(currd, gfn_x(gfn), true)) < 0 )
>          {
>              gdprintk(XENLOG_ERR, "Domain %hu attempt to unshare "
>                       "gfn %lx, ENOMEM and no helper (rc %d)\n",
> -                     currd->domain_id, gfn, rv);
> +                     currd->domain_id, gfn_x(gfn), rv);
>              /* Crash the domain */
>              rc = 0;
>          }
> @@ -4601,8 +4601,9 @@ static int do_altp2m_op(
>          v = d->vcpu[a.u.enable_notify.vcpu_id];
> 
>          if ( !gfn_eq(vcpu_altp2m(v).veinfo_gfn, INVALID_GFN) ||
> -             mfn_eq(get_gfn_query_unlocked(v->domain,
> -                    a.u.enable_notify.gfn, &p2mt), INVALID_MFN) )
> +             mfn_eq(get_gfn_query_unlocked(
> +                        v->domain, _gfn(a.u.enable_notify.gfn), &p2mt),
> +                    INVALID_MFN) )
>          {
>              rc = -EINVAL;
>              break;
> @@ -4866,7 +4867,7 @@ static int hvmop_get_mem_type(
>       * type, not in allocating or unsharing. That'll happen
>       * on access.
>       */
> -    get_gfn_query_unlocked(d, a.pfn, &t);
> +    get_gfn_query_unlocked(d, _gfn(a.pfn), &t);
>      if ( p2m_is_mmio(t) )
>          a.mem_type =  HVMMEM_mmio_dm;
>      else if ( t == p2m_ioreq_server )
> diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c
> index b8fa340..f553e4d 100644
> --- a/xen/arch/x86/hvm/mtrr.c
> +++ b/xen/arch/x86/hvm/mtrr.c
> @@ -366,7 +366,7 @@ uint32_t get_pat_flags(struct vcpu *v,
>      {
>          struct domain *d = v->domain;
>          p2m_type_t p2mt;
> -        get_gfn_query_unlocked(d, paddr_to_pfn(gpaddr), &p2mt);
> +        get_gfn_query_unlocked(d, gaddr_to_gfn(gpaddr), &p2mt);
>          if (p2m_is_ram(p2mt))
>              gdprintk(XENLOG_WARNING,
>                      "Conflict occurs for a given guest l1e flags:%x "
> diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
> index b9a8900..46f9893 100644
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -1762,7 +1762,7 @@ static void svm_do_nested_pgfault(struct vcpu *v,
>      struct cpu_user_regs *regs, uint64_t pfec, paddr_t gpa)
>  {
>      int ret;
> -    unsigned long gfn = gpa >> PAGE_SHIFT;
> +    gfn_t gfn = gaddr_to_gfn(gpa);
>      mfn_t mfn;
>      p2m_type_t p2mt;
>      p2m_access_t p2ma;
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index 365eeb2..b5370dd 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -2199,7 +2199,8 @@ static void vmx_vcpu_update_vmfunc_ve(struct vcpu *v)
>              p2m_type_t t;
>              mfn_t mfn;
> 
> -            mfn = get_gfn_query_unlocked(d, gfn_x(vcpu_altp2m(v).veinfo_gfn), &t);
> +            /* TODO: This is a security issue... */
> +            mfn = get_gfn_query_unlocked(d, vcpu_altp2m(v).veinfo_gfn, &t);
> 
>              if ( !mfn_eq(mfn, INVALID_MFN) )
>              {
> @@ -3328,7 +3329,7 @@ static void ept_handle_violation(ept_qual_t q, paddr_t gpa)
> 
>          _d.gpa = gpa;
>          _d.qualification = q.raw;
> -        _d.mfn = mfn_x(get_gfn_query_unlocked(d, gfn, &_d.p2mt));
> +        _d.mfn = mfn_x(get_gfn_query_unlocked(d, _gfn(gfn), &_d.p2mt));
> 
>          __trace_var(TRC_HVM_NPF, 0, sizeof(_d), &_d);
>      }
> @@ -3358,7 +3359,7 @@ static void ept_handle_violation(ept_qual_t q, paddr_t gpa)
>      }
> 
>      /* Everything else is an error. */
> -    mfn = get_gfn_query_unlocked(d, gfn, &p2mt);
> +    mfn = get_gfn_query_unlocked(d, _gfn(gfn), &p2mt);
>      gprintk(XENLOG_ERR,
>              "EPT violation %#lx (%c%c%c/%c%c%c) gpa %#"PRIpaddr" mfn %#lx type %i\n",
>              q.raw,
> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
> index 28a0030..c3c7628 100644
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -4364,11 +4364,11 @@ int xenmem_add_to_physmap_one(
>              p2m_type_t p2mt;
> 
>              gfn = idx;
> -            mfn = get_gfn_unshare(d, gfn, &p2mt);
> +            mfn = get_gfn_unshare(d, _gfn(gfn), &p2mt);
>              /* If the page is still shared, exit early */
>              if ( p2m_is_shared(p2mt) )
>              {
> -                put_gfn(d, gfn);
> +                put_gfn(d, _gfn(gfn));
>                  return -ENOMEM;
>              }
>              page = get_page_from_mfn(mfn, d);
> @@ -4389,7 +4389,7 @@ int xenmem_add_to_physmap_one(
>      }
> 
>      /* Remove previously mapped page if it was present. */
> -    prev_mfn = mfn_x(get_gfn(d, gfn_x(gpfn), &p2mt));
> +    prev_mfn = mfn_x(get_gfn(d, gpfn, &p2mt));
>      if ( mfn_valid(_mfn(prev_mfn)) )
>      {
>          if ( is_xen_heap_mfn(prev_mfn) )
> @@ -4400,7 +4400,7 @@ int xenmem_add_to_physmap_one(
>              rc = guest_remove_page(d, gfn_x(gpfn));
>      }
>      /* In the XENMAPSPACE_gmfn case we still hold a ref on the old page. */
> -    put_gfn(d, gfn_x(gpfn));
> +    put_gfn(d, gpfn);
> 
>      if ( rc )
>          goto put_both;
> @@ -4423,7 +4423,7 @@ int xenmem_add_to_physmap_one(
>   put_both:
>      /* In the XENMAPSPACE_gmfn case, we took a ref of the gfn at the top. */
>      if ( space == XENMAPSPACE_gmfn )
> -        put_gfn(d, gfn);
> +        put_gfn(d, _gfn(gfn));
> 
>      if ( page )
>          put_page(page);
> diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
> index 3d651b9..0ac7d10 100644
> --- a/xen/arch/x86/mm/hap/hap.c
> +++ b/xen/arch/x86/mm/hap/hap.c
> @@ -681,7 +681,7 @@ hap_paging_get_mode(struct vcpu *v)
>  static void hap_update_paging_modes(struct vcpu *v)
>  {
>      struct domain *d = v->domain;
> -    unsigned long cr3_gfn = v->arch.hvm.guest_cr[3] >> PAGE_SHIFT;
> +    gfn_t cr3_gfn = gaddr_to_gfn(v->arch.hvm.guest_cr[3]);
>      p2m_type_t t;
> 
>      /* We hold onto the cr3 as it may be modified later, and
> diff --git a/xen/arch/x86/mm/hap/nested_hap.c b/xen/arch/x86/mm/hap/nested_hap.c
> index d2a07a5..d83c436 100644
> --- a/xen/arch/x86/mm/hap/nested_hap.c
> +++ b/xen/arch/x86/mm/hap/nested_hap.c
> @@ -150,12 +150,12 @@ nestedhap_walk_L0_p2m(struct p2m_domain *p2m, paddr_t L1_gpa, paddr_t *L0_gpa,
>                        unsigned int *page_order,
>                        bool_t access_r, bool_t access_w, bool_t access_x)
>  {
> +    gfn_t l1_gfn = gaddr_to_gfn(L1_gpa);
>      mfn_t mfn;
>      int rc;
> 
>      /* walk L0 P2M table */
> -    mfn = get_gfn_type_access(p2m, L1_gpa >> PAGE_SHIFT, p2mt, p2ma,
> -                              0, page_order);
> +    mfn = get_gfn_type_access(p2m, l1_gfn, p2mt, p2ma, 0, page_order);
> 
>      rc = NESTEDHVM_PAGEFAULT_DIRECT_MMIO;
>      if ( *p2mt == p2m_mmio_direct )
> @@ -178,7 +178,7 @@ nestedhap_walk_L0_p2m(struct p2m_domain *p2m, paddr_t L1_gpa, paddr_t *L0_gpa,
>  direct_mmio_out:
>      *L0_gpa = (mfn_x(mfn) << PAGE_SHIFT) + (L1_gpa & ~PAGE_MASK);
>  out:
> -    __put_gfn(p2m, L1_gpa >> PAGE_SHIFT);
> +    __put_gfn(p2m, l1_gfn);
>      return rc;
>  }
> 
> diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
> index 30c2f1a..caa33c3 100644
> --- a/xen/arch/x86/mm/mem_access.c
> +++ b/xen/arch/x86/mm/mem_access.c
> @@ -263,7 +263,6 @@ int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m,
>      p2m_type_t t;
>      p2m_access_t old_a;
>      unsigned int page_order;
> -    unsigned long gfn_l = gfn_x(gfn);
>      int rc;
> 
>      mfn = ap2m->get_entry(ap2m, gfn, &t, &old_a, 0, NULL, NULL);
> @@ -272,7 +271,7 @@ int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m,
>      if ( !mfn_valid(mfn) )
>      {
> 
> -        mfn = __get_gfn_type_access(hp2m, gfn_l, &t, &old_a,
> +        mfn = __get_gfn_type_access(hp2m, gfn, &t, &old_a,
>                                      P2M_ALLOC | P2M_UNSHARE, &page_order, 0);
> 
>          rc = -ESRCH;
> @@ -283,7 +282,7 @@ int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m,
>          if ( page_order != PAGE_ORDER_4K )
>          {
>              unsigned long mask = ~((1UL << page_order) - 1);
> -            gfn_t gfn2 = _gfn(gfn_l & mask);
> +            gfn_t gfn2 = _gfn(gfn_x(gfn) & mask);
>              mfn_t mfn2 = _mfn(mfn_x(mfn) & mask);
> 
>              rc = ap2m->set_entry(ap2m, gfn2, mfn2, page_order, t, old_a, 1);
> diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
> index 573d354..7f2bf80 100644
> --- a/xen/arch/x86/mm/mem_sharing.c
> +++ b/xen/arch/x86/mm/mem_sharing.c
> @@ -499,7 +499,7 @@ static int audit(void)
>                  errors++;
>                  continue;
>              }
> -            o_mfn = get_gfn_query_unlocked(d, g->gfn, &t);
> +            o_mfn = get_gfn_query_unlocked(d, _gfn(g->gfn), &t);
>              if ( !mfn_eq(o_mfn, mfn) )
>              {
>                  MEM_SHARING_DEBUG("Incorrect P2M for d=%hu, PFN=%lx."
> @@ -732,12 +732,12 @@ static int debug_gfn(struct domain *d, gfn_t gfn)
>      mfn_t mfn;
>      int num_refs;
> 
> -    mfn = get_gfn_query(d, gfn_x(gfn), &p2mt);
> +    mfn = get_gfn_query(d, gfn, &p2mt);
> 
>      MEM_SHARING_DEBUG("Debug for dom%d, gfn=%" PRI_gfn "\n",
>                        d->domain_id, gfn_x(gfn));
>      num_refs = debug_mfn(mfn);
> -    put_gfn(d, gfn_x(gfn));
> +    put_gfn(d, gfn);
> 
>      return num_refs;
>  }
> @@ -775,7 +775,7 @@ static int nominate_page(struct domain *d, gfn_t gfn,
> 
>      *phandle = 0UL;
> 
> -    mfn = get_gfn_type_access(hp2m, gfn_x(gfn), &p2mt, &p2ma, 0, NULL);
> +    mfn = get_gfn_type_access(hp2m, gfn, &p2mt, &p2ma, 0, NULL);
> 
>      /* Check if mfn is valid */
>      ret = -EINVAL;
> @@ -820,7 +820,7 @@ static int nominate_page(struct domain *d, gfn_t gfn,
>              if ( !ap2m )
>                  continue;
> 
> -            amfn = __get_gfn_type_access(ap2m, gfn_x(gfn), &ap2mt, &ap2ma,
> +            amfn = __get_gfn_type_access(ap2m, gfn, &ap2mt, &ap2ma,
>                                           0, NULL, false);
>              if ( mfn_valid(amfn) && (!mfn_eq(amfn, mfn) || ap2ma != p2ma) )
>              {
> @@ -885,7 +885,7 @@ static int nominate_page(struct domain *d, gfn_t gfn,
>      ret = 0;
> 
>  out:
> -    put_gfn(d, gfn_x(gfn));
> +    put_gfn(d, gfn);
>      return ret;
>  }
> 
> @@ -1124,11 +1124,11 @@ int __mem_sharing_unshare_page(struct domain *d,
>      int last_gfn;
>      gfn_info_t *gfn_info = NULL;
> 
> -    mfn = get_gfn(d, gfn, &p2mt);
> +    mfn = get_gfn(d, _gfn(gfn), &p2mt);
> 
>      /* Has someone already unshared it? */
>      if ( !p2m_is_shared(p2mt) ) {
> -        put_gfn(d, gfn);
> +        put_gfn(d, _gfn(gfn));
>          return 0;
>      }
> 
> @@ -1175,7 +1175,7 @@ int __mem_sharing_unshare_page(struct domain *d,
>          {
>              if ( !get_page(page, d) )
>              {
> -                put_gfn(d, gfn);
> +                put_gfn(d, _gfn(gfn));
>                  domain_crash(d);
>                  return -EOVERFLOW;
>              }
> @@ -1183,7 +1183,7 @@ int __mem_sharing_unshare_page(struct domain *d,
>                  put_page(page);
>              put_page(page);
>          }
> -        put_gfn(d, gfn);
> +        put_gfn(d, _gfn(gfn));
> 
>          return 0;
>      }
> @@ -1202,7 +1202,7 @@ int __mem_sharing_unshare_page(struct domain *d,
>          /* Undo dec of nr_saved_mfns, as the retry will decrease again. */
>          atomic_inc(&nr_saved_mfns);
>          mem_sharing_page_unlock(old_page);
> -        put_gfn(d, gfn);
> +        put_gfn(d, _gfn(gfn));
>          /* Caller is responsible for placing an event
>           * in the ring */
>          return -ENOMEM;
> @@ -1230,7 +1230,7 @@ int __mem_sharing_unshare_page(struct domain *d,
>       * marking dirty is feasible */
>      paging_mark_dirty(d, page_to_mfn(page));
>      /* We do not need to unlock a private page */
> -    put_gfn(d, gfn);
> +    put_gfn(d, _gfn(gfn));
>      return 0;
>  }
> 
> diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
> index f52a71e..19b383f 100644
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -409,12 +409,11 @@ void p2m_unlock_and_tlb_flush(struct p2m_domain *p2m)
>          mm_write_unlock(&p2m->lock);
>  }
> 
> -mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn_l,
> +mfn_t __get_gfn_type_access(struct p2m_domain *p2m, gfn_t gfn,
>                      p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
>                      unsigned int *page_order, bool_t locked)
>  {
>      mfn_t mfn;
> -    gfn_t gfn = _gfn(gfn_l);
> 
>      /* Unshare makes no sense without populate. */
>      if ( q & P2M_UNSHARE )
> @@ -425,7 +424,7 @@ mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn_l,
>          /* Not necessarily true, but for non-translated guests, we claim
>           * it's the most generic kind of memory */
>          *t = p2m_ram_rw;
> -        return _mfn(gfn_l);
> +        return _mfn(gfn_x(gfn));
>      }
> 
>      if ( locked )
> @@ -439,8 +438,8 @@ mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn_l,
>          ASSERT(p2m_is_hostp2m(p2m));
>          /* Try to unshare. If we fail, communicate ENOMEM without
>           * sleeping. */
> -        if ( mem_sharing_unshare_page(p2m->domain, gfn_l, 0) < 0 )
> -            mem_sharing_notify_enomem(p2m->domain, gfn_l, false);
> +        if ( mem_sharing_unshare_page(p2m->domain, gfn_x(gfn), 0) < 0 )
> +            mem_sharing_notify_enomem(p2m->domain, gfn_x(gfn), false);
>          mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order, NULL);
>      }
> 
> @@ -455,7 +454,7 @@ mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn_l,
>      return mfn;
>  }
> 
> -void __put_gfn(struct p2m_domain *p2m, unsigned long gfn)
> +void __put_gfn(struct p2m_domain *p2m, gfn_t gfn)
>  {
>      if ( !p2m || !paging_mode_translate(p2m->domain) )
>          /* Nothing to do in this case */
> @@ -484,7 +483,7 @@ struct page_info *p2m_get_page_from_gfn(
>      {
>          /* Fast path: look up and get out */
>          p2m_read_lock(p2m);
> -        mfn = __get_gfn_type_access(p2m, gfn_x(gfn), t, a, 0, NULL, 0);
> +        mfn = __get_gfn_type_access(p2m, gfn, t, a, 0, NULL, 0);
>          if ( p2m_is_any_ram(*t) && mfn_valid(mfn)
>               && !((q & P2M_UNSHARE) && p2m_is_shared(*t)) )
>          {
> @@ -513,14 +512,14 @@ struct page_info *p2m_get_page_from_gfn(
>      }
> 
>      /* Slow path: take the write lock and do fixups */
> -    mfn = get_gfn_type_access(p2m, gfn_x(gfn), t, a, q, NULL);
> +    mfn = get_gfn_type_access(p2m, gfn, t, a, q, NULL);
>      if ( p2m_is_ram(*t) && mfn_valid(mfn) )
>      {
>          page = mfn_to_page(mfn);
>          if ( !get_page(page, p2m->domain) )
>              page = NULL;
>      }
> -    put_gfn(p2m->domain, gfn_x(gfn));
> +    put_gfn(p2m->domain, gfn);
> 
>      return page;
>  }
> @@ -1278,7 +1277,7 @@ int set_shared_p2m_entry(struct domain *d, unsigned long gfn_l, mfn_t mfn)
>      if ( rc )
>          gdprintk(XENLOG_ERR,
>                   "p2m_set_entry failed! mfn=%08lx rc:%d\n",
> -                 mfn_x(get_gfn_query_unlocked(p2m->domain, gfn_l, &ot)), rc);
> +                 mfn_x(get_gfn_query_unlocked(p2m->domain, gfn, &ot)), rc);
>      return rc;
>  }
> 
> @@ -2187,8 +2186,7 @@ bool p2m_altp2m_lazy_copy(struct vcpu *v, paddr_t gpa,
> 
>      ASSERT(p2m_locked_by_me(hp2m));
> 
> -    mfn = get_gfn_type_access(ap2m, gfn_x(gfn), &p2mt, &p2ma,
> -                              0, &page_order);
> +    mfn = get_gfn_type_access(ap2m, gfn, &p2mt, &p2ma, 0, &page_order);
> 
>      /* Entry already present in ap2m?  Caller should handle the fault. */
>      if ( !mfn_eq(mfn, INVALID_MFN) )
> @@ -2197,8 +2195,7 @@ bool p2m_altp2m_lazy_copy(struct vcpu *v, paddr_t gpa,
>          goto put_ap2m;
>      }
> 
> -    mfn = get_gfn_type_access(hp2m, gfn_x(gfn), &p2mt, &p2ma,
> -                              P2M_ALLOC, &page_order);
> +    mfn = get_gfn_type_access(hp2m, gfn, &p2mt, &p2ma, P2M_ALLOC, &page_order);
> 
>      /* Entry not present in hp2m?  Caller should handle the fault. */
>      if ( mfn_eq(mfn, INVALID_MFN) )
> @@ -2230,9 +2227,9 @@ bool p2m_altp2m_lazy_copy(struct vcpu *v, paddr_t gpa,
>      ret = true;
> 
>  put_hp2m:
> -    __put_gfn(hp2m, gfn_x(gfn));
> +    __put_gfn(hp2m, gfn);
>  put_ap2m:
> -    __put_gfn(ap2m, gfn_x(gfn));
> +    __put_gfn(ap2m, gfn);
> 
>      return ret;
>  }
> @@ -2396,7 +2393,7 @@ int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx,
>      /* Check host p2m if no valid entry in alternate */
>      if ( !mfn_valid(mfn) )
>      {
> -        mfn = __get_gfn_type_access(hp2m, gfn_x(old_gfn), &t, &a,
> +        mfn = __get_gfn_type_access(hp2m, old_gfn, &t, &a,
>                                      P2M_ALLOC, &page_order, 0);
> 
>          if ( !mfn_valid(mfn) || t != p2m_ram_rw )
> @@ -2477,7 +2474,7 @@ int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
>              continue;
> 
>          p2m = d->arch.altp2m_p2m[i];
> -        m = get_gfn_type_access(p2m, gfn_x(gfn), &t, &a, 0, NULL);
> +        m = get_gfn_type_access(p2m, gfn, &t, &a, 0, NULL);
> 
>          /* Check for a dropped page that may impact this altp2m */
>          if ( mfn_eq(mfn, INVALID_MFN) &&
> @@ -2492,7 +2489,7 @@ int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
>              else
>              {
>                  /* At least 2 altp2m's impacted, so reset everything */
> -                __put_gfn(p2m, gfn_x(gfn));
> +                __put_gfn(p2m, gfn);
> 
>                  for ( i = 0; i < MAX_ALTP2M; i++ )
>                  {
> @@ -2519,7 +2516,7 @@ int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
>                  ret = rc;
>          }
> 
> -        __put_gfn(p2m, gfn_x(gfn));
> +        __put_gfn(p2m, gfn);
>      }
> 
>      altp2m_list_unlock(d);
> @@ -2590,7 +2587,7 @@ void audit_p2m(struct domain *d,
>              continue;
>          }
> 
> -        p2mfn = get_gfn_type_access(p2m, gfn, &type, &p2ma, 0, NULL);
> +        p2mfn = get_gfn_type_access(p2m, _gfn(gfn), &type, &p2ma, 0, NULL);
>          if ( mfn_x(p2mfn) != mfn )
>          {
>              mpbad++;
> @@ -2605,7 +2602,7 @@ void audit_p2m(struct domain *d,
>               * blow away the m2p entry. */
>              set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
>          }
> -        __put_gfn(p2m, gfn);
> +        __put_gfn(p2m, _gfn(gfn));
> 
>          P2M_PRINTK("OK: mfn=%#lx, gfn=%#lx, p2mfn=%#lx\n",
>                         mfn, gfn, mfn_x(p2mfn));
> @@ -2698,7 +2695,7 @@ int p2m_add_foreign(struct domain *tdom, unsigned long fgfn,
>      mfn = page_to_mfn(page);
> 
>      /* Remove previously mapped page if it is present. */
> -    prev_mfn = get_gfn(tdom, gpfn, &p2mt_prev);
> +    prev_mfn = get_gfn(tdom, _gfn(gpfn), &p2mt_prev);
>      if ( mfn_valid(prev_mfn) )
>      {
>          if ( is_xen_heap_mfn(mfn_x(prev_mfn)) )
> @@ -2729,7 +2726,7 @@ int p2m_add_foreign(struct domain *tdom, unsigned long fgfn,
>       * after set_foreign_p2m_entry so another cpu doesn't populate the gpfn
>       * before us.
>       */
> -    put_gfn(tdom, gpfn);
> +    put_gfn(tdom, _gfn(gpfn));
> 
>  out:
>      if ( fdom )
> diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
> index 61304d7..85ce761 100644
> --- a/xen/arch/x86/mm/shadow/common.c
> +++ b/xen/arch/x86/mm/shadow/common.c
> @@ -3338,7 +3338,7 @@ int shadow_track_dirty_vram(struct domain *d,
> 
>          /* Iterate over VRAM to track dirty bits. */
>          for ( i = 0; i < nr; i++ ) {
> -            mfn_t mfn = get_gfn_query_unlocked(d, begin_pfn + i, &t);
> +            mfn_t mfn = get_gfn_query_unlocked(d, _gfn(begin_pfn + i), &t);
>              struct page_info *page;
>              int dirty = 0;
>              paddr_t sl1ma = dirty_vram->sl1ma[i];
> @@ -3418,7 +3418,7 @@ int shadow_track_dirty_vram(struct domain *d,
>               * write access */
>              for ( i = begin_pfn; i < end_pfn; i++ )
>              {
> -                mfn_t mfn = get_gfn_query_unlocked(d, i, &t);
> +                mfn_t mfn = get_gfn_query_unlocked(d, _gfn(i), &t);
>                  if ( !mfn_eq(mfn, INVALID_MFN) )
>                      flush_tlb |= sh_remove_write_access(d, mfn, 1, 0);
>              }
> diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
> index 7e9cbc6..de3fcd7 100644
> --- a/xen/arch/x86/mm/shadow/multi.c
> +++ b/xen/arch/x86/mm/shadow/multi.c
> @@ -2126,7 +2126,8 @@ static int validate_gl4e(struct vcpu *v, void *new_ge, mfn_t sl4mfn, void *se)
>           !guest_l4e_rsvd_bits(v, new_gl4e) )
>      {
>          gfn_t gl3gfn = guest_l4e_get_gfn(new_gl4e);
> -        mfn_t gl3mfn = get_gfn_query_unlocked(d, gfn_x(gl3gfn), &p2mt);
> +        mfn_t gl3mfn = get_gfn_query_unlocked(d, gl3gfn, &p2mt);
> +
>          if ( p2m_is_ram(p2mt) )
>              sl3mfn = get_shadow_status(d, gl3mfn, SH_type_l3_shadow);
>          else if ( p2mt != p2m_populate_on_demand )
> @@ -2185,7 +2186,8 @@ static int validate_gl3e(struct vcpu *v, void *new_ge, mfn_t sl3mfn, void *se)
>           !guest_l3e_rsvd_bits(v, new_gl3e) )
>      {
>          gfn_t gl2gfn = guest_l3e_get_gfn(new_gl3e);
> -        mfn_t gl2mfn = get_gfn_query_unlocked(d, gfn_x(gl2gfn), &p2mt);
> +        mfn_t gl2mfn = get_gfn_query_unlocked(d, gl2gfn, &p2mt);
> +
>          if ( p2m_is_ram(p2mt) )
>              sl2mfn = get_shadow_status(d, gl2mfn, SH_type_l2_shadow);
>          else if ( p2mt != p2m_populate_on_demand )
> @@ -2236,7 +2238,8 @@ static int validate_gl2e(struct vcpu *v, void *new_ge, mfn_t sl2mfn, void *se)
>          }
>          else
>          {
> -            mfn_t gl1mfn = get_gfn_query_unlocked(d, gfn_x(gl1gfn), &p2mt);
> +            mfn_t gl1mfn = get_gfn_query_unlocked(d, gl1gfn, &p2mt);
> +
>              if ( p2m_is_ram(p2mt) )
>                  sl1mfn = get_shadow_status(d, gl1mfn, SH_type_l1_shadow);
>              else if ( p2mt != p2m_populate_on_demand )
> @@ -2270,7 +2273,7 @@ static int validate_gl1e(struct vcpu *v, void *new_ge, mfn_t sl1mfn, void *se)
>           !guest_l1e_rsvd_bits(v, new_gl1e) )
>      {
>          gfn = guest_l1e_get_gfn(new_gl1e);
> -        gmfn = get_gfn_query_unlocked(d, gfn_x(gfn), &p2mt);
> +        gmfn = get_gfn_query_unlocked(d, gfn, &p2mt);
>      }
> 
>      l1e_propagate_from_guest(v, new_gl1e, gmfn, &new_sl1e, ft_prefetch, p2mt);
> @@ -2335,7 +2338,7 @@ void sh_resync_l1(struct vcpu *v, mfn_t gl1mfn, mfn_t snpmfn)
>                   !guest_l1e_rsvd_bits(v, gl1e) )
>              {
>                  gfn = guest_l1e_get_gfn(gl1e);
> -                gmfn = get_gfn_query_unlocked(d, gfn_x(gfn), &p2mt);
> +                gmfn = get_gfn_query_unlocked(d, gfn, &p2mt);
>              }
> 
>              l1e_propagate_from_guest(v, gl1e, gmfn, &nsl1e, ft_prefetch, p2mt);
> @@ -2615,7 +2618,7 @@ static void sh_prefetch(struct vcpu *v, walk_t *gw,
>               !guest_l1e_rsvd_bits(v, gl1e) )
>          {
>              gfn = guest_l1e_get_gfn(gl1e);
> -            gmfn = get_gfn_query_unlocked(d, gfn_x(gfn), &p2mt);
> +            gmfn = get_gfn_query_unlocked(d, gfn, &p2mt);
>          }
>          else
>          {
> @@ -3071,7 +3074,7 @@ static int sh_page_fault(struct vcpu *v,
>          SHADOW_PRINTK("BAD gfn=%"SH_PRI_gfn" gmfn=%"PRI_mfn"\n",
>                        gfn_x(gfn), mfn_x(gmfn));
>          sh_reset_early_unshadow(v);
> -        put_gfn(d, gfn_x(gfn));
> +        put_gfn(d, gfn);
>          goto propagate;
>      }
> 
> @@ -3117,7 +3120,7 @@ static int sh_page_fault(struct vcpu *v,
>      if ( rc & GW_RMWR_REWALK )
>      {
>          paging_unlock(d);
> -        put_gfn(d, gfn_x(gfn));
> +        put_gfn(d, gfn);
>          goto rewalk;
>      }
>  #endif /* OOS */
> @@ -3126,7 +3129,7 @@ static int sh_page_fault(struct vcpu *v,
>      {
>          perfc_incr(shadow_inconsistent_gwalk);
>          paging_unlock(d);
> -        put_gfn(d, gfn_x(gfn));
> +        put_gfn(d, gfn);
>          goto rewalk;
>      }
> 
> @@ -3153,7 +3156,7 @@ static int sh_page_fault(struct vcpu *v,
>          ASSERT(d->is_shutting_down);
>  #endif
>          paging_unlock(d);
> -        put_gfn(d, gfn_x(gfn));
> +        put_gfn(d, gfn);
>          trace_shadow_gen(TRC_SHADOW_DOMF_DYING, va);
>          return 0;
>      }
> @@ -3171,7 +3174,7 @@ static int sh_page_fault(struct vcpu *v,
>           * failed. We cannot safely continue since some page is still
>           * OOS but not in the hash table anymore. */
>          paging_unlock(d);
> -        put_gfn(d, gfn_x(gfn));
> +        put_gfn(d, gfn);
>          return 0;
>      }
> 
> @@ -3181,7 +3184,7 @@ static int sh_page_fault(struct vcpu *v,
>      {
>          perfc_incr(shadow_inconsistent_gwalk);
>          paging_unlock(d);
> -        put_gfn(d, gfn_x(gfn));
> +        put_gfn(d, gfn);
>          goto rewalk;
>      }
>  #endif /* OOS */
> @@ -3267,7 +3270,7 @@ static int sh_page_fault(struct vcpu *v,
>      SHADOW_PRINTK("fixed\n");
>      shadow_audit_tables(v);
>      paging_unlock(d);
> -    put_gfn(d, gfn_x(gfn));
> +    put_gfn(d, gfn);
>      return EXCRET_fault_fixed;
> 
>   emulate:
> @@ -3337,7 +3340,7 @@ static int sh_page_fault(struct vcpu *v,
>      sh_audit_gw(v, &gw);
>      shadow_audit_tables(v);
>      paging_unlock(d);
> -    put_gfn(d, gfn_x(gfn));
> +    put_gfn(d, gfn);
> 
>      this_cpu(trace_emulate_write_val) = 0;
> 
> @@ -3521,7 +3524,7 @@ static int sh_page_fault(struct vcpu *v,
>      shadow_audit_tables(v);
>      sh_reset_early_unshadow(v);
>      paging_unlock(d);
> -    put_gfn(d, gfn_x(gfn));
> +    put_gfn(d, gfn);
>      trace_shadow_gen(TRC_SHADOW_MMIO, va);
>      return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT, access)
>              ? EXCRET_fault_fixed : 0);
> @@ -3535,7 +3538,7 @@ static int sh_page_fault(struct vcpu *v,
>      shadow_audit_tables(v);
>      sh_reset_early_unshadow(v);
>      paging_unlock(d);
> -    put_gfn(d, gfn_x(gfn));
> +    put_gfn(d, gfn);
> 
>  propagate:
>      trace_not_shadow_fault(gw.l1e, va);
> @@ -4116,7 +4119,7 @@ sh_update_cr3(struct vcpu *v, int do_locking, bool noflush)
>              if ( guest_l3e_get_flags(gl3e[i]) & _PAGE_PRESENT )
>              {
>                  gl2gfn = guest_l3e_get_gfn(gl3e[i]);
> -                gl2mfn = get_gfn_query_unlocked(d, gfn_x(gl2gfn), &p2mt);
> +                gl2mfn = get_gfn_query_unlocked(d, gl2gfn, &p2mt);
>                  if ( p2m_is_ram(p2mt) )
>                      flush |= sh_remove_write_access(d, gl2mfn, 2, 0);
>              }
> @@ -4129,7 +4132,7 @@ sh_update_cr3(struct vcpu *v, int do_locking, bool noflush)
>              if ( guest_l3e_get_flags(gl3e[i]) & _PAGE_PRESENT )
>              {
>                  gl2gfn = guest_l3e_get_gfn(gl3e[i]);
> -                gl2mfn = get_gfn_query_unlocked(d, gfn_x(gl2gfn), &p2mt);
> +                gl2mfn = get_gfn_query_unlocked(d, gl2gfn, &p2mt);
>                  if ( p2m_is_ram(p2mt) )
>                      sh_set_toplevel_shadow(v, i, gl2mfn, (i == 3)
>                                             ? SH_type_l2h_shadow
> @@ -4536,7 +4539,7 @@ static void sh_pagetable_dying(paddr_t gpa)
>      p2m_type_t p2mt;
>      char *gl3pa = NULL;
>      guest_l3e_t *gl3e = NULL;
> -    unsigned long l3gfn;
> +    gfn_t l3gfn;
>      mfn_t l3mfn;
> 
>      gcr3 = v->arch.hvm.guest_cr[3];
> @@ -4544,8 +4547,8 @@ static void sh_pagetable_dying(paddr_t gpa)
>      if ( gcr3 == gpa )
>          fast_path = 1;
> 
> -    l3gfn = gpa >> PAGE_SHIFT;
> -    l3mfn = get_gfn_query(d, _gfn(l3gfn), &p2mt);
> +    l3gfn = gaddr_to_gfn(gpa);
> +    l3mfn = get_gfn_query(d, l3gfn, &p2mt);
>      if ( !mfn_valid(l3mfn) || !p2m_is_ram(p2mt) )
>      {
>          printk(XENLOG_DEBUG "sh_pagetable_dying: gpa not valid %"PRIpaddr"\n",
> @@ -4573,8 +4576,7 @@ static void sh_pagetable_dying(paddr_t gpa)
>          else
>          {
>              /* retrieving the l2s */
> -            gmfn = get_gfn_query_unlocked(d, gfn_x(guest_l3e_get_gfn(gl3e[i])),
> -                                          &p2mt);
> +            gmfn = get_gfn_query_unlocked(d, guest_l3e_get_gfn(gl3e[i]), &p2mt);
>              smfn = unlikely(mfn_eq(gmfn, INVALID_MFN))
>                     ? INVALID_MFN
>                     : shadow_hash_lookup(d, mfn_x(gmfn), SH_type_l2_pae_shadow);
> @@ -4609,10 +4611,11 @@ static void sh_pagetable_dying(paddr_t gpa)
>  {
>      struct vcpu *v = current;
>      struct domain *d = v->domain;
> +    gfn_t gfn = gaddr_to_gfn(gpa);
>      mfn_t smfn, gmfn;
>      p2m_type_t p2mt;
> 
> -    gmfn = get_gfn_query(d, _gfn(gpa >> PAGE_SHIFT), &p2mt);
> +    gmfn = get_gfn_query(d, gfn, &p2mt);
>      paging_lock(d);
> 
>  #if GUEST_PAGING_LEVELS == 2
> @@ -4637,7 +4640,7 @@ static void sh_pagetable_dying(paddr_t gpa)
>      v->arch.paging.shadow.pagetable_dying = 1;
> 
>      paging_unlock(d);
> -    put_gfn(d, gpa >> PAGE_SHIFT);
> +    put_gfn(d, gfn);
>  }
>  #endif
> 
> @@ -4759,7 +4762,7 @@ int sh_audit_l1_table(struct vcpu *v, mfn_t sl1mfn, mfn_t x)
>              {
>                  gfn = guest_l1e_get_gfn(*gl1e);
>                  mfn = shadow_l1e_get_mfn(*sl1e);
> -                gmfn = get_gfn_query_unlocked(v->domain, gfn_x(gfn), &p2mt);
> +                gmfn = get_gfn_query_unlocked(v->domain, gfn, &p2mt);
>                  if ( !p2m_is_grant(p2mt) && !mfn_eq(gmfn, mfn) )
>                      AUDIT_FAIL(1, "bad translation: gfn %" SH_PRI_gfn
>                                 " --> %" PRI_mfn " != mfn %" PRI_mfn,
> @@ -4832,16 +4835,15 @@ int sh_audit_l2_table(struct vcpu *v, mfn_t sl2mfn, mfn_t x)
>              gmfn = (guest_l2e_get_flags(*gl2e) & _PAGE_PSE)
>                  ? get_fl1_shadow_status(d, gfn)
>                  : get_shadow_status(d,
> -                    get_gfn_query_unlocked(d, gfn_x(gfn),
> -                                        &p2mt), SH_type_l1_shadow);
> +                    get_gfn_query_unlocked(d, gfn, &p2mt), SH_type_l1_shadow);
>              if ( !mfn_eq(gmfn, mfn) )
>                  AUDIT_FAIL(2, "bad translation: gfn %" SH_PRI_gfn
>                             " (--> %" PRI_mfn ")"
>                             " --> %" PRI_mfn " != mfn %" PRI_mfn,
>                             gfn_x(gfn),
>                             (guest_l2e_get_flags(*gl2e) & _PAGE_PSE) ? 0
> -                           : mfn_x(get_gfn_query_unlocked(d,
> -                                   gfn_x(gfn), &p2mt)), mfn_x(gmfn), mfn_x(mfn));
> +                           : mfn_x(get_gfn_query_unlocked(d, gfn, &p2mt)),
> +                           mfn_x(gmfn), mfn_x(mfn));
>          }
>      });
>      unmap_domain_page(gp);
> @@ -4881,8 +4883,7 @@ int sh_audit_l3_table(struct vcpu *v, mfn_t sl3mfn, mfn_t x)
>          {
>              gfn = guest_l3e_get_gfn(*gl3e);
>              mfn = shadow_l3e_get_mfn(*sl3e);
> -            gmfn = get_shadow_status(d, get_gfn_query_unlocked(
> -                                        d, gfn_x(gfn), &p2mt),
> +            gmfn = get_shadow_status(d, get_gfn_query_unlocked(d, gfn, &p2mt),
>                                       ((GUEST_PAGING_LEVELS == 3 ||
>                                         is_pv_32bit_domain(d))
>                                        && !shadow_mode_external(d)
> @@ -4931,8 +4932,7 @@ int sh_audit_l4_table(struct vcpu *v, mfn_t sl4mfn, mfn_t x)
>          {
>              gfn = guest_l4e_get_gfn(*gl4e);
>              mfn = shadow_l4e_get_mfn(*sl4e);
> -            gmfn = get_shadow_status(d, get_gfn_query_unlocked(
> -                                     d, gfn_x(gfn), &p2mt),
> +            gmfn = get_shadow_status(d, get_gfn_query_unlocked(d, gfn, &p2mt),
>                                       SH_type_l3_shadow);
>              if ( !mfn_eq(gmfn, mfn) )
>                  AUDIT_FAIL(4, "bad translation: gfn %" SH_PRI_gfn
> diff --git a/xen/arch/x86/mm/shadow/types.h b/xen/arch/x86/mm/shadow/types.h
> index d509674..f688919 100644
> --- a/xen/arch/x86/mm/shadow/types.h
> +++ b/xen/arch/x86/mm/shadow/types.h
> @@ -193,10 +193,6 @@ static inline shadow_l4e_t shadow_l4e_from_mfn(mfn_t mfn, u32 flags)
>  })
>  #endif
> 
> - /* Override get_gfn to work with gfn_t */
> -#undef get_gfn_query
> -#define get_gfn_query(d, g, t) get_gfn_type((d), gfn_x(g), (t), 0)
> -
>  /* The shadow types needed for the various levels. */
> 
>  #if GUEST_PAGING_LEVELS == 2
> diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
> index f7860f6..54f909f 100644
> --- a/xen/common/grant_table.c
> +++ b/xen/common/grant_table.c
> @@ -2099,7 +2099,7 @@ gnttab_transfer(
>          {
>              p2m_type_t p2mt;
> 
> -            mfn = get_gfn_unshare(d, gop.mfn, &p2mt);
> +            mfn = get_gfn_unshare(d, _gfn(gop.mfn), &p2mt);
>              if ( p2m_is_shared(p2mt) || !p2m_is_valid(p2mt) )
>                  mfn = INVALID_MFN;
>          }
> @@ -2111,7 +2111,7 @@ gnttab_transfer(
>          if ( unlikely(!mfn_valid(mfn)) )
>          {
>  #ifdef CONFIG_X86
> -            put_gfn(d, gop.mfn);
> +            put_gfn(d, _gfn(gop.mfn));
>  #endif
>              gdprintk(XENLOG_INFO, "out-of-range %lx\n", (unsigned long)gop.mfn);
>              gop.status = GNTST_bad_page;
> @@ -2122,7 +2122,7 @@ gnttab_transfer(
>          if ( (rc = steal_page(d, page, 0)) < 0 )
>          {
>  #ifdef CONFIG_X86
> -            put_gfn(d, gop.mfn);
> +            put_gfn(d, _gfn(gop.mfn));
>  #endif
>              gop.status = rc == -EINVAL ? GNTST_bad_page : GNTST_general_error;
>              goto copyback;
> @@ -2154,7 +2154,7 @@ gnttab_transfer(
>              rcu_unlock_domain(e);
>          put_gfn_and_copyback:
>  #ifdef CONFIG_X86
> -            put_gfn(d, gop.mfn);
> +            put_gfn(d, _gfn(gop.mfn));
>  #endif
>              page->count_info &= ~(PGC_count_mask|PGC_allocated);
>              free_domheap_page(page);
> @@ -2243,7 +2243,7 @@ gnttab_transfer(
> 
>          spin_unlock(&e->page_alloc_lock);
>  #ifdef CONFIG_X86
> -        put_gfn(d, gop.mfn);
> +        put_gfn(d, _gfn(gop.mfn));
>  #endif
> 
>          TRACE_1D(TRC_MEM_PAGE_GRANT_TRANSFER, e->domain_id);
> diff --git a/xen/common/memory.c b/xen/common/memory.c
> index 175bd62..c59a017 100644
> --- a/xen/common/memory.c
> +++ b/xen/common/memory.c
> @@ -303,10 +303,10 @@ int guest_remove_page(struct domain *d, unsigned long gmfn)
>      int rc;
> 
>  #ifdef CONFIG_X86
> -    mfn = get_gfn_query(d, gmfn, &p2mt);
> +    mfn = get_gfn_query(d, _gfn(gmfn), &p2mt);
>      if ( unlikely(p2mt == p2m_invalid) || unlikely(p2mt == p2m_mmio_dm) )
>      {
> -        put_gfn(d, gmfn);
> +        put_gfn(d, _gfn(gmfn));
> 
>          return -ENOENT;
>      }
> @@ -336,7 +336,7 @@ int guest_remove_page(struct domain *d, unsigned long gmfn)
>              goto out_put_gfn;
>          }
> 
> -        put_gfn(d, gmfn);
> +        put_gfn(d, _gfn(gmfn));
> 
>          if ( page )
>          {
> @@ -359,7 +359,7 @@ int guest_remove_page(struct domain *d, unsigned long gmfn)
>      if ( unlikely(!mfn_valid(mfn)) )
>      {
>  #ifdef CONFIG_X86
> -        put_gfn(d, gmfn);
> +        put_gfn(d, _gfn(gmfn));
>  #endif
>          gdprintk(XENLOG_INFO, "Domain %u page number %lx invalid\n",
>                  d->domain_id, gmfn);
> @@ -382,7 +382,7 @@ int guest_remove_page(struct domain *d, unsigned long gmfn)
>              goto out_put_gfn;
>          }
>          /* Maybe the mfn changed */
> -        mfn = get_gfn_query_unlocked(d, gmfn, &p2mt);
> +        mfn = get_gfn_query_unlocked(d, _gfn(gmfn), &p2mt);
>          ASSERT(!p2m_is_shared(p2mt));
>      }
>  #endif /* CONFIG_X86 */
> @@ -391,7 +391,7 @@ int guest_remove_page(struct domain *d, unsigned long gmfn)
>      if ( unlikely(!get_page(page, d)) )
>      {
>  #ifdef CONFIG_X86
> -        put_gfn(d, gmfn);
> +        put_gfn(d, _gfn(gmfn));
>  #endif
>          gdprintk(XENLOG_INFO, "Bad page free for domain %u\n", d->domain_id);
> 
> @@ -416,7 +416,7 @@ int guest_remove_page(struct domain *d, unsigned long gmfn)
> 
>  #ifdef CONFIG_X86
>   out_put_gfn:
> -    put_gfn(d, gmfn);
> +    put_gfn(d, _gfn(gmfn));
>  #endif
> 
>      /*
> @@ -651,10 +651,10 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
>                  p2m_type_t p2mt;
> 
>                  /* Shared pages cannot be exchanged */
> -                mfn = get_gfn_unshare(d, gmfn + k, &p2mt);
> +                mfn = get_gfn_unshare(d, _gfn(gmfn + k), &p2mt);
>                  if ( p2m_is_shared(p2mt) )
>                  {
> -                    put_gfn(d, gmfn + k);
> +                    put_gfn(d, _gfn(gmfn + k));
>                      rc = -ENOMEM;
>                      goto fail;
>                  }
> @@ -664,7 +664,7 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
>                  if ( unlikely(!mfn_valid(mfn)) )
>                  {
>  #ifdef CONFIG_X86
> -                    put_gfn(d, gmfn + k);
> +                    put_gfn(d, _gfn(gmfn + k));
>  #endif
>                      rc = -EINVAL;
>                      goto fail;
> @@ -676,14 +676,14 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
>                  if ( unlikely(rc) )
>                  {
>  #ifdef CONFIG_X86
> -                    put_gfn(d, gmfn + k);
> +                    put_gfn(d, _gfn(gmfn + k));
>  #endif
>                      goto fail;
>                  }
> 
>                  page_list_add(page, &in_chunk_list);
>  #ifdef CONFIG_X86
> -                put_gfn(d, gmfn + k);
> +                put_gfn(d, _gfn(gmfn + k));
>  #endif
>              }
>          }
> diff --git a/xen/drivers/passthrough/amd/iommu_guest.c b/xen/drivers/passthrough/amd/iommu_guest.c
> index 03ca0cf..7ab3c77 100644
> --- a/xen/drivers/passthrough/amd/iommu_guest.c
> +++ b/xen/drivers/passthrough/amd/iommu_guest.c
> @@ -21,14 +21,6 @@
>  #include <asm/amd-iommu.h>
>  #include <asm/hvm/svm/amd-iommu-proto.h>
> 
> -/* Override {get,put}_gfn to work with gfn_t */
> -#undef get_gfn
> -#define get_gfn(d, g, t) get_gfn_type(d, gfn_x(g), t, P2M_ALLOC)
> -#undef get_gfn_query
> -#define get_gfn_query(d, g, t) get_gfn_type(d, gfn_x(g), t, 0)
> -#undef put_gfn
> -#define put_gfn(d, g) __put_gfn(p2m_get_hostp2m(d), gfn_x(g))
> -
>  #define IOMMU_MMIO_SIZE                         0x8000
>  #define IOMMU_MMIO_PAGE_NR                      0x8
>  #define RING_BF_LENGTH_MASK                     0x0F000000
> diff --git a/xen/include/asm-x86/guest_pt.h b/xen/include/asm-x86/guest_pt.h
> index 8684b83..87c6815 100644
> --- a/xen/include/asm-x86/guest_pt.h
> +++ b/xen/include/asm-x86/guest_pt.h
> @@ -38,10 +38,6 @@ gfn_to_paddr(gfn_t gfn)
>      return ((paddr_t)gfn_x(gfn)) << PAGE_SHIFT;
>  }
> 
> -/* Override get_gfn to work with gfn_t */
> -#undef get_gfn
> -#define get_gfn(d, g, t) get_gfn_type((d), gfn_x(g), (t), P2M_ALLOC)
> -
>  /* Mask covering the reserved bits from superpage alignment. */
>  #define SUPERPAGE_RSVD(bit)                                             \
>      (((1ul << (bit)) - 1) & ~(_PAGE_PSE_PAT | (_PAGE_PSE_PAT - 1ul)))
> diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
> index 8bd6f64..e332f06 100644
> --- a/xen/include/asm-x86/p2m.h
> +++ b/xen/include/asm-x86/p2m.h
> @@ -431,7 +431,7 @@ void p2m_unlock_and_tlb_flush(struct p2m_domain *p2m);
>   * put_gfn. ****/
> 
>  mfn_t __nonnull(3, 4) __get_gfn_type_access(
> -    struct p2m_domain *p2m, unsigned long gfn, p2m_type_t *t,
> +    struct p2m_domain *p2m, gfn_t gfn, p2m_type_t *t,
>      p2m_access_t *a, p2m_query_t q, unsigned int *page_order, bool_t locked);
> 
>  /*
> @@ -444,7 +444,7 @@ mfn_t __nonnull(3, 4) __get_gfn_type_access(
>   * the entry was found in.
>   */
>  static inline mfn_t __nonnull(3, 4) get_gfn_type_access(
> -    struct p2m_domain *p2m, unsigned long gfn, p2m_type_t *t,
> +    struct p2m_domain *p2m, gfn_t gfn, p2m_type_t *t,
>      p2m_access_t *a, p2m_query_t q, unsigned int *page_order)
>  {
>      return __get_gfn_type_access(p2m, gfn, t, a, q, page_order, true);
> @@ -452,22 +452,21 @@ static inline mfn_t __nonnull(3, 4) get_gfn_type_access(
> 
>  /* General conversion function from gfn to mfn */
>  static inline mfn_t __nonnull(3) get_gfn_type(
> -    struct domain *d, unsigned long gfn, p2m_type_t *t, p2m_query_t q)
> +    struct domain *d, gfn_t gfn, p2m_type_t *t, p2m_query_t q)
>  {
>      p2m_access_t a;
>      return get_gfn_type_access(p2m_get_hostp2m(d), gfn, t, &a, q, NULL);
>  }
> 
>  /* Syntactic sugar: most callers will use one of these. */
> -#define get_gfn(d, g, t)         get_gfn_type((d), (g), (t), P2M_ALLOC)
> -#define get_gfn_query(d, g, t)   get_gfn_type((d), (g), (t), 0)
> -#define get_gfn_unshare(d, g, t) get_gfn_type((d), (g), (t), \
> -                                              P2M_ALLOC | P2M_UNSHARE)
> +#define get_gfn(d, g, t)         get_gfn_type(d, g, t, P2M_ALLOC)
> +#define get_gfn_query(d, g, t)   get_gfn_type(d, g, t, 0)
> +#define get_gfn_unshare(d, g, t) get_gfn_type(d, g, t, P2M_ALLOC | P2M_UNSHARE)
> 
>  /* Will release the p2m_lock for this gfn entry. */
> -void __put_gfn(struct p2m_domain *p2m, unsigned long gfn);
> +void __put_gfn(struct p2m_domain *p2m, gfn_t gfn);
> 
> -#define put_gfn(d, gfn) __put_gfn(p2m_get_hostp2m((d)), (gfn))
> +#define put_gfn(d, g) __put_gfn(p2m_get_hostp2m(d), g)
> 
>  /*
>   * The intent of the "unlocked" accessor is to have the caller not worry about
> @@ -484,9 +483,8 @@ void __put_gfn(struct p2m_domain *p2m, unsigned long gfn);
>   * Any other type of query can cause a change in the p2m and may need to
>   * perform locking.
>   */
> -static inline mfn_t get_gfn_query_unlocked(struct domain *d,
> -                                           unsigned long gfn,
> -                                           p2m_type_t *t)
> +static inline mfn_t get_gfn_query_unlocked(
> +    struct domain *d, gfn_t gfn, p2m_type_t *t)
>  {
>      p2m_access_t a;
>      return __get_gfn_type_access(p2m_get_hostp2m(d), gfn, t, &a, 0, NULL, 0);
> @@ -569,9 +567,9 @@ do {                                                         \
> 
>      /* Now do the gets */
>      *first_mfn  = get_gfn_type_access(p2m_get_hostp2m(rval->first_domain),
> -                                      gfn_x(rval->first_gfn), first_t, first_a, q, NULL);
> +                                      rval->first_gfn, first_t, first_a, q, NULL);
>      *second_mfn = get_gfn_type_access(p2m_get_hostp2m(rval->second_domain),
> -                                      gfn_x(rval->second_gfn), second_t, second_a, q, NULL);
> +                                      rval->second_gfn, second_t, second_a, q, NULL);
>  }
> 
>  static inline void put_two_gfns(struct two_gfns *arg)
> @@ -579,8 +577,8 @@ static inline void put_two_gfns(struct two_gfns *arg)
>      if ( !arg )
>          return;
> 
> -    put_gfn(arg->second_domain, gfn_x(arg->second_gfn));
> -    put_gfn(arg->first_domain,  gfn_x(arg->first_gfn));
> +    put_gfn(arg->second_domain, arg->second_gfn);
> +    put_gfn(arg->first_domain,  arg->first_gfn);
>  }
> 
>  /* Init the datastructures for later use by the p2m code */
> --
> 2.1.4
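
For anyone skimming the series, the net effect on a typical caller is that the
gfn stays typed from the guest-physical address all the way to the matching
put_gfn(). A hypothetical caller shape (my own sketch against the headers as
changed above, not code from the series):

    /* Assumes Xen's asm-x86/p2m.h as modified by this patch. */
    static int example_lookup(struct domain *d, paddr_t gpa)
    {
        gfn_t gfn = gaddr_to_gfn(gpa);   /* replaces gpa >> PAGE_SHIFT */
        p2m_type_t t;
        mfn_t mfn = get_gfn(d, gfn, &t); /* now takes gfn_t directly */
        int rc = 0;

        if ( mfn_eq(mfn, INVALID_MFN) )
            rc = -EINVAL;

        put_gfn(d, gfn);                 /* matching put, also gfn_t */
        return rc;
    }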
