[Xen-changelog] [xen-unstable] Modify naming of queries into the p2m

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] Modify naming of queries into the p2m
From: Xen patchbot-unstable <patchbot@xxxxxxx>
Date: Sat, 12 Nov 2011 00:11:29 +0000
Delivery-date: Fri, 11 Nov 2011 16:12:42 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
# Date 1321035094 0
# Node ID 3622d7fae14dfc2d00f378738ace3b65ee65b6cc
# Parent  9af6829a927379b6455a64ca49e769de34120800
Modify naming of queries into the p2m

Lookups into the p2m code are now performed through variants of get_gfn,
and every caller must pair the lookup with a call to put_gfn. put_gfn is
a no-op at the moment, but will perform proper locking in a later patch.

This patch does not change functionality; it only renames the lookup
calls and adds the matching put_gfn's.

set_p2m_entry retains its name because it is always called with
p2m_lock held.

This patch is humongous, unfortunately, given the dozens of call sites
involved.

After this patch, code using the old-style gfn_to_mfn will no longer
compile. This is on purpose: callers must adapt to the new API.
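
To illustrate the new convention, here is a minimal sketch distilled
from the call sites below (not code from this patch; the domain/gfn
names and error handling are simplified for illustration):

    p2m_type_t t;
    mfn_t mfn;

    /* Old style: a bare lookup, nothing to release afterwards. */
    mfn = gfn_to_mfn(d, gfn, &t);

    /* New style: every get_gfn must be balanced by a put_gfn on all
     * paths, including error exits. put_gfn is a no-op for now, but
     * will release a p2m lock once proper locking is added. */
    mfn = get_gfn(d, gfn, &t);
    if ( !p2m_is_ram(t) )
    {
        put_gfn(d, gfn);
        return -EINVAL;
    }
    /* ... use mfn ... */
    put_gfn(d, gfn);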

Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
Committed-by: Keir Fraser <keir@xxxxxxx>
---


diff -r 9af6829a9273 -r 3622d7fae14d xen/arch/x86/cpu/mcheck/vmce.c
--- a/xen/arch/x86/cpu/mcheck/vmce.c    Fri Nov 11 17:46:19 2011 +0100
+++ b/xen/arch/x86/cpu/mcheck/vmce.c    Fri Nov 11 18:11:34 2011 +0000
@@ -574,6 +574,7 @@
 {
     mfn_t r_mfn;
     p2m_type_t pt;
+    int rc;
 
     /* Always trust dom0's MCE handler will prevent future access */
     if ( d == dom0 )
@@ -585,14 +586,16 @@
     if ( !is_hvm_domain(d) || !paging_mode_hap(d) )
         return -ENOSYS;
 
-    r_mfn = gfn_to_mfn_query(d, gfn, &pt);
+    rc = -1;
+    r_mfn = get_gfn_query(d, gfn, &pt);
     if ( p2m_to_mask(pt) & P2M_UNMAP_TYPES)
     {
         ASSERT(mfn_x(r_mfn) == mfn_x(mfn));
         p2m_change_type(d, gfn, pt, p2m_ram_broken);
-        return 0;
+        rc = 0;
     }
+    put_gfn(d, gfn);
 
-    return -1;
+    return rc;
 }
 
diff -r 9af6829a9273 -r 3622d7fae14d xen/arch/x86/debug.c
--- a/xen/arch/x86/debug.c      Fri Nov 11 17:46:19 2011 +0100
+++ b/xen/arch/x86/debug.c      Fri Nov 11 18:11:34 2011 +0000
@@ -43,22 +43,23 @@
 
 /* Returns: mfn for the given (hvm guest) vaddr */
 static unsigned long 
-dbg_hvm_va2mfn(dbgva_t vaddr, struct domain *dp, int toaddr)
+dbg_hvm_va2mfn(dbgva_t vaddr, struct domain *dp, int toaddr,
+                unsigned long *gfn)
 {
-    unsigned long mfn, gfn;
+    unsigned long mfn;
     uint32_t pfec = PFEC_page_present;
     p2m_type_t gfntype;
 
     DBGP2("vaddr:%lx domid:%d\n", vaddr, dp->domain_id);
 
-    gfn = paging_gva_to_gfn(dp->vcpu[0], vaddr, &pfec);
-    if ( gfn == INVALID_GFN )
+    *gfn = paging_gva_to_gfn(dp->vcpu[0], vaddr, &pfec);
+    if ( *gfn == INVALID_GFN )
     {
         DBGP2("kdb:bad gfn from gva_to_gfn\n");
         return INVALID_MFN;
     }
 
-    mfn = mfn_x(gfn_to_mfn(dp, gfn, &gfntype)); 
+    mfn = mfn_x(get_gfn(dp, *gfn, &gfntype)); 
     if ( p2m_is_readonly(gfntype) && toaddr )
     {
         DBGP2("kdb:p2m_is_readonly: gfntype:%x\n", gfntype);
@@ -193,12 +194,12 @@
     while ( len > 0 )
     {
         char *va;
-        unsigned long mfn, pagecnt;
+        unsigned long mfn, gfn = INVALID_GFN, pagecnt;
 
         pagecnt = min_t(long, PAGE_SIZE - (addr & ~PAGE_MASK), len);
 
         mfn = (dp->is_hvm
-               ? dbg_hvm_va2mfn(addr, dp, toaddr)
+               ? dbg_hvm_va2mfn(addr, dp, toaddr, &gfn)
                : dbg_pv_va2mfn(addr, dp, pgd3));
         if ( mfn == INVALID_MFN ) 
             break;
@@ -217,6 +218,8 @@
         }
 
         unmap_domain_page(va);
+        if ( gfn != INVALID_GFN )
+            put_gfn(dp, gfn);
 
         addr += pagecnt;
         buf += pagecnt;
diff -r 9af6829a9273 -r 3622d7fae14d xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Fri Nov 11 17:46:19 2011 +0100
+++ b/xen/arch/x86/domain.c     Fri Nov 11 18:11:34 2011 +0000
@@ -720,6 +720,7 @@
     struct vcpu *v, vcpu_guest_context_u c)
 {
     struct domain *d = v->domain;
+    unsigned long cr3_gfn;
     unsigned long cr3_pfn = INVALID_MFN;
     unsigned long flags, cr4;
     unsigned int i;
@@ -931,7 +932,8 @@
 
     if ( !compat )
     {
-        cr3_pfn = gmfn_to_mfn(d, xen_cr3_to_pfn(c.nat->ctrlreg[3]));
+        cr3_gfn = xen_cr3_to_pfn(c.nat->ctrlreg[3]);
+        cr3_pfn = get_gfn_untyped(d, cr3_gfn);
 
         if ( !mfn_valid(cr3_pfn) ||
              (paging_mode_refcounts(d)
@@ -939,16 +941,18 @@
               : !get_page_and_type(mfn_to_page(cr3_pfn), d,
                                    PGT_base_page_table)) )
         {
+            put_gfn(d, cr3_gfn);
             destroy_gdt(v);
             return -EINVAL;
         }
 
         v->arch.guest_table = pagetable_from_pfn(cr3_pfn);
-
+        put_gfn(d, cr3_gfn);
 #ifdef __x86_64__
         if ( c.nat->ctrlreg[1] )
         {
-            cr3_pfn = gmfn_to_mfn(d, xen_cr3_to_pfn(c.nat->ctrlreg[1]));
+            cr3_gfn = xen_cr3_to_pfn(c.nat->ctrlreg[1]);
+            cr3_pfn = get_gfn_untyped(d, cr3_gfn);
 
             if ( !mfn_valid(cr3_pfn) ||
                  (paging_mode_refcounts(d)
@@ -962,11 +966,13 @@
                     put_page(mfn_to_page(cr3_pfn));
                 else
                     put_page_and_type(mfn_to_page(cr3_pfn));
+                put_gfn(d, cr3_gfn); 
                 destroy_gdt(v);
                 return -EINVAL;
             }
 
             v->arch.guest_table_user = pagetable_from_pfn(cr3_pfn);
+            put_gfn(d, cr3_gfn); 
         }
         else if ( !(flags & VGCF_in_kernel) )
         {
@@ -978,7 +984,8 @@
     {
         l4_pgentry_t *l4tab;
 
-        cr3_pfn = gmfn_to_mfn(d, compat_cr3_to_pfn(c.cmp->ctrlreg[3]));
+        cr3_gfn = compat_cr3_to_pfn(c.cmp->ctrlreg[3]);
+        cr3_pfn = get_gfn_untyped(d, cr3_gfn);
 
         if ( !mfn_valid(cr3_pfn) ||
              (paging_mode_refcounts(d)
@@ -986,6 +993,7 @@
               : !get_page_and_type(mfn_to_page(cr3_pfn), d,
                                    PGT_l3_page_table)) )
         {
+            put_gfn(d, cr3_gfn); 
             destroy_gdt(v);
             return -EINVAL;
         }
@@ -993,6 +1001,7 @@
         l4tab = __va(pagetable_get_paddr(v->arch.guest_table));
         *l4tab = l4e_from_pfn(
             cr3_pfn, _PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED);
+        put_gfn(d, cr3_gfn); 
 #endif
     }
 
@@ -1058,11 +1067,12 @@
  * event doesn't get missed.
  */
 static int
-map_vcpu_info(struct vcpu *v, unsigned long mfn, unsigned offset)
+map_vcpu_info(struct vcpu *v, unsigned long gfn, unsigned offset)
 {
     struct domain *d = v->domain;
     void *mapping;
     vcpu_info_t *new_info;
+    unsigned long mfn;
     int i;
 
     if ( offset > (PAGE_SIZE - sizeof(vcpu_info_t)) )
@@ -1075,15 +1085,19 @@
     if ( (v != current) && !test_bit(_VPF_down, &v->pause_flags) )
         return -EINVAL;
 
-    mfn = gmfn_to_mfn(d, mfn);
+    mfn = get_gfn_untyped(d, gfn);
     if ( !mfn_valid(mfn) ||
          !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
+    {
+        put_gfn(d, gfn); 
         return -EINVAL;
+    }
 
     mapping = map_domain_page_global(mfn);
     if ( mapping == NULL )
     {
         put_page_and_type(mfn_to_page(mfn));
+        put_gfn(d, gfn); 
         return -ENOMEM;
     }
 
@@ -1113,6 +1127,7 @@
     for ( i = 0; i < BITS_PER_EVTCHN_WORD(d); i++ )
         set_bit(i, &vcpu_info(v, evtchn_pending_sel));
 
+    put_gfn(d, gfn); 
     return 0;
 }
 
diff -r 9af6829a9273 -r 3622d7fae14d xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c     Fri Nov 11 17:46:19 2011 +0100
+++ b/xen/arch/x86/domctl.c     Fri Nov 11 18:11:34 2011 +0000
@@ -199,7 +199,7 @@
 
                 for ( j = 0; j < k; j++ )
                 {
-                    unsigned long type = 0, mfn = gmfn_to_mfn(d, arr[j]);
+                    unsigned long type = 0, mfn = get_gfn_untyped(d, arr[j]);
 
                     page = mfn_to_page(mfn);
 
@@ -235,6 +235,7 @@
                         type = XEN_DOMCTL_PFINFO_XTAB;
 
                     arr[j] = type;
+                    put_gfn(d, arr[j]);
                 }
 
                 if ( copy_to_guest_offset(domctl->u.getpageframeinfo3.array,
@@ -299,7 +300,8 @@
             for ( j = 0; j < k; j++ )
             {      
                 struct page_info *page;
-                unsigned long mfn = gmfn_to_mfn(d, arr32[j]);
+                unsigned long gfn = arr32[j];
+                unsigned long mfn = get_gfn_untyped(d, gfn);
 
                 page = mfn_to_page(mfn);
 
@@ -310,8 +312,10 @@
                      unlikely(is_xen_heap_mfn(mfn)) )
                     arr32[j] |= XEN_DOMCTL_PFINFO_XTAB;
                 else if ( xsm_getpageframeinfo(page) != 0 )
+                {
+                    put_gfn(d, gfn); 
                     continue;
-                else if ( likely(get_page(page, d)) )
+                } else if ( likely(get_page(page, d)) )
                 {
                     unsigned long type = 0;
 
@@ -339,6 +343,7 @@
                 else
                     arr32[j] |= XEN_DOMCTL_PFINFO_XTAB;
 
+                put_gfn(d, gfn); 
             }
 
             if ( copy_to_guest_offset(domctl->u.getpageframeinfo2.array,
@@ -425,12 +430,13 @@
             break;
         }
 
-        mfn = gmfn_to_mfn(d, gmfn);
+        mfn = get_gfn_untyped(d, gmfn);
 
         ret = -EACCES;
         if ( !mfn_valid(mfn) ||
              !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
         {
+            put_gfn(d, gmfn); 
             rcu_unlock_domain(d);
             break;
         }
@@ -443,6 +449,7 @@
 
         put_page_and_type(mfn_to_page(mfn));
 
+        put_gfn(d, gmfn); 
         rcu_unlock_domain(d);
     }
     break;
diff -r 9af6829a9273 -r 3622d7fae14d xen/arch/x86/hvm/emulate.c
--- a/xen/arch/x86/hvm/emulate.c        Fri Nov 11 17:46:19 2011 +0100
+++ b/xen/arch/x86/hvm/emulate.c        Fri Nov 11 18:11:34 2011 +0000
@@ -63,14 +63,18 @@
     int rc;
 
     /* Check for paged out page */
-    ram_mfn = gfn_to_mfn_unshare(curr->domain, ram_gfn, &p2mt);
+    ram_mfn = get_gfn_unshare(curr->domain, ram_gfn, &p2mt);
     if ( p2m_is_paging(p2mt) )
     {
         p2m_mem_paging_populate(curr->domain, ram_gfn);
+        put_gfn(curr->domain, ram_gfn); 
         return X86EMUL_RETRY;
     }
     if ( p2m_is_shared(p2mt) )
+    {
+        put_gfn(curr->domain, ram_gfn); 
         return X86EMUL_RETRY;
+    }
 
     /*
      * Weird-sized accesses have undefined behaviour: we discard writes
@@ -82,6 +86,7 @@
         ASSERT(p_data != NULL); /* cannot happen with a REP prefix */
         if ( dir == IOREQ_READ )
             memset(p_data, ~0, size);
+        put_gfn(curr->domain, ram_gfn); 
         return X86EMUL_UNHANDLEABLE;
     }
 
@@ -101,7 +106,10 @@
             paddr_t pa = vio->mmio_large_write_pa;
             unsigned int bytes = vio->mmio_large_write_bytes;
             if ( (addr >= pa) && ((addr + size) <= (pa + bytes)) )
+            {
+                put_gfn(curr->domain, ram_gfn); 
                 return X86EMUL_OKAY;
+            }
         }
         else
         {
@@ -111,6 +119,7 @@
             {
                 memcpy(p_data, &vio->mmio_large_read[addr - pa],
                        size);
+                put_gfn(curr->domain, ram_gfn); 
                 return X86EMUL_OKAY;
             }
         }
@@ -123,15 +132,22 @@
     case HVMIO_completed:
         vio->io_state = HVMIO_none;
         if ( p_data == NULL )
+        {
+            put_gfn(curr->domain, ram_gfn);
             return X86EMUL_UNHANDLEABLE;
+        }
         goto finish_access;
     case HVMIO_dispatched:
         /* May have to wait for previous cycle of a multi-write to complete. */
         if ( is_mmio && !value_is_ptr && (dir == IOREQ_WRITE) &&
              (addr == (vio->mmio_large_write_pa +
                        vio->mmio_large_write_bytes)) )
+        {
+            put_gfn(curr->domain, ram_gfn);
             return X86EMUL_RETRY;
+        }
     default:
+        put_gfn(curr->domain, ram_gfn);
         return X86EMUL_UNHANDLEABLE;
     }
 
@@ -139,6 +155,7 @@
     {
         gdprintk(XENLOG_WARNING, "WARNING: io already pending (%d)?\n",
                  p->state);
+        put_gfn(curr->domain, ram_gfn); 
         return X86EMUL_UNHANDLEABLE;
     }
 
@@ -189,7 +206,10 @@
     }
 
     if ( rc != X86EMUL_OKAY )
+    {
+        put_gfn(curr->domain, ram_gfn); 
         return rc;
+    }
 
  finish_access:
     if ( p_data != NULL )
@@ -223,6 +243,7 @@
         }
     }
 
+    put_gfn(curr->domain, ram_gfn); 
     return X86EMUL_OKAY;
 }
 
@@ -671,12 +692,14 @@
     if ( rc != X86EMUL_OKAY )
         return rc;
 
-    (void)gfn_to_mfn(current->domain, sgpa >> PAGE_SHIFT, &p2mt);
+    /* Unlocked works here because we get_gfn for real in whatever
+     * we call later. */
+    (void)get_gfn_unlocked(current->domain, sgpa >> PAGE_SHIFT, &p2mt);
     if ( !p2m_is_ram(p2mt) && !p2m_is_grant(p2mt) )
         return hvmemul_do_mmio(
             sgpa, reps, bytes_per_rep, dgpa, IOREQ_READ, df, NULL);
 
-    (void)gfn_to_mfn(current->domain, dgpa >> PAGE_SHIFT, &p2mt);
+    (void)get_gfn_unlocked(current->domain, dgpa >> PAGE_SHIFT, &p2mt);
     if ( !p2m_is_ram(p2mt) && !p2m_is_grant(p2mt) )
         return hvmemul_do_mmio(
             dgpa, reps, bytes_per_rep, sgpa, IOREQ_WRITE, df, NULL);
diff -r 9af6829a9273 -r 3622d7fae14d xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Fri Nov 11 17:46:19 2011 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Fri Nov 11 18:11:34 2011 +0000
@@ -355,26 +355,37 @@
     unsigned long mfn;
     void *va;
 
-    mfn = mfn_x(gfn_to_mfn_unshare(d, gmfn, &p2mt));
+    mfn = mfn_x(get_gfn_unshare(d, gmfn, &p2mt));
     if ( !p2m_is_ram(p2mt) )
+    {
+        put_gfn(d, gmfn);
         return -EINVAL;
+    }
     if ( p2m_is_paging(p2mt) )
     {
         p2m_mem_paging_populate(d, gmfn);
+        put_gfn(d, gmfn);
         return -ENOENT;
     }
     if ( p2m_is_shared(p2mt) )
+    {
+        put_gfn(d, gmfn);
         return -ENOENT;
+    }
     ASSERT(mfn_valid(mfn));
 
     page = mfn_to_page(mfn);
     if ( !get_page_and_type(page, d, PGT_writable_page) )
+    {
+        put_gfn(d, gmfn);
         return -EINVAL;
+    }
 
     va = map_domain_page_global(mfn);
     if ( va == NULL )
     {
         put_page_and_type(page);
+        put_gfn(d, gmfn);
         return -ENOMEM;
     }
 
@@ -385,6 +396,7 @@
         spin_unlock(&iorp->lock);
         unmap_domain_page_global(va);
         put_page_and_type(mfn_to_page(mfn));
+        put_gfn(d, gmfn);
         return -EINVAL;
     }
 
@@ -392,6 +404,7 @@
     iorp->page = page;
 
     spin_unlock(&iorp->lock);
+    put_gfn(d, gmfn);
 
     domain_unpause(d);
 
@@ -1182,6 +1195,7 @@
     mfn_t mfn;
     struct vcpu *v = current;
     struct p2m_domain *p2m;
+    int rc;
 
     /* On Nested Virtualization, walk the guest page table.
      * If this succeeds, all is fine.
@@ -1216,7 +1230,7 @@
     }
 
     p2m = p2m_get_hostp2m(v->domain);
-    mfn = gfn_to_mfn_type_p2m(p2m, gfn, &p2mt, &p2ma, p2m_guest, NULL);
+    mfn = get_gfn_type_access(p2m, gfn, &p2mt, &p2ma, p2m_guest, NULL);
 
     /* Check access permissions first, then handle faults */
     if ( access_valid && (mfn_x(mfn) != INVALID_MFN) )
@@ -1255,8 +1269,8 @@
         if ( violation )
         {
             p2m_mem_access_check(gpa, gla_valid, gla, access_r, access_w, access_x);
-
-            return 1;
+            rc = 1;
+            goto out_put_gfn;
         }
     }
 
@@ -1268,7 +1282,8 @@
     {
         if ( !handle_mmio() )
             hvm_inject_exception(TRAP_gp_fault, 0, 0);
-        return 1;
+        rc = 1;
+        goto out_put_gfn;
     }
 
 #ifdef __x86_64__
@@ -1281,7 +1296,8 @@
     {
         ASSERT(!p2m_is_nestedp2m(p2m));
         mem_sharing_unshare_page(p2m->domain, gfn, 0);
-        return 1;
+        rc = 1;
+        goto out_put_gfn;
     }
 #endif
  
@@ -1295,7 +1311,8 @@
          */
         paging_mark_dirty(v->domain, mfn_x(mfn));
         p2m_change_type(v->domain, gfn, p2m_ram_logdirty, p2m_ram_rw);
-        return 1;
+        rc = 1;
+        goto out_put_gfn;
     }
 
     /* Shouldn't happen: Maybe the guest was writing to a r/o grant mapping? */
@@ -1304,10 +1321,14 @@
         gdprintk(XENLOG_WARNING,
                  "trying to write to read-only grant mapping\n");
         hvm_inject_exception(TRAP_gp_fault, 0, 0);
-        return 1;
+        rc = 1;
+        goto out_put_gfn;
     }
 
-    return 0;
+    rc = 0;
+out_put_gfn:
+    put_gfn(p2m->domain, gfn);
+    return rc;
 }
 
 int hvm_handle_xsetbv(u64 new_bv)
@@ -1530,10 +1551,11 @@
         {
             /* The guest CR3 must be pointing to the guest physical. */
             gfn = v->arch.hvm_vcpu.guest_cr[3]>>PAGE_SHIFT;
-            mfn = mfn_x(gfn_to_mfn(v->domain, gfn, &p2mt));
+            mfn = mfn_x(get_gfn(v->domain, gfn, &p2mt));
             if ( !p2m_is_ram(p2mt) || !mfn_valid(mfn) ||
                  !get_page(mfn_to_page(mfn), v->domain))
             {
+                put_gfn(v->domain, gfn);
                 gdprintk(XENLOG_ERR, "Invalid CR3 value = %lx (mfn=%lx)\n",
                          v->arch.hvm_vcpu.guest_cr[3], mfn);
                 domain_crash(v->domain);
@@ -1545,6 +1567,7 @@
 
             HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
                         v->arch.hvm_vcpu.guest_cr[3], mfn);
+            put_gfn(v->domain, gfn);
         }
     }
     else if ( !(value & X86_CR0_PG) && (old_value & X86_CR0_PG) )
@@ -1621,13 +1644,17 @@
     {
         /* Shadow-mode CR3 change. Check PDBR and update refcounts. */
         HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
-        mfn = mfn_x(gfn_to_mfn(v->domain, value >> PAGE_SHIFT, &p2mt));
+        mfn = mfn_x(get_gfn(v->domain, value >> PAGE_SHIFT, &p2mt));
         if ( !p2m_is_ram(p2mt) || !mfn_valid(mfn) ||
              !get_page(mfn_to_page(mfn), v->domain) )
+        {
+              put_gfn(v->domain, value >> PAGE_SHIFT);
               goto bad_cr3;
+        }
 
         put_page(pagetable_get_page(v->arch.guest_table));
         v->arch.guest_table = pagetable_from_pfn(mfn);
+        put_gfn(v->domain, value >> PAGE_SHIFT);
 
         HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
     }
@@ -1764,6 +1791,7 @@
     return 0;
 }
 
+/* We leave this function holding a lock on the p2m entry */
 static void *__hvm_map_guest_frame(unsigned long gfn, bool_t writable)
 {
     unsigned long mfn;
@@ -1771,13 +1799,17 @@
     struct domain *d = current->domain;
 
     mfn = mfn_x(writable
-                ? gfn_to_mfn_unshare(d, gfn, &p2mt)
-                : gfn_to_mfn(d, gfn, &p2mt));
+                ? get_gfn_unshare(d, gfn, &p2mt)
+                : get_gfn(d, gfn, &p2mt));
     if ( (p2m_is_shared(p2mt) && writable) || !p2m_is_ram(p2mt) )
+    {
+        put_gfn(d, gfn);
         return NULL;
+    }
     if ( p2m_is_paging(p2mt) )
     {
         p2m_mem_paging_populate(d, gfn);
+        put_gfn(d, gfn);
         return NULL;
     }
 
@@ -1805,9 +1837,8 @@
         unmap_domain_page(p);
 }
 
-static void *hvm_map_entry(unsigned long va)
+static void *hvm_map_entry(unsigned long va, unsigned long *gfn)
 {
-    unsigned long gfn;
     uint32_t pfec;
     char *v;
 
@@ -1824,11 +1855,11 @@
      * treat it as a kernel-mode read (i.e. no access checks).
      */
     pfec = PFEC_page_present;
-    gfn = paging_gva_to_gfn(current, va, &pfec);
+    *gfn = paging_gva_to_gfn(current, va, &pfec);
     if ( (pfec == PFEC_page_paged) || (pfec == PFEC_page_shared) )
         goto fail;
 
-    v = hvm_map_guest_frame_rw(gfn);
+    v = hvm_map_guest_frame_rw(*gfn);
     if ( v == NULL )
         goto fail;
 
@@ -1839,9 +1870,11 @@
     return NULL;
 }
 
-static void hvm_unmap_entry(void *p)
+static void hvm_unmap_entry(void *p, unsigned long gfn)
 {
     hvm_unmap_guest_frame(p);
+    if ( p && (gfn != INVALID_GFN) )
+        put_gfn(current->domain, gfn);
 }
 
 static int hvm_load_segment_selector(
@@ -1853,6 +1886,7 @@
     int fault_type = TRAP_invalid_tss;
     struct cpu_user_regs *regs = guest_cpu_user_regs();
     struct vcpu *v = current;
+    unsigned long pdesc_gfn = INVALID_GFN;
 
     if ( regs->eflags & X86_EFLAGS_VM )
     {
@@ -1886,7 +1920,7 @@
     if ( ((sel & 0xfff8) + 7) > desctab.limit )
         goto fail;
 
-    pdesc = hvm_map_entry(desctab.base + (sel & 0xfff8));
+    pdesc = hvm_map_entry(desctab.base + (sel & 0xfff8), &pdesc_gfn);
     if ( pdesc == NULL )
         goto hvm_map_fail;
 
@@ -1946,7 +1980,7 @@
     desc.b |= 0x100;
 
  skip_accessed_flag:
-    hvm_unmap_entry(pdesc);
+    hvm_unmap_entry(pdesc, pdesc_gfn);
 
     segr.base = (((desc.b <<  0) & 0xff000000u) |
                  ((desc.b << 16) & 0x00ff0000u) |
@@ -1962,7 +1996,7 @@
     return 0;
 
  unmap_and_fail:
-    hvm_unmap_entry(pdesc);
+    hvm_unmap_entry(pdesc, pdesc_gfn);
  fail:
     hvm_inject_exception(fault_type, sel & 0xfffc, 0);
  hvm_map_fail:
@@ -1977,7 +2011,7 @@
     struct cpu_user_regs *regs = guest_cpu_user_regs();
     struct segment_register gdt, tr, prev_tr, segr;
     struct desc_struct *optss_desc = NULL, *nptss_desc = NULL, tss_desc;
-    unsigned long eflags;
+    unsigned long eflags, optss_gfn = INVALID_GFN, nptss_gfn = INVALID_GFN;
     int exn_raised, rc;
     struct {
         u16 back_link,__blh;
@@ -2003,11 +2037,11 @@
         goto out;
     }
 
-    optss_desc = hvm_map_entry(gdt.base + (prev_tr.sel & 0xfff8));
+    optss_desc = hvm_map_entry(gdt.base + (prev_tr.sel & 0xfff8), &optss_gfn);
     if ( optss_desc == NULL )
         goto out;
 
-    nptss_desc = hvm_map_entry(gdt.base + (tss_sel & 0xfff8));
+    nptss_desc = hvm_map_entry(gdt.base + (tss_sel & 0xfff8), &nptss_gfn);
     if ( nptss_desc == NULL )
         goto out;
 
@@ -2172,8 +2206,8 @@
     }
 
  out:
-    hvm_unmap_entry(optss_desc);
-    hvm_unmap_entry(nptss_desc);
+    hvm_unmap_entry(optss_desc, optss_gfn);
+    hvm_unmap_entry(nptss_desc, nptss_gfn);
 }
 
 #define HVMCOPY_from_guest (0u<<0)
@@ -2230,19 +2264,29 @@
             gfn = addr >> PAGE_SHIFT;
         }
 
-        mfn = mfn_x(gfn_to_mfn_unshare(curr->domain, gfn, &p2mt));
+        mfn = mfn_x(get_gfn_unshare(curr->domain, gfn, &p2mt));
 
         if ( p2m_is_paging(p2mt) )
         {
             p2m_mem_paging_populate(curr->domain, gfn);
+            put_gfn(curr->domain, gfn);
             return HVMCOPY_gfn_paged_out;
         }
         if ( p2m_is_shared(p2mt) )
+        {
+            put_gfn(curr->domain, gfn);
             return HVMCOPY_gfn_shared;
+        }
         if ( p2m_is_grant(p2mt) )
+        {
+            put_gfn(curr->domain, gfn);
             return HVMCOPY_unhandleable;
+        }
         if ( !p2m_is_ram(p2mt) )
+        {
+            put_gfn(curr->domain, gfn);
             return HVMCOPY_bad_gfn_to_mfn;
+        }
         ASSERT(mfn_valid(mfn));
 
         p = (char *)map_domain_page(mfn) + (addr & ~PAGE_MASK);
@@ -2273,6 +2317,7 @@
         addr += count;
         buf  += count;
         todo -= count;
+        put_gfn(curr->domain, gfn);
     }
 
     return HVMCOPY_okay;
@@ -3697,11 +3742,11 @@
         for ( pfn = a.first_pfn; pfn < a.first_pfn + a.nr; pfn++ )
         {
             p2m_type_t t;
-            mfn_t mfn = gfn_to_mfn(d, pfn, &t);
+            mfn_t mfn = get_gfn(d, pfn, &t);
             if ( p2m_is_paging(t) )
             {
                 p2m_mem_paging_populate(d, pfn);
-
+                put_gfn(d, pfn);
                 rc = -EINVAL;
                 goto param_fail3;
             }
@@ -3716,6 +3761,7 @@
                 /* don't take a long time and don't die either */
                 sh_remove_shadows(d->vcpu[0], mfn, 1, 0);
             }
+            put_gfn(d, pfn);
         }
 
     param_fail3:
@@ -3739,7 +3785,7 @@
         rc = -EINVAL;
         if ( is_hvm_domain(d) )
         {
-            gfn_to_mfn_unshare(d, a.pfn, &t);
+            get_gfn_unshare_unlocked(d, a.pfn, &t);
             if ( p2m_is_mmio(t) )
                 a.mem_type =  HVMMEM_mmio_dm;
             else if ( p2m_is_readonly(t) )
@@ -3792,20 +3838,23 @@
             p2m_type_t t;
             p2m_type_t nt;
             mfn_t mfn;
-            mfn = gfn_to_mfn_unshare(d, pfn, &t);
+            mfn = get_gfn_unshare(d, pfn, &t);
             if ( p2m_is_paging(t) )
             {
                 p2m_mem_paging_populate(d, pfn);
+                put_gfn(d, pfn);
                 rc = -EINVAL;
                 goto param_fail4;
             }
             if ( p2m_is_shared(t) )
             {
+                put_gfn(d, pfn);
                 rc = -EINVAL;
                 goto param_fail4;
             } 
             if ( p2m_is_grant(t) )
             {
+                put_gfn(d, pfn);
                 gdprintk(XENLOG_WARNING,
                          "type for pfn 0x%lx changed to grant while "
                          "we were working?\n", pfn);
@@ -3816,6 +3865,7 @@
                 nt = p2m_change_type(d, pfn, t, memtype[a.hvmmem_type]);
                 if ( nt != t )
                 {
+                    put_gfn(d, pfn);
                     gdprintk(XENLOG_WARNING,
                              "type of pfn 0x%lx changed from %d to %d while "
                              "we were trying to change it to %d\n",
@@ -3823,6 +3873,7 @@
                     goto param_fail4;
                 }
             }
+            put_gfn(d, pfn);
         }
 
         rc = 0;
diff -r 9af6829a9273 -r 3622d7fae14d xen/arch/x86/hvm/mtrr.c
--- a/xen/arch/x86/hvm/mtrr.c   Fri Nov 11 17:46:19 2011 +0100
+++ b/xen/arch/x86/hvm/mtrr.c   Fri Nov 11 18:11:34 2011 +0000
@@ -389,7 +389,7 @@
     {
         struct domain *d = v->domain;
         p2m_type_t p2mt;
-        gfn_to_mfn_query(d, paddr_to_pfn(gpaddr), &p2mt);
+        get_gfn_query_unlocked(d, paddr_to_pfn(gpaddr), &p2mt);
         if (p2m_is_ram(p2mt))
             gdprintk(XENLOG_WARNING,
                     "Conflict occurs for a given guest l1e flags:%x "
diff -r 9af6829a9273 -r 3622d7fae14d xen/arch/x86/hvm/stdvga.c
--- a/xen/arch/x86/hvm/stdvga.c Fri Nov 11 17:46:19 2011 +0100
+++ b/xen/arch/x86/hvm/stdvga.c Fri Nov 11 18:11:34 2011 +0000
@@ -482,7 +482,7 @@
                 if ( hvm_copy_to_guest_phys(data, &tmp, p->size) !=
                      HVMCOPY_okay )
                 {
-                    (void)gfn_to_mfn(d, data >> PAGE_SHIFT, &p2mt);
+                    (void)get_gfn_unlocked(d, data >> PAGE_SHIFT, &p2mt);
                     /*
                      * The only case we handle is vga_mem <-> vga_mem.
                      * Anything else disables caching and leaves it to qemu-dm.
@@ -504,7 +504,7 @@
                 if ( hvm_copy_from_guest_phys(&tmp, data, p->size) !=
                      HVMCOPY_okay )
                 {
-                    (void)gfn_to_mfn(d, data >> PAGE_SHIFT, &p2mt);
+                    (void)get_gfn_unlocked(d, data >> PAGE_SHIFT, &p2mt);
                     if ( (p2mt != p2m_mmio_dm) || (data < VGA_MEM_BASE) ||
                          ((data + p->size) > (VGA_MEM_BASE + VGA_MEM_SIZE)) )
                         return 0;
diff -r 9af6829a9273 -r 3622d7fae14d xen/arch/x86/hvm/svm/nestedsvm.c
--- a/xen/arch/x86/hvm/svm/nestedsvm.c  Fri Nov 11 17:46:19 2011 +0100
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c  Fri Nov 11 18:11:34 2011 +0000
@@ -81,6 +81,10 @@
         if (nv->nv_vvmcx == NULL)
             return 0;
         nv->nv_vvmcxaddr = vmcbaddr;
+        /* put_gfn here even though the map survives beyond this caller.
+         * The map can likely survive beyond a hypervisor exit, thus we
+         * need to put the gfn */
+        put_gfn(current->domain, vmcbaddr >> PAGE_SHIFT);
     }
 
     return 1;
@@ -354,6 +358,7 @@
     ioport_80 = test_bit(0x80, ns_viomap);
     ioport_ed = test_bit(0xed, ns_viomap);
     hvm_unmap_guest_frame(ns_viomap);
+    put_gfn(current->domain, svm->ns_iomap_pa >> PAGE_SHIFT);
 
     svm->ns_iomap = nestedhvm_vcpu_iomap_get(ioport_80, ioport_ed);
 
@@ -857,23 +862,25 @@
     ioio_info_t ioinfo;
     uint16_t port;
     bool_t enabled;
+    unsigned long gfn = 0; /* gcc ... */
 
     ioinfo.bytes = exitinfo1;
     port = ioinfo.fields.port;
 
     switch (port) {
     case 0 ... 32767: /* first 4KB page */
-        io_bitmap = hvm_map_guest_frame_ro(iopm_gfn);
+        gfn = iopm_gfn;
         break;
     case 32768 ... 65535: /* second 4KB page */
         port -= 32768;
-        io_bitmap = hvm_map_guest_frame_ro(iopm_gfn+1);
+        gfn = iopm_gfn + 1;
         break;
     default:
         BUG();
         break;
     }
 
+    io_bitmap = hvm_map_guest_frame_ro(gfn);
     if (io_bitmap == NULL) {
         gdprintk(XENLOG_ERR,
             "IOIO intercept: mapping of permission map failed\n");
@@ -882,6 +889,8 @@
 
     enabled = test_bit(port, io_bitmap);
     hvm_unmap_guest_frame(io_bitmap);
+    put_gfn(current->domain, gfn);
+
     if (!enabled)
         return NESTEDHVM_VMEXIT_HOST;
 
diff -r 9af6829a9273 -r 3622d7fae14d xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Fri Nov 11 17:46:19 2011 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Fri Nov 11 18:11:34 2011 +0000
@@ -244,9 +244,10 @@
     {
         if ( c->cr0 & X86_CR0_PG )
         {
-            mfn = mfn_x(gfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT, &p2mt));
+            mfn = mfn_x(get_gfn(v->domain, c->cr3 >> PAGE_SHIFT, &p2mt));
             if ( !p2m_is_ram(p2mt) || !get_page(mfn_to_page(mfn), v->domain) )
             {
+                put_gfn(v->domain, c->cr3 >> PAGE_SHIFT);
                 gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%"PRIx64"\n",
                          c->cr3);
                 return -EINVAL;
@@ -257,6 +258,8 @@
             put_page(pagetable_get_page(v->arch.guest_table));
 
         v->arch.guest_table = pagetable_from_pfn(mfn);
+        if ( c->cr0 & X86_CR0_PG )
+            put_gfn(v->domain, c->cr3 >> PAGE_SHIFT);
     }
 
     v->arch.hvm_vcpu.guest_cr[0] = c->cr0 | X86_CR0_ET;
@@ -1161,7 +1164,9 @@
         p2m = p2m_get_p2m(v);
         _d.gpa = gpa;
         _d.qualification = 0;
-        _d.mfn = mfn_x(gfn_to_mfn_type_p2m(p2m, gfn, &_d.p2mt, &p2ma, p2m_query, NULL));
+        mfn = get_gfn_type_access(p2m, gfn, &_d.p2mt, &p2ma, p2m_query, NULL);
+        __put_gfn(p2m, gfn);
+        _d.mfn = mfn_x(mfn);
         
         __trace_var(TRC_HVM_NPF, 0, sizeof(_d), &_d);
     }
@@ -1181,7 +1186,8 @@
     if ( p2m == NULL )
         p2m = p2m_get_p2m(v);
     /* Everything else is an error. */
-    mfn = gfn_to_mfn_type_p2m(p2m, gfn, &p2mt, &p2ma, p2m_guest, NULL);
+    mfn = get_gfn_type_access(p2m, gfn, &p2mt, &p2ma, p2m_guest, NULL);
+    __put_gfn(p2m, gfn);
     gdprintk(XENLOG_ERR,
          "SVM violation gpa %#"PRIpaddr", mfn %#lx, type %i\n",
          gpa, mfn_x(mfn), p2mt);
diff -r 9af6829a9273 -r 3622d7fae14d xen/arch/x86/hvm/viridian.c
--- a/xen/arch/x86/hvm/viridian.c       Fri Nov 11 17:46:19 2011 +0100
+++ b/xen/arch/x86/hvm/viridian.c       Fri Nov 11 18:11:34 2011 +0000
@@ -134,12 +134,13 @@
 static void enable_hypercall_page(struct domain *d)
 {
     unsigned long gmfn = d->arch.hvm_domain.viridian.hypercall_gpa.fields.pfn;
-    unsigned long mfn = gmfn_to_mfn(d, gmfn);
+    unsigned long mfn = get_gfn_untyped(d, gmfn);
     uint8_t *p;
 
     if ( !mfn_valid(mfn) ||
          !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
     {
+        put_gfn(d, gmfn); 
         gdprintk(XENLOG_WARNING, "Bad GMFN %lx (MFN %lx)\n", gmfn, mfn);
         return;
     }
@@ -162,13 +163,14 @@
     unmap_domain_page(p);
 
     put_page_and_type(mfn_to_page(mfn));
+    put_gfn(d, gmfn); 
 }
 
 void initialize_apic_assist(struct vcpu *v)
 {
     struct domain *d = v->domain;
     unsigned long gmfn = v->arch.hvm_vcpu.viridian.apic_assist.fields.pfn;
-    unsigned long mfn = gmfn_to_mfn(d, gmfn);
+    unsigned long mfn = get_gfn_untyped(d, gmfn);
     uint8_t *p;
 
     /*
@@ -184,6 +186,7 @@
     if ( !mfn_valid(mfn) ||
          !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
     {
+        put_gfn(d, gmfn); 
         gdprintk(XENLOG_WARNING, "Bad GMFN %lx (MFN %lx)\n", gmfn, mfn);
         return;
     }
@@ -195,6 +198,7 @@
     unmap_domain_page(p);
 
     put_page_and_type(mfn_to_page(mfn));
+    put_gfn(d, gmfn); 
 }
 
 int wrmsr_viridian_regs(uint32_t idx, uint64_t val)
diff -r 9af6829a9273 -r 3622d7fae14d xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Fri Nov 11 17:46:19 2011 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Fri Nov 11 18:11:34 2011 +0000
@@ -487,9 +487,10 @@
     {
         if ( cr0 & X86_CR0_PG )
         {
-            mfn = mfn_x(gfn_to_mfn(v->domain, cr3 >> PAGE_SHIFT, &p2mt));
+            mfn = mfn_x(get_gfn(v->domain, cr3 >> PAGE_SHIFT, &p2mt));
             if ( !p2m_is_ram(p2mt) || !get_page(mfn_to_page(mfn), v->domain) )
             {
+                put_gfn(v->domain, cr3 >> PAGE_SHIFT);
                 gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%lx\n", cr3);
                 return -EINVAL;
             }
@@ -499,6 +500,8 @@
             put_page(pagetable_get_page(v->arch.guest_table));
 
         v->arch.guest_table = pagetable_from_pfn(mfn);
+        if ( cr0 & X86_CR0_PG )
+            put_gfn(v->domain, cr3 >> PAGE_SHIFT);
     }
 
     v->arch.hvm_vcpu.guest_cr[0] = cr0 | X86_CR0_ET;
@@ -1007,9 +1010,12 @@
     if ( cr3 & 0x1fUL )
         goto crash;
 
-    mfn = mfn_x(gfn_to_mfn(v->domain, cr3 >> PAGE_SHIFT, &p2mt));
+    mfn = mfn_x(get_gfn(v->domain, cr3 >> PAGE_SHIFT, &p2mt));
     if ( !p2m_is_ram(p2mt) )
+    {
+        put_gfn(v->domain, cr3 >> PAGE_SHIFT);
         goto crash;
+    }
 
     p = map_domain_page(mfn);
 
@@ -1037,6 +1043,7 @@
     vmx_vmcs_exit(v);
 
     unmap_domain_page(p);
+    put_gfn(v->domain, cr3 >> PAGE_SHIFT);
     return;
 
  crash:
@@ -2090,7 +2097,7 @@
 
         _d.gpa = gpa;
         _d.qualification = qualification;
-        _d.mfn = mfn_x(gfn_to_mfn_query(d, gfn, &_d.p2mt));
+        _d.mfn = mfn_x(get_gfn_query_unlocked(d, gfn, &_d.p2mt));
         
         __trace_var(TRC_HVM_NPF, 0, sizeof(_d), &_d);
     }
@@ -2106,7 +2113,7 @@
         return;
 
     /* Everything else is an error. */
-    mfn = gfn_to_mfn_guest(d, gfn, &p2mt);
+    mfn = get_gfn_guest_unlocked(d, gfn, &p2mt);
     gdprintk(XENLOG_ERR, "EPT violation %#lx (%c%c%c/%c%c%c), "
              "gpa %#"PRIpaddr", mfn %#lx, type %i.\n", 
              qualification, 
diff -r 9af6829a9273 -r 3622d7fae14d xen/arch/x86/hvm/vmx/vvmx.c
--- a/xen/arch/x86/hvm/vmx/vvmx.c       Fri Nov 11 17:46:19 2011 +0100
+++ b/xen/arch/x86/hvm/vmx/vvmx.c       Fri Nov 11 18:11:34 2011 +0000
@@ -558,9 +558,12 @@
 
     index = vmcs_reg == IO_BITMAP_A ? 0 : 1;
     if (nvmx->iobitmap[index])
-        hvm_unmap_guest_frame (nvmx->iobitmap[index]);
+        hvm_unmap_guest_frame (nvmx->iobitmap[index]); 
     gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, vmcs_reg);
     nvmx->iobitmap[index] = hvm_map_guest_frame_ro (gpa >> PAGE_SHIFT);
+    /* See comment in nestedsvm_vmcb_map re putting this gfn and 
+     * liveness of the map it backs */
+    put_gfn(current->domain, gpa >> PAGE_SHIFT);
 }
 
 static inline void map_io_bitmap_all(struct vcpu *v)
@@ -577,12 +580,12 @@
 
     __clear_current_vvmcs(v);
     if ( nvcpu->nv_vvmcxaddr != VMCX_EADDR )
-        hvm_unmap_guest_frame (nvcpu->nv_vvmcx);
+        hvm_unmap_guest_frame(nvcpu->nv_vvmcx);
     nvcpu->nv_vvmcx == NULL;
     nvcpu->nv_vvmcxaddr = VMCX_EADDR;
     for (i=0; i<2; i++) {
         if ( nvmx->iobitmap[i] ) {
-            hvm_unmap_guest_frame (nvmx->iobitmap[i]);
+            hvm_unmap_guest_frame(nvmx->iobitmap[i]); 
             nvmx->iobitmap[i] = NULL;
         }
     }
@@ -1138,6 +1141,9 @@
         nvcpu->nv_vvmcx = hvm_map_guest_frame_rw (gpa >> PAGE_SHIFT);
         nvcpu->nv_vvmcxaddr = gpa;
         map_io_bitmap_all (v);
+        /* See comment in nestedsvm_vmcb_map regarding putting this 
+         * gfn and liveness of the map that uses it */
+        put_gfn(current->domain, gpa >> PAGE_SHIFT);
     }
 
     vmreturn(regs, VMSUCCEED);
@@ -1199,6 +1205,7 @@
         if ( vvmcs ) 
             __set_vvmcs(vvmcs, NVMX_LAUNCH_STATE, 0);
         hvm_unmap_guest_frame(vvmcs);
+        put_gfn(current->domain, gpa >> PAGE_SHIFT);
     }
 
     vmreturn(regs, VMSUCCEED);
diff -r 9af6829a9273 -r 3622d7fae14d xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Fri Nov 11 17:46:19 2011 +0100
+++ b/xen/arch/x86/mm.c Fri Nov 11 18:11:34 2011 +0000
@@ -663,13 +663,19 @@
         return 0;
 
     gmfn = l1e_get_pfn(l1e);
-    mfn = gmfn_to_mfn(d, gmfn);
+    mfn = get_gfn_untyped(d, gmfn);
     if ( unlikely(!mfn_valid(mfn)) )
+    {
+        put_gfn(d, gmfn); 
         return 0;
+    }
 
     okay = get_page_and_type(mfn_to_page(mfn), d, PGT_seg_desc_page);
     if ( unlikely(!okay) )
+    {
+        put_gfn(d, gmfn); 
         return 0;
+    }
 
     nl1e = l1e_from_pfn(mfn, l1e_get_flags(l1e) | _PAGE_RW);
 
@@ -678,6 +684,7 @@
     v->arch.pv_vcpu.shadow_ldt_mapcnt++;
     spin_unlock(&v->arch.pv_vcpu.shadow_ldt_lock);
 
+    put_gfn(d, gmfn); 
     return 1;
 }
 
@@ -1798,7 +1805,6 @@
 {
     l1_pgentry_t ol1e;
     struct domain *pt_dom = pt_vcpu->domain;
-    unsigned long mfn;
     p2m_type_t p2mt;
     int rc = 0;
 
@@ -1815,9 +1821,14 @@
     if ( l1e_get_flags(nl1e) & _PAGE_PRESENT )
     {
         /* Translate foreign guest addresses. */
-        mfn = mfn_x(gfn_to_mfn(pg_dom, l1e_get_pfn(nl1e), &p2mt));
+        unsigned long mfn, gfn;
+        gfn = l1e_get_pfn(nl1e);
+        mfn = mfn_x(get_gfn(pg_dom, gfn, &p2mt));
         if ( !p2m_is_ram(p2mt) || unlikely(mfn == INVALID_MFN) )
+        {
+            put_gfn(pg_dom, gfn);
             return -EINVAL;
+        }
         ASSERT((mfn & ~(PADDR_MASK >> PAGE_SHIFT)) == 0);
         nl1e = l1e_from_pfn(mfn, l1e_get_flags(nl1e));
 
@@ -1825,6 +1836,7 @@
         {
             MEM_LOG("Bad L1 flags %x",
                     l1e_get_flags(nl1e) & l1_disallow_mask(pt_dom));
+            put_gfn(pg_dom, gfn);
             return -EINVAL;
         }
 
@@ -1835,12 +1847,14 @@
             if ( UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, pt_vcpu,
                               preserve_ad) )
                 return 0;
+            put_gfn(pg_dom, gfn);
             return -EBUSY;
         }
 
         switch ( rc = get_page_from_l1e(nl1e, pt_dom, pg_dom) )
         {
         default:
+            put_gfn(pg_dom, gfn);
             return rc;
         case 0:
             break;
@@ -1856,6 +1870,7 @@
             ol1e = nl1e;
             rc = -EBUSY;
         }
+        put_gfn(pg_dom, gfn);
     }
     else if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, pt_vcpu,
                                      preserve_ad)) )
@@ -3023,7 +3038,7 @@
             if ( paging_mode_refcounts(pg_owner) )
                 break;
 
-            mfn = gmfn_to_mfn(pg_owner, op.arg1.mfn);
+            mfn = get_gfn_untyped(pg_owner, op.arg1.mfn);
             rc = get_page_and_type_from_pagenr(mfn, type, pg_owner, 0, 1);
             okay = !rc;
             if ( unlikely(!okay) )
@@ -3032,6 +3047,7 @@
                     rc = -EAGAIN;
                 else if ( rc != -EAGAIN )
                     MEM_LOG("Error while pinning mfn %lx", mfn);
+                put_gfn(pg_owner, op.arg1.mfn);
                 break;
             }
 
@@ -3040,6 +3056,7 @@
             if ( (rc = xsm_memory_pin_page(d, page)) != 0 )
             {
                 put_page_and_type(page);
+                put_gfn(pg_owner, op.arg1.mfn);
                 okay = 0;
                 break;
             }
@@ -3049,6 +3066,7 @@
             {
                 MEM_LOG("Mfn %lx already pinned", mfn);
                 put_page_and_type(page);
+                put_gfn(pg_owner, op.arg1.mfn);
                 okay = 0;
                 break;
             }
@@ -3067,6 +3085,7 @@
                 spin_unlock(&pg_owner->page_alloc_lock);
                 if ( drop_ref )
                     put_page_and_type(page);
+                put_gfn(pg_owner, op.arg1.mfn);
             }
 
             break;
@@ -3079,9 +3098,10 @@
             if ( paging_mode_refcounts(pg_owner) )
                 break;
 
-            mfn = gmfn_to_mfn(pg_owner, op.arg1.mfn);
+            mfn = get_gfn_untyped(pg_owner, op.arg1.mfn);
             if ( unlikely(!(okay = get_page_from_pagenr(mfn, pg_owner))) )
             {
+                put_gfn(pg_owner, op.arg1.mfn);
                 MEM_LOG("Mfn %lx bad domain", mfn);
                 break;
             }
@@ -3092,6 +3112,7 @@
             {
                 okay = 0;
                 put_page(page);
+                put_gfn(pg_owner, op.arg1.mfn);
                 MEM_LOG("Mfn %lx not pinned", mfn);
                 break;
             }
@@ -3102,18 +3123,20 @@
             /* A page is dirtied when its pin status is cleared. */
             paging_mark_dirty(pg_owner, mfn);
 
+            put_gfn(pg_owner, op.arg1.mfn);
             break;
         }
 
         case MMUEXT_NEW_BASEPTR:
-            okay = new_guest_cr3(gmfn_to_mfn(d, op.arg1.mfn));
+            okay = new_guest_cr3(get_gfn_untyped(d, op.arg1.mfn));
+            put_gfn(d, op.arg1.mfn);
             break;
         
 #ifdef __x86_64__
         case MMUEXT_NEW_USER_BASEPTR: {
             unsigned long old_mfn, mfn;
 
-            mfn = gmfn_to_mfn(d, op.arg1.mfn);
+            mfn = get_gfn_untyped(d, op.arg1.mfn);
             if ( mfn != 0 )
             {
                 if ( paging_mode_refcounts(d) )
@@ -3123,6 +3146,7 @@
                         mfn, PGT_root_page_table, d, 0, 0);
                 if ( unlikely(!okay) )
                 {
+                    put_gfn(d, op.arg1.mfn);
                     MEM_LOG("Error while installing new mfn %lx", mfn);
                     break;
                 }
@@ -3130,6 +3154,7 @@
 
             old_mfn = pagetable_get_pfn(curr->arch.guest_table_user);
             curr->arch.guest_table_user = pagetable_from_pfn(mfn);
+            put_gfn(d, op.arg1.mfn);
 
             if ( old_mfn != 0 )
             {
@@ -3247,11 +3272,12 @@
             unsigned long mfn;
             unsigned char *ptr;
 
-            mfn = gmfn_to_mfn(d, op.arg1.mfn);
+            mfn = get_gfn_untyped(d, op.arg1.mfn);
             okay = !get_page_and_type_from_pagenr(
                 mfn, PGT_writable_page, d, 0, 0);
             if ( unlikely(!okay) )
             {
+                put_gfn(d, op.arg1.mfn);
                 MEM_LOG("Error while clearing mfn %lx", mfn);
                 break;
             }
@@ -3264,6 +3290,7 @@
             fixunmap_domain_page(ptr);
 
             put_page_and_type(mfn_to_page(mfn));
+            put_gfn(d, op.arg1.mfn);
             break;
         }
 
@@ -3273,20 +3300,23 @@
             unsigned char *dst;
             unsigned long src_mfn, mfn;
 
-            src_mfn = gmfn_to_mfn(d, op.arg2.src_mfn);
+            src_mfn = get_gfn_untyped(d, op.arg2.src_mfn);
             okay = get_page_from_pagenr(src_mfn, d);
             if ( unlikely(!okay) )
             {
+                put_gfn(d, op.arg2.src_mfn);
                 MEM_LOG("Error while copying from mfn %lx", src_mfn);
                 break;
             }
 
-            mfn = gmfn_to_mfn(d, op.arg1.mfn);
+            mfn = get_gfn_untyped(d, op.arg1.mfn);
             okay = !get_page_and_type_from_pagenr(
                 mfn, PGT_writable_page, d, 0, 0);
             if ( unlikely(!okay) )
             {
+                put_gfn(d, op.arg1.mfn);
                 put_page(mfn_to_page(src_mfn));
+                put_gfn(d, op.arg2.src_mfn);
                 MEM_LOG("Error while copying to mfn %lx", mfn);
                 break;
             }
@@ -3301,7 +3331,9 @@
             unmap_domain_page(src);
 
             put_page_and_type(mfn_to_page(mfn));
+            put_gfn(d, op.arg1.mfn);
             put_page(mfn_to_page(src_mfn));
+            put_gfn(d, op.arg2.src_mfn);
             break;
         }
 
@@ -3489,14 +3521,14 @@
 
             req.ptr -= cmd;
             gmfn = req.ptr >> PAGE_SHIFT;
-            mfn = mfn_x(gfn_to_mfn(pt_owner, gmfn, &p2mt));
+            mfn = mfn_x(get_gfn(pt_owner, gmfn, &p2mt));
             if ( !p2m_is_valid(p2mt) )
-              mfn = INVALID_MFN;
+                mfn = INVALID_MFN;
 
             if ( p2m_is_paged(p2mt) )
             {
                 p2m_mem_paging_populate(pg_owner, gmfn);
-
+                put_gfn(pt_owner, gmfn);
                 rc = -ENOENT;
                 break;
             }
@@ -3504,6 +3536,7 @@
             if ( unlikely(!get_page_from_pagenr(mfn, pt_owner)) )
             {
                 MEM_LOG("Could not get page for normal update");
+                put_gfn(pt_owner, gmfn);
                 break;
             }
 
@@ -3516,6 +3549,7 @@
             if ( rc ) {
                 unmap_domain_page_with_cache(va, &mapcache);
                 put_page(page);
+                put_gfn(pt_owner, gmfn);
                 break;
             }
 
@@ -3527,16 +3561,20 @@
                 {
                     l1_pgentry_t l1e = l1e_from_intpte(req.val);
                     p2m_type_t l1e_p2mt;
-                    gfn_to_mfn(pg_owner, l1e_get_pfn(l1e), &l1e_p2mt);
+                    unsigned long l1egfn = l1e_get_pfn(l1e), l1emfn;
+    
+                    l1emfn = mfn_x(get_gfn(pg_owner, l1egfn, &l1e_p2mt));
 
                     if ( p2m_is_paged(l1e_p2mt) )
                     {
                         p2m_mem_paging_populate(pg_owner, l1e_get_pfn(l1e));
+                        put_gfn(pg_owner, l1egfn);
                         rc = -ENOENT;
                         break;
                     }
                     else if ( p2m_ram_paging_in_start == l1e_p2mt && !mfn_valid(mfn) )
                     {
+                        put_gfn(pg_owner, l1egfn);
                         rc = -ENOENT;
                         break;
                     }
@@ -3553,7 +3591,10 @@
                                                           l1e_get_pfn(l1e), 
                                                           0);
                             if ( rc )
+                            {
+                                put_gfn(pg_owner, l1egfn);
                                 break; 
+                            }
                         }
                     } 
 #endif
@@ -3561,27 +3602,33 @@
                     rc = mod_l1_entry(va, l1e, mfn,
                                       cmd == MMU_PT_UPDATE_PRESERVE_AD, v,
                                       pg_owner);
+                    put_gfn(pg_owner, l1egfn);
                 }
                 break;
                 case PGT_l2_page_table:
                 {
                     l2_pgentry_t l2e = l2e_from_intpte(req.val);
                     p2m_type_t l2e_p2mt;
-                    gfn_to_mfn(pg_owner, l2e_get_pfn(l2e), &l2e_p2mt);
+                    unsigned long l2egfn = l2e_get_pfn(l2e), l2emfn;
+
+                    l2emfn = mfn_x(get_gfn(pg_owner, l2egfn, &l2e_p2mt));
 
                     if ( p2m_is_paged(l2e_p2mt) )
                     {
-                        p2m_mem_paging_populate(pg_owner, l2e_get_pfn(l2e));
+                        p2m_mem_paging_populate(pg_owner, l2egfn);
+                        put_gfn(pg_owner, l2egfn);
                         rc = -ENOENT;
                         break;
                     }
                     else if ( p2m_ram_paging_in_start == l2e_p2mt && !mfn_valid(mfn) )
                     {
+                        put_gfn(pg_owner, l2egfn);
                         rc = -ENOENT;
                         break;
                     }
                     else if ( p2m_ram_shared == l2e_p2mt )
                     {
+                        put_gfn(pg_owner, l2egfn);
                         MEM_LOG("Unexpected attempt to map shared page.\n");
                         break;
                     }
@@ -3589,33 +3636,40 @@
 
                     rc = mod_l2_entry(va, l2e, mfn,
                                       cmd == MMU_PT_UPDATE_PRESERVE_AD, v);
+                    put_gfn(pg_owner, l2egfn);
                 }
                 break;
                 case PGT_l3_page_table:
                 {
                     l3_pgentry_t l3e = l3e_from_intpte(req.val);
                     p2m_type_t l3e_p2mt;
-                    gfn_to_mfn(pg_owner, l3e_get_pfn(l3e), &l3e_p2mt);
+                    unsigned long l3egfn = l3e_get_pfn(l3e), l3emfn;
+
+                    l3emfn = mfn_x(get_gfn(pg_owner, l3egfn, &l3e_p2mt));
 
                     if ( p2m_is_paged(l3e_p2mt) )
                     {
-                        p2m_mem_paging_populate(pg_owner, l3e_get_pfn(l3e));
+                        p2m_mem_paging_populate(pg_owner, l3egfn);
+                        put_gfn(pg_owner, l3egfn);
                         rc = -ENOENT;
                         break;
                     }
                     else if ( p2m_ram_paging_in_start == l3e_p2mt && !mfn_valid(mfn) )
                     {
+                        put_gfn(pg_owner, l3egfn);
                         rc = -ENOENT;
                         break;
                     }
                     else if ( p2m_ram_shared == l3e_p2mt )
                     {
+                        put_gfn(pg_owner, l3egfn);
                         MEM_LOG("Unexpected attempt to map shared page.\n");
                         break;
                     }
 
                     rc = mod_l3_entry(va, l3e, mfn,
                                       cmd == MMU_PT_UPDATE_PRESERVE_AD, 1, v);
+                    put_gfn(pg_owner, l3egfn);
                 }
                 break;
 #if CONFIG_PAGING_LEVELS >= 4
@@ -3623,27 +3677,33 @@
                 {
                     l4_pgentry_t l4e = l4e_from_intpte(req.val);
                     p2m_type_t l4e_p2mt;
-                    gfn_to_mfn(pg_owner, l4e_get_pfn(l4e), &l4e_p2mt);
+                    unsigned long l4egfn = l4e_get_pfn(l4e), l4emfn;
+
+                    l4emfn = mfn_x(get_gfn(pg_owner, l4egfn, &l4e_p2mt));
 
                     if ( p2m_is_paged(l4e_p2mt) )
                     {
-                        p2m_mem_paging_populate(pg_owner, l4e_get_pfn(l4e));
+                        p2m_mem_paging_populate(pg_owner, l4egfn);
+                        put_gfn(pg_owner, l4egfn);
                         rc = -ENOENT;
                         break;
                     }
                     else if ( p2m_ram_paging_in_start == l4e_p2mt && !mfn_valid(mfn) )
                     {
+                        put_gfn(pg_owner, l4egfn);
                         rc = -ENOENT;
                         break;
                     }
                     else if ( p2m_ram_shared == l4e_p2mt )
                     {
+                        put_gfn(pg_owner, l4egfn);
                         MEM_LOG("Unexpected attempt to map shared page.\n");
                         break;
                     }
 
                     rc = mod_l4_entry(va, l4e, mfn,
                                       cmd == MMU_PT_UPDATE_PRESERVE_AD, 1, v);
+                    put_gfn(pg_owner, l4egfn);
                 }
                 break;
 #endif
@@ -3667,6 +3727,7 @@
 
             unmap_domain_page_with_cache(va, &mapcache);
             put_page(page);
+            put_gfn(pt_owner, gmfn);
         }
         break;
 
@@ -3753,10 +3814,11 @@
     adjust_guest_l1e(nl1e, d);
 
     gmfn = pte_addr >> PAGE_SHIFT;
-    mfn = gmfn_to_mfn(d, gmfn);
+    mfn = get_gfn_untyped(d, gmfn);
 
     if ( unlikely(!get_page_from_pagenr(mfn, current->domain)) )
     {
+        put_gfn(d, gmfn);
         MEM_LOG("Could not get page for normal update");
         return GNTST_general_error;
     }
@@ -3794,6 +3856,7 @@
  failed:
     unmap_domain_page(va);
     put_page(page);
+    put_gfn(d, gmfn);
 
     return rc;
 }
@@ -3808,10 +3871,11 @@
     l1_pgentry_t ol1e;
 
     gmfn = addr >> PAGE_SHIFT;
-    mfn = gmfn_to_mfn(d, gmfn);
+    mfn = get_gfn_untyped(d, gmfn);
 
     if ( unlikely(!get_page_from_pagenr(mfn, current->domain)) )
     {
+        put_gfn(d, gmfn);
         MEM_LOG("Could not get page for normal update");
         return GNTST_general_error;
     }
@@ -3863,6 +3927,7 @@
  failed:
     unmap_domain_page(va);
     put_page(page);
+    put_gfn(d, gmfn);
     return rc;
 }
 
@@ -4054,9 +4119,10 @@
     if ( new_addr != 0 || (flags & GNTMAP_contains_pte) )
         return GNTST_general_error;
 
-    old_mfn = gfn_to_mfn(d, gfn, &type);
+    old_mfn = get_gfn(d, gfn, &type);
     if ( !p2m_is_grant(type) || mfn_x(old_mfn) != frame )
     {
+        put_gfn(d, gfn);
         gdprintk(XENLOG_WARNING,
                  "replace_grant_p2m_mapping: old mapping invalid (type %d, mfn 
%lx, frame %lx)\n",
                  type, mfn_x(old_mfn), frame);
@@ -4064,6 +4130,7 @@
     }
     guest_physmap_remove_page(d, gfn, frame, PAGE_ORDER_4K);
 
+    put_gfn(d, gfn);
     return GNTST_okay;
 }
 
@@ -4444,15 +4511,20 @@
     struct domain *d = v->domain;
     /* NB. There are 512 8-byte entries per GDT page. */
     int i, nr_pages = (entries + 511) / 512;
-    unsigned long mfn;
+    unsigned long mfn, *pfns;
 
     if ( entries > FIRST_RESERVED_GDT_ENTRY )
         return -EINVAL;
 
+    pfns = xmalloc_array(unsigned long, nr_pages);
+    if ( !pfns )
+        return -ENOMEM;
+
     /* Check the pages in the new GDT. */
     for ( i = 0; i < nr_pages; i++ )
     {
-        mfn = frames[i] = gmfn_to_mfn(d, frames[i]);
+        pfns[i] = frames[i];
+        mfn = frames[i] = get_gfn_untyped(d, frames[i]);
         if ( !mfn_valid(mfn) ||
              !get_page_and_type(mfn_to_page(mfn), d, PGT_seg_desc_page) )
             goto fail;
@@ -4468,13 +4540,19 @@
         v->arch.pv_vcpu.gdt_frames[i] = frames[i];
         l1e_write(&v->arch.perdomain_ptes[i],
                   l1e_from_pfn(frames[i], __PAGE_HYPERVISOR));
+        put_gfn(d, pfns[i]);
     }
 
+    xfree(pfns);
     return 0;
 
  fail:
     while ( i-- > 0 )
+    {
         put_page_and_type(mfn_to_page(frames[i]));
+        put_gfn(d, pfns[i]);
+    }
+    xfree(pfns);
     return -EINVAL;
 }
 
@@ -4518,15 +4596,21 @@
 
     *(u64 *)&d = desc;
 
-    mfn = gmfn_to_mfn(dom, gmfn);
+    mfn = get_gfn_untyped(dom, gmfn);
     if ( (((unsigned int)pa % sizeof(struct desc_struct)) != 0) ||
          !mfn_valid(mfn) ||
          !check_descriptor(dom, &d) )
+    {
+        put_gfn(dom, gmfn);
         return -EINVAL;
+    }
 
     page = mfn_to_page(mfn);
     if ( unlikely(!get_page(page, dom)) )
+    {
+        put_gfn(dom, gmfn);
         return -EINVAL;
+    }
 
     /* Check if the given frame is in use in an unsafe context. */
     switch ( page->u.inuse.type_info & PGT_type_mask )
@@ -4554,6 +4638,7 @@
 
  out:
     put_page(page);
+    put_gfn(dom, gmfn);
 
     return ret;
 }
@@ -4595,6 +4680,7 @@
 long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
 {
     struct page_info *page = NULL;
+    unsigned long gfn = 0; /* gcc ... */
     int rc;
 
     switch ( op )
@@ -4652,11 +4738,13 @@
         case XENMAPSPACE_gmfn:
         {
             p2m_type_t p2mt;
-
-            xatp.idx = mfn_x(gfn_to_mfn_unshare(d, xatp.idx, &p2mt));
+            gfn = xatp.idx;
+
+            xatp.idx = mfn_x(get_gfn_unshare(d, xatp.idx, &p2mt));
             /* If the page is still shared, exit early */
             if ( p2m_is_shared(p2mt) )
             {
+                put_gfn(d, gfn);
                 rcu_unlock_domain(d);
                 return -ENOMEM;
             }
@@ -4674,6 +4762,8 @@
         {
             if ( page )
                 put_page(page);
+            if ( xatp.space == XENMAPSPACE_gmfn )
+                put_gfn(d, gfn);
             rcu_unlock_domain(d);
             return -EINVAL;
         }
@@ -4684,7 +4774,7 @@
             put_page(page);
 
         /* Remove previously mapped page if it was present. */
-        prev_mfn = gmfn_to_mfn(d, xatp.gpfn);
+        prev_mfn = get_gfn_untyped(d, xatp.gpfn);
         if ( mfn_valid(prev_mfn) )
         {
             if ( is_xen_heap_mfn(prev_mfn) )
@@ -4694,6 +4784,8 @@
                 /* Normal domain memory is freed, to avoid leaking memory. */
                 guest_remove_page(d, xatp.gpfn);
         }
+        /* In the XENMAPSPACE_gmfn case we still hold a ref on the old page. */
+        put_gfn(d, xatp.gpfn);
 
         /* Unmap from old location, if any. */
         gpfn = get_gpfn_from_mfn(mfn);
@@ -4704,6 +4796,9 @@
         /* Map at new location. */
         rc = guest_physmap_add_page(d, xatp.gpfn, mfn, PAGE_ORDER_4K);
 
+        /* In the XENMAPSPACE_gmfn case, we took a ref and locked the p2m at the top. */
+        if ( xatp.space == XENMAPSPACE_gmfn )
+            put_gfn(d, gfn);
         domain_unlock(d);
 
         rcu_unlock_domain(d);
diff -r 9af6829a9273 -r 3622d7fae14d xen/arch/x86/mm/guest_walk.c
--- a/xen/arch/x86/mm/guest_walk.c      Fri Nov 11 17:46:19 2011 +0100
+++ b/xen/arch/x86/mm/guest_walk.c      Fri Nov 11 18:11:34 2011 +0000
@@ -86,30 +86,35 @@
     return 0;
 }
 
+/* If the map is non-NULL, we leave this function having 
+ * called get_gfn, you need to call put_gfn. */
 static inline void *map_domain_gfn(struct p2m_domain *p2m,
                                    gfn_t gfn, 
                                    mfn_t *mfn,
                                    p2m_type_t *p2mt,
                                    uint32_t *rc) 
 {
-    p2m_access_t a;
+    p2m_access_t p2ma;
 
     /* Translate the gfn, unsharing if shared */
-    *mfn = gfn_to_mfn_type_p2m(p2m, gfn_x(gfn), p2mt, &a, p2m_unshare, NULL);
+    *mfn = get_gfn_type_access(p2m, gfn_x(gfn), p2mt, &p2ma, p2m_unshare, NULL);
     if ( p2m_is_paging(*p2mt) )
     {
         ASSERT(!p2m_is_nestedp2m(p2m));
         p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));
+        __put_gfn(p2m, gfn_x(gfn));
         *rc = _PAGE_PAGED;
         return NULL;
     }
     if ( p2m_is_shared(*p2mt) )
     {
+        __put_gfn(p2m, gfn_x(gfn));
         *rc = _PAGE_SHARED;
         return NULL;
     }
     if ( !p2m_is_ram(*p2mt) ) 
     {
+        __put_gfn(p2m, gfn_x(gfn));
         *rc |= _PAGE_PRESENT;
         return NULL;
     }
@@ -120,6 +125,9 @@
 
 
 /* Walk the guest pagetables, after the manner of a hardware walker. */
+/* Because the walk is essentially random, it can cause a deadlock
+ * warning in the p2m locking code. This is highly unlikely to be an
+ * actual deadlock, because nothing walks the page tables in the
+ * opposite order. */
 uint32_t
 guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
                   unsigned long va, walk_t *gw, 
@@ -288,7 +296,6 @@
 #endif
             rc |= _PAGE_INVALID_BITS;
         }
-
         /* Increment the pfn by the right number of 4k pages.  
          * Mask out PAT and invalid bits. */
         start = _gfn((gfn_x(start) & ~GUEST_L2_GFN_MASK) +
@@ -347,12 +354,24 @@
 
  out:
 #if GUEST_PAGING_LEVELS == 4
-    if ( l3p ) unmap_domain_page(l3p);
+    if ( l3p ) 
+    {
+        unmap_domain_page(l3p);
+        __put_gfn(p2m, gfn_x(guest_l4e_get_gfn(gw->l4e)));
+    }
 #endif
 #if GUEST_PAGING_LEVELS >= 3
-    if ( l2p ) unmap_domain_page(l2p);
+    if ( l2p ) 
+    {
+        unmap_domain_page(l2p);
+        __put_gfn(p2m, gfn_x(guest_l3e_get_gfn(gw->l3e))); 
+    }
 #endif
-    if ( l1p ) unmap_domain_page(l1p);
+    if ( l1p ) 
+    {
+        unmap_domain_page(l1p);
+        __put_gfn(p2m, gfn_x(guest_l2e_get_gfn(gw->l2e)));
+    }
 
     return rc;
 }
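
The out: block above pairs every unmap_domain_page() with a __put_gfn()
for the level that was actually mapped. As a minimal, self-contained C
sketch of that cleanup discipline -- all names here (map_gfn,
put_gfn_toy, and so on) are illustrative stand-ins, not the
hypervisor's API:

    #include <stdio.h>
    #include <stdlib.h>

    struct level { void *map; unsigned long gfn; };

    /* Toy stand-in for map_domain_gfn(): "acquires" the gfn on success. */
    static void *map_gfn(unsigned long gfn)
    {
        void *m = malloc(1);
        if ( m != NULL )
            printf("get_gfn(%lu)\n", gfn);
        return m;
    }

    static void unmap(void *m) { free(m); }  /* stand-in for unmap_domain_page() */
    static void put_gfn_toy(unsigned long g) { printf("put_gfn(%lu)\n", g); }

    /* Walk three levels; on any exit, release exactly what was taken. */
    static int walk(const unsigned long gfns[3])
    {
        struct level l[3] = { { 0 } };
        int i, rc = -1;

        for ( i = 0; i < 3; i++ )
        {
            l[i].gfn = gfns[i];
            if ( (l[i].map = map_gfn(gfns[i])) == NULL )
                goto out;            /* partial walk: unwind below */
        }
        rc = 0;
     out:
        while ( i-- > 0 )            /* reverse order, only mapped levels */
        {
            unmap(l[i].map);
            put_gfn_toy(l[i].gfn);
        }
        return rc;
    }

    int main(void)
    {
        const unsigned long gfns[3] = { 4, 8, 15 };
        return walk(gfns);
    }

The `while ( i-- > 0 )` unwind releases only the levels that were
successfully mapped, which is exactly the property the out: labels in
the hunk above preserve.
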
diff -r 9af6829a9273 -r 3622d7fae14d xen/arch/x86/mm/hap/guest_walk.c
--- a/xen/arch/x86/mm/hap/guest_walk.c  Fri Nov 11 17:46:19 2011 +0100
+++ b/xen/arch/x86/mm/hap/guest_walk.c  Fri Nov 11 18:11:34 2011 +0000
@@ -56,26 +56,30 @@
     p2m_type_t p2mt;
     p2m_access_t p2ma;
     walk_t gw;
+    unsigned long top_gfn;
 
     /* Get the top-level table's MFN */
-    top_mfn = gfn_to_mfn_type_p2m(p2m, cr3 >> PAGE_SHIFT, 
-                                  &p2mt, &p2ma, p2m_unshare, NULL);
+    top_gfn = cr3 >> PAGE_SHIFT;
+    top_mfn = get_gfn_type_access(p2m, top_gfn, &p2mt, &p2ma, p2m_unshare, NULL);
     if ( p2m_is_paging(p2mt) )
     {
         ASSERT(!p2m_is_nestedp2m(p2m));
         p2m_mem_paging_populate(p2m->domain, cr3 >> PAGE_SHIFT);
 
         pfec[0] = PFEC_page_paged;
+        __put_gfn(p2m, top_gfn);
         return INVALID_GFN;
     }
     if ( p2m_is_shared(p2mt) )
     {
         pfec[0] = PFEC_page_shared;
+        __put_gfn(p2m, top_gfn);
         return INVALID_GFN;
     }
     if ( !p2m_is_ram(p2mt) )
     {
         pfec[0] &= ~PFEC_page_present;
+        __put_gfn(p2m, top_gfn);
         return INVALID_GFN;
     }
 
@@ -87,26 +91,31 @@
 #endif
     missing = guest_walk_tables(v, p2m, ga, &gw, pfec[0], top_mfn, top_map);
     unmap_domain_page(top_map);
+    __put_gfn(p2m, top_gfn);
 
     /* Interpret the answer */
     if ( missing == 0 )
     {
         gfn_t gfn = guest_l1e_get_gfn(gw.l1e);
-        gfn_to_mfn_type_p2m(p2m, gfn_x(gfn), &p2mt, &p2ma, p2m_unshare, NULL);
+        (void)get_gfn_type_access(p2m, gfn_x(gfn), &p2mt, &p2ma, p2m_unshare, NULL);
         if ( p2m_is_paging(p2mt) )
         {
             ASSERT(!p2m_is_nestedp2m(p2m));
             p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));
 
             pfec[0] = PFEC_page_paged;
+            __put_gfn(p2m, gfn_x(gfn));
             return INVALID_GFN;
         }
         if ( p2m_is_shared(p2mt) )
         {
             pfec[0] = PFEC_page_shared;
+            __put_gfn(p2m, gfn_x(gfn));
             return INVALID_GFN;
         }
 
+        __put_gfn(p2m, gfn_x(gfn));
+
         if ( page_order )
             *page_order = guest_walk_to_page_order(&gw);
 
diff -r 9af6829a9273 -r 3622d7fae14d xen/arch/x86/mm/hap/nested_hap.c
--- a/xen/arch/x86/mm/hap/nested_hap.c  Fri Nov 11 17:46:19 2011 +0100
+++ b/xen/arch/x86/mm/hap/nested_hap.c  Fri Nov 11 18:11:34 2011 +0000
@@ -146,22 +146,29 @@
     mfn_t mfn;
     p2m_type_t p2mt;
     p2m_access_t p2ma;
+    int rc;
 
     /* walk L0 P2M table */
-    mfn = gfn_to_mfn_type_p2m(p2m, L1_gpa >> PAGE_SHIFT, &p2mt, &p2ma, 
+    mfn = get_gfn_type_access(p2m, L1_gpa >> PAGE_SHIFT, &p2mt, &p2ma, 
                               p2m_query, page_order);
 
+    rc = NESTEDHVM_PAGEFAULT_MMIO;
     if ( p2m_is_mmio(p2mt) )
-        return NESTEDHVM_PAGEFAULT_MMIO;
+        goto out;
 
+    rc = NESTEDHVM_PAGEFAULT_ERROR;
     if ( p2m_is_paging(p2mt) || p2m_is_shared(p2mt) || !p2m_is_ram(p2mt) )
-        return NESTEDHVM_PAGEFAULT_ERROR;
+        goto out;
 
+    rc = NESTEDHVM_PAGEFAULT_ERROR;
     if ( !mfn_valid(mfn) )
-        return NESTEDHVM_PAGEFAULT_ERROR;
+        goto out;
 
     *L0_gpa = (mfn_x(mfn) << PAGE_SHIFT) + (L1_gpa & ~PAGE_MASK);
-    return NESTEDHVM_PAGEFAULT_DONE;
+    rc = NESTEDHVM_PAGEFAULT_DONE;
+out:
+    __put_gfn(p2m, L1_gpa >> PAGE_SHIFT);
+    return rc;
 }
 
 /* This function uses L2_gpa to walk the P2M page table in L1. If the 
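
The nested_hap rewrite above converts three early returns into a single
exit point so the __put_gfn() cannot be skipped on any path. A
self-contained sketch of the same idiom, with invented names standing
in for a generic acquire/validate/release function:

    #include <stdio.h>

    /* Acquire, validate with a single exit, release exactly once. */
    static int check_range(int value)
    {
        int rc;

        printf("get(%d)\n", value);          /* the "acquire" */

        rc = -1;
        if ( value < 0 )
            goto out;

        rc = -2;
        if ( value > 100 )
            goto out;

        rc = 0;                              /* success */
     out:
        printf("put(%d), rc=%d\n", value, rc);  /* runs on every path */
        return rc;
    }

    int main(void)
    {
        check_range(42);
        check_range(-5);
        return 0;
    }
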
diff -r 9af6829a9273 -r 3622d7fae14d xen/arch/x86/mm/mem_event.c
--- a/xen/arch/x86/mm/mem_event.c       Fri Nov 11 17:46:19 2011 +0100
+++ b/xen/arch/x86/mm/mem_event.c       Fri Nov 11 18:11:34 2011 +0000
@@ -47,7 +47,7 @@
     unsigned long ring_addr = mec->ring_addr;
     unsigned long shared_addr = mec->shared_addr;
     l1_pgentry_t l1e;
-    unsigned long gfn;
+    unsigned long shared_gfn = 0, ring_gfn = 0; /* gcc ... */
     p2m_type_t p2mt;
     mfn_t ring_mfn;
     mfn_t shared_mfn;
@@ -60,23 +60,36 @@
 
     /* Get MFN of ring page */
     guest_get_eff_l1e(v, ring_addr, &l1e);
-    gfn = l1e_get_pfn(l1e);
-    ring_mfn = gfn_to_mfn(dom_mem_event, gfn, &p2mt);
+    ring_gfn = l1e_get_pfn(l1e);
+    /* We're grabbing these two in an order that could deadlock
+     * dom0 if 1. it were an HVM domain, 2. there were two concurrent
+     * enables, and 3. the two gfns in each enable criss-crossed
+     * 2MB regions. Duly noted... */
+    ring_mfn = get_gfn(dom_mem_event, ring_gfn, &p2mt);
 
     if ( unlikely(!mfn_valid(mfn_x(ring_mfn))) )
+    {
+        put_gfn(dom_mem_event, ring_gfn);
         return -EINVAL;
+    }
 
     /* Get MFN of shared page */
     guest_get_eff_l1e(v, shared_addr, &l1e);
-    gfn = l1e_get_pfn(l1e);
-    shared_mfn = gfn_to_mfn(dom_mem_event, gfn, &p2mt);
+    shared_gfn = l1e_get_pfn(l1e);
+    shared_mfn = get_gfn(dom_mem_event, shared_gfn, &p2mt);
 
     if ( unlikely(!mfn_valid(mfn_x(shared_mfn))) )
+    {
+        put_gfn(dom_mem_event, ring_gfn);
+        put_gfn(dom_mem_event, shared_gfn);
         return -EINVAL;
+    }
 
     /* Map ring and shared pages */
     med->ring_page = map_domain_page(mfn_x(ring_mfn));
     med->shared_page = map_domain_page(mfn_x(shared_mfn));
+    put_gfn(dom_mem_event, ring_gfn);
+    put_gfn(dom_mem_event, shared_gfn); 
 
     /* Allocate event channel */
     rc = alloc_unbound_xen_event_channel(d->vcpu[0],
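
The criss-cross scenario noted in the comment above is the classic
two-lock deadlock; if it ever became a real concern, the usual remedy
is to take the two gfns in a canonical order. A hedged, runnable sketch
of that discipline, using a toy lock table rather than Xen's locking:

    #include <pthread.h>
    #include <stdio.h>

    #define NLOCKS 8
    static pthread_mutex_t lk[NLOCKS];

    /* Take two gfn locks in ascending lock-index order so concurrent
     * callers with criss-crossed pairs cannot deadlock. */
    static void lock_pair(unsigned long a, unsigned long b)
    {
        unsigned long ia = a % NLOCKS, ib = b % NLOCKS;
        unsigned long lo = (ia < ib) ? ia : ib, hi = (ia < ib) ? ib : ia;

        pthread_mutex_lock(&lk[lo]);
        if ( hi != lo )
            pthread_mutex_lock(&lk[hi]);
    }

    static void unlock_pair(unsigned long a, unsigned long b)
    {
        unsigned long ia = a % NLOCKS, ib = b % NLOCKS;
        unsigned long lo = (ia < ib) ? ia : ib, hi = (ia < ib) ? ib : ia;

        if ( hi != lo )
            pthread_mutex_unlock(&lk[hi]);
        pthread_mutex_unlock(&lk[lo]);
    }

    int main(void)
    {
        int i;

        for ( i = 0; i < NLOCKS; i++ )
            pthread_mutex_init(&lk[i], NULL);

        lock_pair(5, 2);          /* same order as lock_pair(2, 5) */
        puts("both gfns held");
        unlock_pair(5, 2);
        return 0;
    }
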
diff -r 9af6829a9273 -r 3622d7fae14d xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c     Fri Nov 11 17:46:19 2011 +0100
+++ b/xen/arch/x86/mm/mem_sharing.c     Fri Nov 11 18:11:34 2011 +0000
@@ -227,7 +227,7 @@
                             g->domain, g->gfn, mfn_x(e->mfn));
                     continue;
                 }
-                mfn = gfn_to_mfn(d, g->gfn, &t); 
+                mfn = get_gfn_unlocked(d, g->gfn, &t); 
                 if(mfn_x(mfn) != mfn_x(e->mfn))
                     MEM_SHARING_DEBUG("Incorrect P2M for d=%d, PFN=%lx."
                                       "Expecting MFN=%ld, got %ld\n",
@@ -335,7 +335,7 @@
     p2m_type_t p2mt;
     mfn_t mfn;
 
-    mfn = gfn_to_mfn(d, gfn, &p2mt);
+    mfn = get_gfn_unlocked(d, gfn, &p2mt);
 
     printk("Debug for domain=%d, gfn=%lx, ", 
             d->domain_id, 
@@ -460,7 +460,7 @@
     *phandle = 0UL;
 
     shr_lock(); 
-    mfn = gfn_to_mfn(d, gfn, &p2mt);
+    mfn = get_gfn(d, gfn, &p2mt);
 
     /* Check if mfn is valid */
     ret = -EINVAL;
@@ -524,6 +524,7 @@
     ret = 0;
 
 out:
+    put_gfn(d, gfn);
     shr_unlock();
     return ret;
 }
@@ -593,14 +594,18 @@
     shr_handle_t handle;
     struct list_head *le;
 
+    /* Remove the gfn_info from the list */
+   
+    /* This is one of the reasons why we can't enforce ordering
+     * between shr_lock and the fine-grained p2m locks in mm-lock:
+     * callers may arrive here already holding the lock for this gfn. */
     shr_lock();
     mem_sharing_audit();
-    
-    /* Remove the gfn_info from the list */
-    mfn = gfn_to_mfn(d, gfn, &p2mt);
+    mfn = get_gfn(d, gfn, &p2mt);
     
     /* Has someone already unshared it? */
     if (!p2m_is_shared(p2mt)) {
+        put_gfn(d, gfn);
         shr_unlock();
         return 0;
     }
@@ -634,6 +639,7 @@
             /* Even though we don't allocate a private page, we have to account
              * for the MFN that originally backed this PFN. */
             atomic_dec(&nr_saved_mfns);
+        put_gfn(d, gfn);
         shr_unlock();
         put_page_and_type(page);
         if(last_gfn && 
@@ -653,6 +659,7 @@
         /* We've failed to obtain memory for private page. Need to re-add the
          * gfn_info to relevant list */
         list_add(&gfn_info->list, &hash_entry->gfns);
+        put_gfn(d, gfn);
         shr_unlock();
         return -ENOMEM;
     }
@@ -663,6 +670,13 @@
     unmap_domain_page(s);
     unmap_domain_page(t);
 
+    /* NOTE: set_shared_p2m_entry will switch the underlying mfn. If
+     * we ever take a page reference within get_gfn, the correct
+     * sequence here would be
+       get_page(page);
+       put_page(old_page);
+     * so that the ref to the old page is dropped, and a ref to
+     * the new page is obtained, to later be dropped in put_gfn. */
     BUG_ON(set_shared_p2m_entry(d, gfn, page_to_mfn(page)) == 0);
     put_page_and_type(old_page);
 
@@ -683,6 +697,7 @@
     /* Update m2p entry */
     set_gpfn_from_mfn(mfn_x(page_to_mfn(page)), gfn);
 
+    put_gfn(d, gfn);
     shr_unlock();
     return 0;
 }
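
The NOTE above about set_shared_p2m_entry() switching the underlying
mfn describes a reference handoff: take a reference on the replacement
page before dropping the one on the page being replaced, so the gfn
never points at a page with no reference held. A toy model of that
ordering, with illustrative refcounts only (not Xen's page accounting):

    #include <assert.h>
    #include <stdio.h>

    struct page { int refcount; };

    static void get_page(struct page *p) { p->refcount++; }
    static void put_page(struct page *p) { assert(p->refcount > 0); p->refcount--; }

    /* Swap the page backing a gfn: reference the replacement first,
     * then drop the old reference, so there is no window with none held. */
    static struct page *swap_backing(struct page *oldp, struct page *newp)
    {
        get_page(newp);
        put_page(oldp);
        return newp;
    }

    int main(void)
    {
        struct page shared  = { 1 };      /* currently backing the gfn */
        struct page priv    = { 0 };      /* the replacement */
        struct page *backing = &shared;

        backing = swap_backing(backing, &priv);
        printf("shared=%d priv=%d swapped=%d\n",
               shared.refcount, priv.refcount, backing == &priv);
        return 0;
    }
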
diff -r 9af6829a9273 -r 3622d7fae14d xen/arch/x86/mm/p2m-pod.c
--- a/xen/arch/x86/mm/p2m-pod.c Fri Nov 11 17:46:19 2011 +0100
+++ b/xen/arch/x86/mm/p2m-pod.c Fri Nov 11 18:11:34 2011 +0000
@@ -531,9 +531,10 @@
     /* FIXME: Add contiguous; query for PSE entries? */
     for ( i=0; i<(1<<order); i++)
     {
+        p2m_access_t a;
         p2m_type_t t;
 
-        gfn_to_mfn_query(d, gpfn + i, &t);
+        (void)p2m->get_entry(p2m, gpfn + i, &t, &a, p2m_query, NULL);
 
         if ( t == p2m_populate_on_demand )
             pod++;
@@ -572,8 +573,9 @@
     {
         mfn_t mfn;
         p2m_type_t t;
+        p2m_access_t a;
 
-        mfn = gfn_to_mfn_query(d, gpfn + i, &t);
+        mfn = p2m->get_entry(p2m, gpfn + i, &t, &a, p2m_query, NULL);
         if ( t == p2m_populate_on_demand )
         {
            set_p2m_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid, p2m->default_access);
@@ -653,8 +655,8 @@
      * and aligned, and mapping them. */
     for ( i=0; i<SUPERPAGE_PAGES; i++ )
     {
-        
-        mfn = gfn_to_mfn_query(d, gfn + i, &type);
+        p2m_access_t a; 
+        mfn = p2m->get_entry(p2m, gfn + i, &type, &a, p2m_query, NULL);
 
         if ( i == 0 )
         {
@@ -782,7 +784,8 @@
     /* First, get the gfn list, translate to mfns, and map the pages. */
     for ( i=0; i<count; i++ )
     {
-        mfns[i] = gfn_to_mfn_query(d, gfns[i], types + i);
+        p2m_access_t a;
+        mfns[i] = p2m->get_entry(p2m, gfns[i], types + i, &a, p2m_query, NULL);
        /* If this is ram, and not a pagetable or from the xen heap, and probably not mapped
           elsewhere, map it; otherwise, skip. */
         if ( p2m_is_ram(types[i])
@@ -923,7 +926,8 @@
     /* FIXME: Figure out how to avoid superpages */
     for ( i=p2m->pod.reclaim_single; i > 0 ; i-- )
     {
-        gfn_to_mfn_query(p2m->domain, i, &t );
+        p2m_access_t a;
+        (void)p2m->get_entry(p2m, i, &t, &a, p2m_query, NULL);
         if ( p2m_is_ram(t) )
         {
             gfns[j] = i;
@@ -1112,7 +1116,8 @@
     /* Make sure all gpfns are unused */
     for ( i = 0; i < (1UL << order); i++ )
     {
-        omfn = gfn_to_mfn_query(d, gfn + i, &ot);
+        p2m_access_t a;
+        omfn = p2m->get_entry(p2m, gfn + i, &ot, &a, p2m_query, NULL);
         if ( p2m_is_ram(ot) )
         {
             printk("%s: gfn_to_mfn returned type %d!\n",
diff -r 9af6829a9273 -r 3622d7fae14d xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Fri Nov 11 17:46:19 2011 +0100
+++ b/xen/arch/x86/mm/p2m.c     Fri Nov 11 18:11:34 2011 +0000
@@ -144,7 +144,7 @@
     p2m_unlock(p2m);
 }
 
-mfn_t gfn_to_mfn_type_p2m(struct p2m_domain *p2m, unsigned long gfn,
+mfn_t get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn,
                     p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
                     unsigned int *page_order)
 {
@@ -343,28 +343,27 @@
 #ifdef __x86_64__
     unsigned long gfn;
     p2m_type_t t;
-    p2m_access_t a;
     mfn_t mfn;
 #endif
 
     if (p2m == NULL)
         return;
 
+    p2m_lock(p2m);
+
 #ifdef __x86_64__
     for ( gfn=0; gfn < p2m->max_mapped_pfn; gfn++ )
     {
-        mfn = gfn_to_mfn_type_p2m(p2m, gfn, &t, &a, p2m_query, NULL);
+        p2m_access_t a;
+        mfn = p2m->get_entry(p2m, gfn, &t, &a, p2m_query, NULL);
         if ( mfn_valid(mfn) && (t == p2m_ram_shared) )
         {
             ASSERT(!p2m_is_nestedp2m(p2m));
             BUG_ON(mem_sharing_unshare_page(d, gfn, MEM_SHARING_DESTROY_GFN));
         }
-
     }
 #endif
 
-    p2m_lock(p2m);
-
     p2m->phys_table = pagetable_null();
 
     while ( (pg = page_list_remove_head(&p2m->pages)) )
@@ -454,6 +453,7 @@
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
     unsigned long i, ogfn;
     p2m_type_t ot;
+    p2m_access_t a;
     mfn_t omfn;
     int pod_count = 0;
     int rc = 0;
@@ -489,12 +489,13 @@
     /* First, remove m->p mappings for existing p->m mappings */
     for ( i = 0; i < (1UL << page_order); i++ )
     {
-        omfn = gfn_to_mfn_query(d, gfn + i, &ot);
+        omfn = p2m->get_entry(p2m, gfn + i, &ot, &a, p2m_query, NULL);
         if ( p2m_is_grant(ot) )
         {
             /* Really shouldn't be unmapping grant maps this way */
             domain_crash(d);
             p2m_unlock(p2m);
+            
             return -EINVAL;
         }
         else if ( p2m_is_ram(ot) )
@@ -528,7 +529,7 @@
              * address */
             P2M_DEBUG("aliased! mfn=%#lx, old gfn=%#lx, new gfn=%#lx\n",
                       mfn + i, ogfn, gfn + i);
-            omfn = gfn_to_mfn_query(d, ogfn, &ot);
+            omfn = p2m->get_entry(p2m, ogfn, &ot, &a, p2m_query, NULL);
             if ( p2m_is_ram(ot) )
             {
                 ASSERT(mfn_valid(omfn));
@@ -577,6 +578,7 @@
 p2m_type_t p2m_change_type(struct domain *d, unsigned long gfn, 
                            p2m_type_t ot, p2m_type_t nt)
 {
+    p2m_access_t a;
     p2m_type_t pt;
     mfn_t mfn;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
@@ -585,7 +587,7 @@
 
     p2m_lock(p2m);
 
-    mfn = gfn_to_mfn_query(d, gfn, &pt);
+    mfn = p2m->get_entry(p2m, gfn, &pt, &a, p2m_query, NULL);
     if ( pt == ot )
         set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, nt, p2m->default_access);
 
@@ -600,6 +602,7 @@
                            unsigned long start, unsigned long end,
                            p2m_type_t ot, p2m_type_t nt)
 {
+    p2m_access_t a;
     p2m_type_t pt;
     unsigned long gfn;
     mfn_t mfn;
@@ -612,7 +615,7 @@
 
     for ( gfn = start; gfn < end; gfn++ )
     {
-        mfn = gfn_to_mfn_query(d, gfn, &pt);
+        mfn = p2m->get_entry(p2m, gfn, &pt, &a, p2m_query, NULL);
         if ( pt == ot )
            set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, nt, p2m->default_access);
     }
@@ -629,6 +632,7 @@
 set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
 {
     int rc = 0;
+    p2m_access_t a;
     p2m_type_t ot;
     mfn_t omfn;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
@@ -637,7 +641,7 @@
         return 0;
 
     p2m_lock(p2m);
-    omfn = gfn_to_mfn_query(d, gfn, &ot);
+    omfn = p2m->get_entry(p2m, gfn, &ot, &a, p2m_query, NULL);
     if ( p2m_is_grant(ot) )
     {
         p2m_unlock(p2m);
@@ -657,7 +661,7 @@
     if ( 0 == rc )
         gdprintk(XENLOG_ERR,
             "set_mmio_p2m_entry: set_p2m_entry failed! mfn=%08lx\n",
-            mfn_x(gfn_to_mfn_query(d, gfn, &ot)));
+            mfn_x(get_gfn_query_unlocked(p2m->domain, gfn, &ot)));
     return rc;
 }
 
@@ -666,6 +670,7 @@
 {
     int rc = 0;
     mfn_t mfn;
+    p2m_access_t a;
     p2m_type_t t;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
@@ -673,7 +678,7 @@
         return 0;
 
     p2m_lock(p2m);
-    mfn = gfn_to_mfn_query(d, gfn, &t);
+    mfn = p2m->get_entry(p2m, gfn, &t, &a, p2m_query, NULL);
 
     /* Do not use mfn_valid() here as it will usually fail for MMIO pages. */
     if ( (INVALID_MFN == mfn_x(mfn)) || (t != p2m_mmio_direct) )
@@ -696,6 +701,7 @@
 {
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
     int rc = 0;
+    p2m_access_t a;
     p2m_type_t ot;
     mfn_t omfn;
 
@@ -703,7 +709,7 @@
         return 0;
 
     p2m_lock(p2m);
-    omfn = gfn_to_mfn_query(p2m->domain, gfn, &ot);
+    omfn = p2m->get_entry(p2m, gfn, &ot, &a, p2m_query, NULL);
     /* At the moment we only allow p2m change if gfn has already been made
      * sharable first */
     ASSERT(p2m_is_shared(ot));
@@ -717,7 +723,7 @@
     if ( 0 == rc )
         gdprintk(XENLOG_ERR,
             "set_shared_p2m_entry: set_p2m_entry failed! mfn=%08lx\n",
-            mfn_x(gfn_to_mfn_query(d, gfn, &ot)));
+            mfn_x(get_gfn_query_unlocked(p2m->domain, gfn, &ot)));
     return rc;
 }
 
@@ -1168,7 +1174,7 @@
 {
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
     unsigned long pfn;
-    p2m_access_t a;
+    p2m_access_t a, _a;
     p2m_type_t t;
     mfn_t mfn;
     int rc = 0;
@@ -1202,7 +1208,7 @@
     p2m_lock(p2m);
     for ( pfn = start_pfn; pfn < start_pfn + nr; pfn++ )
     {
-        mfn = gfn_to_mfn_query(d, pfn, &t);
+        mfn = p2m->get_entry(p2m, pfn, &t, &_a, p2m_query, NULL);
         if ( p2m->set_entry(p2m, pfn, mfn, PAGE_ORDER_4K, t, a) == 0 )
         {
             rc = -ENOMEM;
diff -r 9af6829a9273 -r 3622d7fae14d xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Fri Nov 11 17:46:19 2011 +0100
+++ b/xen/arch/x86/mm/shadow/common.c   Fri Nov 11 18:11:34 2011 +0000
@@ -3676,7 +3676,7 @@
 
         /* Iterate over VRAM to track dirty bits. */
         for ( i = 0; i < nr; i++ ) {
-            mfn_t mfn = gfn_to_mfn_query(d, begin_pfn + i, &t);
+            mfn_t mfn = get_gfn_query(d, begin_pfn + i, &t);
             struct page_info *page;
             int dirty = 0;
             paddr_t sl1ma = dirty_vram->sl1ma[i];
@@ -3741,6 +3741,8 @@
                 }
             }
 
+            put_gfn(d, begin_pfn + i);
+
             if ( dirty )
             {
                 dirty_vram->dirty_bitmap[i / 8] |= 1 << (i % 8);
@@ -3761,7 +3763,7 @@
                 /* was clean for more than two seconds, try to disable guest
                  * write access */
                 for ( i = begin_pfn; i < end_pfn; i++ ) {
-                    mfn_t mfn = gfn_to_mfn_query(d, i, &t);
+                    mfn_t mfn = get_gfn_query_unlocked(d, i, &t);
                     if (mfn_x(mfn) != INVALID_MFN)
                        flush_tlb |= sh_remove_write_access(d->vcpu[0], mfn, 1, 0);
                 }
diff -r 9af6829a9273 -r 3622d7fae14d xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Fri Nov 11 17:46:19 2011 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c    Fri Nov 11 18:11:34 2011 +0000
@@ -2265,7 +2265,7 @@
     if ( guest_l4e_get_flags(new_gl4e) & _PAGE_PRESENT )
     {
         gfn_t gl3gfn = guest_l4e_get_gfn(new_gl4e);
-        mfn_t gl3mfn = gfn_to_mfn_query(d, gl3gfn, &p2mt);
+        mfn_t gl3mfn = get_gfn_query(d, gl3gfn, &p2mt);
         if ( p2m_is_ram(p2mt) )
             sl3mfn = get_shadow_status(v, gl3mfn, SH_type_l3_shadow);
         else if ( p2mt != p2m_populate_on_demand )
@@ -2275,6 +2275,7 @@
         if ( mfn_valid(sl3mfn) )
             shadow_resync_all(v);
 #endif
+        put_gfn(d, gfn_x(gl3gfn));
     }
     l4e_propagate_from_guest(v, new_gl4e, sl3mfn, &new_sl4e, ft_prefetch);
 
@@ -2322,7 +2323,7 @@
     if ( guest_l3e_get_flags(new_gl3e) & _PAGE_PRESENT )
     {
         gfn_t gl2gfn = guest_l3e_get_gfn(new_gl3e);
-        mfn_t gl2mfn = gfn_to_mfn_query(v->domain, gl2gfn, &p2mt);
+        mfn_t gl2mfn = get_gfn_query(v->domain, gl2gfn, &p2mt);
         if ( p2m_is_ram(p2mt) )
             sl2mfn = get_shadow_status(v, gl2mfn, SH_type_l2_shadow);
         else if ( p2mt != p2m_populate_on_demand )
@@ -2332,6 +2333,7 @@
         if ( mfn_valid(sl2mfn) )
             shadow_resync_all(v);
 #endif
+        put_gfn(v->domain, gfn_x(gl2gfn));
     }
     l3e_propagate_from_guest(v, new_gl3e, sl2mfn, &new_sl3e, ft_prefetch);
     result |= shadow_set_l3e(v, sl3p, new_sl3e, sl3mfn);
@@ -2371,11 +2373,12 @@
         }
         else
         {
-            mfn_t gl1mfn = gfn_to_mfn_query(v->domain, gl1gfn, &p2mt);
+            mfn_t gl1mfn = get_gfn_query(v->domain, gl1gfn, &p2mt);
             if ( p2m_is_ram(p2mt) )
                 sl1mfn = get_shadow_status(v, gl1mfn, SH_type_l1_shadow); 
             else if ( p2mt != p2m_populate_on_demand )
                 result |= SHADOW_SET_ERROR;
+            put_gfn(v->domain, gfn_x(gl1gfn));
         }
     }
     l2e_propagate_from_guest(v, new_gl2e, sl1mfn, &new_sl2e, ft_prefetch);
@@ -2441,7 +2444,7 @@
     perfc_incr(shadow_validate_gl1e_calls);
 
     gfn = guest_l1e_get_gfn(new_gl1e);
-    gmfn = gfn_to_mfn_query(v->domain, gfn, &p2mt);
+    gmfn = get_gfn_query(v->domain, gfn, &p2mt);
 
     l1e_propagate_from_guest(v, new_gl1e, gmfn, &new_sl1e, ft_prefetch, p2mt);
     result |= shadow_set_l1e(v, sl1p, new_sl1e, p2mt, sl1mfn);
@@ -2463,6 +2466,7 @@
     }
 #endif /* OOS */
 
+    put_gfn(v->domain, gfn_x(gfn));
     return result;
 }
 
@@ -2501,10 +2505,11 @@
             shadow_l1e_t nsl1e;
 
             gfn = guest_l1e_get_gfn(gl1e);
-            gmfn = gfn_to_mfn_query(v->domain, gfn, &p2mt);
+            gmfn = get_gfn_query(v->domain, gfn, &p2mt);
             l1e_propagate_from_guest(v, gl1e, gmfn, &nsl1e, ft_prefetch, p2mt);
             rc |= shadow_set_l1e(v, sl1p, nsl1e, p2mt, sl1mfn);
 
+            put_gfn(v->domain, gfn_x(gfn));
             *snpl1p = gl1e;
         }
     });
@@ -2824,7 +2829,7 @@
 
         /* Look at the gfn that the l1e is pointing at */
         gfn = guest_l1e_get_gfn(gl1e);
-        gmfn = gfn_to_mfn_query(v->domain, gfn, &p2mt);
+        gmfn = get_gfn_query(v->domain, gfn, &p2mt);
 
         /* Propagate the entry.  */
         l1e_propagate_from_guest(v, gl1e, gmfn, &sl1e, ft_prefetch, p2mt);
@@ -2834,6 +2839,8 @@
         if ( snpl1p != NULL )
             snpl1p[i] = gl1e;
 #endif /* OOS */
+
+        put_gfn(v->domain, gfn_x(gfn));
     }
     if ( gl1p != NULL )
         sh_unmap_domain_page(gl1p);
@@ -3182,7 +3189,7 @@
 
     /* What mfn is the guest trying to access? */
     gfn = guest_l1e_get_gfn(gw.l1e);
-    gmfn = gfn_to_mfn_guest(d, gfn, &p2mt);
+    gmfn = get_gfn_guest(d, gfn, &p2mt);
 
     if ( shadow_mode_refcounts(d) && 
          ((!p2m_is_valid(p2mt) && !p2m_is_grant(p2mt)) ||
@@ -3192,6 +3199,7 @@
         SHADOW_PRINTK("BAD gfn=%"SH_PRI_gfn" gmfn=%"PRI_mfn"\n", 
                       gfn_x(gfn), mfn_x(gmfn));
         reset_early_unshadow(v);
+        put_gfn(d, gfn_x(gfn));
         goto propagate;
     }
 
@@ -3236,6 +3244,7 @@
     if ( rc & GW_RMWR_REWALK )
     {
         paging_unlock(d);
+        put_gfn(d, gfn_x(gfn));
         goto rewalk;
     }
 #endif /* OOS */
@@ -3244,6 +3253,7 @@
     {
         perfc_incr(shadow_inconsistent_gwalk);
         paging_unlock(d);
+        put_gfn(d, gfn_x(gfn));
         goto rewalk;
     }
 
@@ -3270,6 +3280,7 @@
         ASSERT(d->is_shutting_down);
 #endif
         paging_unlock(d);
+        put_gfn(d, gfn_x(gfn));
         trace_shadow_gen(TRC_SHADOW_DOMF_DYING, va);
         return 0;
     }
@@ -3287,6 +3298,7 @@
          * failed. We cannot safely continue since some page is still
          * OOS but not in the hash table anymore. */
         paging_unlock(d);
+        put_gfn(d, gfn_x(gfn));
         return 0;
     }
 
@@ -3296,6 +3308,7 @@
     {
         perfc_incr(shadow_inconsistent_gwalk);
         paging_unlock(d);
+        put_gfn(d, gfn_x(gfn));
         goto rewalk;
     }
 #endif /* OOS */
@@ -3389,6 +3402,7 @@
     SHADOW_PRINTK("fixed\n");
     shadow_audit_tables(v);
     paging_unlock(d);
+    put_gfn(d, gfn_x(gfn));
     return EXCRET_fault_fixed;
 
  emulate:
@@ -3457,6 +3471,7 @@
     sh_audit_gw(v, &gw);
     shadow_audit_tables(v);
     paging_unlock(d);
+    put_gfn(d, gfn_x(gfn));
 
     this_cpu(trace_emulate_write_val) = 0;
 
@@ -3595,6 +3610,7 @@
     shadow_audit_tables(v);
     reset_early_unshadow(v);
     paging_unlock(d);
+    put_gfn(d, gfn_x(gfn));
     trace_shadow_gen(TRC_SHADOW_MMIO, va);
     return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT)
             ? EXCRET_fault_fixed : 0);
@@ -3605,6 +3621,7 @@
     shadow_audit_tables(v);
     reset_early_unshadow(v);
     paging_unlock(d);
+    put_gfn(d, gfn_x(gfn));
 
 propagate:
     trace_not_shadow_fault(gw.l1e, va);
@@ -4292,7 +4309,7 @@
             if ( guest_l3e_get_flags(gl3e[i]) & _PAGE_PRESENT )
             {
                 gl2gfn = guest_l3e_get_gfn(gl3e[i]);
-                gl2mfn = gfn_to_mfn_query(d, gl2gfn, &p2mt);
+                gl2mfn = get_gfn_query_unlocked(d, gfn_x(gl2gfn), &p2mt);
                 if ( p2m_is_ram(p2mt) )
                     flush |= sh_remove_write_access(v, gl2mfn, 2, 0);
             }
@@ -4305,13 +4322,14 @@
             if ( guest_l3e_get_flags(gl3e[i]) & _PAGE_PRESENT )
             {
                 gl2gfn = guest_l3e_get_gfn(gl3e[i]);
-                gl2mfn = gfn_to_mfn_query(d, gl2gfn, &p2mt);
+                gl2mfn = get_gfn_query(d, gl2gfn, &p2mt);
                 if ( p2m_is_ram(p2mt) )
                     sh_set_toplevel_shadow(v, i, gl2mfn, (i == 3) 
                                            ? SH_type_l2h_shadow 
                                            : SH_type_l2_shadow);
                 else
                     sh_set_toplevel_shadow(v, i, _mfn(INVALID_MFN), 0); 
+                put_gfn(d, gfn_x(gl2gfn));
             }
             else
                 sh_set_toplevel_shadow(v, i, _mfn(INVALID_MFN), 0); 
@@ -4689,11 +4707,12 @@
     int flush = 0;
     int fast_path = 0;
     paddr_t gcr3 = 0;
-    mfn_t smfn, gmfn;
     p2m_type_t p2mt;
     char *gl3pa = NULL;
     guest_l3e_t *gl3e = NULL;
     paddr_t gl2a = 0;
+    unsigned long l3gfn;
+    mfn_t l3mfn;
 
     paging_lock(v->domain);
 
@@ -4702,8 +4721,9 @@
     if ( gcr3 == gpa )
         fast_path = 1;
 
-    gmfn = gfn_to_mfn_query(v->domain, _gfn(gpa >> PAGE_SHIFT), &p2mt);
-    if ( !mfn_valid(gmfn) || !p2m_is_ram(p2mt) )
+    l3gfn = gpa >> PAGE_SHIFT;
+    l3mfn = get_gfn_query(v->domain, _gfn(l3gfn), &p2mt);
+    if ( !mfn_valid(l3mfn) || !p2m_is_ram(p2mt) )
     {
         printk(XENLOG_DEBUG "sh_pagetable_dying: gpa not valid %"PRIpaddr"\n",
                gpa);
@@ -4711,19 +4731,24 @@
     }
     if ( !fast_path )
     {
-        gl3pa = sh_map_domain_page(gmfn);
+        gl3pa = sh_map_domain_page(l3mfn);
         gl3e = (guest_l3e_t *)(gl3pa + ((unsigned long)gpa & ~PAGE_MASK));
     }
     for ( i = 0; i < 4; i++ )
     {
+        unsigned long gfn;
+        mfn_t smfn, gmfn;
+
         if ( fast_path )
             smfn = _mfn(pagetable_get_pfn(v->arch.shadow_table[i]));
         else
         {
             /* retrieving the l2s */
             gl2a = guest_l3e_get_paddr(gl3e[i]);
-            gmfn = gfn_to_mfn_query(v->domain, _gfn(gl2a >> PAGE_SHIFT), &p2mt);
+            gfn = gl2a >> PAGE_SHIFT;
+            gmfn = get_gfn_query(v->domain, _gfn(gfn), &p2mt);
             smfn = shadow_hash_lookup(v, mfn_x(gmfn), SH_type_l2_pae_shadow);
+            put_gfn(v->domain, gfn);
         }
 
         if ( mfn_valid(smfn) )
@@ -4747,6 +4772,7 @@
 out:
     if ( !fast_path )
         unmap_domain_page(gl3pa);
+    put_gfn(v->domain, l3gfn);
     paging_unlock(v->domain);
 }
 #else
@@ -4757,12 +4783,14 @@
 
     paging_lock(v->domain);
 
-    gmfn = gfn_to_mfn_query(v->domain, _gfn(gpa >> PAGE_SHIFT), &p2mt);
+    gmfn = get_gfn_query(v->domain, _gfn(gpa >> PAGE_SHIFT), &p2mt);
 #if GUEST_PAGING_LEVELS == 2
     smfn = shadow_hash_lookup(v, mfn_x(gmfn), SH_type_l2_32_shadow);
 #else
     smfn = shadow_hash_lookup(v, mfn_x(gmfn), SH_type_l4_64_shadow);
 #endif
+    put_gfn(v->domain, gpa >> PAGE_SHIFT);
+    
     if ( mfn_valid(smfn) )
     {
         mfn_to_page(gmfn)->shadow_flags |= SHF_pagetable_dying;
@@ -4811,15 +4839,22 @@
 
     /* Translate the GFN to an MFN */
     ASSERT(!paging_locked_by_me(v->domain));
-    mfn = gfn_to_mfn_guest(v->domain, _gfn(gfn), &p2mt);
+    mfn = get_gfn_guest(v->domain, _gfn(gfn), &p2mt);
         
     if ( p2m_is_readonly(p2mt) )
+    {
+        put_gfn(v->domain, gfn);
         return _mfn(READONLY_GFN);
+    }
     if ( !p2m_is_ram(p2mt) )
+    {
+        put_gfn(v->domain, gfn);
         return _mfn(BAD_GFN_TO_MFN);
+    }
 
     ASSERT(mfn_valid(mfn));
     v->arch.paging.last_write_was_pt = !!sh_mfn_is_a_page_table(mfn);
+    put_gfn(v->domain, gfn);
     return mfn;
 }
 
@@ -5220,7 +5255,7 @@
             {
                 gfn = guest_l1e_get_gfn(*gl1e);
                 mfn = shadow_l1e_get_mfn(*sl1e);
-                gmfn = gfn_to_mfn_query(v->domain, gfn, &p2mt);
+                gmfn = get_gfn_query_unlocked(v->domain, gfn_x(gfn), &p2mt);
                 if ( !p2m_is_grant(p2mt) && mfn_x(gmfn) != mfn_x(mfn) )
                     AUDIT_FAIL(1, "bad translation: gfn %" SH_PRI_gfn
                                " --> %" PRI_mfn " != mfn %" PRI_mfn,
@@ -5291,16 +5326,17 @@
             mfn = shadow_l2e_get_mfn(*sl2e);
             gmfn = (guest_l2e_get_flags(*gl2e) & _PAGE_PSE)  
                 ? get_fl1_shadow_status(v, gfn)
-                : get_shadow_status(v, gfn_to_mfn_query(v->domain, gfn, &p2mt),
-                                    SH_type_l1_shadow);
+                : get_shadow_status(v, 
+                    get_gfn_query_unlocked(v->domain, gfn_x(gfn), 
+                                        &p2mt), SH_type_l1_shadow);
             if ( mfn_x(gmfn) != mfn_x(mfn) )
                 AUDIT_FAIL(2, "bad translation: gfn %" SH_PRI_gfn
                            " (--> %" PRI_mfn ")"
                            " --> %" PRI_mfn " != mfn %" PRI_mfn,
                            gfn_x(gfn), 
                            (guest_l2e_get_flags(*gl2e) & _PAGE_PSE) ? 0
-                           : mfn_x(gfn_to_mfn_query(v->domain,
-                                   gfn, &p2mt)), mfn_x(gmfn), mfn_x(mfn));
+                           : mfn_x(get_gfn_query_unlocked(v->domain,
+                                   gfn_x(gfn), &p2mt)), mfn_x(gmfn), mfn_x(mfn));
         }
     });
     sh_unmap_domain_page(gp);
@@ -5339,7 +5375,8 @@
         {
             gfn = guest_l3e_get_gfn(*gl3e);
             mfn = shadow_l3e_get_mfn(*sl3e);
-            gmfn = get_shadow_status(v, gfn_to_mfn_query(v->domain, gfn, &p2mt),
+            gmfn = get_shadow_status(v, get_gfn_query_unlocked(
+                                        v->domain, gfn_x(gfn), &p2mt),
                                      ((GUEST_PAGING_LEVELS == 3 ||
                                        is_pv_32on64_vcpu(v))
                                       && !shadow_mode_external(v->domain)
@@ -5387,8 +5424,8 @@
         {
             gfn = guest_l4e_get_gfn(*gl4e);
             mfn = shadow_l4e_get_mfn(*sl4e);
-            gmfn = get_shadow_status(v, gfn_to_mfn_query(v->domain,
-                                     gfn, &p2mt), 
+            gmfn = get_shadow_status(v, get_gfn_query_unlocked(
+                                     v->domain, gfn_x(gfn), &p2mt), 
                                      SH_type_l3_shadow);
             if ( mfn_x(gmfn) != mfn_x(mfn) )
                 AUDIT_FAIL(4, "bad translation: gfn %" SH_PRI_gfn
diff -r 9af6829a9273 -r 3622d7fae14d xen/arch/x86/mm/shadow/types.h
--- a/xen/arch/x86/mm/shadow/types.h    Fri Nov 11 17:46:19 2011 +0100
+++ b/xen/arch/x86/mm/shadow/types.h    Fri Nov 11 18:11:34 2011 +0000
@@ -191,11 +191,11 @@
 })
 #endif
 
- /* Override gfn_to_mfn to work with gfn_t */
-#undef gfn_to_mfn_query
-#define gfn_to_mfn_query(d, g, t) gfn_to_mfn_type((d), gfn_x(g), (t), p2m_query)
-#undef gfn_to_mfn_guest
-#define gfn_to_mfn_guest(d, g, t) gfn_to_mfn_type((d), gfn_x(g), (t), p2m_guest)
+ /* Override get_gfn to work with gfn_t */
+#undef get_gfn_query
+#define get_gfn_query(d, g, t) get_gfn_type((d), gfn_x(g), (t), p2m_query)
+#undef get_gfn_guest
+#define get_gfn_guest(d, g, t) get_gfn_type((d), gfn_x(g), (t), p2m_guest)
 
 /* The shadow types needed for the various levels. */
 
diff -r 9af6829a9273 -r 3622d7fae14d xen/arch/x86/physdev.c
--- a/xen/arch/x86/physdev.c    Fri Nov 11 17:46:19 2011 +0100
+++ b/xen/arch/x86/physdev.c    Fri Nov 11 18:11:34 2011 +0000
@@ -297,16 +297,20 @@
             break;
 
         ret = -EINVAL;
-        mfn = gmfn_to_mfn(current->domain, info.gmfn);
+        mfn = get_gfn_untyped(current->domain, info.gmfn);
         if ( !mfn_valid(mfn) ||
              !get_page_and_type(mfn_to_page(mfn), v->domain,
                                 PGT_writable_page) )
+        {
+            put_gfn(current->domain, info.gmfn);
             break;
+        }
 
         if ( cmpxchg(&v->domain->arch.pv_domain.pirq_eoi_map_mfn,
                      0, mfn) != 0 )
         {
             put_page_and_type(mfn_to_page(mfn));
+            put_gfn(current->domain, info.gmfn);
             ret = -EBUSY;
             break;
         }
@@ -316,10 +320,12 @@
         {
             v->domain->arch.pv_domain.pirq_eoi_map_mfn = 0;
             put_page_and_type(mfn_to_page(mfn));
+            put_gfn(current->domain, info.gmfn);
             ret = -ENOSPC;
             break;
         }
 
+        put_gfn(current->domain, info.gmfn);
         ret = 0;
         break;
     }
diff -r 9af6829a9273 -r 3622d7fae14d xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c      Fri Nov 11 17:46:19 2011 +0100
+++ b/xen/arch/x86/traps.c      Fri Nov 11 18:11:34 2011 +0000
@@ -673,11 +673,12 @@
             return 0;
         }
 
-        mfn = gmfn_to_mfn(d, gmfn);
+        mfn = get_gfn_untyped(d, gmfn);
 
         if ( !mfn_valid(mfn) ||
              !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
         {
+            put_gfn(d, gmfn);
             gdprintk(XENLOG_WARNING,
                      "Bad GMFN %lx (MFN %lx) to MSR %08x\n",
                      gmfn, mfn, base + idx);
@@ -689,6 +690,7 @@
         unmap_domain_page(hypercall_page);
 
         put_page_and_type(mfn_to_page(mfn));
+        put_gfn(d, gmfn);
         break;
     }
 
@@ -2347,18 +2349,25 @@
             arch_set_cr2(v, *reg);
             break;
 
-        case 3: /* Write CR3 */
+        case 3: {/* Write CR3 */
+            unsigned long mfn, gfn;
             domain_lock(v->domain);
             if ( !is_pv_32on64_vcpu(v) )
-                rc = new_guest_cr3(gmfn_to_mfn(v->domain, xen_cr3_to_pfn(*reg)));
+            {
+                gfn = xen_cr3_to_pfn(*reg);
 #ifdef CONFIG_COMPAT
-            else
-                rc = new_guest_cr3(gmfn_to_mfn(v->domain, compat_cr3_to_pfn(*reg)));
+            } else {
+                gfn = compat_cr3_to_pfn(*reg);
 #endif
+            }
+            mfn = get_gfn_untyped(v->domain, gfn);
+            rc = new_guest_cr3(mfn);
+            put_gfn(v->domain, gfn);
             domain_unlock(v->domain);
             if ( rc == 0 ) /* not okay */
                 goto fail;
             break;
+        }
 
         case 4: /* Write CR4 */
             v->arch.pv_vcpu.ctrlreg[4] = pv_guest_cr4_fixup(v, *reg);
diff -r 9af6829a9273 -r 3622d7fae14d xen/common/grant_table.c
--- a/xen/common/grant_table.c  Fri Nov 11 17:46:19 2011 +0100
+++ b/xen/common/grant_table.c  Fri Nov 11 18:11:34 2011 +0000
@@ -110,7 +110,7 @@
 #define gfn_to_mfn_private(_d, _gfn) ({                     \
     p2m_type_t __p2mt;                                      \
     unsigned long __x;                                      \
-    __x = mfn_x(gfn_to_mfn_unshare((_d), (_gfn), &__p2mt)); \
+    __x = mfn_x(get_gfn_unshare((_d), (_gfn), &__p2mt));    \
     BUG_ON(p2m_is_shared(__p2mt)); /* XXX fixme */          \
     if ( !p2m_is_valid(__p2mt) )                            \
         __x = INVALID_MFN;                                  \
@@ -150,10 +150,10 @@
     mfn_t mfn;
 
     if ( readonly )
-        mfn = gfn_to_mfn(rd, gfn, &p2mt);
+        mfn = get_gfn(rd, gfn, &p2mt);
     else
     {
-        mfn = gfn_to_mfn_unshare(rd, gfn, &p2mt);
+        mfn = get_gfn_unshare(rd, gfn, &p2mt);
         BUG_ON(p2m_is_shared(p2mt));
         /* XXX Here, and above in gfn_to_mfn_private, need to handle
          * XXX failure to unshare. */
@@ -164,14 +164,16 @@
         if ( p2m_is_paging(p2mt) )
         {
             p2m_mem_paging_populate(rd, gfn);
+            put_gfn(rd, gfn);
             rc = GNTST_eagain;
         }
     } else {
+       put_gfn(rd, gfn);
        *frame = INVALID_MFN;
        rc = GNTST_bad_page;
     }
 #else
-    *frame = readonly ? gmfn_to_mfn(rd, gfn) : gfn_to_mfn_private(rd, gfn);
+    *frame = readonly ? get_gfn_untyped(rd, gfn) : gfn_to_mfn_private(rd, gfn);
 #endif
 
     return rc;
@@ -468,13 +470,14 @@
     struct domain *ld, *rd, *owner;
     struct vcpu   *led;
     int            handle;
+    unsigned long  gfn = INVALID_GFN;
     unsigned long  frame = 0, nr_gets = 0;
     struct page_info *pg;
     int            rc = GNTST_okay;
     u32            old_pin;
     u32            act_pin;
     unsigned int   cache_flags;
-    struct active_grant_entry *act;
+    struct active_grant_entry *act = NULL;
     struct grant_mapping *mt;
     grant_entry_v1_t *sha1;
     grant_entry_v2_t *sha2;
@@ -565,7 +568,6 @@
 
         if ( !act->pin )
         {
-            unsigned long gfn;
             unsigned long frame;
 
             gfn = sha1 ? sha1->frame : sha2->full_page.frame;
@@ -698,6 +700,7 @@
     op->handle       = handle;
     op->status       = GNTST_okay;
 
+    put_gfn(rd, gfn);
     rcu_unlock_domain(rd);
     return;
 
@@ -735,6 +738,8 @@
         gnttab_clear_flag(_GTF_reading, status);
 
  unlock_out:
+    if ( gfn != INVALID_GFN )
+        put_gfn(rd, gfn);
     spin_unlock(&rd->grant_table->lock);
     op->status = rc;
     put_maptrack_handle(ld->grant_table, handle);
@@ -1475,6 +1480,7 @@
         /* Check the passed page frame for basic validity. */
         if ( unlikely(!mfn_valid(mfn)) )
         { 
+            put_gfn(d, gop.mfn);
             gdprintk(XENLOG_INFO, "gnttab_transfer: out-of-range %lx\n",
                     (unsigned long)gop.mfn);
             gop.status = GNTST_bad_page;
@@ -1484,6 +1490,7 @@
         page = mfn_to_page(mfn);
         if ( unlikely(is_xen_heap_page(page)) )
         { 
+            put_gfn(d, gop.mfn);
             gdprintk(XENLOG_INFO, "gnttab_transfer: xen frame %lx\n",
                     (unsigned long)gop.mfn);
             gop.status = GNTST_bad_page;
@@ -1492,6 +1499,7 @@
 
         if ( steal_page(d, page, 0) < 0 )
         {
+            put_gfn(d, gop.mfn);
             gop.status = GNTST_bad_page;
             goto copyback;
         }
@@ -1504,6 +1512,7 @@
         /* Find the target domain. */
         if ( unlikely((e = rcu_lock_domain_by_id(gop.domid)) == NULL) )
         {
+            put_gfn(d, gop.mfn);
             gdprintk(XENLOG_INFO, "gnttab_transfer: can't find domain %d\n",
                     gop.domid);
             page->count_info &= ~(PGC_count_mask|PGC_allocated);
@@ -1514,6 +1523,7 @@
 
         if ( xsm_grant_transfer(d, e) )
         {
+            put_gfn(d, gop.mfn);
             gop.status = GNTST_permission_denied;
         unlock_and_copyback:
             rcu_unlock_domain(e);
@@ -1566,6 +1576,7 @@
                         e->tot_pages, e->max_pages, gop.ref, e->is_dying);
             spin_unlock(&e->page_alloc_lock);
             rcu_unlock_domain(e);
+            put_gfn(d, gop.mfn);
             page->count_info &= ~(PGC_count_mask|PGC_allocated);
             free_domheap_page(page);
             gop.status = GNTST_general_error;
@@ -1579,6 +1590,7 @@
         page_set_owner(page, e);
 
         spin_unlock(&e->page_alloc_lock);
+        put_gfn(d, gop.mfn);
 
         TRACE_1D(TRC_MEM_PAGE_GRANT_TRANSFER, e->domain_id);
 
@@ -1850,6 +1862,8 @@
         {
             gfn = sha1->frame;
             rc = __get_paged_frame(gfn, &grant_frame, readonly, rd);
+            /* We drop this immediately per the comments at the top */
+            put_gfn(rd, gfn);
             if ( rc != GNTST_okay )
                 goto unlock_out;
             act->gfn = gfn;
@@ -1862,6 +1876,7 @@
         {
             gfn = sha2->full_page.frame;
             rc = __get_paged_frame(gfn, &grant_frame, readonly, rd);
+            put_gfn(rd, gfn);
             if ( rc != GNTST_okay )
                 goto unlock_out;
             act->gfn = gfn;
@@ -1874,6 +1889,7 @@
         {
             gfn = sha2->sub_page.frame;
             rc = __get_paged_frame(gfn, &grant_frame, readonly, rd);
+            put_gfn(rd, gfn);
             if ( rc != GNTST_okay )
                 goto unlock_out;
             act->gfn = gfn;
@@ -1973,6 +1989,7 @@
     {
 #ifdef CONFIG_X86
         rc = __get_paged_frame(op->source.u.gmfn, &s_frame, 1, sd);
+        put_gfn(sd, op->source.u.gmfn);
         if ( rc != GNTST_okay )
             goto error_out;
 #else
@@ -2012,6 +2029,7 @@
     {
 #ifdef CONFIG_X86
         rc = __get_paged_frame(op->dest.u.gmfn, &d_frame, 0, dd);
+        put_gfn(dd, op->dest.u.gmfn);
         if ( rc != GNTST_okay )
             goto error_out;
 #else
diff -r 9af6829a9273 -r 3622d7fae14d xen/common/memory.c
--- a/xen/common/memory.c       Fri Nov 11 17:46:19 2011 +0100
+++ b/xen/common/memory.c       Fri Nov 11 18:11:34 2011 +0000
@@ -162,11 +162,12 @@
     unsigned long mfn;
 
 #ifdef CONFIG_X86
-    mfn = mfn_x(gfn_to_mfn(d, gmfn, &p2mt)); 
+    mfn = mfn_x(get_gfn(d, gmfn, &p2mt)); 
     if ( unlikely(p2m_is_paging(p2mt)) )
     {
         guest_physmap_remove_page(d, gmfn, mfn, PAGE_ORDER_4K);
         p2m_mem_paging_drop_page(d, gmfn);
+        put_gfn(d, gmfn);
         return 1;
     }
 #else
@@ -174,6 +175,7 @@
 #endif
     if ( unlikely(!mfn_valid(mfn)) )
     {
+        put_gfn(d, gmfn);
         gdprintk(XENLOG_INFO, "Domain %u page number %lx invalid\n",
                 d->domain_id, gmfn);
         return 0;
@@ -187,12 +189,14 @@
     {
         put_page_and_type(page);
         guest_physmap_remove_page(d, gmfn, mfn, PAGE_ORDER_4K);
+        put_gfn(d, gmfn);
         return 1;
     }
 
 #endif /* CONFIG_X86 */
     if ( unlikely(!get_page(page, d)) )
     {
+        put_gfn(d, gmfn);
         gdprintk(XENLOG_INFO, "Bad page free for domain %u\n", d->domain_id);
         return 0;
     }
@@ -206,6 +210,7 @@
     guest_physmap_remove_page(d, gmfn, mfn, PAGE_ORDER_4K);
 
     put_page(page);
+    put_gfn(d, gmfn);
 
     return 1;
 }
@@ -265,7 +270,7 @@
     PAGE_LIST_HEAD(out_chunk_list);
     unsigned long in_chunk_order, out_chunk_order;
     xen_pfn_t     gpfn, gmfn, mfn;
-    unsigned long i, j, k;
+    unsigned long i, j, k = 0; /* gcc ... */
     unsigned int  memflags = 0;
     long          rc = 0;
     struct domain *d;
@@ -363,9 +368,10 @@
                 p2m_type_t p2mt;
 
                 /* Shared pages cannot be exchanged */
-                mfn = mfn_x(gfn_to_mfn_unshare(d, gmfn + k, &p2mt));
+                mfn = mfn_x(get_gfn_unshare(d, gmfn + k, &p2mt));
                 if ( p2m_is_shared(p2mt) )
                 {
+                    put_gfn(d, gmfn + k);
                     rc = -ENOMEM;
                     goto fail; 
                 }
@@ -374,6 +380,7 @@
 #endif
                 if ( unlikely(!mfn_valid(mfn)) )
                 {
+                    put_gfn(d, gmfn + k);
                     rc = -EINVAL;
                     goto fail;
                 }
@@ -382,11 +389,13 @@
 
                 if ( unlikely(steal_page(d, page, MEMF_no_refcount)) )
                 {
+                    put_gfn(d, gmfn + k);
                     rc = -EINVAL;
                     goto fail;
                 }
 
                 page_list_add(page, &in_chunk_list);
+                put_gfn(d, gmfn + k);
             }
         }
 
@@ -487,8 +496,12 @@
  fail:
     /* Reassign any input pages we managed to steal. */
     while ( (page = page_list_remove_head(&in_chunk_list)) )
+    {
+        put_gfn(d, gmfn + k--);
         if ( assign_pages(d, page, 0, MEMF_no_refcount) )
             BUG();
+    }
+
  dying:
     rcu_unlock_domain(d);
     /* Free any output pages we managed to allocate. */
diff -r 9af6829a9273 -r 3622d7fae14d xen/common/tmem_xen.c
--- a/xen/common/tmem_xen.c     Fri Nov 11 17:46:19 2011 +0100
+++ b/xen/common/tmem_xen.c     Fri Nov 11 18:11:34 2011 +0000
@@ -109,22 +109,28 @@
     struct page_info *page;
     int ret;
 
-    cli_mfn = mfn_x(gfn_to_mfn(current->domain, cmfn, &t));
+    cli_mfn = mfn_x(get_gfn(current->domain, cmfn, &t));
     if ( t != p2m_ram_rw || !mfn_valid(cli_mfn) )
+    {
+            put_gfn(current->domain, (unsigned long) cmfn);
             return NULL;
+    }
     page = mfn_to_page(cli_mfn);
     if ( cli_write )
         ret = get_page_and_type(page, current->domain, PGT_writable_page);
     else
         ret = get_page(page, current->domain);
     if ( !ret )
+    {
+        put_gfn(current->domain, (unsigned long) cmfn);
         return NULL;
+    }
     *pcli_mfn = cli_mfn;
     *pcli_pfp = (pfp_t *)page;
     return map_domain_page(cli_mfn);
 }
 
-static inline void cli_put_page(void *cli_va, pfp_t *cli_pfp,
+static inline void cli_put_page(tmem_cli_mfn_t cmfn, void *cli_va, pfp_t *cli_pfp,
                                 unsigned long cli_mfn, bool_t mark_dirty)
 {
     if ( mark_dirty )
@@ -135,6 +141,7 @@
     else
         put_page((struct page_info *)cli_pfp);
     unmap_domain_page(cli_va);
+    put_gfn(current->domain, (unsigned long) cmfn);
 }
 #endif
 
@@ -169,7 +176,7 @@
               (pfn_offset+len <= PAGE_SIZE) )
         memcpy((char *)tmem_va+tmem_offset,(char *)cli_va+pfn_offset,len);
     if ( !tmemc )
-        cli_put_page(cli_va, cli_pfp, cli_mfn, 0);
+        cli_put_page(cmfn, cli_va, cli_pfp, cli_mfn, 0);
     unmap_domain_page(tmem_va);
     return 1;
 }
@@ -197,7 +204,7 @@
     ASSERT(ret == LZO_E_OK);
     *out_va = dmem;
     if ( !tmemc )
-        cli_put_page(cli_va, cli_pfp, cli_mfn, 0);
+        cli_put_page(cmfn, cli_va, cli_pfp, cli_mfn, 0);
     unmap_domain_page(cli_va);
     return 1;
 }
@@ -225,7 +232,7 @@
         memcpy((char *)cli_va+pfn_offset,(char *)tmem_va+tmem_offset,len);
     unmap_domain_page(tmem_va);
     if ( !tmemc )
-        cli_put_page(cli_va, cli_pfp, cli_mfn, 1);
+        cli_put_page(cmfn, cli_va, cli_pfp, cli_mfn, 1);
     mb();
     return 1;
 }
@@ -249,7 +256,7 @@
     ASSERT(ret == LZO_E_OK);
     ASSERT(out_len == PAGE_SIZE);
     if ( !tmemc )
-        cli_put_page(cli_va, cli_pfp, cli_mfn, 1);
+        cli_put_page(cmfn, cli_va, cli_pfp, cli_mfn, 1);
     mb();
     return 1;
 }
@@ -271,7 +278,7 @@
         memcpy((char *)cli_va,(char *)tmem_va,len);
     if ( len < PAGE_SIZE )
         memset((char *)cli_va+len,0,PAGE_SIZE-len);
-    cli_put_page(cli_va, cli_pfp, cli_mfn, 1);
+    cli_put_page(cmfn, cli_va, cli_pfp, cli_mfn, 1);
     mb();
     return 1;
 }
diff -r 9af6829a9273 -r 3622d7fae14d xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h Fri Nov 11 17:46:19 2011 +0100
+++ b/xen/include/asm-ia64/mm.h Fri Nov 11 18:11:34 2011 +0000
@@ -549,6 +549,8 @@
 #define gmfn_to_mfn(_d, gpfn)                  \
     gmfn_to_mfn_foreign((_d), (gpfn))
 
+#define put_gfn(d, g)   ((void)0)
+
 #define __gpfn_invalid(_d, gpfn)                       \
        (lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT), NULL) == INVALID_MFN)
 
diff -r 9af6829a9273 -r 3622d7fae14d xen/include/asm-x86/guest_pt.h
--- a/xen/include/asm-x86/guest_pt.h    Fri Nov 11 17:46:19 2011 +0100
+++ b/xen/include/asm-x86/guest_pt.h    Fri Nov 11 18:11:34 2011 +0000
@@ -51,9 +51,9 @@
     return ((paddr_t)gfn_x(gfn)) << PAGE_SHIFT;
 }
 
-/* Override gfn_to_mfn to work with gfn_t */
-#undef gfn_to_mfn
-#define gfn_to_mfn(d, g, t) gfn_to_mfn_type((d), gfn_x(g), (t), p2m_alloc)
+/* Override get_gfn to work with gfn_t */
+#undef get_gfn
+#define get_gfn(d, g, t) get_gfn_type((d), gfn_x(g), (t), p2m_alloc)
 
 
 /* Types of the guest's page tables and access functions for them */
diff -r 9af6829a9273 -r 3622d7fae14d xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Fri Nov 11 17:46:19 2011 +0100
+++ b/xen/include/asm-x86/p2m.h Fri Nov 11 18:11:34 2011 +0000
@@ -3,6 +3,7 @@
  *
  * physical-to-machine mappings for automatically-translated domains.
  *
+ * Copyright (c) 2011 GridCentric Inc. (Andres Lagar-Cavilla)
  * Copyright (c) 2007 Advanced Micro Devices (Wei Huang)
  * Parts of this code are Copyright (c) 2006-2007 by XenSource Inc.
  * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
@@ -305,47 +306,66 @@
 
 #define p2m_get_pagetable(p2m)  ((p2m)->phys_table)
 
+/**** p2m query accessors. After calling any of the variants below, you
+ * need to call put_gfn(domain, gfn). If you don't, you'll deadlock the
+ * hypervisor. ****/
 
 /* Read a particular P2M table, mapping pages as we go.  Most callers
- * should _not_ call this directly; use the other gfn_to_mfn_* functions
+ * should _not_ call this directly; use the other get_gfn* functions
  * below unless you know you want to walk a p2m that isn't a domain's
  * main one.
  * If the lookup succeeds, the return value is != INVALID_MFN and 
  * *page_order is filled in with the order of the superpage (if any) that
  * the entry was found in.  */
-mfn_t gfn_to_mfn_type_p2m(struct p2m_domain *p2m, unsigned long gfn,
+mfn_t get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn,
                     p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
                     unsigned int *page_order);
 
 /* General conversion function from gfn to mfn */
-static inline mfn_t gfn_to_mfn_type(struct domain *d,
+static inline mfn_t get_gfn_type(struct domain *d,
                                     unsigned long gfn, p2m_type_t *t,
                                     p2m_query_t q)
 {
     p2m_access_t a;
-    return gfn_to_mfn_type_p2m(p2m_get_hostp2m(d), gfn, t, &a, q, NULL);
+    return get_gfn_type_access(p2m_get_hostp2m(d), gfn, t, &a, q, NULL);
 }
 
 /* Syntactic sugar: most callers will use one of these. 
- * N.B. gfn_to_mfn_query() is the _only_ one guaranteed not to take the
+ * N.B. get_gfn_query() is the _only_ one guaranteed not to take the
  * p2m lock; none of the others can be called with the p2m or paging
  * lock held. */
-#define gfn_to_mfn(d, g, t)         gfn_to_mfn_type((d), (g), (t), p2m_alloc)
-#define gfn_to_mfn_query(d, g, t)   gfn_to_mfn_type((d), (g), (t), p2m_query)
-#define gfn_to_mfn_guest(d, g, t)   gfn_to_mfn_type((d), (g), (t), p2m_guest)
-#define gfn_to_mfn_unshare(d, g, t) gfn_to_mfn_type((d), (g), (t), p2m_unshare)
+#define get_gfn(d, g, t)         get_gfn_type((d), (g), (t), p2m_alloc)
+#define get_gfn_query(d, g, t)   get_gfn_type((d), (g), (t), p2m_query)
+#define get_gfn_guest(d, g, t)   get_gfn_type((d), (g), (t), p2m_guest)
+#define get_gfn_unshare(d, g, t) get_gfn_type((d), (g), (t), p2m_unshare)
 
 /* Compatibility function exporting the old untyped interface */
-static inline unsigned long gmfn_to_mfn(struct domain *d, unsigned long gpfn)
+static inline unsigned long get_gfn_untyped(struct domain *d, unsigned long gpfn)
 {
     mfn_t mfn;
     p2m_type_t t;
-    mfn = gfn_to_mfn(d, gpfn, &t);
+    mfn = get_gfn(d, gpfn, &t);
     if ( p2m_is_valid(t) )
         return mfn_x(mfn);
     return INVALID_MFN;
 }
 
+/* This is a noop for now. */
+static inline void __put_gfn(struct p2m_domain *p2m, unsigned long gfn)
+{
+}
+
+#define put_gfn(d, gfn) __put_gfn(p2m_get_hostp2m((d)), (gfn))
+
+/* These are identical for now. The intent is to have the caller not worry
+ * about put_gfn. Only to be used in printks, crash situations, or to
+ * peek at a type. You are not holding the p2m entry exclusively after
+ * calling this. */
+#define get_gfn_unlocked(d, g, t)         get_gfn_type((d), (g), (t), p2m_alloc)
+#define get_gfn_query_unlocked(d, g, t)   get_gfn_type((d), (g), (t), p2m_query)
+#define get_gfn_guest_unlocked(d, g, t)   get_gfn_type((d), (g), (t), p2m_guest)
+#define get_gfn_unshare_unlocked(d, g, t) get_gfn_type((d), (g), (t), p2m_unshare)
+
 /* General conversion function from mfn to gfn */
 static inline unsigned long mfn_to_gfn(struct domain *d, mfn_t mfn)
 {
@@ -529,7 +549,8 @@
 #define p2m_gfn_check_limit(d, g, o) 0
 #endif
 
-/* Directly set a p2m entry: only for use by p2m code */
+/* Directly set a p2m entry: only for use by p2m code. Does not need
+ * a call to put_gfn afterwards. */
 int set_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, 
                   unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma);
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

<Prev in Thread] Current Thread [Next in Thread>
  • [Xen-changelog] [xen-unstable] Modify naming of queries into the p2m, Xen patchbot-unstable <=