[Xen-changelog] [xen-unstable] x86/mm/p2m: merge gfn_to_mfn_unshare with other gfn_to_mfn paths.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] x86/mm/p2m: merge gfn_to_mfn_unshare with other gfn_to_mfn paths.
From: Xen patchbot-unstable <patchbot@xxxxxxx>
Date: Thu, 16 Jun 2011 11:12:31 +0100
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxx>
# Date 1307017012 -3600
# Node ID 0d3e0a571fdddc873d1ff3750d15db6fc58fff8e
# Parent  a7d8612c9ba14ae6efbf420e213c983902433942
x86/mm/p2m: merge gfn_to_mfn_unshare with other gfn_to_mfn paths.

gfn_to_mfn_unshare() had its own implementation despite all other lookup
types being handled in one place. Merge it into _gfn_to_mfn_type(), so
that it gets the benefit of broken-page protection, for example, and
tidy up its interface to fit.

The unsharing code still has a lot of bugs, e.g.:
 - failure to alloc for unshare on a foreign lookup still BUG()s,
 - at least one race condition in unshare-and-retry,
 - p2m_* lookup types should probably be flags, not an enum,
but it's cleaner and will make later p2m cleanups easier.

Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
---
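
For archive readers, a minimal self-contained C sketch of the control flow
this patch folds into _gfn_to_mfn_type() follows. Every name in it
(get_entry(), the single page_is_shared flag, the cut-down enums) is an
illustrative stand-in rather than the real Xen interface; the point is only
that unsharing becomes one more query type handled on the common lookup path:

#include <stdio.h>

typedef unsigned long mfn_t;
typedef enum { p2m_ram_rw, p2m_ram_shared } p2m_type_t;
typedef enum { p2m_query, p2m_alloc, p2m_unshare, p2m_guest } p2m_query_t;

/* Pretend the guest frame initially maps a CoW-shared page. */
static int page_is_shared = 1;

/* Stand-in for p2m->get_entry(): report the page's current type. */
static mfn_t get_entry(unsigned long gfn, p2m_type_t *t, p2m_query_t q)
{
    (void)q;
    *t = page_is_shared ? p2m_ram_shared : p2m_ram_rw;
    return gfn + 0x1000;   /* arbitrary fake translation */
}

/* Stand-in for mem_sharing_unshare_page(): break CoW sharing. */
static int mem_sharing_unshare_page(unsigned long gfn)
{
    (void)gfn;
    page_is_shared = 0;
    return 0;
}

/* Shape of the merged lookup: p2m_unshare is just another query
 * type, so the unshare case now passes through whatever checks
 * (e.g. the broken-page test) the common path applies afterwards. */
static mfn_t _gfn_to_mfn_type(unsigned long gfn, p2m_type_t *t,
                              p2m_query_t q)
{
    mfn_t mfn = get_entry(gfn, t, q);

    if ( q == p2m_unshare && *t == p2m_ram_shared )
    {
        mem_sharing_unshare_page(gfn);  /* give the caller a private copy */
        mfn = get_entry(gfn, t, q);     /* then repeat the lookup */
    }
    return mfn;
}

#define gfn_to_mfn_unshare(g, t) _gfn_to_mfn_type((g), (t), p2m_unshare)

int main(void)
{
    p2m_type_t t;
    mfn_t mfn = gfn_to_mfn_unshare(0x42UL, &t);
    printf("gfn 0x42 -> mfn 0x%lx, still shared: %d\n",
           mfn, t == p2m_ram_shared);
    return 0;
}

With the retry inside the common path, callers lose the old must_succeed
argument, as the hunks below show.
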


diff -r a7d8612c9ba1 -r 0d3e0a571fdd xen/arch/x86/hvm/emulate.c
--- a/xen/arch/x86/hvm/emulate.c        Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/hvm/emulate.c        Thu Jun 02 13:16:52 2011 +0100
@@ -63,7 +63,7 @@
     int rc;
 
     /* Check for paged out page */
-    ram_mfn = gfn_to_mfn_unshare(p2m, ram_gfn, &p2mt, 0);
+    ram_mfn = gfn_to_mfn_unshare(p2m, ram_gfn, &p2mt);
     if ( p2m_is_paging(p2mt) )
     {
         p2m_mem_paging_populate(p2m, ram_gfn);
diff -r a7d8612c9ba1 -r 0d3e0a571fdd xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Thu Jun 02 13:16:52 2011 +0100
@@ -352,7 +352,7 @@
     unsigned long mfn;
     void *va;
 
-    mfn = mfn_x(gfn_to_mfn_unshare(p2m, gmfn, &p2mt, 0));
+    mfn = mfn_x(gfn_to_mfn_unshare(p2m, gmfn, &p2mt));
     if ( !p2m_is_ram(p2mt) )
         return -EINVAL;
     if ( p2m_is_paging(p2mt) )
@@ -1767,7 +1767,7 @@
     struct p2m_domain *p2m = p2m_get_hostp2m(current->domain);
 
     mfn = mfn_x(writable
-                ? gfn_to_mfn_unshare(p2m, gfn, &p2mt, 0)
+                ? gfn_to_mfn_unshare(p2m, gfn, &p2mt)
                 : gfn_to_mfn(p2m, gfn, &p2mt));
     if ( (p2m_is_shared(p2mt) && writable) || !p2m_is_ram(p2mt) )
         return NULL;
@@ -2229,7 +2229,7 @@
             gfn = addr >> PAGE_SHIFT;
         }
 
-        mfn = mfn_x(gfn_to_mfn_unshare(p2m, gfn, &p2mt, 0));
+        mfn = mfn_x(gfn_to_mfn_unshare(p2m, gfn, &p2mt));
 
         if ( p2m_is_paging(p2mt) )
         {
@@ -3724,7 +3724,7 @@
         rc = -EINVAL;
         if ( is_hvm_domain(d) )
         {
-            gfn_to_mfn_unshare(p2m_get_hostp2m(d), a.pfn, &t, 0);
+            gfn_to_mfn_unshare(p2m_get_hostp2m(d), a.pfn, &t);
             if ( p2m_is_mmio(t) )
                 a.mem_type =  HVMMEM_mmio_dm;
             else if ( p2m_is_readonly(t) )
@@ -3779,7 +3779,7 @@
             p2m_type_t t;
             p2m_type_t nt;
             mfn_t mfn;
-            mfn = gfn_to_mfn_unshare(p2m, pfn, &t, 0);
+            mfn = gfn_to_mfn_unshare(p2m, pfn, &t);
             if ( p2m_is_paging(t) )
             {
                 p2m_mem_paging_populate(p2m, pfn);
@@ -3877,7 +3877,7 @@
             mfn_t mfn;
             int success;
 
-            mfn = gfn_to_mfn_unshare(p2m, pfn, &t, 0);
+            mfn = gfn_to_mfn_unshare(p2m, pfn, &t);
 
             p2m_lock(p2m);
     success = p2m->set_entry(p2m, pfn, mfn, 0, t, memaccess[a.hvmmem_access]);
diff -r a7d8612c9ba1 -r 0d3e0a571fdd xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm.c Thu Jun 02 13:16:52 2011 +0100
@@ -4653,7 +4653,7 @@
             p2m_type_t p2mt;
 
             xatp.idx = mfn_x(gfn_to_mfn_unshare(p2m_get_hostp2m(d),
-                                                xatp.idx, &p2mt, 0));
+                                                xatp.idx, &p2mt));
             /* If the page is still shared, exit early */
             if ( p2m_is_shared(p2mt) )
             {
diff -r a7d8612c9ba1 -r 0d3e0a571fdd xen/arch/x86/mm/guest_walk.c
--- a/xen/arch/x86/mm/guest_walk.c      Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/guest_walk.c      Thu Jun 02 13:16:52 2011 +0100
@@ -93,7 +93,7 @@
                                    uint32_t *rc) 
 {
     /* Translate the gfn, unsharing if shared */
-    *mfn = gfn_to_mfn_unshare(p2m, gfn_x(gfn), p2mt, 0);
+    *mfn = gfn_to_mfn_unshare(p2m, gfn_x(gfn), p2mt);
     if ( p2m_is_paging(*p2mt) )
     {
         p2m_mem_paging_populate(p2m, gfn_x(gfn));
diff -r a7d8612c9ba1 -r 0d3e0a571fdd xen/arch/x86/mm/hap/guest_walk.c
--- a/xen/arch/x86/mm/hap/guest_walk.c  Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/hap/guest_walk.c  Thu Jun 02 13:16:52 2011 +0100
@@ -57,7 +57,7 @@
     walk_t gw;
 
     /* Get the top-level table's MFN */
-    top_mfn = gfn_to_mfn_unshare(p2m, cr3 >> PAGE_SHIFT, &p2mt, 0);
+    top_mfn = gfn_to_mfn_unshare(p2m, cr3 >> PAGE_SHIFT, &p2mt);
     if ( p2m_is_paging(p2mt) )
     {
         p2m_mem_paging_populate(p2m, cr3 >> PAGE_SHIFT);
@@ -89,7 +89,7 @@
     if ( missing == 0 )
     {
         gfn_t gfn = guest_l1e_get_gfn(gw.l1e);
-        gfn_to_mfn_unshare(p2m, gfn_x(gfn), &p2mt, 0);
+        gfn_to_mfn_unshare(p2m, gfn_x(gfn), &p2mt);
         if ( p2m_is_paging(p2mt) )
         {
             p2m_mem_paging_populate(p2m, gfn_x(gfn));
diff -r a7d8612c9ba1 -r 0d3e0a571fdd xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c     Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/arch/x86/mm/mem_sharing.c     Thu Jun 02 13:16:52 2011 +0100
@@ -294,8 +294,7 @@
 
 
 static struct page_info* mem_sharing_alloc_page(struct domain *d, 
-                                                unsigned long gfn,
-                                                int must_succeed)
+                                                unsigned long gfn)
 {
     struct page_info* page;
     struct vcpu *v = current;
@@ -307,21 +306,20 @@
     memset(&req, 0, sizeof(req));
     req.type = MEM_EVENT_TYPE_SHARED;
 
-    if(must_succeed) 
+    if ( v->domain != d )
     {
-        /* We do not support 'must_succeed' any more. External operations such
-         * as grant table mappings may fail with OOM condition! 
-         */
-        BUG();
+        /* XXX This path needs some attention.  For now, just fail foreign 
+         * XXX requests to unshare if there's no memory.  This replaces 
+         * XXX old code that BUG()ed here; the callers now BUG()
+         * XXX elsewhere. */
+        gdprintk(XENLOG_ERR, 
+                 "Failed alloc on unshare path for foreign (%d) lookup\n",
+                 d->domain_id);
+        return page;
     }
-    else
-    {
-        /* All foreign attempts to unshare pages should be handled through
-         * 'must_succeed' case. */
-        ASSERT(v->domain->domain_id == d->domain_id);
-        vcpu_pause_nosync(v);
-        req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
-    }
+
+    vcpu_pause_nosync(v);
+    req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
 
     /* XXX: Need to reserve a request, not just check the ring! */
     if(mem_event_check_ring(d)) return page;
@@ -692,8 +690,7 @@
     if(ret == 0) goto private_page_found;
         
     old_page = page;
-    page = mem_sharing_alloc_page(d, gfn, flags & MEM_SHARING_MUST_SUCCEED);
-    BUG_ON(!page && (flags & MEM_SHARING_MUST_SUCCEED));
+    page = mem_sharing_alloc_page(d, gfn);
     if(!page) 
     {
         /* We've failed to obtain memory for private page. Need to re-add the
diff -r a7d8612c9ba1 -r 0d3e0a571fdd xen/common/grant_table.c
--- a/xen/common/grant_table.c  Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/common/grant_table.c  Thu Jun 02 13:16:52 2011 +0100
@@ -110,7 +110,8 @@
 #define gfn_to_mfn_private(_d, _gfn) ({                     \
     p2m_type_t __p2mt;                                      \
     unsigned long __x;                                      \
-    __x = mfn_x(gfn_to_mfn_unshare(p2m_get_hostp2m(_d), _gfn, &__p2mt, 1));  \
+    __x = mfn_x(gfn_to_mfn_unshare(p2m_get_hostp2m(_d), _gfn, &__p2mt));  \
+    BUG_ON(p2m_is_shared(__p2mt)); /* XXX fixme */          \
     if ( !p2m_is_valid(__p2mt) )                            \
         __x = INVALID_MFN;                                  \
     __x; })
@@ -153,7 +154,12 @@
     if ( readonly )
         mfn = gfn_to_mfn(p2m, gfn, &p2mt);
     else
-        mfn = gfn_to_mfn_unshare(p2m, gfn, &p2mt, 1);
+    {
+        mfn = gfn_to_mfn_unshare(p2m, gfn, &p2mt);
+        BUG_ON(p2m_is_shared(p2mt));
+        /* XXX Here, and above in gfn_to_mfn_private, need to handle
+         * XXX failure to unshare. */
+    }
 
     if ( p2m_is_valid(p2mt) ) {
         *frame = mfn_x(mfn);
diff -r a7d8612c9ba1 -r 0d3e0a571fdd xen/common/memory.c
--- a/xen/common/memory.c       Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/common/memory.c       Thu Jun 02 13:16:52 2011 +0100
@@ -363,7 +363,7 @@
                 p2m_type_t p2mt;
 
                 /* Shared pages cannot be exchanged */
-                mfn = mfn_x(gfn_to_mfn_unshare(p2m_get_hostp2m(d), gmfn + k, &p2mt, 0));
+                mfn = mfn_x(gfn_to_mfn_unshare(p2m_get_hostp2m(d), gmfn + k, &p2mt));
                 if ( p2m_is_shared(p2mt) )
                 {
                     rc = -ENOMEM;
diff -r a7d8612c9ba1 -r 0d3e0a571fdd xen/include/asm-x86/mem_sharing.h
--- a/xen/include/asm-x86/mem_sharing.h Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/include/asm-x86/mem_sharing.h Thu Jun 02 13:16:52 2011 +0100
@@ -34,7 +34,6 @@
                               unsigned long gfn,
                               int expected_refcnt,
                               shr_handle_t *phandle);
-#define MEM_SHARING_MUST_SUCCEED      (1<<0)
 #define MEM_SHARING_DESTROY_GFN       (1<<1)
 int mem_sharing_unshare_page(struct p2m_domain *p2m, 
                              unsigned long gfn, 
diff -r a7d8612c9ba1 -r 0d3e0a571fdd xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Thu Jun 02 13:16:52 2011 +0100
+++ b/xen/include/asm-x86/p2m.h Thu Jun 02 13:16:52 2011 +0100
@@ -112,9 +112,10 @@
 } p2m_access_t;
 
 typedef enum {
-    p2m_query = 0,              /* Do not populate PoD entries        */
-    p2m_alloc = 1,              /* Automatically populate PoD entries */
-    p2m_guest = 2,              /* Guest demand-fault; implies alloc  */
+    p2m_query,              /* Do not populate PoD entries        */
+    p2m_alloc,              /* Automatically populate PoD entries */
+    p2m_unshare,            /* Break c-o-w sharing; implies alloc */
+    p2m_guest,              /* Guest demand-fault; implies alloc  */
 } p2m_query_t;
 
 /* We use bitmaps and masks to handle groups of types */
@@ -367,6 +368,14 @@
     mfn_t mfn = p2m->get_entry(p2m, gfn, t, a, q);
 
 #ifdef __x86_64__
+    if ( q == p2m_unshare && p2m_is_shared(*t) )
+    {
+        mem_sharing_unshare_page(p2m, gfn, 0);
+        mfn = p2m->get_entry(p2m, gfn, t, a, q);
+    }
+#endif
+
+#ifdef __x86_64__
     if (unlikely((p2m_is_broken(*t))))
     {
         /* Return invalid_mfn to avoid caller's access */
@@ -401,32 +410,7 @@
 #define gfn_to_mfn(p2m, g, t) _gfn_to_mfn_type((p2m), (g), (t), p2m_alloc)
#define gfn_to_mfn_query(p2m, g, t) _gfn_to_mfn_type((p2m), (g), (t), p2m_query)
#define gfn_to_mfn_guest(p2m, g, t) _gfn_to_mfn_type((p2m), (g), (t), p2m_guest)
-
-static inline mfn_t gfn_to_mfn_unshare(struct p2m_domain *p2m,
-                                       unsigned long gfn,
-                                       p2m_type_t *p2mt,
-                                       int must_succeed)
-{
-    mfn_t mfn;
-
-    mfn = gfn_to_mfn(p2m, gfn, p2mt);
-#ifdef __x86_64__
-    if ( p2m_is_shared(*p2mt) )
-    {
-        if ( mem_sharing_unshare_page(p2m, gfn,
-                                      must_succeed 
-                                      ? MEM_SHARING_MUST_SUCCEED : 0) )
-        {
-            BUG_ON(must_succeed);
-            return mfn;
-        }
-        mfn = gfn_to_mfn(p2m, gfn, p2mt);
-    }
-#endif
-
-    return mfn;
-}
-
+#define gfn_to_mfn_unshare(p, g, t) _gfn_to_mfn_type((p), (g), (t), p2m_unshare)
 
 /* Compatibility function exporting the old untyped interface */
 static inline unsigned long gmfn_to_mfn(struct domain *d, unsigned long gpfn)
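
Finally, a hedged caller-side sketch (again illustrative stand-ins only, not
the Xen sources) of what the grant-table paths above now do: unsharing is
requested through the lookup itself, and a page that comes back still shared
is treated as fatal for now, instead of being governed by the old
must_succeed flag:

#include <assert.h>
#include <stdio.h>

typedef unsigned long mfn_t;
typedef enum { p2m_ram_rw, p2m_ram_shared } p2m_type_t;

/* Stand-in for the merged lookup; assume the unshare succeeded
 * and the caller got a private, writable page back. */
static mfn_t gfn_to_mfn_unshare(unsigned long gfn, p2m_type_t *t)
{
    *t = p2m_ram_rw;
    return gfn + 0x1000;
}

static int p2m_is_shared(p2m_type_t t) { return t == p2m_ram_shared; }

int main(void)
{
    p2m_type_t t;
    mfn_t mfn = gfn_to_mfn_unshare(0x42UL, &t);

    /* Grant-table-style caller after this patch: no must_succeed
     * flag; a page left shared (i.e. a failed unshare) is still
     * fatal for now -- the "XXX fixme" in grant_table.c above. */
    assert(!p2m_is_shared(t));
    printf("got private mfn 0x%lx\n", mfn);
    return 0;
}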
