
[Xen-devel] [PATCH v3 03/18] x86/mem_sharing: make get_two_gfns take locks conditionally



During VM forking, the client lock will already be taken.

Signed-off-by: Tamas K Lengyel <tamas.lengyel@xxxxxxxxx>
Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 xen/arch/x86/mm/mem_sharing.c | 11 ++++++-----
 xen/include/asm-x86/p2m.h     | 10 +++++-----
 2 files changed, 11 insertions(+), 10 deletions(-)
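
Usage note (illustration only, not part of this patch): a hypothetical caller
on the VM-fork path, which per the commit message already holds the client
lock, would pass lock = false to get_two_gfns() and then skip put_two_gfns(),
mirroring the err_out change in mem_sharing_add_to_physmap() below. The
function name and the elided logic here are made up for the sketch.

    static int fork_map_shared_gfn(struct domain *sd, gfn_t sgfn,
                                   struct domain *cd, gfn_t cgfn)
    {
        p2m_type_t smfn_type, cmfn_type;
        mfn_t smfn, cmfn;
        struct two_gfns tg;
        /* Assumed: the fork path already holds the relevant p2m lock(s). */
        bool lock = false;

        get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn,
                     cd, cgfn, &cmfn_type, NULL, &cmfn, 0, &tg, lock);

        /* ... sharing/mapping logic elided ... */

        /*
         * put_two_gfns() unlocks the two gfns, so only call it when
         * get_two_gfns() actually took the locks.
         */
        if ( lock )
            put_two_gfns(&tg);

        return 0;
    }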

diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index ddf1f0f9f9..f6187403a0 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -955,7 +955,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
     unsigned long put_count = 0;
 
     get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn,
-                 cd, cgfn, &cmfn_type, NULL, &cmfn, 0, &tg);
+                 cd, cgfn, &cmfn_type, NULL, &cmfn, 0, &tg, true);
 
     /*
      * This tricky business is to avoid two callers deadlocking if
@@ -1073,7 +1073,7 @@ err_out:
 }
 
 int mem_sharing_add_to_physmap(struct domain *sd, unsigned long sgfn, shr_handle_t sh,
-                               struct domain *cd, unsigned long cgfn)
+                               struct domain *cd, unsigned long cgfn, bool lock)
 {
     struct page_info *spage;
     int ret = -EINVAL;
@@ -1085,7 +1085,7 @@ int mem_sharing_add_to_physmap(struct domain *sd, unsigned long sgfn, shr_handle
     struct two_gfns tg;
 
     get_two_gfns(sd, _gfn(sgfn), &smfn_type, NULL, &smfn,
-                 cd, _gfn(cgfn), &cmfn_type, &a, &cmfn, 0, &tg);
+                 cd, _gfn(cgfn), &cmfn_type, &a, &cmfn, 0, &tg, lock);
 
     /* Get the source shared page, check and lock */
     ret = XENMEM_SHARING_OP_S_HANDLE_INVALID;
@@ -1162,7 +1162,8 @@ int mem_sharing_add_to_physmap(struct domain *sd, unsigned long sgfn, shr_handle
 err_unlock:
     mem_sharing_page_unlock(spage);
 err_out:
-    put_two_gfns(&tg);
+    if ( lock )
+        put_two_gfns(&tg);
     return ret;
 }
 
@@ -1583,7 +1584,7 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg)
         sh      = mso.u.share.source_handle;
         cgfn    = mso.u.share.client_gfn;
 
-        rc = mem_sharing_add_to_physmap(d, sgfn, sh, cd, cgfn);
+        rc = mem_sharing_add_to_physmap(d, sgfn, sh, cd, cgfn, true);
 
         rcu_unlock_domain(cd);
     }
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 94285db1b4..7399c4a897 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -539,7 +539,7 @@ struct two_gfns {
 static inline void get_two_gfns(struct domain *rd, gfn_t rgfn,
         p2m_type_t *rt, p2m_access_t *ra, mfn_t *rmfn, struct domain *ld,
         gfn_t lgfn, p2m_type_t *lt, p2m_access_t *la, mfn_t *lmfn,
-        p2m_query_t q, struct two_gfns *rval)
+        p2m_query_t q, struct two_gfns *rval, bool lock)
 {
     mfn_t           *first_mfn, *second_mfn, scratch_mfn;
     p2m_access_t    *first_a, *second_a, scratch_a;
@@ -569,10 +569,10 @@ do {                                                    \
 #undef assign_pointers
 
     /* Now do the gets */
-    *first_mfn  = get_gfn_type_access(p2m_get_hostp2m(rval->first_domain),
-                                      gfn_x(rval->first_gfn), first_t, first_a, q, NULL);
-    *second_mfn = get_gfn_type_access(p2m_get_hostp2m(rval->second_domain),
-                                      gfn_x(rval->second_gfn), second_t, second_a, q, NULL);
+    *first_mfn  = __get_gfn_type_access(p2m_get_hostp2m(rval->first_domain),
+                                        gfn_x(rval->first_gfn), first_t, first_a, q, NULL, lock);
+    *second_mfn = __get_gfn_type_access(p2m_get_hostp2m(rval->second_domain),
+                                        gfn_x(rval->second_gfn), second_t, second_a, q, NULL, lock);
 }
 
 static inline void put_two_gfns(struct two_gfns *arg)
-- 
2.20.1

