To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH 12/17] xenpaging: handle HVMCOPY_gfn_paged_out in copy_from/to_user
From: Olaf Hering <olaf@xxxxxxxxx>
Date: Mon, 06 Dec 2010 21:59:19 +0100
References: <20101206205907.848643876@xxxxxxxxx>
copy_from_user_hvm can fail when __hvm_copy returns
HVMCOPY_gfn_paged_out for a referenced gfn, for example during a
guest pagetable walk.  This case has to be handled somehow.
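
For reference, the pre-patch failure path in __hvm_copy() looks roughly
like this (a simplified sketch, not verbatim source):

    mfn = gfn_to_mfn(p2m, gfn, &p2mt);
    if ( p2m_is_paging(p2mt) )
    {
        /* only queues an asynchronous request to the pager ... */
        p2m_mem_paging_populate(p2m, gfn);
        /* ... so the copy fails and every caller must cope with it */
        return HVMCOPY_gfn_paged_out;
    }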


Use the recently added wait_queue feature to preempt the current vcpu
when it has to populate a page, and resume execution later once the
page has been paged back in. This is only done if the active domain
itself needs to access the page, because in that case the vcpu would
leave the active state anyway.
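
The wait_queue usage pairs a condition-guarded sleep on the faulting
side with a wake-up from the pager's resume path; schematically (both
calls are taken from the hunks below, shown here side by side):

    /* vcpu side, in p2m_mem_paging_populate(): sleep until the gfn is
     * backed by a valid mfn and has left the paging states */
    wait_event(d->wq, mfn_valid(gfn_to_mfn(p2m, gfn, &p2mt)) &&
               !p2m_is_paging(p2mt));

    /* pager side, in p2m_mem_paging_resume(): wake the sleeping vcpu */
    wake_up(&d->wq);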


This patch adds a return code to p2m_mem_paging_populate() to indicate
to the caller that the page is now present, so it can retry the
gfn_to_mfn call.
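
The resulting caller pattern, distilled from the guest_walk.c hunk
below (a sketch, not verbatim source): retry the translation when
populate reports the page is present again, otherwise fall back to the
existing paged-out handling:

retry:
    *mfn = gfn_to_mfn_unshare(p2m, gfn_x(gfn), p2mt, 0);
    if ( p2m_is_paging(*p2mt) )
    {
        if ( p2m_mem_paging_populate(p2m, gfn_x(gfn)) )
            goto retry;            /* returned 1: page is present again */
        *rc = _PAGE_PAGED;         /* returned 0: report as paged out */
        return NULL;
    }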

Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>

---
 xen/arch/x86/hvm/hvm.c           |    3 ++-
 xen/arch/x86/mm/guest_walk.c     |    5 +++--
 xen/arch/x86/mm/hap/guest_walk.c |   10 ++++++----
 xen/arch/x86/mm/p2m.c            |   19 ++++++++++++++-----
 xen/common/domain.c              |    1 +
 xen/include/asm-x86/p2m.h        |    7 ++++---
 xen/include/xen/sched.h          |    3 +++
 7 files changed, 33 insertions(+), 15 deletions(-)

--- xen-unstable.hg-4.1.22459.orig/xen/arch/x86/hvm/hvm.c
+++ xen-unstable.hg-4.1.22459/xen/arch/x86/hvm/hvm.c
@@ -1939,7 +1939,8 @@ static enum hvm_copy_result __hvm_copy(
 
         if ( p2m_is_paging(p2mt) )
         {
-            p2m_mem_paging_populate(p2m, gfn);
+            if ( p2m_mem_paging_populate(p2m, gfn) )
+                continue;
             return HVMCOPY_gfn_paged_out;
         }
         if ( p2m_is_shared(p2mt) )
--- xen-unstable.hg-4.1.22459.orig/xen/arch/x86/mm/guest_walk.c
+++ xen-unstable.hg-4.1.22459/xen/arch/x86/mm/guest_walk.c
@@ -93,11 +93,12 @@ static inline void *map_domain_gfn(struc
                                    uint32_t *rc) 
 {
     /* Translate the gfn, unsharing if shared */
+retry:
     *mfn = gfn_to_mfn_unshare(p2m, gfn_x(gfn), p2mt, 0);
     if ( p2m_is_paging(*p2mt) )
     {
-        p2m_mem_paging_populate(p2m, gfn_x(gfn));
-
+        if ( p2m_mem_paging_populate(p2m, gfn_x(gfn)) )
+            goto retry;
         *rc = _PAGE_PAGED;
         return NULL;
     }
--- xen-unstable.hg-4.1.22459.orig/xen/arch/x86/mm/hap/guest_walk.c
+++ xen-unstable.hg-4.1.22459/xen/arch/x86/mm/hap/guest_walk.c
@@ -46,12 +46,13 @@ unsigned long hap_gva_to_gfn(GUEST_PAGIN
     struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
 
     /* Get the top-level table's MFN */
+retry_cr3:
     cr3 = v->arch.hvm_vcpu.guest_cr[3];
     top_mfn = gfn_to_mfn_unshare(p2m, cr3 >> PAGE_SHIFT, &p2mt, 0);
     if ( p2m_is_paging(p2mt) )
     {
-        p2m_mem_paging_populate(p2m, cr3 >> PAGE_SHIFT);
-
+        if ( p2m_mem_paging_populate(p2m, cr3 >> PAGE_SHIFT) )
+            goto retry_cr3;
         pfec[0] = PFEC_page_paged;
         return INVALID_GFN;
     }
@@ -79,11 +80,12 @@ unsigned long hap_gva_to_gfn(GUEST_PAGIN
     if ( missing == 0 )
     {
         gfn_t gfn = guest_l1e_get_gfn(gw.l1e);
+retry_missing:
         gfn_to_mfn_unshare(p2m, gfn_x(gfn), &p2mt, 0);
         if ( p2m_is_paging(p2mt) )
         {
-            p2m_mem_paging_populate(p2m, gfn_x(gfn));
-
+            if ( p2m_mem_paging_populate(p2m, gfn_x(gfn)) )
+                goto retry_missing;
             pfec[0] = PFEC_page_paged;
             return INVALID_GFN;
         }
--- xen-unstable.hg-4.1.22459.orig/xen/arch/x86/mm/p2m.c
+++ xen-unstable.hg-4.1.22459/xen/arch/x86/mm/p2m.c
@@ -2777,16 +2777,17 @@ void p2m_mem_paging_drop_page(struct p2m
     }
 }
 
-void p2m_mem_paging_populate(struct p2m_domain *p2m, unsigned long gfn)
+int p2m_mem_paging_populate(struct p2m_domain *p2m, unsigned long gfn)
 {
     struct vcpu *v = current;
     mem_event_request_t req;
     p2m_type_t p2mt;
     struct domain *d = p2m->domain;
+    int ret = 0;
 
     /* Check that there's space on the ring for this request */
     if ( mem_event_check_ring(d, 1) )
-        return;
+        return ret;
 
     memset(&req, 0, sizeof(req));
 
@@ -2805,13 +2806,13 @@ void p2m_mem_paging_populate(struct p2m_
     /* Pause domain */
     if ( v->domain->domain_id == d->domain_id )
     {
-        vcpu_pause_nosync(v);
         req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
+        ret = 1;
     }
     else if ( p2mt != p2m_ram_paging_out && p2mt != p2m_ram_paged )
     {
         /* gfn is already on its way back and vcpu is not paused */
-        return;
+        goto populate_out;
     }
 
     /* Send request to pager */
@@ -2820,6 +2821,14 @@ void p2m_mem_paging_populate(struct p2m_
     req.vcpu_id = v->vcpu_id;
 
     mem_event_put_request(d, &req);
+
+    if ( req.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
+    {
+        wait_event(d->wq, mfn_valid(gfn_to_mfn(p2m, gfn, &p2mt)) && !p2m_is_paging(p2mt));
+    }
+
+populate_out:
+    return ret;
 }
 
 int p2m_mem_paging_prep(struct p2m_domain *p2m, unsigned long gfn)
@@ -2863,7 +2872,7 @@ void p2m_mem_paging_resume(struct p2m_do
 
     /* Unpause domain */
     if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
-        vcpu_unpause(d->vcpu[rsp.vcpu_id]);
+        wake_up(&d->wq);
 
     /* Unpause any domains that were paused because the ring was full */
     mem_event_unpause_vcpus(d);
--- xen-unstable.hg-4.1.22459.orig/xen/common/domain.c
+++ xen-unstable.hg-4.1.22459/xen/common/domain.c
@@ -244,6 +244,7 @@ struct domain *domain_create(
     spin_lock_init(&d->node_affinity_lock);
 
     spin_lock_init(&d->shutdown_lock);
+    init_waitqueue_head(&d->wq);
     d->shutdown_code = -1;
 
     if ( domcr_flags & DOMCRF_hvm )
--- xen-unstable.hg-4.1.22459.orig/xen/include/asm-x86/p2m.h
+++ xen-unstable.hg-4.1.22459/xen/include/asm-x86/p2m.h
@@ -474,7 +474,8 @@ int p2m_mem_paging_evict(struct p2m_doma
 /* Tell xenpaging to drop a paged out frame */
 void p2m_mem_paging_drop_page(struct p2m_domain *p2m, unsigned long gfn);
 /* Start populating a paged out frame */
-void p2m_mem_paging_populate(struct p2m_domain *p2m, unsigned long gfn);
+/* retval 1 means the page is present on return */
+int p2m_mem_paging_populate(struct p2m_domain *p2m, unsigned long gfn);
 /* Prepare the p2m for paging a frame in */
 int p2m_mem_paging_prep(struct p2m_domain *p2m, unsigned long gfn);
 /* Resume normal operation (in case a domain was paused) */
@@ -482,8 +483,8 @@ void p2m_mem_paging_resume(struct p2m_do
 #else
 static inline void p2m_mem_paging_drop_page(struct p2m_domain *p2m, unsigned long gfn)
 { }
-static inline void p2m_mem_paging_populate(struct p2m_domain *p2m, unsigned long gfn)
-{ }
+static inline int p2m_mem_paging_populate(struct p2m_domain *p2m, unsigned long gfn)
+{ return 0; }
 #endif
 
 struct page_info *p2m_alloc_ptp(struct p2m_domain *p2m, unsigned long type);
--- xen-unstable.hg-4.1.22459.orig/xen/include/xen/sched.h
+++ xen-unstable.hg-4.1.22459/xen/include/xen/sched.h
@@ -26,6 +26,7 @@
 #include <xen/cpumask.h>
 #include <xen/nodemask.h>
 #include <xen/multicall.h>
+#include <xen/wait.h>
 
 #ifdef CONFIG_COMPAT
 #include <compat/vcpu.h>
@@ -332,6 +333,8 @@ struct domain
     nodemask_t node_affinity;
     unsigned int last_alloc_node;
     spinlock_t node_affinity_lock;
+
+    struct waitqueue_head wq;
 };
 
 struct domain_setup_info


