[Xen-devel] [PATCH 09/18] xenpaging: populate only paged-out pages

To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH 09/18] xenpaging: populate only paged-out pages
From: Olaf Hering <olaf@xxxxxxxxx>
Date: Fri, 15 Oct 2010 16:12:11 +0200
References: <20101015141202.309585877@xxxxxxxxx>
User-agent: quilt/0.48-4.4
Populate a paged-out page only once, to reduce pressure in the ring buffer.
Several CPUs may still request the same page at once; xenpaging can handle this.
(A sketch of the check pattern used throughout follows the diffstat below.)

Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>

---
 xen/arch/x86/hvm/emulate.c       |    3 ++-
 xen/arch/x86/hvm/hvm.c           |   17 ++++++++++-------
 xen/arch/x86/mm/guest_walk.c     |    3 ++-
 xen/arch/x86/mm/hap/guest_walk.c |    6 ++++--
 4 files changed, 18 insertions(+), 11 deletions(-)
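
For context, a self-contained sketch of the two predicates the new check
distinguishes. The type and predicate names follow xen/include/asm-x86/p2m.h
of this tree; the concrete enum values, ordering and mask layout here are
assumptions made for illustration, not copied from the source.

/* Illustrative sketch only -- enum values and mask layout assumed. */
typedef enum {
    p2m_ram_paging_out,      /* eviction in progress                  */
    p2m_ram_paged,           /* fully evicted, no request queued yet  */
    p2m_ram_paging_in_start, /* populate request queued for the pager */
    p2m_ram_paging_in,       /* pager is bringing the page back       */
} p2m_type_t;

#define p2m_to_mask(_t)   (1UL << (_t))
#define P2M_PAGING_TYPES  (p2m_to_mask(p2m_ram_paging_out)      \
                         | p2m_to_mask(p2m_ram_paged)           \
                         | p2m_to_mask(p2m_ram_paging_in_start) \
                         | p2m_to_mask(p2m_ram_paging_in))
#define P2M_PAGED_TYPES   (p2m_to_mask(p2m_ram_paged))

/* True in every paging state: the mfn is not usable right now.     */
#define p2m_is_paging(_t) (!!(p2m_to_mask(_t) & P2M_PAGING_TYPES))
/* True only for p2m_ram_paged: the one state in which no populate  */
/* request is outstanding yet.                                      */
#define p2m_is_paged(_t)  (!!(p2m_to_mask(_t) & P2M_PAGED_TYPES))

With this split, p2m_is_paging() remains the guard for "this mfn is unusable,
bail out and retry", while p2m_is_paged() gates the actual ring request, so
only the first fault on a gfn queues work for the pager.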

--- xen-4.0.1-testing.orig/xen/arch/x86/hvm/emulate.c
+++ xen-4.0.1-testing/xen/arch/x86/hvm/emulate.c
@@ -65,7 +65,8 @@ static int hvmemul_do_io(
     ram_mfn = gfn_to_mfn_unshare(current->domain, ram_gfn, &p2mt, 0);
     if ( p2m_is_paging(p2mt) )
     {
-        p2m_mem_paging_populate(curr->domain, ram_gfn);
+        if ( p2m_is_paged(p2mt) )
+            p2m_mem_paging_populate(curr->domain, ram_gfn);
         return X86EMUL_RETRY;
     }
     if ( p2m_is_shared(p2mt) )
--- xen-4.0.1-testing.orig/xen/arch/x86/hvm/hvm.c
+++ xen-4.0.1-testing/xen/arch/x86/hvm/hvm.c
@@ -291,7 +291,8 @@ static int hvm_set_ioreq_page(
         return -EINVAL;
     if ( p2m_is_paging(p2mt) )
     {
-        p2m_mem_paging_populate(d, gmfn);
+        if ( p2m_is_paged(p2mt) )
+            p2m_mem_paging_populate(d, gmfn);
         return -ENOENT;
     }
     if ( p2m_is_shared(p2mt) )
@@ -1324,7 +1325,8 @@ static void *hvm_map_entry(unsigned long
     mfn = mfn_x(gfn_to_mfn_unshare(current->domain, gfn, &p2mt, 0));
     if ( p2m_is_paging(p2mt) )
     {
-        p2m_mem_paging_populate(current->domain, gfn);
+        if ( p2m_is_paged(p2mt) )
+            p2m_mem_paging_populate(current->domain, gfn);
         return NULL;
     }
     if ( p2m_is_shared(p2mt) )
@@ -1723,7 +1725,8 @@ static enum hvm_copy_result __hvm_copy(
 
         if ( p2m_is_paging(p2mt) )
         {
-            p2m_mem_paging_populate(curr->domain, gfn);
+            if ( p2m_is_paged(p2mt) )
+                p2m_mem_paging_populate(curr->domain, gfn);
             return HVMCOPY_gfn_paged_out;
         }
         if ( p2m_is_shared(p2mt) )
@@ -3032,8 +3035,8 @@ long do_hvm_op(unsigned long op, XEN_GUE
             mfn_t mfn = gfn_to_mfn(d, pfn, &t);
             if ( p2m_is_paging(t) )
             {
-                p2m_mem_paging_populate(d, pfn);
-
+                if ( p2m_is_paged(t) )
+                    p2m_mem_paging_populate(d, pfn);
                 rc = -EINVAL;
                 goto param_fail3;
             }
@@ -3096,8 +3099,8 @@ long do_hvm_op(unsigned long op, XEN_GUE
             mfn = gfn_to_mfn_unshare(d, pfn, &t, 0);
             if ( p2m_is_paging(t) )
             {
-                p2m_mem_paging_populate(d, pfn);
-
+                if ( p2m_is_paged(t) )
+                    p2m_mem_paging_populate(d, pfn);
                 rc = -EINVAL;
                 goto param_fail4;
             }
--- xen-4.0.1-testing.orig/xen/arch/x86/mm/guest_walk.c
+++ xen-4.0.1-testing/xen/arch/x86/mm/guest_walk.c
@@ -96,7 +96,8 @@ static inline void *map_domain_gfn(struc
     *mfn = gfn_to_mfn_unshare(d, gfn_x(gfn), p2mt, 0);
     if ( p2m_is_paging(*p2mt) )
     {
-        p2m_mem_paging_populate(d, gfn_x(gfn));
+        if ( p2m_is_paged(*p2mt) )
+            p2m_mem_paging_populate(d, gfn_x(gfn));
 
         *rc = _PAGE_PAGED;
         return NULL;
--- xen-4.0.1-testing.orig/xen/arch/x86/mm/hap/guest_walk.c
+++ xen-4.0.1-testing/xen/arch/x86/mm/hap/guest_walk.c
@@ -49,7 +49,8 @@ unsigned long hap_gva_to_gfn(GUEST_PAGIN
     top_mfn = gfn_to_mfn_unshare(v->domain, cr3 >> PAGE_SHIFT, &p2mt, 0);
     if ( p2m_is_paging(p2mt) )
     {
-        p2m_mem_paging_populate(v->domain, cr3 >> PAGE_SHIFT);
+        if ( p2m_is_paged(p2mt) )
+            p2m_mem_paging_populate(v->domain, cr3 >> PAGE_SHIFT);
 
         pfec[0] = PFEC_page_paged;
         return INVALID_GFN;
@@ -81,7 +82,8 @@ unsigned long hap_gva_to_gfn(GUEST_PAGIN
         gfn_to_mfn_unshare(v->domain, gfn_x(gfn), &p2mt, 0);
         if ( p2m_is_paging(p2mt) )
         {
-            p2m_mem_paging_populate(v->domain, gfn_x(gfn));
+            if ( p2m_is_paged(p2mt) )
+                p2m_mem_paging_populate(v->domain, gfn_x(gfn));
 
             pfec[0] = PFEC_page_paged;
             return INVALID_GFN;
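
Every hunk above applies the same shape; schematically (d and gfn stand for
the caller-specific domain and frame, and each call site keeps its own
retry/error exit):

/* Common pattern introduced by this patch (schematic). */
if ( p2m_is_paging(p2mt) )
{
    /* Queue a request in the ring only on the first fault; later
     * faults find the gfn in a paging-in state and merely retry. */
    if ( p2m_is_paged(p2mt) )
        p2m_mem_paging_populate(d, gfn);
    /* ... caller-specific retry/error return ... */
}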

