[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 3 of 4] xenpaging: add need_populate and paged_no_mfn checks


  • To: xen-devel@xxxxxxxxxxxxxxxxxxx
  • From: Olaf Hering <olaf@xxxxxxxxx>
  • Date: Thu, 01 Dec 2011 12:09:19 +0100
  • Delivery-date: Thu, 01 Dec 2011 12:22:43 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xensource.com>

# HG changeset patch
# User Olaf Hering <olaf@xxxxxxxxx>
# Date 1322737586 -3600
# Node ID c09ac3717a025a8ead44bbc795fedda715d134c7
# Parent  8147822efdee65d1f5b94656ab2032aedb76979f
xenpaging: add need_populate and paged_no_mfn checks

There is currently a mix of p2mt checks for the various paging types.
Some mean the p2mt needs to be populated, others mean a gfn without mfn.

Add a new p2m_do_populate() helper which covers the p2m_ram_paged and
p2m_ram_paging_out types. If a gfn is not in these states anymore, another
populate request for the pager is not needed. This avoids a call to
p2m_mem_paging_populate() which in turn reduces the pressure on the ring
buffer because no temporary slot needs to be claimed. As such, this helper is
an optimization.

Modify the existing p2m_is_paged() helper which now covers also
p2m_ram_paging_in_start in addition to the current p2m_ram_paged type.  A gfn
in these two states is not backed by an mfn.

Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>

diff -r 8147822efdee -r c09ac3717a02 xen/arch/x86/hvm/emulate.c
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -66,7 +66,8 @@ static int hvmemul_do_io(
     ram_mfn = get_gfn_unshare(curr->domain, ram_gfn, &p2mt);
     if ( p2m_is_paging(p2mt) )
     {
-        p2m_mem_paging_populate(curr->domain, ram_gfn);
+        if ( p2m_do_populate(p2mt) )
+            p2m_mem_paging_populate(curr->domain, ram_gfn);
         put_gfn(curr->domain, ram_gfn); 
         return X86EMUL_RETRY;
     }
diff -r 8147822efdee -r c09ac3717a02 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -363,7 +363,8 @@ static int hvm_set_ioreq_page(
     }
     if ( p2m_is_paging(p2mt) )
     {
-        p2m_mem_paging_populate(d, gmfn);
+        if ( p2m_do_populate(p2mt) )
+            p2m_mem_paging_populate(d, gmfn);
         put_gfn(d, gmfn);
         return -ENOENT;
     }
@@ -1300,7 +1301,7 @@ int hvm_hap_nested_page_fault(unsigned l
 
 #ifdef __x86_64__
     /* Check if the page has been paged out */
-    if ( p2m_is_paged(p2mt) || (p2mt == p2m_ram_paging_out) )
+    if ( p2m_do_populate(p2mt) )
         p2m_mem_paging_populate(v->domain, gfn);
 
     /* Mem sharing: unshare the page and try again */
@@ -1820,7 +1821,8 @@ static void *__hvm_map_guest_frame(unsig
     }
     if ( p2m_is_paging(p2mt) )
     {
-        p2m_mem_paging_populate(d, gfn);
+        if ( p2m_do_populate(p2mt) )
+            p2m_mem_paging_populate(d, gfn);
         put_gfn(d, gfn);
         return NULL;
     }
@@ -2280,7 +2282,8 @@ static enum hvm_copy_result __hvm_copy(
 
         if ( p2m_is_paging(p2mt) )
         {
-            p2m_mem_paging_populate(curr->domain, gfn);
+            if ( p2m_do_populate(p2mt) )
+                p2m_mem_paging_populate(curr->domain, gfn);
             put_gfn(curr->domain, gfn);
             return HVMCOPY_gfn_paged_out;
         }
@@ -3760,7 +3763,8 @@ long do_hvm_op(unsigned long op, XEN_GUE
             mfn_t mfn = get_gfn_unshare(d, pfn, &t);
             if ( p2m_is_paging(t) )
             {
-                p2m_mem_paging_populate(d, pfn);
+                if ( p2m_do_populate(t) )
+                    p2m_mem_paging_populate(d, pfn);
                 put_gfn(d, pfn);
                 rc = -EINVAL;
                 goto param_fail3;
@@ -3864,7 +3868,8 @@ long do_hvm_op(unsigned long op, XEN_GUE
             mfn = get_gfn_unshare(d, pfn, &t);
             if ( p2m_is_paging(t) )
             {
-                p2m_mem_paging_populate(d, pfn);
+                if ( p2m_do_populate(t) )
+                    p2m_mem_paging_populate(d, pfn);
                 put_gfn(d, pfn);
                 rc = -EINVAL;
                 goto param_fail4;
diff -r 8147822efdee -r c09ac3717a02 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -3525,9 +3525,10 @@ int do_mmu_update(
             if ( !p2m_is_valid(p2mt) )
                 mfn = INVALID_MFN;
 
-            if ( p2m_is_paged(p2mt) )
+            if ( p2m_is_paged(p2mt) && !mfn_valid(mfn) )
             {
-                p2m_mem_paging_populate(pg_owner, gmfn);
+                if ( p2m_do_populate(p2mt) )
+                    p2m_mem_paging_populate(pg_owner, gmfn);
                 put_gfn(pt_owner, gmfn);
                 rc = -ENOENT;
                 break;
@@ -3565,15 +3566,10 @@ int do_mmu_update(
     
                     l1emfn = mfn_x(get_gfn(pg_owner, l1egfn, &l1e_p2mt));
 
-                    if ( p2m_is_paged(l1e_p2mt) )
+                    if ( p2m_is_paged(l1e_p2mt) && !mfn_valid(l1emfn) )
                     {
-                        p2m_mem_paging_populate(pg_owner, l1e_get_pfn(l1e));
-                        put_gfn(pg_owner, l1egfn);
-                        rc = -ENOENT;
-                        break;
-                    }
-                    else if ( p2m_ram_paging_in_start == l1e_p2mt && !mfn_valid(mfn) )
-                    {
+                        if ( p2m_do_populate(l1e_p2mt) )
+                            p2m_mem_paging_populate(pg_owner, l1e_get_pfn(l1e));
                         put_gfn(pg_owner, l1egfn);
                         rc = -ENOENT;
                         break;
@@ -3613,15 +3609,10 @@ int do_mmu_update(
 
                     l2emfn = mfn_x(get_gfn(pg_owner, l2egfn, &l2e_p2mt));
 
-                    if ( p2m_is_paged(l2e_p2mt) )
+                    if ( p2m_is_paged(l2e_p2mt) && !mfn_valid(l2emfn) )
                     {
-                        p2m_mem_paging_populate(pg_owner, l2egfn);
-                        put_gfn(pg_owner, l2egfn);
-                        rc = -ENOENT;
-                        break;
-                    }
-                    else if ( p2m_ram_paging_in_start == l2e_p2mt && !mfn_valid(mfn) )
-                    {
+                        if ( p2m_do_populate(l2e_p2mt) )
+                            p2m_mem_paging_populate(pg_owner, l2egfn);
                         put_gfn(pg_owner, l2egfn);
                         rc = -ENOENT;
                         break;
@@ -3647,15 +3638,10 @@ int do_mmu_update(
 
                     l3emfn = mfn_x(get_gfn(pg_owner, l3egfn, &l3e_p2mt));
 
-                    if ( p2m_is_paged(l3e_p2mt) )
+                    if ( p2m_is_paged(l3e_p2mt) && !mfn_valid(l3emfn) )
                     {
-                        p2m_mem_paging_populate(pg_owner, l3egfn);
-                        put_gfn(pg_owner, l3egfn);
-                        rc = -ENOENT;
-                        break;
-                    }
-                    else if ( p2m_ram_paging_in_start == l3e_p2mt && !mfn_valid(mfn) )
-                    {
+                        if ( p2m_do_populate(l3e_p2mt) )
+                            p2m_mem_paging_populate(pg_owner, l3egfn);
                         put_gfn(pg_owner, l3egfn);
                         rc = -ENOENT;
                         break;
@@ -3681,15 +3667,10 @@ int do_mmu_update(
 
                     l4emfn = mfn_x(get_gfn(pg_owner, l4egfn, &l4e_p2mt));
 
-                    if ( p2m_is_paged(l4e_p2mt) )
+                    if ( p2m_is_paged(l4e_p2mt) && !mfn_valid(l4emfn) )
                     {
-                        p2m_mem_paging_populate(pg_owner, l4egfn);
-                        put_gfn(pg_owner, l4egfn);
-                        rc = -ENOENT;
-                        break;
-                    }
-                    else if ( p2m_ram_paging_in_start == l4e_p2mt && !mfn_valid(mfn) )
-                    {
+                        if ( p2m_do_populate(l4e_p2mt) )
+                            p2m_mem_paging_populate(pg_owner, l4egfn);
                         put_gfn(pg_owner, l4egfn);
                         rc = -ENOENT;
                         break;
diff -r 8147822efdee -r c09ac3717a02 xen/arch/x86/mm/guest_walk.c
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -102,7 +102,8 @@ static inline void *map_domain_gfn(struc
     if ( p2m_is_paging(*p2mt) )
     {
         ASSERT(!p2m_is_nestedp2m(p2m));
-        p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));
+        if ( p2m_do_populate(*p2mt) )
+            p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));
         __put_gfn(p2m, gfn_x(gfn));
         *rc = _PAGE_PAGED;
         return NULL;
diff -r 8147822efdee -r c09ac3717a02 xen/arch/x86/mm/hap/guest_walk.c
--- a/xen/arch/x86/mm/hap/guest_walk.c
+++ b/xen/arch/x86/mm/hap/guest_walk.c
@@ -64,7 +64,8 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PA
     if ( p2m_is_paging(p2mt) )
     {
         ASSERT(!p2m_is_nestedp2m(p2m));
-        p2m_mem_paging_populate(p2m->domain, cr3 >> PAGE_SHIFT);
+        if ( p2m_do_populate(p2mt) )
+            p2m_mem_paging_populate(p2m->domain, cr3 >> PAGE_SHIFT);
 
         pfec[0] = PFEC_page_paged;
         __put_gfn(p2m, top_gfn);
@@ -101,7 +102,8 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PA
         if ( p2m_is_paging(p2mt) )
         {
             ASSERT(!p2m_is_nestedp2m(p2m));
-            p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));
+            if ( p2m_do_populate(p2mt) )
+                p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));
 
             pfec[0] = PFEC_page_paged;
             __put_gfn(p2m, gfn_x(gfn));
diff -r 8147822efdee -r c09ac3717a02 xen/arch/x86/mm/p2m-ept.c
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -375,8 +375,7 @@ ept_set_entry(struct p2m_domain *p2m, un
          * Read-then-write is OK because we hold the p2m lock. */
         old_entry = *ept_entry;
 
-        if ( mfn_valid(mfn_x(mfn)) || direct_mmio || p2m_is_paged(p2mt) ||
-             (p2mt == p2m_ram_paging_in_start) )
+        if ( mfn_valid(mfn_x(mfn)) || direct_mmio || p2m_is_paged(p2mt) )
         {
             /* Construct the new entry, and then write it once */
             new_entry.emt = epte_get_entry_emt(p2m->domain, gfn, mfn, &ipat,
diff -r 8147822efdee -r c09ac3717a02 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -313,7 +313,7 @@ static void p2m_mem_paging_wait(mfn_t *m
         return;
 
     /* Populate the page once */
-    if ( *t == p2m_ram_paging_out || *t == p2m_ram_paged )
+    if ( p2m_do_populate(*t) )
         p2m_mem_paging_populate(p2m->domain, gfn);
 
     wait_event(pmpq->wq, p2m_mem_paging_get_entry(mfn, p2m, gfn, t, a, q, page_order));
@@ -1111,7 +1111,7 @@ void p2m_mem_paging_populate(struct doma
     p2m_lock(p2m);
     mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
     /* Forward the state only if gfn is in page-out path */
-    if ( p2mt == p2m_ram_paging_out || p2mt == p2m_ram_paged ) {
+    if ( p2m_do_populate(p2mt) ) {
         /* Ignore foreign requests to allow mmap in pager */
         if ( mfn_valid(mfn) && p2mt == p2m_ram_paging_out && v->domain == d ) {
             /* Restore gfn because it is needed by guest before evict */
diff -r 8147822efdee -r c09ac3717a02 xen/common/grant_table.c
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -163,7 +163,8 @@ static int __get_paged_frame(unsigned lo
         *frame = mfn_x(mfn);
         if ( p2m_is_paging(p2mt) )
         {
-            p2m_mem_paging_populate(rd, gfn);
+            if ( p2m_do_populate(p2mt) )
+                p2m_mem_paging_populate(rd, gfn);
             put_gfn(rd, gfn);
             rc = GNTST_eagain;
         }
diff -r 8147822efdee -r c09ac3717a02 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -158,7 +158,11 @@ typedef enum {
                           | p2m_to_mask(p2m_ram_paging_in_start) \
                           | p2m_to_mask(p2m_ram_paging_in))
 
-#define P2M_PAGED_TYPES (p2m_to_mask(p2m_ram_paged))
+#define P2M_POPULATE_TYPES (p2m_to_mask(p2m_ram_paged) \
+                            | p2m_to_mask(p2m_ram_paging_out) )
+
+#define P2M_PAGED_NO_MFN_TYPES (p2m_to_mask(p2m_ram_paged) \
+                               | p2m_to_mask(p2m_ram_paging_in_start) )
 
 /* Shared types */
 /* XXX: Sharable types could include p2m_ram_ro too, but we would need to
@@ -183,7 +187,8 @@ typedef enum {
 #define p2m_has_emt(_t)  (p2m_to_mask(_t) & (P2M_RAM_TYPES | p2m_to_mask(p2m_mmio_direct)))
 #define p2m_is_pageable(_t) (p2m_to_mask(_t) & P2M_PAGEABLE_TYPES)
 #define p2m_is_paging(_t)   (p2m_to_mask(_t) & P2M_PAGING_TYPES)
-#define p2m_is_paged(_t)    (p2m_to_mask(_t) & P2M_PAGED_TYPES)
+#define p2m_do_populate(_t) (p2m_to_mask(_t) & P2M_POPULATE_TYPES)
+#define p2m_is_paged(_t)    (p2m_to_mask(_t) & P2M_PAGED_NO_MFN_TYPES)
 #define p2m_is_sharable(_t) (p2m_to_mask(_t) & P2M_SHARABLE_TYPES)
 #define p2m_is_shared(_t)   (p2m_to_mask(_t) & P2M_SHARED_TYPES)
 #define p2m_is_broken(_t)   (p2m_to_mask(_t) & P2M_BROKEN_TYPES)

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.