[Xen-devel] [PATCH v2 3/3] x86: use paging_mark_pfn_dirty()

... in preference to paging_mark_dirty(), when the PFN is known
anyway. This saves the M2P lookup which paging_mark_dirty() would
otherwise need in order to recover the PFN.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
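
For reference, the split between the two interfaces (established
earlier in this series) looks roughly as follows; this is a
simplified sketch along the lines of xen/arch/x86/mm/paging.c, not
the verbatim implementation:

  /*
   * The MFN-based wrapper has to recover the PFN via an M2P lookup
   * (get_gpfn_from_mfn()) before it can set the log-dirty bit.
   * Callers converted by this patch already hold the PFN (or GFN)
   * and hence can skip that lookup.
   */
  void paging_mark_dirty(struct domain *d, mfn_t gmfn)
  {
      pfn_t pfn;

      if ( !paging_mode_log_dirty(d) || !mfn_valid(gmfn) ||
           page_get_owner(mfn_to_page(gmfn)) != d )
          return;

      /* We really do mean PFN here, even for non-translated guests. */
      pfn = _pfn(get_gpfn_from_mfn(mfn_x(gmfn)));

      paging_mark_pfn_dirty(d, pfn);
  }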

--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -219,14 +219,12 @@ static int modified_memory(struct domain
             page = get_page_from_gfn(d, pfn, NULL, P2M_UNSHARE);
             if ( page )
             {
-                mfn_t gmfn = _mfn(page_to_mfn(page));
-
-                paging_mark_dirty(d, gmfn);
+                paging_mark_pfn_dirty(d, _pfn(pfn));
                 /*
                  * These are most probably not page tables any more
                  * don't take a long time and don't die either.
                  */
-                sh_remove_shadows(d, gmfn, 1, 0);
+                sh_remove_shadows(d, _mfn(page_to_mfn(page)), 1, 0);
                 put_page(page);
             }
         }
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1893,7 +1893,7 @@ int hvm_hap_nested_page_fault(paddr_t gp
          */
         if ( npfec.write_access )
         {
-            paging_mark_dirty(currd, mfn);
+            paging_mark_pfn_dirty(currd, _pfn(gfn));
             /*
              * If p2m is really an altp2m, unlock here to avoid lock ordering
              * violation when the change below is propagated from host p2m.
@@ -2591,7 +2591,7 @@ static void *_hvm_map_guest_frame(unsign
         if ( unlikely(p2m_is_discard_write(p2mt)) )
             *writable = 0;
         else if ( !permanent )
-            paging_mark_dirty(d, _mfn(page_to_mfn(page)));
+            paging_mark_pfn_dirty(d, _pfn(gfn));
     }
 
     if ( !permanent )
@@ -3254,7 +3254,7 @@ static enum hvm_translation_result __hvm
                     memcpy(p, buf, count);
                 else
                     memset(p, 0, count);
-                paging_mark_dirty(v->domain, _mfn(page_to_mfn(page)));
+                paging_mark_pfn_dirty(v->domain, _pfn(gfn_x(gfn)));
             }
         }
         else
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -283,7 +283,7 @@ static int hvm_add_ioreq_gfn(
     rc = guest_physmap_add_page(d, _gfn(iorp->gfn),
                                 _mfn(page_to_mfn(iorp->page)), 0);
     if ( rc == 0 )
-        paging_mark_dirty(d, _mfn(page_to_mfn(iorp->page)));
+        paging_mark_pfn_dirty(d, _pfn(iorp->gfn));
 
     return rc;
 }
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -1221,7 +1221,7 @@ p2m_pod_demand_populate(struct p2m_domai
     for( i = 0; i < (1UL << order); i++ )
     {
         set_gpfn_from_mfn(mfn_x(mfn) + i, gfn_x(gfn_aligned) + i);
-        paging_mark_dirty(d, mfn_add(mfn, i));
+        paging_mark_pfn_dirty(d, _pfn(gfn_x(gfn_aligned) + i));
     }
 
     p2m->pod.entry_count -= (1UL << order);
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -3742,8 +3742,7 @@ long do_mmu_update(
             }
 
             set_gpfn_from_mfn(mfn, gpfn);
-
-            paging_mark_dirty(pg_owner, _mfn(mfn));
+            paging_mark_pfn_dirty(pg_owner, _pfn(gpfn));
 
             put_page(page);
             break;
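
Note the type conversions in the hunks above: where a caller holds a
typed gfn_t, the argument is formed as _pfn(gfn_x(gfn)); where it
holds a raw frame number, _pfn(pfn) suffices. A hypothetical caller
(for illustration only, not part of the patch) would look like:

  /*
   * Hypothetical example: unwrap the typed gfn_t with gfn_x() and
   * rewrap it as a pfn_t. The log-dirty bitmap is indexed by PFN,
   * which for translated guests is the same number as the GFN.
   */
  static void mark_written(struct domain *d, gfn_t gfn)
  {
      paging_mark_pfn_dirty(d, _pfn(gfn_x(gfn)));
  }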



