WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-changelog

[Xen-changelog] [xen-unstable] mm: Cleanup, use PAGE_ORDER_4K as page_order instead of "0".

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] mm: Cleanup, use PAGE_ORDER_4K as page_order instead of "0".
From: Xen patchbot-unstable <patchbot@xxxxxxx>
Date: Fri, 11 Nov 2011 04:33:32 +0000
Delivery-date: Thu, 10 Nov 2011 20:38:41 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Jean Guyader <jean.guyader@xxxxxxxxxxxxx>
# Date 1320757301 0
# Node ID be8daf78856a605ec8f8df11ff13c325dc4bc70f
# Parent  452d9143687f826a30d0e56b7ba3b9783fb6bf24
mm: Cleanup, use PAGE_ORDER_4K as page_order instead of "0".

Signed-off-by: Jean Guyader <jean.guyader@xxxxxxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
---


diff -r 452d9143687f -r be8daf78856a xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Tue Nov 08 11:26:53 2011 +0100
+++ b/xen/arch/x86/mm.c Tue Nov 08 13:01:41 2011 +0000
@@ -4005,7 +4005,7 @@
     else
         p2mt = p2m_grant_map_rw;
     rc = guest_physmap_add_entry(current->domain,
-                                 addr >> PAGE_SHIFT, frame, 0, p2mt);
+                                 addr >> PAGE_SHIFT, frame, PAGE_ORDER_4K, p2mt);
     if ( rc )
         return GNTST_general_error;
     else
@@ -4062,7 +4062,7 @@
                  type, mfn_x(old_mfn), frame);
         return GNTST_general_error;
     }
-    guest_physmap_remove_page(d, gfn, frame, 0);
+    guest_physmap_remove_page(d, gfn, frame, PAGE_ORDER_4K);
 
     return GNTST_okay;
 }
@@ -4689,7 +4689,7 @@
         {
             if ( is_xen_heap_mfn(prev_mfn) )
                 /* Xen heap frames are simply unhooked from this phys slot. */
-                guest_physmap_remove_page(d, xatp.gpfn, prev_mfn, 0);
+                guest_physmap_remove_page(d, xatp.gpfn, prev_mfn, PAGE_ORDER_4K);
             else
                 /* Normal domain memory is freed, to avoid leaking memory. */
                 guest_remove_page(d, xatp.gpfn);
@@ -4699,10 +4699,10 @@
         gpfn = get_gpfn_from_mfn(mfn);
         ASSERT( gpfn != SHARED_M2P_ENTRY );
         if ( gpfn != INVALID_M2P_ENTRY )
-            guest_physmap_remove_page(d, gpfn, mfn, 0);
+            guest_physmap_remove_page(d, gpfn, mfn, PAGE_ORDER_4K);
 
         /* Map at new location. */
-        rc = guest_physmap_add_page(d, xatp.gpfn, mfn, 0);
+        rc = guest_physmap_add_page(d, xatp.gpfn, mfn, PAGE_ORDER_4K);
 
         domain_unlock(d);
 
diff -r 452d9143687f -r be8daf78856a xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Tue Nov 08 11:26:53 2011 +0100
+++ b/xen/arch/x86/mm/p2m.c     Tue Nov 08 13:01:41 2011 +0000
@@ -254,7 +254,7 @@
 
     /* Initialise physmap tables for slot zero. Other code assumes this. */
     p2m->defer_nested_flush = 1;
-    if ( !set_p2m_entry(p2m, 0, _mfn(INVALID_MFN), 0,
+    if ( !set_p2m_entry(p2m, 0, _mfn(INVALID_MFN), PAGE_ORDER_4K,
                         p2m_invalid, p2m->default_access) )
         goto error;
 
@@ -276,7 +276,7 @@
                 (gfn != 0x55555555L)
 #endif
                 && gfn != INVALID_M2P_ENTRY
-                && !set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_rw, p2m->default_access) )
+                && !set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_rw, p2m->default_access) )
                 goto error_unlock;
         }
         spin_unlock(&p2m->domain->page_alloc_lock);
@@ -549,7 +549,7 @@
 
     mfn = gfn_to_mfn_query(d, gfn, &pt);
     if ( pt == ot )
-        set_p2m_entry(p2m, gfn, mfn, 0, nt, p2m->default_access);
+        set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, nt, p2m->default_access);
 
     p2m_unlock(p2m);
 
@@ -576,7 +576,7 @@
     {
         mfn = gfn_to_mfn_query(d, gfn, &pt);
         if ( pt == ot )
-            set_p2m_entry(p2m, gfn, mfn, 0, nt, p2m->default_access);
+            set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, nt, p2m->default_access);
     }
 
     p2m->defer_nested_flush = 0;
@@ -613,7 +613,7 @@
     }
 
     P2M_DEBUG("set mmio %lx %lx\n", gfn, mfn_x(mfn));
-    rc = set_p2m_entry(p2m, gfn, mfn, 0, p2m_mmio_direct, p2m->default_access);
+    rc = set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_mmio_direct, p2m->default_access);
     audit_p2m(p2m, 1);
     p2m_unlock(p2m);
     if ( 0 == rc )
@@ -644,7 +644,7 @@
             "clear_mmio_p2m_entry: gfn_to_mfn failed! gfn=%08lx\n", gfn);
         goto out;
     }
-    rc = set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), 0, p2m_invalid, p2m->default_access);
+    rc = set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_4K, p2m_invalid, p2m->default_access);
     audit_p2m(p2m, 1);
 
 out:
@@ -674,7 +674,7 @@
     set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY);
 
     P2M_DEBUG("set shared %lx %lx\n", gfn, mfn_x(mfn));
-    rc = set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_shared, p2m->default_access);
+    rc = set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_shared, p2m->default_access);
     p2m_unlock(p2m);
     if ( 0 == rc )
         gdprintk(XENLOG_ERR,
@@ -739,7 +739,7 @@
         goto out;
 
     /* Fix p2m entry */
-    set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_out, a);
+    set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_paging_out, a);
     audit_p2m(p2m, 1);
     ret = 0;
 
@@ -806,7 +806,7 @@
         put_page(page);
 
     /* Remove mapping from p2m table */
-    set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), 0, p2m_ram_paged, a);
+    set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_4K, p2m_ram_paged, a);
     audit_p2m(p2m, 1);
 
     /* Clear content before returning the page to Xen */
@@ -900,7 +900,7 @@
         if ( p2mt == p2m_ram_paging_out )
             req.flags |= MEM_EVENT_FLAG_EVICT_FAIL;
 
-        set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_in_start, a);
+        set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_paging_in_start, a);
         audit_p2m(p2m, 1);
     }
     p2m_unlock(p2m);
@@ -968,7 +968,7 @@
     }
 
     /* Fix p2m mapping */
-    set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_in, a);
+    set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_paging_in, a);
     audit_p2m(p2m, 1);
 
     atomic_dec(&d->paged_pages);
@@ -1016,7 +1016,7 @@
         if ( mfn_valid(mfn) && 
              (p2mt == p2m_ram_paging_in || p2mt == p2m_ram_paging_in_start) )
         {
-            set_p2m_entry(p2m, rsp.gfn, mfn, 0, p2m_ram_rw, a);
+            set_p2m_entry(p2m, rsp.gfn, mfn, PAGE_ORDER_4K, p2m_ram_rw, a);
             set_gpfn_from_mfn(mfn_x(mfn), rsp.gfn);
             audit_p2m(p2m, 1);
         }
diff -r 452d9143687f -r be8daf78856a xen/common/memory.c
--- a/xen/common/memory.c       Tue Nov 08 11:26:53 2011 +0100
+++ b/xen/common/memory.c       Tue Nov 08 13:01:41 2011 +0000
@@ -165,7 +165,7 @@
     mfn = mfn_x(gfn_to_mfn(d, gmfn, &p2mt)); 
     if ( unlikely(p2m_is_paging(p2mt)) )
     {
-        guest_physmap_remove_page(d, gmfn, mfn, 0);
+        guest_physmap_remove_page(d, gmfn, mfn, PAGE_ORDER_4K);
         p2m_mem_paging_drop_page(d, gmfn);
         return 1;
     }
@@ -186,7 +186,7 @@
     if(p2m_is_shared(p2mt))
     {
         put_page_and_type(page);
-        guest_physmap_remove_page(d, gmfn, mfn, 0);
+        guest_physmap_remove_page(d, gmfn, mfn, PAGE_ORDER_4K);
         return 1;
     }
 
@@ -203,7 +203,7 @@
     if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
         put_page(page);
 
-    guest_physmap_remove_page(d, gmfn, mfn, 0);
+    guest_physmap_remove_page(d, gmfn, mfn, PAGE_ORDER_4K);
 
     put_page(page);
 
@@ -418,7 +418,7 @@
             gfn = mfn_to_gmfn(d, mfn);
             /* Pages were unshared above */
             BUG_ON(SHARED_M2P(gfn));
-            guest_physmap_remove_page(d, gfn, mfn, 0);
+            guest_physmap_remove_page(d, gfn, mfn, PAGE_ORDER_4K);
             put_page(page);
         }
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

[Prev in Thread] Current Thread [Next in Thread]
  • [Xen-changelog] [xen-unstable] mm: Cleanup, use PAGE_ORDER_4K as page_order instead of "0"., Xen patchbot-unstable <=