[Xen-changelog] [xen-unstable] x86 mm: free p2m pages to the shadow/hap pool.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] x86 mm: free p2m pages to the shadow/hap pool.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Sat, 09 Oct 2010 14:56:14 -0700
Delivery-date: Sat, 09 Oct 2010 15:05:34 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxx>
# Date 1285930673 -3600
# Node ID c8331262efe61e33881ef6354a319ff6283e246d
# Parent  71f836615ea211ac4e6f3b9793f58c6f6934c030
x86 mm: free p2m pages to the shadow/hap pool.

This allows the p2m code to dynamically free and reallocate memory
rather than just freeing everything once at domain teardown.
The previous mechanism (allocating p2m pages from shadow/hap
memory but freeing them directly to the domheap) was a relic
of the original shadow2's rather complex pool code.

Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
---
 xen/arch/x86/mm/hap/hap.c       |    7 ++++++-
 xen/arch/x86/mm/shadow/common.c |   30 +++++++++++++++++++-----------
 2 files changed, 25 insertions(+), 12 deletions(-)
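
As a reading aid, below is a minimal, self-contained C sketch of the accounting pattern this changeset moves to. It is illustrative only: "struct pool" and the pool_* helpers are hypothetical names, not Xen APIs. The point it mirrors is that p2m pages are carved out of the shadow/hap pool and handed back to that pool on free (p2m_pages and total_pages adjusted on both paths), and the pool is only drained to zero at final teardown, as in the hunks below.

#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-in for the per-domain shadow/hap pool counters. */
struct pool {
    int total_pages;   /* pages currently accounted to the pool */
    int free_pages;    /* pages sitting on the pool's free list */
    int p2m_pages;     /* pages lent out to the p2m */
};

/* Take one page from the pool for the p2m (cf. shadow_alloc_p2m_page). */
static int pool_alloc_p2m_page(struct pool *p)
{
    if (p->free_pages == 0)
        return -1;          /* real code would grow the pool first */
    p->free_pages--;
    p->p2m_pages++;
    p->total_pages--;       /* the page is no longer counted as pool memory */
    return 0;
}

/* Return a p2m page to the pool (cf. shadow_free_p2m_page after this patch). */
static void pool_free_p2m_page(struct pool *p)
{
    p->p2m_pages--;
    p->total_pages++;       /* back onto the pool's books... */
    p->free_pages++;        /* ...and onto its free list */
    assert(p->p2m_pages >= 0);
}

/* Final teardown: once the p2m has given everything back, drain the pool. */
static void pool_final_teardown(struct pool *p)
{
    assert(p->p2m_pages == 0);   /* cf. ASSERT(d->arch.paging.hap.p2m_pages == 0) */
    p->total_pages = 0;          /* cf. hap_set_allocation(d, 0, NULL) */
    p->free_pages = 0;
}

int main(void)
{
    struct pool p = { .total_pages = 4, .free_pages = 4, .p2m_pages = 0 };

    pool_alloc_p2m_page(&p);
    pool_alloc_p2m_page(&p);
    printf("after alloc:    total=%d free=%d p2m=%d\n",
           p.total_pages, p.free_pages, p.p2m_pages);

    pool_free_p2m_page(&p);
    pool_free_p2m_page(&p);
    pool_final_teardown(&p);
    printf("after teardown: total=%d free=%d p2m=%d\n",
           p.total_pages, p.free_pages, p.p2m_pages);
    return 0;
}

Built standalone, this prints total=2 free=2 p2m=2 after the two allocations and all-zero counters after teardown, which matches the p2m_pages accounting the ASSERTs in the patch check.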

diff -r 71f836615ea2 -r c8331262efe6 xen/arch/x86/mm/hap/hap.c
--- a/xen/arch/x86/mm/hap/hap.c Fri Sep 24 15:54:39 2010 +0100
+++ b/xen/arch/x86/mm/hap/hap.c Fri Oct 01 11:57:53 2010 +0100
@@ -329,8 +329,9 @@ static void hap_free_p2m_page(struct p2m
     /* Free should not decrement domain's total allocation, since
      * these pages were allocated without an owner. */
     page_set_owner(pg, NULL);
-    free_domheap_page(pg);
     d->arch.paging.hap.p2m_pages--;
+    d->arch.paging.hap.total_pages++;
+    hap_free(d, page_to_mfn(pg));
     ASSERT(d->arch.paging.hap.p2m_pages >= 0);
     hap_unlock(d);
 }
@@ -618,7 +619,11 @@ void hap_final_teardown(struct domain *d
         hap_teardown(d);
 
     p2m_teardown(p2m_get_hostp2m(d));
+    /* Free any memory that the p2m teardown released */
+    hap_lock(d);
+    hap_set_allocation(d, 0, NULL);
     ASSERT(d->arch.paging.hap.p2m_pages == 0);
+    hap_unlock(d);
 }
 
 void hap_teardown(struct domain *d)
diff -r 71f836615ea2 -r c8331262efe6 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Fri Sep 24 15:54:39 2010 +0100
+++ b/xen/arch/x86/mm/shadow/common.c   Fri Oct 01 11:57:53 2010 +0100
@@ -1577,7 +1577,6 @@ void shadow_free(struct domain *d, mfn_t
 
     shadow_type = sp->u.sh.type;
     ASSERT(shadow_type != SH_type_none);
-    ASSERT(shadow_type != SH_type_p2m_table);
     ASSERT(sp->u.sh.head || (shadow_type > SH_type_max_shadow));
     pages = shadow_size(shadow_type);
 
@@ -1637,6 +1636,8 @@ shadow_alloc_p2m_page(struct p2m_domain 
  
     shadow_prealloc(d, SH_type_p2m_table, 1);
     pg = mfn_to_page(shadow_alloc(d, SH_type_p2m_table, 0));
+    d->arch.paging.shadow.p2m_pages++;
+    d->arch.paging.shadow.total_pages--;
 
     shadow_unlock(d);
 
@@ -1647,8 +1648,6 @@ shadow_alloc_p2m_page(struct p2m_domain 
      * believed to be a concern. */
     page_set_owner(pg, d);
     pg->count_info |= 1;
-    d->arch.paging.shadow.p2m_pages++;
-    d->arch.paging.shadow.total_pages--;
     return pg;
 }
 
@@ -1664,12 +1663,14 @@ shadow_free_p2m_page(struct p2m_domain *
                      pg->count_info, pg->u.inuse.type_info);
     }
     pg->count_info &= ~PGC_count_mask;
-    /* Free should not decrement domain's total allocation, since 
-     * these pages were allocated without an owner. */
+    pg->u.sh.type = SH_type_p2m_table; /* p2m code reuses type-info */
     page_set_owner(pg, NULL); 
-    free_domheap_pages(pg, 0);
+
+    shadow_lock(d);
+    shadow_free(d, page_to_mfn(pg));
     d->arch.paging.shadow.p2m_pages--;
-    perfc_decr(shadow_alloc_count);
+    d->arch.paging.shadow.total_pages++;
+    shadow_unlock(d);
 }
 
 #if CONFIG_PAGING_LEVELS == 3
@@ -3114,7 +3115,7 @@ void shadow_teardown(struct domain *d)
 {
     struct vcpu *v;
     mfn_t mfn;
-    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    struct page_info *unpaged_pagetable = NULL;
 
     ASSERT(d->is_dying);
     ASSERT(d != current->domain);
@@ -3200,8 +3201,8 @@ void shadow_teardown(struct domain *d)
             if ( !hvm_paging_enabled(v) )
                 v->arch.guest_table = pagetable_null();
         }
-        shadow_free_p2m_page(p2m, 
-            pagetable_get_page(d->arch.paging.shadow.unpaged_pagetable));
+        unpaged_pagetable = 
+            pagetable_get_page(d->arch.paging.shadow.unpaged_pagetable);
         d->arch.paging.shadow.unpaged_pagetable = pagetable_null();
     }
 
@@ -3218,6 +3219,10 @@ void shadow_teardown(struct domain *d)
     }
 
     shadow_unlock(d);
+
+    /* Must be called outside the lock */
+    if ( unpaged_pagetable ) 
+        shadow_free_p2m_page(p2m_get_hostp2m(d), unpaged_pagetable);
 }
 
 void shadow_final_teardown(struct domain *d)
@@ -3238,13 +3243,16 @@ void shadow_final_teardown(struct domain
 
     /* It is now safe to pull down the p2m map. */
     p2m_teardown(p2m_get_hostp2m(d));
-
+    /* Free any shadow memory that the p2m teardown released */
+    shadow_lock(d);
+    sh_set_allocation(d, 0, NULL);
     SHADOW_PRINTK("dom %u final teardown done."
                    "  Shadow pages total = %u, free = %u, p2m=%u\n",
                    d->domain_id,
                    d->arch.paging.shadow.total_pages, 
                    d->arch.paging.shadow.free_pages, 
                    d->arch.paging.shadow.p2m_pages);
+    shadow_unlock(d);
 }
 
 static int shadow_one_bit_enable(struct domain *d, u32 mode)
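
One more illustrative sketch, this time of the locking change in the shadow_teardown() hunk above: because shadow_free_p2m_page() now takes the shadow lock itself, shadow_teardown() (which runs with that lock held) stashes the unpaged pagetable and frees it only after shadow_unlock(), per the "Must be called outside the lock" comment. This is plain C with a pthread mutex standing in for the shadow lock; struct dom_stub, teardown_example() and release_page() are hypothetical names, not Xen code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock_stub = PTHREAD_MUTEX_INITIALIZER;

struct dom_stub { void *unpaged_pagetable; };

/* Stand-in for shadow_free_p2m_page(): it acquires the lock internally. */
static void release_page(void *page)
{
    pthread_mutex_lock(&lock_stub);
    free(page);
    pthread_mutex_unlock(&lock_stub);
}

/* Stand-in for shadow_teardown(): defer the free until the lock is dropped. */
static void teardown_example(struct dom_stub *d)
{
    void *deferred = NULL;

    pthread_mutex_lock(&lock_stub);
    /* ... teardown work that must run under the lock ... */
    deferred = d->unpaged_pagetable;   /* detach while locked, as the hunk does */
    d->unpaged_pagetable = NULL;
    pthread_mutex_unlock(&lock_stub);

    /* Must be called outside the lock, since release_page() relocks. */
    if (deferred)
        release_page(deferred);
}

int main(void)
{
    struct dom_stub d = { .unpaged_pagetable = malloc(16) };
    teardown_example(&d);
    puts("teardown done without taking the lock twice");
    return 0;
}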

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
