# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1278579171 -3600
# Node ID 92ac9536ac5abc17f414f024f3df92658cf2ee96
# Parent db35740574a5302e535e1268d0e62ffcac95b213
xen: make the shadow allocation hypercalls include the p2m memory
in the total shadow allocation. This makes the effect of allocation
changes consistent regardless of p2m activity on boot.
Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
---
xen/arch/x86/mm/hap/hap.c | 13 ++++++++++++-
xen/arch/x86/mm/shadow/common.c | 29 ++++++++++++++++++++---------
2 files changed, 32 insertions(+), 10 deletions(-)
diff -r db35740574a5 -r 92ac9536ac5a xen/arch/x86/mm/hap/hap.c
--- a/xen/arch/x86/mm/hap/hap.c Thu Jul 08 09:52:34 2010 +0100
+++ b/xen/arch/x86/mm/hap/hap.c Thu Jul 08 09:52:51 2010 +0100
@@ -334,7 +334,8 @@ static unsigned int
static unsigned int
hap_get_allocation(struct domain *d)
{
- unsigned int pg = d->arch.paging.hap.total_pages;
+ unsigned int pg = d->arch.paging.hap.total_pages
+ + d->arch.paging.hap.p2m_pages;
return ((pg >> (20 - PAGE_SHIFT))
+ ((pg & ((1 << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0));
@@ -348,6 +349,11 @@ hap_set_allocation(struct domain *d, uns
struct page_info *pg;
ASSERT(hap_locked_by_me(d));
+
+ if ( pages < d->arch.paging.hap.p2m_pages )
+ pages = 0;
+ else
+ pages -= d->arch.paging.hap.p2m_pages;
while ( d->arch.paging.hap.total_pages != pages )
{
@@ -367,6 +373,11 @@ hap_set_allocation(struct domain *d, uns
else if ( d->arch.paging.hap.total_pages > pages )
{
/* Need to return memory to domheap */
+ if ( page_list_empty(&d->arch.paging.hap.freelist) )
+ {
+ HAP_PRINTK("failed to free enough hap pages.\n");
+ return -ENOMEM;
+ }
pg = page_list_remove_head(&d->arch.paging.hap.freelist);
ASSERT(pg);
d->arch.paging.hap.free_pages--;
diff -r db35740574a5 -r 92ac9536ac5a xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c Thu Jul 08 09:52:34 2010 +0100
+++ b/xen/arch/x86/mm/shadow/common.c Thu Jul 08 09:52:51 2010 +0100
@@ -1817,14 +1817,24 @@ static unsigned int sh_set_allocation(st
unsigned int j, order = shadow_max_order(d);
ASSERT(shadow_locked_by_me(d));
-
- /* Don't allocate less than the minimum acceptable, plus one page per
- * megabyte of RAM (for the p2m table) */
- lower_bound = shadow_min_acceptable_pages(d) + (d->tot_pages / 256);
- if ( pages > 0 && pages < lower_bound )
- pages = lower_bound;
- /* Round up to largest block size */
- pages = (pages + ((1<<SHADOW_MAX_ORDER)-1)) & ~((1<<SHADOW_MAX_ORDER)-1);
+
+ if ( pages > 0 )
+ {
+ /* Check for minimum value. */
+ if ( pages < d->arch.paging.shadow.p2m_pages )
+ pages = 0;
+ else
+ pages -= d->arch.paging.shadow.p2m_pages;
+
+ /* Don't allocate less than the minimum acceptable, plus one page per
+ * megabyte of RAM (for the p2m table) */
+ lower_bound = shadow_min_acceptable_pages(d) + (d->tot_pages / 256);
+ if ( pages < lower_bound )
+ pages = lower_bound;
+
+ /* Round up to largest block size */
+ pages = (pages + ((1<<SHADOW_MAX_ORDER)-1)) &
~((1<<SHADOW_MAX_ORDER)-1);
+ }
SHADOW_PRINTK("current %i target %i\n",
d->arch.paging.shadow.total_pages, pages);
@@ -1884,7 +1894,8 @@ static unsigned int sh_set_allocation(st
/* Return the size of the shadow pool, rounded up to the nearest MB */
static unsigned int shadow_get_allocation(struct domain *d)
{
- unsigned int pg = d->arch.paging.shadow.total_pages;
+ unsigned int pg = d->arch.paging.shadow.total_pages
+ + d->arch.paging.shadow.p2m_pages;
return ((pg >> (20 - PAGE_SHIFT))
+ ((pg & ((1 << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0));
}
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog