# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1260522373 0
# Node ID 8b5b13089f6ecb91e0f87c0863b3ba18917a2bc1
# Parent 2cf845e9086c62efc5eb525531fc5777c1e4da9b
PoD: appropriate BUG_ON when domain is dying
The BUG_ON(d->is_dying) in p2m_pod_cache_add(), introduced in
c/s 20426, is not appropriate, since d->is_dying is set asynchronously.
For example, MMU_UPDATE hypercalls from qemu and the
DOMCTL_destroydomain hypercall from xend can be issued simultaneously.
Also, this patch makes p2m_pod_empty_cache() wait, via spin_barrier(),
until any other PoD operation has ceased.
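For illustration only (not part of this changeset), here is a minimal
user-space sketch of the ordering the barrier provides: a pthread mutex
stands in for the p2m lock, a lock/unlock pair models spin_barrier(),
and the function and variable names below are invented for the example.

/*
 * Analogue of the race: demand_populate() consumes the PoD cache only
 * if the domain is not dying, checked under the lock; empty_cache()
 * issues a barrier so any populate that saw is_dying == false has
 * finished before the cache is torn down.
 * Build with: cc -pthread example.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t p2m_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile bool is_dying;     /* set asynchronously, like d->is_dying */
static int pod_cache_entries = 8;  /* stands in for the PoD cache */

static void *demand_populate(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&p2m_lock);      /* p2m_lock(p2md) */
    if ( !is_dying )
        pod_cache_entries--;            /* hand out a cached page */
    pthread_mutex_unlock(&p2m_lock);    /* p2m_unlock(p2md) */
    return NULL;
}

static void empty_cache(void)
{
    /* In Xen, d->is_dying is set earlier in the domain-death path;
     * it is modelled here for brevity. */
    is_dying = true;

    /* spin_barrier(&p2md->lock): wait for any current lock holder to
     * drop the lock, so no populate that started before is_dying was
     * set can still be running. */
    pthread_mutex_lock(&p2m_lock);
    pthread_mutex_unlock(&p2m_lock);

    printf("emptying cache: %d entries left\n", pod_cache_entries);
    pod_cache_entries = 0;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, demand_populate, NULL);
    empty_cache();
    pthread_join(&t, NULL);
    return 0;
}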
Signed-off-by: Kouya Shimura <kouya@xxxxxxxxxxxxxx>
Acked-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
xen-unstable changeset: 20606:295e77eed8c9
xen-unstable date: Fri Dec 11 08:42:28 2009 +0000
---
xen/arch/x86/mm/p2m.c | 25 +++++++++++++++++++++----
1 files changed, 21 insertions(+), 4 deletions(-)
diff -r 2cf845e9086c -r 8b5b13089f6e xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c Fri Dec 11 09:05:46 2009 +0000
+++ b/xen/arch/x86/mm/p2m.c Fri Dec 11 09:06:13 2009 +0000
@@ -257,6 +257,8 @@ p2m_pod_cache_add(struct domain *d,
}
#endif
+ ASSERT(p2m_locked_by_me(p2md));
+
/*
* Pages from domain_alloc and returned by the balloon driver aren't
* guaranteed to be zero; but by reclaiming zero pages, we implicitly
@@ -293,7 +295,9 @@ p2m_pod_cache_add(struct domain *d,
BUG();
}
- BUG_ON(d->is_dying);
+ /* Ensure that the PoD cache has never been emptied.
+ * This may cause "zombie domains" since the page will never be freed. */
+ BUG_ON( d->arch.relmem != RELMEM_not_started );
spin_unlock(&d->page_alloc_lock);
@@ -491,6 +495,8 @@ p2m_pod_set_mem_target(struct domain *d,
int ret = 0;
unsigned long populated;
+ p2m_lock(p2md);
+
/* P == B: Nothing to do. */
if ( p2md->pod.entry_count == 0 )
goto out;
@@ -518,6 +524,8 @@ p2m_pod_set_mem_target(struct domain *d,
ret = p2m_pod_set_cache_target(d, pod_target);
out:
+ p2m_unlock(p2md);
+
return ret;
}
@@ -526,6 +534,10 @@ p2m_pod_empty_cache(struct domain *d)
{
struct p2m_domain *p2md = d->arch.p2m;
struct page_info *page;
+
+ /* After this barrier no new PoD activities can happen. */
+ BUG_ON(!d->is_dying);
+ spin_barrier(&p2md->lock);
spin_lock(&d->page_alloc_lock);
@@ -578,7 +590,7 @@ p2m_pod_decrease_reservation(struct doma
/* If we don't have any outstanding PoD entries, let things take their
* course */
- if ( p2md->pod.entry_count == 0 || unlikely(d->is_dying) )
+ if ( p2md->pod.entry_count == 0 )
goto out;
/* Figure out if we need to steal some freed memory for our cache */
@@ -586,6 +598,9 @@ p2m_pod_decrease_reservation(struct doma
p2m_lock(p2md);
audit_p2m(d);
+
+ if ( unlikely(d->is_dying) )
+ goto out_unlock;
/* See what's in here. */
/* FIXME: Add contiguous; query for PSE entries? */
@@ -996,9 +1011,11 @@ p2m_pod_demand_populate(struct domain *d
struct p2m_domain *p2md = d->arch.p2m;
int i;
+ ASSERT(p2m_locked_by_me(d->arch.p2m));
+
/* This check is done with the p2m lock held. This will make sure that
- * even if d->is_dying changes under our feet, empty_pod_cache() won't start
- * until we're done. */
+ * even if d->is_dying changes under our feet, p2m_pod_empty_cache()
+ * won't start until we're done. */
if ( unlikely(d->is_dying) )
goto out_fail;