# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxx>
# Date 1309426014 -3600
# Node ID 970797044f52db4b4a2ea7b0fe3092e08876e84d
# Parent 33717472f37e679c94aca28bbbcee58b61c1886c
Nested p2m: implement "flush" as a first-class action
rather than using the teardown and init functions.

This makes the locking clearer and avoids an expensive scan of all
pfns that's only needed for non-nested p2ms. It also moves the
tlb flush into the proper place in the flush logic, avoiding a
possible race against other CPUs.
Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
Acked-by: Christoph Egger <Christoph.Egger@xxxxxxx>
---
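For readers skimming the diff below, here is a minimal stand-alone C sketch of
the ordering that the reworked p2m_flush_locked() enforces: mark the table
invalid, empty the top level of the trie, flush remote TLBs, and only then hand
the lower-level pages back to the paging pool, all under the per-p2m lock.
Every type and helper in the sketch (fake_p2m, fake_lock, fake_flush_tlbs, ...)
is a hypothetical stand-in for illustration only, not a real Xen interface.

    #include <stdint.h>
    #include <stdio.h>

    #define INVALID_CR3 (~0ULL)              /* plays the role of CR3_EADDR */

    struct fake_p2m {
        uint64_t cr3;                        /* guest CR3 served, or INVALID_CR3 */
        int      trie_pages;                 /* stand-in for the p2m page list   */
    };

    static void fake_lock(struct fake_p2m *p)       { (void)p; }
    static void fake_unlock(struct fake_p2m *p)     { (void)p; }
    static void fake_clear_root(struct fake_p2m *p) { (void)p; puts("root cleared"); }
    static void fake_flush_tlbs(struct fake_p2m *p) { (void)p; puts("TLBs flushed"); }
    static void fake_free_pages(struct fake_p2m *p) { p->trie_pages = 1; /* keep root */ }

    /* Same shape as the patched p2m_flush_locked(): the TLB flush happens
     * before the trie pages are freed, so no other CPU can still be walking
     * them when they are recycled. */
    static void sketch_flush(struct fake_p2m *p2m)
    {
        fake_lock(p2m);
        p2m->cr3 = INVALID_CR3;              /* no vcpu can re-find this table */
        fake_clear_root(p2m);                /* zap the top level of the trie  */
        fake_flush_tlbs(p2m);                /* nobody else may be using it    */
        fake_free_pages(p2m);                /* now safe to recycle the pages  */
        fake_unlock(p2m);
    }

    int main(void)
    {
        struct fake_p2m p = { .cr3 = 0x1000, .trie_pages = 42 };
        sketch_flush(&p);
        return 0;
    }

The design point this illustrates is that the flush is now a self-contained
operation that takes the p2m lock itself, rather than being built out of the
teardown and init paths that were meant for the host p2m.
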
diff -r 33717472f37e -r 970797044f52 xen/arch/x86/hvm/nestedhvm.c
--- a/xen/arch/x86/hvm/nestedhvm.c Tue Jun 28 18:15:44 2011 +0100
+++ b/xen/arch/x86/hvm/nestedhvm.c Thu Jun 30 10:26:54 2011 +0100
@@ -119,12 +119,6 @@
cpus_clear(p2m->p2m_dirty_cpumask);
}
-void
-nestedhvm_vmcx_flushtlbdomain(struct domain *d)
-{
- on_selected_cpus(d->domain_dirty_cpumask, nestedhvm_flushtlb_ipi, d, 1);
-}
-
bool_t
nestedhvm_is_n2(struct vcpu *v)
{
diff -r 33717472f37e -r 970797044f52 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c Tue Jun 28 18:15:44 2011 +0100
+++ b/xen/arch/x86/mm/p2m.c Thu Jun 30 10:26:54 2011 +0100
@@ -1050,20 +1050,41 @@
return lrup2m;
}
-static int
+/* Reset this p2m table to be empty */
+static void
p2m_flush_locked(struct p2m_domain *p2m)
{
- ASSERT(p2m);
- if (p2m->cr3 == CR3_EADDR)
- /* Microoptimisation: p2m is already empty.
- * => about 0.3% speedup of overall system performance.
- */
- return 0;
+ struct page_info *top, *pg;
+ struct domain *d = p2m->domain;
+ void *p;
- p2m_teardown(p2m);
- p2m_initialise(p2m->domain, p2m);
- p2m->write_p2m_entry = nestedp2m_write_p2m_entry;
- return p2m_alloc_table(p2m);
+ p2m_lock(p2m);
+
+ /* "Host" p2m tables can have shared entries &c that need a bit more
+ * care when discarding them */
+ ASSERT(p2m_is_nestedp2m(p2m));
+ ASSERT(page_list_empty(&p2m->pod.super));
+ ASSERT(page_list_empty(&p2m->pod.single));
+
+ /* This is no longer a valid nested p2m for any address space */
+ p2m->cr3 = CR3_EADDR;
+
+ /* Zap the top level of the trie */
+ top = mfn_to_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
+ p = __map_domain_page(top);
+ clear_page(p);
+ unmap_domain_page(p);
+
+ /* Make sure nobody else is using this p2m table */
+ nestedhvm_vmcx_flushtlb(p2m);
+
+ /* Free the rest of the trie pages back to the paging pool */
+ while ( (pg = page_list_remove_head(&p2m->pages)) )
+ if ( pg != top )
+ d->arch.paging.free_page(d, pg);
+ page_list_add(top, &p2m->pages);
+
+ p2m_unlock(p2m);
}
void
@@ -1074,9 +1095,8 @@
ASSERT(v->domain == d);
vcpu_nestedhvm(v).nv_p2m = NULL;
nestedp2m_lock(d);
- BUG_ON(p2m_flush_locked(p2m) != 0);
+ p2m_flush_locked(p2m);
hvm_asid_flush_vcpu(v);
- nestedhvm_vmcx_flushtlb(p2m);
nestedp2m_unlock(d);
}
@@ -1086,12 +1106,8 @@
int i;
nestedp2m_lock(d);
- for (i = 0; i < MAX_NESTEDP2M; i++) {
- struct p2m_domain *p2m = d->arch.nested_p2m[i];
- BUG_ON(p2m_flush_locked(p2m) != 0);
- cpus_clear(p2m->p2m_dirty_cpumask);
- }
- nestedhvm_vmcx_flushtlbdomain(d);
+ for ( i = 0; i < MAX_NESTEDP2M; i++ )
+ p2m_flush_locked(d->arch.nested_p2m[i]);
nestedp2m_unlock(d);
}
@@ -1104,7 +1120,7 @@
volatile struct nestedvcpu *nv = &vcpu_nestedhvm(v);
struct domain *d;
struct p2m_domain *p2m;
- int i, rv;
+ int i;
if (cr3 == 0 || cr3 == CR3_EADDR)
cr3 = v->arch.hvm_vcpu.guest_cr[3];
@@ -1136,9 +1152,7 @@
*/
for (i = 0; i < MAX_NESTEDP2M; i++) {
p2m = p2m_getlru_nestedp2m(d, NULL);
- rv = p2m_flush_locked(p2m);
- if (rv == 0)
- break;
+ p2m_flush_locked(p2m);
}
nv->nv_p2m = p2m;
p2m->cr3 = cr3;
diff -r 33717472f37e -r 970797044f52 xen/include/asm-x86/hvm/nestedhvm.h
--- a/xen/include/asm-x86/hvm/nestedhvm.h Tue Jun 28 18:15:44 2011 +0100
+++ b/xen/include/asm-x86/hvm/nestedhvm.h Thu Jun 30 10:26:54 2011 +0100
@@ -61,7 +61,6 @@
(!!vcpu_nestedhvm((v)).nv_vmswitch_in_progress)
void nestedhvm_vmcx_flushtlb(struct p2m_domain *p2m);
-void nestedhvm_vmcx_flushtlbdomain(struct domain *d);
bool_t nestedhvm_is_n2(struct vcpu *v);