# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
# Node ID cb0d26d68adf6c66e59493c0dfd91155ff11a7f1
# Parent a10ef8002af6d37da35766510a3dc08375582d98
[XEN] Stricter TLB-flush discipline when unshadowing pagetables
It's OK for the guest to see old entries in the TLB, but not for the
shadow fault handler to see them in its linear mappings.
Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
---
 xen/arch/x86/mm/shadow/common.c  |   43 ++++++++++++++++++++++++++++---------------
 xen/arch/x86/mm/shadow/multi.c   |   32 --------------------------------
 xen/include/asm-x86/perfc_defn.h |    1 -
 3 files changed, 28 insertions(+), 48 deletions(-)
diff -r a10ef8002af6 -r cb0d26d68adf xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c Fri Oct 20 16:01:49 2006 +0100
+++ b/xen/arch/x86/mm/shadow/common.c Fri Oct 20 16:06:53 2006 +0100
@@ -567,13 +567,18 @@ void shadow_prealloc(struct domain *d, u
 {
     /* Need a vpcu for calling unpins; for now, since we don't have
      * per-vcpu shadows, any will do */
-    struct vcpu *v = d->vcpu[0];
+    struct vcpu *v, *v2;
     struct list_head *l, *t;
     struct page_info *pg;
+    cpumask_t flushmask = CPU_MASK_NONE;
     mfn_t smfn;
 
     if ( chunk_is_available(d, order) ) return;
 
+    v = current;
+    if ( v->domain != d )
+        v = d->vcpu[0];
+
     /* Stage one: walk the list of top-level pages, unpinning them */
     perfc_incrc(shadow_prealloc_1);
     list_for_each_backwards_safe(l, t, &d->arch.shadow.toplevel_shadows)
@@ -592,28 +597,30 @@ void shadow_prealloc(struct domain *d, u
      * loaded in cr3 on some vcpu. Walk them, unhooking the non-Xen
      * mappings. */
     perfc_incrc(shadow_prealloc_2);
-    v = current;
-    if ( v->domain != d )
-        v = d->vcpu[0];
-    /* Walk the list from the tail: recently used toplevels have been pulled
-     * to the head */
     list_for_each_backwards_safe(l, t, &d->arch.shadow.toplevel_shadows)
     {
         pg = list_entry(l, struct page_info, list);
         smfn = page_to_mfn(pg);
         shadow_unhook_mappings(v, smfn);
-        /* Need to flush TLB if we've altered our own tables */
-        if ( !shadow_mode_external(d) &&
-             (pagetable_get_pfn(current->arch.shadow_table[0]) == mfn_x(smfn)
-              || pagetable_get_pfn(current->arch.shadow_table[1]) == mfn_x(smfn)
-              || pagetable_get_pfn(current->arch.shadow_table[2]) == mfn_x(smfn)
-              || pagetable_get_pfn(current->arch.shadow_table[3]) == mfn_x(smfn)
-             ) )
-            local_flush_tlb();
-
+        /* Remember to flush TLBs: we have removed shadow entries that
+         * were in use by some vcpu(s). */
+        for_each_vcpu(d, v2)
+        {
+            if ( pagetable_get_pfn(v2->arch.shadow_table[0]) == mfn_x(smfn)
+                 || pagetable_get_pfn(v2->arch.shadow_table[1]) == mfn_x(smfn)
+                 || pagetable_get_pfn(v2->arch.shadow_table[2]) == mfn_x(smfn)
+                 || pagetable_get_pfn(v2->arch.shadow_table[3]) == mfn_x(smfn)
+                 )
+                cpus_or(flushmask, v2->vcpu_dirty_cpumask, flushmask);
+        }
+
         /* See if that freed up a chunk of appropriate size */
-        if ( chunk_is_available(d, order) ) return;
+        if ( chunk_is_available(d, order) )
+        {
+            flush_tlb_mask(flushmask);
+            return;
+        }
     }
 
     /* Nothing more we can do: all remaining shadows are of pages that
@@ -2216,6 +2223,10 @@ void sh_remove_shadows(struct vcpu *v, m
         if ( all )
             domain_crash(v->domain);
     }
+
+    /* Need to flush TLBs now, so that linear maps are safe next time we
+     * take a fault. */
+    flush_tlb_mask(v->domain->domain_dirty_cpumask);
 }
 
 void
diff -r a10ef8002af6 -r cb0d26d68adf xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c Fri Oct 20 16:01:49 2006 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c Fri Oct 20 16:06:53 2006 +0100
@@ -2562,41 +2562,11 @@ static inline void check_for_early_unsha
          sh_mfn_is_a_page_table(gmfn) )
     {
         u32 flags = mfn_to_page(gmfn)->shadow_flags;
-        mfn_t smfn;
         if ( !(flags & (SHF_L2_32|SHF_L2_PAE|SHF_L2H_PAE|SHF_L4_64)) )
         {
             perfc_incrc(shadow_early_unshadow);
             sh_remove_shadows(v, gmfn, 1, 0 /* Fast, can fail to unshadow */ );
-            return;
-        }
-        /* SHF_unhooked_mappings is set to make sure we only unhook
-         * once in a single batch of updates. It is reset when this
-         * top-level page is loaded into CR3 again */
-        if ( !(flags & SHF_unhooked_mappings) )
-        {
-            perfc_incrc(shadow_early_unshadow_top);
-            mfn_to_page(gmfn)->shadow_flags |= SHF_unhooked_mappings;
-            if ( flags & SHF_L2_32 )
-            {
-                smfn = get_shadow_status(v, gmfn, PGC_SH_l2_32_shadow);
-                shadow_unhook_mappings(v, smfn);
-            }
-            if ( flags & SHF_L2_PAE )
-            {
-                smfn = get_shadow_status(v, gmfn, PGC_SH_l2_pae_shadow);
-                shadow_unhook_mappings(v, smfn);
-            }
-            if ( flags & SHF_L2H_PAE )
-            {
-                smfn = get_shadow_status(v, gmfn, PGC_SH_l2h_pae_shadow);
-                shadow_unhook_mappings(v, smfn);
-            }
-            if ( flags & SHF_L4_64 )
-            {
-                smfn = get_shadow_status(v, gmfn, PGC_SH_l4_64_shadow);
-                shadow_unhook_mappings(v, smfn);
-            }
-        }
+        }
     }
     v->arch.shadow.last_emulated_mfn = mfn_x(gmfn);
 #endif
diff -r a10ef8002af6 -r cb0d26d68adf xen/include/asm-x86/perfc_defn.h
--- a/xen/include/asm-x86/perfc_defn.h Fri Oct 20 16:01:49 2006 +0100
+++ b/xen/include/asm-x86/perfc_defn.h Fri Oct 20 16:06:53 2006 +0100
@@ -76,7 +76,6 @@ PERFCOUNTER_CPU(shadow_mappings, "
 PERFCOUNTER_CPU(shadow_mappings,        "shadow removes all mappings")
 PERFCOUNTER_CPU(shadow_mappings_bf,     "shadow rm-mappings brute-force")
 PERFCOUNTER_CPU(shadow_early_unshadow,  "shadow unshadows for fork/exit")
-PERFCOUNTER_CPU(shadow_early_unshadow_top, "shadow unhooks for fork/exit")
 PERFCOUNTER_CPU(shadow_unshadow,        "shadow unshadows a page")
 PERFCOUNTER_CPU(shadow_up_pointer,      "shadow unshadow by up-pointer")
 PERFCOUNTER_CPU(shadow_unshadow_bf,     "shadow unshadow brute-force")