# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
# Node ID b91f42ea5985b399343479e0f9395fc3467428cd
# Parent de7c20b6eaae7b9ac71eb3b63b5bff5b6d6a5220
# Parent a5153d9c8c9f7a8cbd6f51f4cce2c2ecc1a0c498
Merge
---
xen/arch/x86/mm/shadow/common.c | 50 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 50 insertions(+)
diff -r de7c20b6eaae -r b91f42ea5985 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c Thu Nov 09 16:05:00 2006 +0000
+++ b/xen/arch/x86/mm/shadow/common.c Thu Nov 09 16:06:43 2006 +0000
@@ -635,6 +635,56 @@ void shadow_prealloc(struct domain *d, u
BUG();
}
+#ifndef NDEBUG
+/* Deliberately free all the memory we can: this can be used to cause the
+ * guest's pagetables to be re-shadowed if we suspect that the shadows
+ * have somehow got out of sync */
+/* Debug-only (compiled out under NDEBUG); triggered by the 'S' console
+ * keypress registered below. The 'c' argument is the keypress character
+ * and is unused here. */
+static void shadow_blow_tables(unsigned char c)
+{
+ struct list_head *l, *t;
+ struct page_info *pg;
+ struct domain *d;
+ struct vcpu *v;
+ mfn_t smfn;
+
+ /* Walk every domain; only shadow-mode domains have shadow pagetables,
+ * and we need vcpu[0] as the vcpu argument for the unpin/unhook calls. */
+ for_each_domain(d)
+ {
+ if ( shadow_mode_enabled(d) && (v = d->vcpu[0]) != NULL)
+ {
+ /* NOTE(review): shadow_lock() taken from a keyhandler — confirm the
+ * keyhandler context permits sleeping/spinning on this lock. */
+ shadow_lock(d);
+ printk("Blowing shadow tables for domain %u\n", d->domain_id);
+
+ /* Pass one: unpin all top-level pages */
+ list_for_each_backwards_safe(l,t, &d->arch.shadow.toplevel_shadows)
+ {
+ pg = list_entry(l, struct page_info, list);
+ smfn = page_to_mfn(pg);
+ sh_unpin(v, smfn);
+ }
+
+ /* Second pass: unhook entries of in-use shadows */
+ /* (two passes: unpinning first means unhooking cannot revive
+ * pinned references behind us; _safe variant because entries may
+ * be removed from the list as we walk it) */
+ list_for_each_backwards_safe(l,t, &d->arch.shadow.toplevel_shadows)
+ {
+ pg = list_entry(l, struct page_info, list);
+ smfn = page_to_mfn(pg);
+ shadow_unhook_mappings(v, smfn);
+ }
+
+ /* Make sure everyone sees the unshadowings */
+ flush_tlb_mask(d->domain_dirty_cpumask);
+ shadow_unlock(d);
+ }
+ }
+}
+
+/* Register this function in the Xen console keypress table */
+/* Runs once at boot via __initcall; binds the 'S' debug key to
+ * shadow_blow_tables above. Always returns 0 (success). */
+static __init int shadow_blow_tables_keyhandler_init(void)
+{
+ register_keyhandler('S', shadow_blow_tables, "reset shadow pagetables");
+ return 0;
+}
+__initcall(shadow_blow_tables_keyhandler_init);
+#endif /* !NDEBUG */
/* Allocate another shadow's worth of (contiguous, aligned) pages,
* and fill in the type and backpointer fields of their page_infos.
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|