# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
# Node ID 0bbc1e003ef22399d24eb918507c7f5abe5b0bef
# Parent f0ba459065d32b61c9d04ccde5787e48e23be782
[XEN] Remove write access to new PT before discarding old shadow.
This allows us to use the old pagetables' linear maps in our
remove-writeable-mappings heuristics, fixing the same crash that
cset 12339 did, but still letting us do fast revoke of writeable
mappings of toplevel pagetables.
Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
---
xen/arch/x86/mm/shadow/multi.c | 87 ++++++++++++++++++++++++++++++-----------
1 files changed, 64 insertions(+), 23 deletions(-)
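In outline: sh_update_cr3() now revokes write access to the incoming guest
toplevel page(s) while the old shadow, and therefore its linear maps, is
still installed, and only then swaps in the new shadow.  A minimal sketch of
that ordering for the 4-level case, using names from the patch (the real
code, including the PAE variant, is in the hunks below):

    /* Sketch only; see sh_update_cr3() below for the exact code. */
    if ( shadow_remove_write_access(v, gmfn, 4, 0) != 0 )
        flush_tlb_mask(v->domain->domain_dirty_cpumask);
    /* Up to here the heuristics could still walk the old shadow's
     * linear maps; now it is safe to replace the old shadow. */
    sh_set_toplevel_shadow(v, 0, gmfn, PGC_SH_l4_shadow);
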
diff -r f0ba459065d3 -r 0bbc1e003ef2 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c Thu Nov 16 18:47:28 2006 -0800
+++ b/xen/arch/x86/mm/shadow/multi.c Mon Nov 20 12:03:51 2006 +0000
@@ -3262,9 +3262,25 @@ sh_set_toplevel_shadow(struct vcpu *v,
                        mfn_t gmfn,
                        unsigned int root_type)
 {
-    mfn_t smfn = get_shadow_status(v, gmfn, root_type);
+    mfn_t smfn;
     struct domain *d = v->domain;
-    ASSERT(pagetable_is_null(v->arch.shadow_table[slot]));
+
+    /* Decrement the refcount of the old contents of this slot */
+    smfn = pagetable_get_mfn(v->arch.shadow_table[slot]);
+    if ( mfn_x(smfn) )
+        sh_put_ref(v, smfn, 0);
+
+    /* Now figure out the new contents: is this a valid guest MFN? */
+    if ( !valid_mfn(gmfn) )
+    {
+        SHADOW_PRINTK("%u/%u [%u] invalid gmfn\n",
+                      GUEST_PAGING_LEVELS, SHADOW_PAGING_LEVELS, slot);
+        v->arch.shadow_table[slot] = pagetable_null();
+        return;
+    }
+
+    /* Guest mfn is valid: shadow it and install the shadow */
+    smfn = get_shadow_status(v, gmfn, root_type);
     if ( valid_mfn(smfn) )
     {
         /* Pull this root shadow to the front of the list of roots. */
@@ -3273,10 +3289,6 @@ sh_set_toplevel_shadow(struct vcpu *v,
     }
     else
     {
-        /* This guest MFN is a pagetable. Must revoke write access
-         * (and can't use heuristics because we have no linear map here). */
-        if ( shadow_remove_write_access(v, gmfn, 0, 0) != 0 )
-            flush_tlb_mask(v->domain->domain_dirty_cpumask);
         /* Make sure there's enough free shadow memory. */
         shadow_prealloc(d, SHADOW_MAX_ORDER);
         /* Shadow the page. */
@@ -3291,7 +3303,8 @@ sh_set_toplevel_shadow(struct vcpu *v,
         mfn_to_page(gmfn)->shadow_flags &= ~SHF_unhooked_mappings;
 #endif
 
-    /* Take a ref to this page: it will be released in sh_detach_old_tables. */
+    /* Take a ref to this page: it will be released in sh_detach_old_tables()
+     * or in the next call to sh_set_toplevel_shadow(). */
     sh_get_ref(smfn, 0);
     sh_pin(smfn);
 
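With the hunks above, sh_set_toplevel_shadow() owns the whole lifecycle of a
slot's reference, so callers no longer need to detach the old tables first.
A condensed sketch of the new shape (not the exact code; the local name
"old" is illustrative):

    /* Condensed from sh_set_toplevel_shadow() as patched above. */
    mfn_t old = pagetable_get_mfn(v->arch.shadow_table[slot]);
    if ( mfn_x(old) )
        sh_put_ref(v, old, 0);    /* drop the ref taken by the previous call */
    /* ... look up or create the shadow smfn for gmfn ... */
    sh_get_ref(smfn, 0);          /* released in sh_detach_old_tables() or
                                   * by the next call to this function */
    sh_pin(smfn);
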
@@ -3363,8 +3376,6 @@ sh_update_cr3(struct vcpu *v)
 #endif
 
     gmfn = pagetable_get_mfn(v->arch.guest_table);
-    sh_detach_old_tables(v);
-
     if ( !is_hvm_domain(d) && !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
     {
         ASSERT(v->arch.cr3 == 0);
@@ -3376,10 +3387,16 @@ sh_update_cr3(struct vcpu *v)
     ////
 #if GUEST_PAGING_LEVELS == 4
     if ( shadow_mode_external(d) || shadow_mode_translate(d) )
+    {
+        if ( v->arch.guest_vtable )
+            sh_unmap_domain_page_global(v->arch.guest_vtable);
         v->arch.guest_vtable = sh_map_domain_page_global(gmfn);
+    }
     else
         v->arch.guest_vtable = __linear_l4_table;
 #elif GUEST_PAGING_LEVELS == 3
+    if ( v->arch.guest_vtable )
+        sh_unmap_domain_page_global(v->arch.guest_vtable);
     if ( shadow_mode_external(d) )
     {
         if ( shadow_vcpu_mode_translate(v) )
@@ -3401,7 +3418,11 @@ sh_update_cr3(struct vcpu *v)
         v->arch.guest_vtable = sh_map_domain_page_global(gmfn);
 #elif GUEST_PAGING_LEVELS == 2
     if ( shadow_mode_external(d) || shadow_mode_translate(d) )
+    {
+        if ( v->arch.guest_vtable )
+            sh_unmap_domain_page_global(v->arch.guest_vtable);
         v->arch.guest_vtable = sh_map_domain_page_global(gmfn);
+    }
     else
         v->arch.guest_vtable = __linear_l2_table;
 #else
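The guest_vtable hunks follow from the same reordering: the
sh_detach_old_tables() call removed above was what used to drop this global
mapping, so sh_update_cr3() must now unmap its own stale mapping before
taking a new one, or the old domain-page mapping would leak on every CR3
change.  The pattern, repeated for each GUEST_PAGING_LEVELS case:

    /* Condensed from the hunks above. */
    if ( v->arch.guest_vtable )
        sh_unmap_domain_page_global(v->arch.guest_vtable);  /* old mapping */
    v->arch.guest_vtable = sh_map_domain_page_global(gmfn); /* new toplevel */
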
@@ -3417,29 +3438,49 @@ sh_update_cr3(struct vcpu *v)
     //// vcpu->arch.shadow_table[]
     ////
 
+    /* We revoke write access to the new guest toplevel page(s) before we
+     * replace the old shadow pagetable(s), so that we can safely use the
+     * (old) shadow linear maps in the writeable mapping heuristics. */
 #if GUEST_PAGING_LEVELS == 2
+    if ( shadow_remove_write_access(v, gmfn, 2, 0) != 0 )
+        flush_tlb_mask(v->domain->domain_dirty_cpumask);
     sh_set_toplevel_shadow(v, 0, gmfn, PGC_SH_l2_shadow);
 #elif GUEST_PAGING_LEVELS == 3
     /* PAE guests have four shadow_table entries, based on the
      * current values of the guest's four l3es. */
     {
-        int i;
+        int i, flush = 0;
+        gfn_t gl2gfn;
+        mfn_t gl2mfn;
         guest_l3e_t *gl3e = (guest_l3e_t*)v->arch.guest_vtable;
-        for ( i = 0; i < 4; i++ )
-        {
-            ASSERT(pagetable_is_null(v->arch.shadow_table[i]));
+        /* First, make all four entries read-only. */
+        for ( i = 0; i < 4; i++ )
+        {
             if ( guest_l3e_get_flags(gl3e[i]) & _PAGE_PRESENT )
             {
-                gfn_t gl2gfn = guest_l3e_get_gfn(gl3e[i]);
-                mfn_t gl2mfn = vcpu_gfn_to_mfn(v, gl2gfn);
-                if ( valid_mfn(gl2mfn) )
-                    sh_set_toplevel_shadow(v, i, gl2mfn, (i == 3)
-                                           ? PGC_SH_l2h_shadow
-                                           : PGC_SH_l2_shadow);
+                gl2gfn = guest_l3e_get_gfn(gl3e[i]);
+                gl2mfn = vcpu_gfn_to_mfn(v, gl2gfn);
+                flush |= shadow_remove_write_access(v, gl2mfn, 2, 0);
             }
         }
+        if ( flush )
+            flush_tlb_mask(v->domain->domain_dirty_cpumask);
+        /* Now install the new shadows. */
+        for ( i = 0; i < 4; i++ )
+        {
+            if ( guest_l3e_get_flags(gl3e[i]) & _PAGE_PRESENT )
+            {
+                gl2gfn = guest_l3e_get_gfn(gl3e[i]);
+                gl2mfn = vcpu_gfn_to_mfn(v, gl2gfn);
+                sh_set_toplevel_shadow(v, i, gl2mfn, (i == 3)
+                                       ? PGC_SH_l2h_shadow
+                                       : PGC_SH_l2_shadow);
+            }
+        }
     }
 #elif GUEST_PAGING_LEVELS == 4
+    if ( shadow_remove_write_access(v, gmfn, 4, 0) != 0 )
+        flush_tlb_mask(v->domain->domain_dirty_cpumask);
     sh_set_toplevel_shadow(v, 0, gmfn, PGC_SH_l4_shadow);
 #else
 #error This should never happen
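The PAE case needs two passes because installing the new shadow for one slot
drops the old shadow for that slot, and with it the linear maps that the
writeable-mapping heuristics walk; revoking write access on all four l2
pages first keeps those maps intact for every slot, and also batches the
work into at most one TLB flush instead of four.  Condensed shape (sketch
only; the real code is above):

    for ( i = 0; i < 4; i++ )          /* pass 1: revoke write access */
        if ( guest_l3e_get_flags(gl3e[i]) & _PAGE_PRESENT )
            flush |= shadow_remove_write_access(
                         v, vcpu_gfn_to_mfn(v, guest_l3e_get_gfn(gl3e[i])),
                         2, 0);
    if ( flush )
        flush_tlb_mask(v->domain->domain_dirty_cpumask);   /* one flush */
    for ( i = 0; i < 4; i++ )          /* pass 2: install the new shadows */
        if ( guest_l3e_get_flags(gl3e[i]) & _PAGE_PRESENT )
            sh_set_toplevel_shadow(
                v, i, vcpu_gfn_to_mfn(v, guest_l3e_get_gfn(gl3e[i])),
                (i == 3) ? PGC_SH_l2h_shadow : PGC_SH_l2_shadow);
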
@@ -3527,9 +3568,9 @@ static int sh_guess_wrmap(struct vcpu *v
 {
     shadow_l1e_t sl1e, *sl1p;
     shadow_l2e_t *sl2p;
-#if GUEST_PAGING_LEVELS >= 3
+#if SHADOW_PAGING_LEVELS >= 3
     shadow_l3e_t *sl3p;
-#if GUEST_PAGING_LEVELS >= 4
+#if SHADOW_PAGING_LEVELS >= 4
    shadow_l4e_t *sl4p;
 #endif
 #endif
@@ -3537,14 +3578,14 @@ static int sh_guess_wrmap(struct vcpu *v
 
     /* Carefully look in the shadow linear map for the l1e we expect */
-#if GUEST_PAGING_LEVELS >= 4
+#if SHADOW_PAGING_LEVELS >= 4
     sl4p = sh_linear_l4_table(v) + shadow_l4_linear_offset(vaddr);
     if ( !(shadow_l4e_get_flags(*sl4p) & _PAGE_PRESENT) )
         return 0;
     sl3p = sh_linear_l3_table(v) + shadow_l3_linear_offset(vaddr);
     if ( !(shadow_l3e_get_flags(*sl3p) & _PAGE_PRESENT) )
         return 0;
-#elif GUEST_PAGING_LEVELS == 3
+#elif SHADOW_PAGING_LEVELS == 3
     sl3p = ((shadow_l3e_t *) v->arch.shadow.l3table)
         + shadow_l3_linear_offset(vaddr);
     if ( !(shadow_l3e_get_flags(*sl3p) & _PAGE_PRESENT) )
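Finally, sh_guess_wrmap(): the quick-guess walk descends the shadow linear
map, and the number of levels in that map is a property of the shadow
pagetables, not of the guest's.  A 2-level guest running on PAE shadows has
GUEST_PAGING_LEVELS == 2 but SHADOW_PAGING_LEVELS == 3, so an l3e still has
to be checked before the l2e and l1e; keying these #ifs on
GUEST_PAGING_LEVELS skipped levels that really exist in the map.  Condensed:

    /* Walk depth is fixed by SHADOW_PAGING_LEVELS, as patched above. */
#if SHADOW_PAGING_LEVELS >= 4          /* 4-level shadows: check l4e, l3e */
    if ( !(shadow_l4e_get_flags(*sl4p) & _PAGE_PRESENT) ) return 0;
    if ( !(shadow_l3e_get_flags(*sl3p) & _PAGE_PRESENT) ) return 0;
#elif SHADOW_PAGING_LEVELS == 3        /* PAE shadows: check the l3e only */
    if ( !(shadow_l3e_get_flags(*sl3p) & _PAGE_PRESENT) ) return 0;
#endif
    /* ... then the l2e, and finally the candidate l1e itself. */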