
[Xen-devel] [PATCH 3 of 6] x86/mm/shadow: fix potential p2m/paging deadlock when emulating page table writes


  • To: xen-devel@xxxxxxxxxxxxx
  • From: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
  • Date: Fri, 13 Apr 2012 12:22:21 -0400
  • Cc: andres@xxxxxxxxxxxxxx, tim@xxxxxxx
  • Delivery-date: Fri, 13 Apr 2012 16:17:57 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xen.org>

 xen/arch/x86/mm/shadow/multi.c |  25 +++++++++++++++++++++++++
 1 files changed, 25 insertions(+), 0 deletions(-)


Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>

diff -r f8fd4a4239a8 -r 964c6cbad926 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -5033,9 +5033,21 @@ sh_x86_emulate_write(struct vcpu *v, uns
     if ( (vaddr & (bytes - 1)) && !is_hvm_vcpu(v)  )
         return X86EMUL_UNHANDLEABLE;
 
+    /* To prevent a shadow-mode deadlock, we need to hold the p2m lock from
+     * here onwards. emulate_unmap_dest may need to verify the pte being
+     * written here, and thus call get_gfn on the gfn contained in the
+     * payload being written. p2m_lock is recursive, so all is well in that
+     * regard. Further, holding the p2m lock ensures the page table gfn
+     * being written to won't go away (although that could also be achieved
+     * with a page reference, as done elsewhere).
+     */
+    p2m_lock(p2m_get_hostp2m(v->domain));
     addr = emulate_map_dest(v, vaddr, bytes, sh_ctxt);
     if ( emulate_map_dest_failed(addr) )
+    {
+        p2m_unlock(p2m_get_hostp2m(v->domain));
         return (long)addr;
+    }
 
     paging_lock(v->domain);
     memcpy(addr, src, bytes);
@@ -5059,6 +5071,7 @@ sh_x86_emulate_write(struct vcpu *v, uns
     emulate_unmap_dest(v, addr, bytes, sh_ctxt);
     shadow_audit_tables(v);
     paging_unlock(v->domain);
+    p2m_unlock(p2m_get_hostp2m(v->domain));
     return X86EMUL_OKAY;
 }
 
@@ -5075,9 +5088,14 @@ sh_x86_emulate_cmpxchg(struct vcpu *v, u
     if ( (vaddr & (bytes - 1)) && !is_hvm_vcpu(v)  )
         return X86EMUL_UNHANDLEABLE;
 
+    /* see comment in sh_x86_emulate_write. */
+    p2m_lock(p2m_get_hostp2m(v->domain));
     addr = emulate_map_dest(v, vaddr, bytes, sh_ctxt);
     if ( emulate_map_dest_failed(addr) )
+    {
+        p2m_unlock(p2m_get_hostp2m(v->domain));
         return (long)addr;
+    }
 
     paging_lock(v->domain);
     switch ( bytes )
@@ -5101,6 +5119,7 @@ sh_x86_emulate_cmpxchg(struct vcpu *v, u
     emulate_unmap_dest(v, addr, bytes, sh_ctxt);
     shadow_audit_tables(v);
     paging_unlock(v->domain);
+    p2m_unlock(p2m_get_hostp2m(v->domain));
     return rv;
 }
 
@@ -5119,9 +5138,14 @@ sh_x86_emulate_cmpxchg8b(struct vcpu *v,
     if ( (vaddr & 7) && !is_hvm_vcpu(v) )
         return X86EMUL_UNHANDLEABLE;
 
+    /* see comment in sh_x86_emulate_write. */
+    p2m_lock(p2m_get_hostp2m(v->domain));
     addr = emulate_map_dest(v, vaddr, 8, sh_ctxt);
     if ( emulate_map_dest_failed(addr) )
+    {
+        p2m_unlock(p2m_get_hostp2m(v->domain));
         return (long)addr;
+    }
 
     old = (((u64) old_hi) << 32) | (u64) old_lo;
     new = (((u64) new_hi) << 32) | (u64) new_lo;
@@ -5135,6 +5159,7 @@ sh_x86_emulate_cmpxchg8b(struct vcpu *v,
     emulate_unmap_dest(v, addr, 8, sh_ctxt);
     shadow_audit_tables(v);
     paging_unlock(v->domain);
+    p2m_unlock(p2m_get_hostp2m(v->domain));
     return rv;
 }
 #endif
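
For reviewers who want to poke at the locking pattern in isolation: below is a
minimal standalone sketch (plain C with pthreads, *not* Xen code; every name in
it is illustrative) of why taking a recursive outer lock before the paging lock
avoids the deadlock. The nested acquisition in verify_pte() stands in for
emulate_unmap_dest() calling get_gfn() on the written payload; it succeeds
rather than blocking because the outer mutex is recursive, mirroring the
recursive p2m_lock used above.

/* Build with: gcc -pthread sketch.c -o sketch   (illustration only) */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t p2m_lock;                  /* recursive, like the p2m lock */
static pthread_mutex_t paging_lock = PTHREAD_MUTEX_INITIALIZER;

static void verify_pte(void)
{
    /* Stands in for get_gfn() on the gfn contained in the written payload:
     * it re-takes the (recursive) outer lock while the paging lock is held. */
    pthread_mutex_lock(&p2m_lock);
    puts("nested p2m-style lock acquired without deadlocking");
    pthread_mutex_unlock(&p2m_lock);
}

static void emulate_write(void)
{
    pthread_mutex_lock(&p2m_lock);        /* held from here onwards */
    pthread_mutex_lock(&paging_lock);
    /* ... the emulated write (memcpy) would happen here ... */
    verify_pte();                         /* stands in for emulate_unmap_dest() */
    pthread_mutex_unlock(&paging_lock);
    pthread_mutex_unlock(&p2m_lock);
}

int main(void)
{
    pthread_mutexattr_t attr;
    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(&p2m_lock, &attr);

    emulate_write();
    return 0;
}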

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
