To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-3.0.4-testing] [XEN] Better diagnostics of recursive shadow faults
From: "Xen patchbot-3.0.4-testing" <patchbot-3.0.4-testing@xxxxxxxxxxxxxxxxxxx>
Date: Wed, 03 Jan 2007 14:15:34 -0800
Delivery-date: Wed, 03 Jan 2007 14:15:55 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
# Date 1166615581 0
# Node ID d9685264b28cdaa39d2eef62a8c6c9df731be0bd
# Parent  bcaf2f568ebdd00812dd4c9e50f890b958d86ff7
[XEN] Better diagnostics of recursive shadow faults
Give a trace of the recursive fault instead of BUG()ing in the shadow handler.
Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>

Based on xen-unstable changeset 13101:f7a2cd8b0a8e03e94babc88c9c25fb5008f7a125
---
 xen/arch/x86/mm/shadow/common.c |   44 ++++++++++++++++++++--------------------
 xen/arch/x86/mm/shadow/multi.c  |   24 ++++++++++++++++-----
 xen/include/asm-x86/shadow.h    |    6 ++---
 3 files changed, 43 insertions(+), 31 deletions(-)
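
For readers skimming the long rename, the substance of the patch is in two places: shadow_lock_is_acquired() becomes shadow_locked_by_me(), since the macro only ever tells you whether the *current* CPU holds the shadow lock (current->processor == d->arch.shadow.locker), and sh_page_fault() now prints a diagnostic naming the lock holder and bails out instead of BUG()ing when the fault re-enters with the lock already held. The standalone C sketch below illustrates that pattern; it is not part of the patch, and the struct, the pthread mutex, the THIS_CPU constant and the function bodies are illustrative stand-ins for the real Xen spinlock and locker/locker_function bookkeeping.

/* Sketch (not Xen code): a lock that records who holds it, an
 * ownership test, and a fault handler that reports recursion
 * instead of deadlocking or BUG()ing.  Build with: cc -pthread. */
#include <stdio.h>
#include <pthread.h>

struct shadow_lock {
    pthread_mutex_t  mutex;            /* stand-in for Xen's spinlock        */
    int              locker;           /* CPU that holds the lock, or -1     */
    const char      *locker_function;  /* recorded for diagnostics           */
};

#define THIS_CPU 0                     /* sketch: pretend we are CPU 0       */

static void shadow_lock(struct shadow_lock *l, const char *fn)
{
    pthread_mutex_lock(&l->mutex);
    l->locker = THIS_CPU;
    l->locker_function = fn;
}

static void shadow_unlock(struct shadow_lock *l)
{
    l->locker = -1;
    l->locker_function = "nobody";
    pthread_mutex_unlock(&l->mutex);
}

/* Equivalent of the renamed shadow_locked_by_me(): true only when the
 * current CPU is the one recorded as holding the lock. */
static int shadow_locked_by_me(const struct shadow_lock *l)
{
    return l->locker == THIS_CPU;
}

/* Fault-handler sketch: if the fault arrived while this CPU already
 * held the shadow lock, trace who took it and decline to handle the
 * fault, rather than asserting when trying to take the lock again. */
static int sh_page_fault_sketch(struct shadow_lock *l)
{
    if (shadow_locked_by_me(l)) {
        fprintf(stderr, "Recursive shadow fault: lock was taken by %s\n",
                l->locker_function);
        return 0;                      /* let the normal fault path run      */
    }
    shadow_lock(l, __func__);
    /* ... normal shadow fault handling would go here ... */
    shadow_unlock(l);
    return 1;
}

int main(void)
{
    struct shadow_lock l;
    pthread_mutex_init(&l.mutex, NULL);
    l.locker = -1;
    l.locker_function = "nobody";

    sh_page_fault_sketch(&l);          /* lock free: fault is handled        */

    shadow_lock(&l, "main");           /* simulate faulting while locked     */
    sh_page_fault_sketch(&l);          /* prints the diagnostic, returns 0   */
    shadow_unlock(&l);
    return 0;
}
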

diff -r bcaf2f568ebd -r d9685264b28c xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Wed Dec 20 11:39:22 2006 +0000
+++ b/xen/arch/x86/mm/shadow/common.c   Wed Dec 20 11:53:01 2006 +0000
@@ -553,7 +553,7 @@ shadow_validate_guest_entry(struct vcpu 
 {
     int rc;
 
-    ASSERT(shadow_lock_is_acquired(v->domain));
+    ASSERT(shadow_locked_by_me(v->domain));
     rc = __shadow_validate_guest_entry(v, gmfn, entry, sizeof(l1_pgentry_t));
     shadow_audit_tables(v);
     return rc;
@@ -569,7 +569,7 @@ shadow_validate_guest_pt_write(struct vc
     struct domain *d = v->domain;
     int rc;
 
-    ASSERT(shadow_lock_is_acquired(v->domain));
+    ASSERT(shadow_locked_by_me(v->domain));
     rc = __shadow_validate_guest_entry(v, gmfn, entry, size);
     if ( rc & SHADOW_SET_FLUSH )
         /* Need to flush TLBs to pick up shadow PT changes */
@@ -858,7 +858,7 @@ mfn_t shadow_alloc(struct domain *d,
     void *p;
     int i;
 
-    ASSERT(shadow_lock_is_acquired(d));
+    ASSERT(shadow_locked_by_me(d));
     ASSERT(order <= SHADOW_MAX_ORDER);
     ASSERT(shadow_type != SH_type_none);
     perfc_incrc(shadow_alloc);
@@ -928,7 +928,7 @@ void shadow_free(struct domain *d, mfn_t
     unsigned long mask;
     int i;
 
-    ASSERT(shadow_lock_is_acquired(d));
+    ASSERT(shadow_locked_by_me(d));
     perfc_incrc(shadow_free);
 
     shadow_type = sp->type;
@@ -997,7 +997,7 @@ shadow_alloc_p2m_pages(struct domain *d)
 {
     struct page_info *pg;
     u32 i;
-    ASSERT(shadow_lock_is_acquired(d));
+    ASSERT(shadow_locked_by_me(d));
     
     if ( d->arch.shadow.total_pages 
          < (shadow_min_acceptable_pages(d) + (1<<SHADOW_MAX_ORDER)) )
@@ -1143,7 +1143,7 @@ p2m_next_level(struct domain *d, mfn_t *
             p2m_install_entry_in_monitors(d, (l3_pgentry_t *)p2m_entry);
             /* Also, any vcpus running on shadows of the p2m need to 
              * reload their CR3s so the change propagates to the shadow */
-            ASSERT(shadow_lock_is_acquired(d));
+            ASSERT(shadow_locked_by_me(d));
             for_each_vcpu(d, v) 
             {
                 if ( pagetable_get_pfn(v->arch.guest_table) 
@@ -1435,7 +1435,7 @@ static unsigned int set_sh_allocation(st
     unsigned int lower_bound;
     int j;
 
-    ASSERT(shadow_lock_is_acquired(d));
+    ASSERT(shadow_locked_by_me(d));
     
     /* Don't allocate less than the minimum acceptable, plus one page per
      * megabyte of RAM (for the p2m table) */
@@ -1614,7 +1614,7 @@ static int shadow_hash_alloc(struct doma
 {
     struct shadow_page_info **table;
 
-    ASSERT(shadow_lock_is_acquired(d));
+    ASSERT(shadow_locked_by_me(d));
     ASSERT(!d->arch.shadow.hash_table);
 
     table = xmalloc_array(struct shadow_page_info *, SHADOW_HASH_BUCKETS);
@@ -1629,7 +1629,7 @@ static int shadow_hash_alloc(struct doma
  * This function does not care whether the table is populated. */
 static void shadow_hash_teardown(struct domain *d)
 {
-    ASSERT(shadow_lock_is_acquired(d));
+    ASSERT(shadow_locked_by_me(d));
     ASSERT(d->arch.shadow.hash_table);
 
     xfree(d->arch.shadow.hash_table);
@@ -1645,7 +1645,7 @@ mfn_t shadow_hash_lookup(struct vcpu *v,
     struct shadow_page_info *sp, *prev;
     key_t key;
 
-    ASSERT(shadow_lock_is_acquired(d));
+    ASSERT(shadow_locked_by_me(d));
     ASSERT(d->arch.shadow.hash_table);
     ASSERT(t);
 
@@ -1699,7 +1699,7 @@ void shadow_hash_insert(struct vcpu *v, 
     struct shadow_page_info *sp;
     key_t key;
     
-    ASSERT(shadow_lock_is_acquired(d));
+    ASSERT(shadow_locked_by_me(d));
     ASSERT(d->arch.shadow.hash_table);
     ASSERT(t);
 
@@ -1725,7 +1725,7 @@ void shadow_hash_delete(struct vcpu *v, 
     struct shadow_page_info *sp, *x;
     key_t key;
 
-    ASSERT(shadow_lock_is_acquired(d));
+    ASSERT(shadow_locked_by_me(d));
     ASSERT(d->arch.shadow.hash_table);
     ASSERT(t);
 
@@ -1780,7 +1780,7 @@ static void hash_foreach(struct vcpu *v,
     struct shadow_page_info *x;
 
     /* Say we're here, to stop hash-lookups reordering the chains */
-    ASSERT(shadow_lock_is_acquired(d));
+    ASSERT(shadow_locked_by_me(d));
     ASSERT(d->arch.shadow.hash_walking == 0);
     d->arch.shadow.hash_walking = 1;
 
@@ -1937,7 +1937,7 @@ int shadow_remove_write_access(struct vc
         ;
     struct page_info *pg = mfn_to_page(gmfn);
 
-    ASSERT(shadow_lock_is_acquired(v->domain));
+    ASSERT(shadow_locked_by_me(v->domain));
 
     /* Only remove writable mappings if we are doing shadow refcounts.
      * In guest refcounting, we trust Xen to already be restricting
@@ -2129,7 +2129,7 @@ int shadow_remove_all_mappings(struct vc
     if ( (page->count_info & PGC_count_mask) == 0 )
         return 0;
 
-    ASSERT(shadow_lock_is_acquired(v->domain));
+    ASSERT(shadow_locked_by_me(v->domain));
 
     /* XXX TODO: 
      * Heuristics for finding the (probably) single mapping of this gmfn */
@@ -2296,7 +2296,7 @@ void sh_remove_shadows(struct vcpu *v, m
         0  /* unused  */
     };
 
-    ASSERT(shadow_lock_is_acquired(v->domain));
+    ASSERT(shadow_locked_by_me(v->domain));
     ASSERT(!(all && fast));
 
     pg = mfn_to_page(gmfn);
@@ -2382,7 +2382,7 @@ void sh_update_paging_modes(struct vcpu 
     struct shadow_paging_mode *old_mode = v->arch.shadow.mode;
     mfn_t old_guest_table;
 
-    ASSERT(shadow_lock_is_acquired(d));
+    ASSERT(shadow_locked_by_me(d));
 
     // Valid transitions handled by this function:
     // - For PV guests:
@@ -2560,7 +2560,7 @@ static void sh_new_mode(struct domain *d
 {
     struct vcpu *v;
 
-    ASSERT(shadow_lock_is_acquired(d));
+    ASSERT(shadow_locked_by_me(d));
     ASSERT(d != current->domain);
     d->arch.shadow.mode = new_mode;
     if ( new_mode & SHM2_translate ) 
@@ -2661,7 +2661,7 @@ void shadow_teardown(struct domain *d)
     ASSERT(test_bit(_DOMF_dying, &d->domain_flags));
     ASSERT(d != current->domain);
 
-    if ( !shadow_lock_is_acquired(d) )
+    if ( !shadow_locked_by_me(d) )
         shadow_lock(d); /* Keep various asserts happy */
 
     if ( shadow_mode_enabled(d) )
@@ -2744,7 +2744,7 @@ static int shadow_one_bit_enable(struct 
 static int shadow_one_bit_enable(struct domain *d, u32 mode)
 /* Turn on a single shadow mode feature */
 {
-    ASSERT(shadow_lock_is_acquired(d));
+    ASSERT(shadow_locked_by_me(d));
 
     /* Sanity check the call */
     if ( d == current->domain || (d->arch.shadow.mode & mode) )
@@ -2773,7 +2773,7 @@ static int shadow_one_bit_disable(struct
 /* Turn off a single shadow mode feature */
 {
     struct vcpu *v;
-    ASSERT(shadow_lock_is_acquired(d));
+    ASSERT(shadow_locked_by_me(d));
 
     /* Sanity check the call */
     if ( d == current->domain || !(d->arch.shadow.mode & mode) )
@@ -3134,7 +3134,7 @@ void sh_do_mark_dirty(struct domain *d, 
 {
     unsigned long pfn;
 
-    ASSERT(shadow_lock_is_acquired(d));
+    ASSERT(shadow_locked_by_me(d));
     ASSERT(shadow_mode_log_dirty(d));
 
     if ( !mfn_valid(gmfn) )
diff -r bcaf2f568ebd -r d9685264b28c xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Wed Dec 20 11:39:22 2006 +0000
+++ b/xen/arch/x86/mm/shadow/multi.c    Wed Dec 20 11:53:01 2006 +0000
@@ -227,7 +227,7 @@ static inline int
 static inline int 
 guest_walk_tables(struct vcpu *v, unsigned long va, walk_t *gw, int guest_op)
 {
-    ASSERT(!guest_op || shadow_lock_is_acquired(v->domain));
+    ASSERT(!guest_op || shadow_locked_by_me(v->domain));
 
     perfc_incrc(shadow_guest_walk);
     memset(gw, 0, sizeof(*gw));
@@ -442,7 +442,7 @@ static u32 guest_set_ad_bits(struct vcpu
 
     ASSERT(ep && !(((unsigned long)ep) & ((sizeof *ep) - 1)));
     ASSERT(level <= GUEST_PAGING_LEVELS);
-    ASSERT(shadow_lock_is_acquired(v->domain));
+    ASSERT(shadow_locked_by_me(v->domain));
 
     flags = guest_l1e_get_flags(*ep);
 
@@ -2657,6 +2657,18 @@ static int sh_page_fault(struct vcpu *v,
     }
 #endif /* SHOPT_FAST_FAULT_PATH */
 
+    /* Detect if this page fault happened while we were already in Xen
+     * doing a shadow operation.  If that happens, the only thing we can
+     * do is let Xen's normal fault handlers try to fix it.  In any case, 
+     * a diagnostic trace of the fault will be more useful than 
+     * a BUG() when we try to take the lock again. */
+    if ( unlikely(shadow_locked_by_me(d)) )
+    {
+        SHADOW_ERROR("Recursive shadow fault: lock was taken by %s\n",
+                     d->arch.shadow.locker_function);
+        return 0;
+    }
+
     shadow_lock(d);
     
     shadow_audit_tables(v);
@@ -3343,7 +3355,7 @@ sh_update_cr3(struct vcpu *v)
     u32 guest_idx=0;
 #endif
 
-    ASSERT(shadow_lock_is_acquired(v->domain));
+    ASSERT(shadow_locked_by_me(v->domain));
     ASSERT(v->arch.shadow.mode);
 
     ////
@@ -3837,7 +3849,7 @@ sh_x86_emulate_write(struct vcpu *v, uns
     if ( vaddr & (bytes-1) )
         return X86EMUL_UNHANDLEABLE;
 
-    ASSERT(shadow_lock_is_acquired(v->domain));
+    ASSERT(shadow_locked_by_me(v->domain));
     ASSERT(((vaddr & ~PAGE_MASK) + bytes) <= PAGE_SIZE);
 
     if ( (addr = emulate_map_dest(v, vaddr, sh_ctxt, &mfn)) == NULL )
@@ -3865,7 +3877,7 @@ sh_x86_emulate_cmpxchg(struct vcpu *v, u
     unsigned long prev;
     int rv = X86EMUL_CONTINUE;
 
-    ASSERT(shadow_lock_is_acquired(v->domain));
+    ASSERT(shadow_locked_by_me(v->domain));
     ASSERT(bytes <= sizeof(unsigned long));
 
     if ( vaddr & (bytes-1) )
@@ -3914,7 +3926,7 @@ sh_x86_emulate_cmpxchg8b(struct vcpu *v,
     u64 old, new, prev;
     int rv = X86EMUL_CONTINUE;
 
-    ASSERT(shadow_lock_is_acquired(v->domain));
+    ASSERT(shadow_locked_by_me(v->domain));
 
     if ( vaddr & 7 )
         return X86EMUL_UNHANDLEABLE;
diff -r bcaf2f568ebd -r d9685264b28c xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h      Wed Dec 20 11:39:22 2006 +0000
+++ b/xen/include/asm-x86/shadow.h      Wed Dec 20 11:53:01 2006 +0000
@@ -105,7 +105,7 @@
         (_d)->arch.shadow.locker_function = "nobody";   \
     } while (0)
 
-#define shadow_lock_is_acquired(_d)                     \
+#define shadow_locked_by_me(_d)                     \
     (current->processor == (_d)->arch.shadow.locker)
 
 #define shadow_lock(_d)                                                 \
@@ -337,7 +337,7 @@ static inline void mark_dirty(struct dom
 /* Internal version, for when the shadow lock is already held */
 static inline void sh_mark_dirty(struct domain *d, mfn_t gmfn)
 {
-    ASSERT(shadow_lock_is_acquired(d));
+    ASSERT(shadow_locked_by_me(d));
     if ( unlikely(shadow_mode_log_dirty(d)) )
         sh_do_mark_dirty(d, gmfn);
 }
@@ -552,7 +552,7 @@ extern void sh_remove_shadows(struct vcp
 extern void sh_remove_shadows(struct vcpu *v, mfn_t gmfn, int fast, int all);
 static inline void shadow_remove_all_shadows(struct vcpu *v, mfn_t gmfn)
 {
-    int was_locked = shadow_lock_is_acquired(v->domain);
+    int was_locked = shadow_locked_by_me(v->domain);
     if ( !was_locked )
         shadow_lock(v->domain);
     sh_remove_shadows(v, gmfn, 0, 1);
