
[Xen-devel] [PATCH] x86/shadow: account for ioreq server pages before complaining about not found mapping



prepare_ring_for_helper(), just like share_xen_page_with_guest(),
takes a write reference on the page, and hence this reference should
similarly be accounted for when determining whether to log a complaint.
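
In effect (restating the shadow/common.c hunk below; the local
"tolerated" variable exists only for illustration and is not part of
the patch) the accepted reference pattern becomes: up to three untyped
references in external (HVM) mode, plus a single typed reference when
the page is either a Xen heap page or an ioreq server page:

    /* Illustrative sketch only - mirrors the adjusted condition in
     * sh_remove_all_mappings(); the complaint is logged when this
     * does not hold. */
    bool_t tolerated = shadow_mode_external(d)
                       && (page->count_info & PGC_count_mask) <= 3
                       && ((page->u.inuse.type_info & PGT_count_mask)
                           == (is_xen_heap_page(page) ||
                               is_ioreq_server_page(d, page)));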

This requires using recursive locking for the ioreq server lock, as the
offending invocation of sh_remove_all_mappings() is down the call stack
from hvm_set_ioreq_server_state(). (While the conversion isn't strictly
needed in all of the other instances, convert them all for consistency.)
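
For illustration only - the functions below are simplified stand-ins,
not code from this patch - the re-entrancy pattern that makes a plain
spin_lock() unsafe here is:

    /* Hypothetical sketch of the lock nesting. */
    static bool_t lookup_page(struct domain *d,
                              const struct page_info *page)
    {
        /* Re-acquires a lock the same CPU may already hold further up
         * the stack; a non-recursive spin_lock() here would deadlock. */
        spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
        /* ... scan the ioreq server list ... */
        spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
        return 0;
    }

    static void set_state(struct domain *d)
    {
        spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
        /* ... eventually reaches sh_remove_all_mappings(), which in
         * turn calls is_ioreq_server_page() (i.e. lookup_page() in
         * this sketch) ... */
        lookup_page(d, NULL);
        spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
    }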

At once improve the usefulness of the shadow error message: log all
values involved in triggering it, as well as the GFN (to aid
identifying which guest page is affected - in cases like the one here
the GFN is invariant across invocations, while the MFN obviously can
[and will] vary).
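
Purely as an illustration (the numbers below are made up, and the
SHADOW_ERROR prefix is omitted), a triggering page would now be
reported along the lines of

    can't find all mappings of mfn 5432a (gfn 1052f): c=180000002 t=8400000001 x=0 i=0

i.e. besides the MFN the message names the GFN, the raw count_info (c)
and type_info (t), and whether the page was classified as a Xen heap
page (x) or as an ioreq server page (i).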

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -240,6 +240,30 @@ static int hvm_map_ioreq_page(
     return 0;
 }
 
+bool_t is_ioreq_server_page(struct domain *d, const struct page_info *page)
+{
+    const struct hvm_ioreq_server *s;
+    bool_t found = 0;
+
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+
+    list_for_each_entry ( s,
+                          &d->arch.hvm_domain.ioreq_server.list,
+                          list_entry )
+    {
+        if ( (s->ioreq.va && s->ioreq.page == page) ||
+             (s->bufioreq.va && s->bufioreq.page == page) )
+        {
+            found = 1;
+            break;
+        }
+    }
+
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+
+    return found;
+}
+
 static void hvm_remove_ioreq_gmfn(
     struct domain *d, struct hvm_ioreq_page *iorp)
 {
@@ -671,7 +695,7 @@ int hvm_create_ioreq_server(struct domai
         goto fail1;
 
     domain_pause(d);
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -EEXIST;
     if ( is_default && d->arch.hvm_domain.default_ioreq_server != NULL )
@@ -694,14 +718,14 @@ int hvm_create_ioreq_server(struct domai
     if ( id )
         *id = s->id;
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
     domain_unpause(d);
 
     return 0;
 
  fail3:
  fail2:
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
     domain_unpause(d);
 
     xfree(s);
@@ -714,7 +738,7 @@ int hvm_destroy_ioreq_server(struct doma
     struct hvm_ioreq_server *s;
     int rc;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
     list_for_each_entry ( s,
@@ -743,7 +767,7 @@ int hvm_destroy_ioreq_server(struct doma
         break;
     }
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     return rc;
 }
@@ -756,7 +780,7 @@ int hvm_get_ioreq_server_info(struct dom
     struct hvm_ioreq_server *s;
     int rc;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
     list_for_each_entry ( s,
@@ -781,7 +805,7 @@ int hvm_get_ioreq_server_info(struct dom
         break;
     }
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     return rc;
 }
@@ -793,7 +817,7 @@ int hvm_map_io_range_to_ioreq_server(str
     struct hvm_ioreq_server *s;
     int rc;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
     list_for_each_entry ( s,
@@ -833,7 +857,7 @@ int hvm_map_io_range_to_ioreq_server(str
         }
     }
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     return rc;
 }
@@ -845,7 +869,7 @@ int hvm_unmap_io_range_from_ioreq_server
     struct hvm_ioreq_server *s;
     int rc;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
     list_for_each_entry ( s,
@@ -885,7 +909,7 @@ int hvm_unmap_io_range_from_ioreq_server
         }
     }
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     return rc;
 }
@@ -896,7 +920,7 @@ int hvm_set_ioreq_server_state(struct do
     struct list_head *entry;
     int rc;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
     list_for_each ( entry,
@@ -925,7 +949,7 @@ int hvm_set_ioreq_server_state(struct do
         break;
     }
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
     return rc;
 }
 
@@ -934,7 +958,7 @@ int hvm_all_ioreq_servers_add_vcpu(struc
     struct hvm_ioreq_server *s;
     int rc;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     list_for_each_entry ( s,
                           &d->arch.hvm_domain.ioreq_server.list,
@@ -947,7 +971,7 @@ int hvm_all_ioreq_servers_add_vcpu(struc
             goto fail;
     }
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     return 0;
 
@@ -957,7 +981,7 @@ int hvm_all_ioreq_servers_add_vcpu(struc
                           list_entry )
         hvm_ioreq_server_remove_vcpu(s, v);
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     return rc;
 }
@@ -966,21 +990,21 @@ void hvm_all_ioreq_servers_remove_vcpu(s
 {
     struct hvm_ioreq_server *s;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     list_for_each_entry ( s,
                           &d->arch.hvm_domain.ioreq_server.list,
                           list_entry )
         hvm_ioreq_server_remove_vcpu(s, v);
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 }
 
 void hvm_destroy_all_ioreq_servers(struct domain *d)
 {
     struct hvm_ioreq_server *s, *next;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     /* No need to domain_pause() as the domain is being torn down */
 
@@ -1003,7 +1027,7 @@ void hvm_destroy_all_ioreq_servers(struc
         xfree(s);
     }
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 }
 
 static int hvm_replace_event_channel(struct vcpu *v, domid_t remote_domid,
@@ -1027,7 +1051,7 @@ int hvm_set_dm_domain(struct domain *d,
     struct hvm_ioreq_server *s;
     int rc = 0;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     /*
      * Lack of ioreq server is not a failure. HVM_PARAM_DM_DOMAIN will
@@ -1076,7 +1100,7 @@ int hvm_set_dm_domain(struct domain *d,
     domain_unpause(d);
 
  done:
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
     return rc;
 }
 
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -35,6 +35,7 @@
 #include <asm/current.h>
 #include <asm/flushtlb.h>
 #include <asm/shadow.h>
+#include <asm/hvm/ioreq.h>
 #include <xen/numa.h>
 #include "private.h"
 
@@ -2591,7 +2592,8 @@ int sh_remove_write_access_from_sl1p(str
 /* Remove all mappings of a guest frame from the shadow tables.
  * Returns non-zero if we need to flush TLBs. */
 
-static int sh_remove_all_mappings(struct domain *d, mfn_t gmfn)
+static int sh_remove_all_mappings(struct domain *d, mfn_t gmfn,
+                                  unsigned long gfn)
 {
     struct page_info *page = mfn_to_page(gmfn);
 
@@ -2643,19 +2645,24 @@ static int sh_remove_all_mappings(struct
     /* If that didn't catch the mapping, something is very wrong */
     if ( !sh_check_page_has_no_refs(page) )
     {
-        /* Don't complain if we're in HVM and there are some extra mappings:
+        /*
+         * Don't complain if we're in HVM and there are some extra mappings:
          * The qemu helper process has an untyped mapping of this dom's RAM
          * and the HVM restore program takes another.
-         * Also allow one typed refcount for xenheap pages, to match
-         * share_xen_page_with_guest(). */
+         * Also allow one typed refcount for
+         * - Xen heap pages, to match share_xen_page_with_guest(),
+         * - ioreq server pages, to match prepare_ring_for_helper().
+         */
         if ( !(shadow_mode_external(d)
                && (page->count_info & PGC_count_mask) <= 3
                && ((page->u.inuse.type_info & PGT_count_mask)
-                   == !!is_xen_heap_page(page))) )
+                   == (is_xen_heap_page(page) ||
+                       is_ioreq_server_page(d, page)))) )
         {
-            SHADOW_ERROR("can't find all mappings of mfn %lx: "
-                          "c=%08lx t=%08lx\n", mfn_x(gmfn),
-                          page->count_info, page->u.inuse.type_info);
+            SHADOW_ERROR("can't find all mappings of mfn %lx (gfn %lx): "
+                          "c=%lx t=%lx x=%d i=%d\n", mfn_x(gmfn), gfn,
+                          page->count_info, page->u.inuse.type_info,
+                          !!is_xen_heap_page(page), is_ioreq_server_page(d, page));
         }
     }
 
@@ -3515,7 +3522,7 @@ static void sh_unshadow_for_p2m_change(s
         if ( (p2m_is_valid(p2mt) || p2m_is_grant(p2mt)) && mfn_valid(mfn) )
         {
             sh_remove_all_shadows_and_parents(d, mfn);
-            if ( sh_remove_all_mappings(d, mfn) )
+            if ( sh_remove_all_mappings(d, mfn, gfn) )
                 flush_tlb_mask(d->domain_dirty_cpumask);
         }
     }
@@ -3550,7 +3557,8 @@ static void sh_unshadow_for_p2m_change(s
                 {
                     /* This GFN->MFN mapping has gone away */
                     sh_remove_all_shadows_and_parents(d, omfn);
-                    if ( sh_remove_all_mappings(d, omfn) )
+                    if ( sh_remove_all_mappings(d, omfn,
+                                                gfn + (i << PAGE_SHIFT)) )
                         cpumask_or(&flushmask, &flushmask,
                                    d->domain_dirty_cpumask);
                 }
@@ -3766,7 +3774,8 @@ int shadow_track_dirty_vram(struct domai
                         dirty = 1;
                         /* TODO: Heuristics for finding the single mapping of
                          * this gmfn */
-                        flush_tlb |= sh_remove_all_mappings(d, mfn);
+                        flush_tlb |= sh_remove_all_mappings(d, mfn,
+                                                            begin_pfn + i);
                     }
                     else
                     {
--- a/xen/include/asm-x86/hvm/ioreq.h
+++ b/xen/include/asm-x86/hvm/ioreq.h
@@ -21,6 +21,7 @@
 
 bool_t hvm_io_pending(struct vcpu *v);
 bool_t handle_hvm_io_completion(struct vcpu *v);
+bool_t is_ioreq_server_page(struct domain *d, const struct page_info *page);
 
 int hvm_create_ioreq_server(struct domain *d, domid_t domid,
                             bool_t is_default, int bufioreq_handling,

