[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [xen-unstable test] 56456: regressions - FAIL



El 16/05/15 a les 10.51, osstest service user ha escrit:
> flight 56456 xen-unstable real [real]
> http://logs.test-lab.xenproject.org/osstest/logs/56456/
> 
> Regressions :-(

This is my fault: paging_gva_to_gfn cannot be used to translate a PV 
guest VA to a GFN. The patch below restores the previous path for PV 
callers.

---
commit 4cfaf46cbb116ce8dd0124bbbea319489db4b068
Author: Roger Pau Monne <roger.pau@xxxxxxxxxx>
Date:   Sat May 16 13:42:39 2015 +0200

    x86: restore PV path on paging_log_dirty_op
    
    Restore previous path for PV domains calling paging_log_dirty_op. This fixes
    the fallout from a809eeea06d20b115d78f12e473502bcb6209844.
    
    Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>

diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index 5eee88c..2f51126 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -416,6 +416,7 @@ static inline void *map_dirty_bitmap(XEN_GUEST_HANDLE_64(uint8) dirty_bitmap,
     unsigned long gfn;
     p2m_type_t p2mt;
 
+    ASSERT(paging_mode_enabled(current->domain));
     gfn = paging_gva_to_gfn(current,
                             (unsigned long)(dirty_bitmap.p + (pages >> 3)),
                             &pfec);
@@ -446,6 +447,7 @@ static inline void *map_dirty_bitmap(XEN_GUEST_HANDLE_64(uint8) dirty_bitmap,
 
 static inline void unmap_dirty_bitmap(void *addr, struct page_info *page)
 {
+    ASSERT(paging_mode_enabled(current->domain));
     if ( addr != NULL )
     {
         unmap_domain_page(addr);
@@ -465,9 +467,9 @@ static int paging_log_dirty_op(struct domain *d,
     mfn_t *l4 = NULL, *l3 = NULL, *l2 = NULL;
     unsigned long *l1 = NULL;
     int i4, i3, i2;
-    uint8_t *dirty_bitmap;
-    struct page_info *page;
-    unsigned long index_mapped;
+    uint8_t *dirty_bitmap = NULL;
+    struct page_info *page = NULL;
+    unsigned long index_mapped = 0;
 
  again:
     if ( !resuming )
@@ -482,12 +484,15 @@ static int paging_log_dirty_op(struct domain *d,
         p2m_flush_hardware_cached_dirty(d);
     }
 
-    index_mapped = resuming ? d->arch.paging.preempt.log_dirty.done : 0;
-    dirty_bitmap = map_dirty_bitmap(sc->dirty_bitmap, index_mapped, &page);
-    if ( dirty_bitmap == NULL )
+    if ( paging_mode_enabled(current->domain) )
     {
-        domain_unpause(d);
-        return -EFAULT;
+        index_mapped = resuming ? d->arch.paging.preempt.log_dirty.done : 0;
+        dirty_bitmap = map_dirty_bitmap(sc->dirty_bitmap, index_mapped, &page);
+        if ( dirty_bitmap == NULL )
+        {
+            domain_unpause(d);
+            return -EFAULT;
+        }
     }
 
     paging_lock(d);
@@ -549,7 +554,8 @@ static int paging_log_dirty_op(struct domain *d,
                     bytes = (unsigned int)((sc->pages - pages + 7) >> 3);
                 if ( likely(peek) )
                 {
-                    if ( pages >> (3 + PAGE_SHIFT) !=
+                    if ( paging_mode_enabled(current->domain) &&
+                         pages >> (3 + PAGE_SHIFT) !=
                          index_mapped >> (3 + PAGE_SHIFT) )
                     {
                         /* We need to map next page */
@@ -564,13 +570,30 @@ static int paging_log_dirty_op(struct domain *d,
                         unmap_dirty_bitmap(dirty_bitmap, page);
                         goto again;
                     }
-                    ASSERT(((pages >> 3) % PAGE_SIZE) + bytes <= PAGE_SIZE);
-                    if ( l1 )
-                        memcpy(dirty_bitmap + ((pages >> 3) % PAGE_SIZE), l1,
-                               bytes);
+
+                    if ( paging_mode_enabled(current->domain) )
+                    {
+                        ASSERT(((pages >> 3) % PAGE_SIZE) + bytes <= PAGE_SIZE);
+                        if ( l1 )
+                            memcpy(dirty_bitmap + ((pages >> 3) % PAGE_SIZE),
+                                   l1, bytes);
+                        else
+                            memset(dirty_bitmap + ((pages >> 3) % PAGE_SIZE),
+                                   0, bytes);
+                    }
                     else
-                        memset(dirty_bitmap + ((pages >> 3) % PAGE_SIZE), 0,
-                               bytes);
+                    {
+                        if ( (l1 ? copy_to_guest_offset(sc->dirty_bitmap,
+                                                        pages >> 3,
+                                                        (uint8_t *)l1,
+                                                        bytes)
+                                 : clear_guest_offset(sc->dirty_bitmap,
+                                                      pages >> 3, bytes)) != 0 )
+                        {
+                            rv = -EFAULT;
+                            goto out;
+                        }
+                    }
                 }
                 pages += bytes << 3;
                 if ( l1 )
@@ -630,7 +653,8 @@ static int paging_log_dirty_op(struct domain *d,
     if ( rv )
     {
         /* Never leave the domain paused on real errors. */
-        unmap_dirty_bitmap(dirty_bitmap, page);
+        if ( paging_mode_enabled(current->domain) )
+            unmap_dirty_bitmap(dirty_bitmap, page);
         ASSERT(rv == -ERESTART);
         return rv;
     }
@@ -643,14 +667,16 @@ static int paging_log_dirty_op(struct domain *d,
          * paging modes (shadow or hap).  Safe because the domain is paused. */
         d->arch.paging.log_dirty.clean_dirty_bitmap(d);
     }
-    unmap_dirty_bitmap(dirty_bitmap, page);
+    if ( paging_mode_enabled(current->domain) )
+        unmap_dirty_bitmap(dirty_bitmap, page);
     domain_unpause(d);
     return rv;
 
  out:
     d->arch.paging.preempt.dom = NULL;
     paging_unlock(d);
-    unmap_dirty_bitmap(dirty_bitmap, page);
+    if ( paging_mode_enabled(current->domain) )
+        unmap_dirty_bitmap(dirty_bitmap, page);
     domain_unpause(d);
 
     if ( l1 )


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.