[Xen-devel] [PATCH] xenpaging: handle evict failures

To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH] xenpaging: handle evict failures
From: Olaf Hering <olaf@xxxxxxxxx>
Date: Tue, 11 Oct 2011 14:36:10 +0200
User-agent: Mercurial-patchbomb/1.7.5
# HG changeset patch
# User Olaf Hering <olaf@xxxxxxxxx>
# Date 1318336551 -7200
# Node ID bdd49540f1e1c803d01c88adb67c6ce01e2d00d8
# Parent  4b0907c6a08c348962bd976c2976257b412408be
xenpaging: handle evict failures

Eviction of a nominated gfn must fail if some other process mapped the
page without first checking the p2mt of that gfn.
Add a check that cancels the eviction if the page usage count is not 1.
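
For reference, the reasoning behind the count test (a condensed copy of
the check added in the p2m.c hunk below, shown with explanatory
comments; the surrounding function body is elided):

    /* After the get_page() right above this check, an evictable page
     * should hold exactly two references:
     *   1. the guest's allocation reference (PGC_allocated is set)
     *   2. the reference get_page() just took for the evict path
     * A higher count means some other process mapped the page, so the
     * eviction is cancelled and ret stays -EBUSY. */
    if ( (page->count_info & (PGC_count_mask | PGC_allocated)) !=
         (2 | PGC_allocated) )
        goto out_put;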

Handle the possible eviction failure in the page-in paths.
Between nominate and evict, something may check the p2mt and call
populate. Handle this case and let the gfn enter the page-in path. The
gfn may still be backed by an mfn, so there is no need to allocate a
new page in prep.
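
A sketch of the resulting prep logic (this mirrors the p2m.c hunk
below; mfn comes from the preceding p2m->get_entry() call):

    /* Reuse the existing mfn of a populated-before-evict gfn and only
     * allocate a fresh page when the eviction actually completed and
     * removed the backing page. */
    if ( !mfn_valid(mfn) )
    {
        page = alloc_domheap_page(p2m->domain, 0);
        if ( unlikely(page == NULL) )
            goto out;
        mfn = page_to_mfn(page);
    }
    set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_in, a);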

Adjust do_mmu_update() to return -ENOENT only if the gfn has entered
the page-in path and is not yet backed by an mfn; otherwise
linux_privcmd_map_foreign_bulk() may loop forever.
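
To illustrate the problem (a simplified, hypothetical model of the
caller, not the actual libxc code): the mapping caller treats -ENOENT
as "page is still being paged in, retry later", so a gfn that is
already backed by a valid mfn but keeps producing -ENOENT would be
retried forever:

    /* Hypothetical caller-side retry loop; retry_mapping() stands in
     * for the real privcmd mapping request.  Without the mfn_valid()
     * test in do_mmu_update(), rc would stay -ENOENT for a gfn that
     * already has a valid mfn and this loop would never terminate. */
    do
    {
        rc = retry_mapping(xch, dom, gfns, num); /* invented helper */
    } while ( rc < 0 && errno == ENOENT );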

Add MEM_EVENT_FLAG_EVICT_FAIL to inform the pager that a page-in
request was sent for a possibly not-evicted page. xenpaging does not
currently need this flag because a failure to evict a gfn will already
be caught when the evict call itself returns an error.
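
A pager that does consume the flag might handle it roughly like this
(hypothetical sketch; drop_nominated() is an invented helper, the flag
test itself matches the xenpaging.c hunk below):

    /* If the populate request is tagged, the pending evict of req.gfn
     * is going to fail, so a pager tracking its nominated gfns can
     * drop its bookkeeping for this gfn and treat it as resident. */
    if ( req.flags & MEM_EVENT_FLAG_EVICT_FAIL )
        drop_nominated(paging, req.gfn); /* invented helper */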

Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>

diff -r 4b0907c6a08c -r bdd49540f1e1 tools/xenpaging/xenpaging.c
--- a/tools/xenpaging/xenpaging.c
+++ b/tools/xenpaging/xenpaging.c
@@ -734,10 +734,12 @@ int main(int argc, char *argv[])
             }
             else
             {
-                DPRINTF("page already populated (domain = %d; vcpu = %d;"
-                        " gfn = %"PRIx64"; paused = %d)\n",
-                        paging->mem_event.domain_id, req.vcpu_id,
-                        req.gfn, req.flags & MEM_EVENT_FLAG_VCPU_PAUSED);
+                DPRINTF("page %s populated (domain = %d; vcpu = %d;"
+                        " gfn = %"PRIx64"; paused = %d; evict_fail = %d)\n",
+                        req.flags & MEM_EVENT_FLAG_EVICT_FAIL ? "not" : "already",
+                        paging->mem_event.domain_id, req.vcpu_id, req.gfn,
+                        !!(req.flags & MEM_EVENT_FLAG_VCPU_PAUSED) ,
+                        !!(req.flags & MEM_EVENT_FLAG_EVICT_FAIL) );
 
                 /* Tell Xen to resume the vcpu */
                 /* XXX: Maybe just check if the vcpu was paused? */
diff -r 4b0907c6a08c -r bdd49540f1e1 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -3532,7 +3532,7 @@ int do_mmu_update(
                         rc = -ENOENT;
                         break;
                     }
-                    else if ( p2m_ram_paging_in_start == l1e_p2mt )
+                    else if ( p2m_ram_paging_in_start == l1e_p2mt && !mfn_valid(mfn) )
                     {
                         rc = -ENOENT;
                         break;
@@ -3572,7 +3572,7 @@ int do_mmu_update(
                         rc = -ENOENT;
                         break;
                     }
-                    else if ( p2m_ram_paging_in_start == l2e_p2mt )
+                    else if ( p2m_ram_paging_in_start == l2e_p2mt && !mfn_valid(mfn) )
                     {
                         rc = -ENOENT;
                         break;
@@ -3600,7 +3600,7 @@ int do_mmu_update(
                         rc = -ENOENT;
                         break;
                     }
-                    else if ( p2m_ram_paging_in_start == l3e_p2mt )
+                    else if ( p2m_ram_paging_in_start == l3e_p2mt && !mfn_valid(mfn) )
                     {
                         rc = -ENOENT;
                         break;
@@ -3628,7 +3628,7 @@ int do_mmu_update(
                         rc = -ENOENT;
                         break;
                     }
-                    else if ( p2m_ram_paging_in_start == l4e_p2mt )
+                    else if ( p2m_ram_paging_in_start == l4e_p2mt && !mfn_valid(mfn) )
                     {
                         rc = -ENOENT;
                         break;
diff -r 4b0907c6a08c -r bdd49540f1e1 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -731,15 +731,24 @@ int p2m_mem_paging_evict(struct domain *
     if ( unlikely(!mfn_valid(mfn)) )
         goto out;
 
-    if ( (p2mt == p2m_ram_paged) || (p2mt == p2m_ram_paging_in) ||
-         (p2mt == p2m_ram_paging_in_start) )
+    /* Allow only nominated pages */
+    if ( p2mt != p2m_ram_paging_out )
         goto out;
 
+    ret = -EBUSY;
     /* Get the page so it doesn't get modified under Xen's feet */
     page = mfn_to_page(mfn);
     if ( unlikely(!get_page(page, d)) )
         goto out;
 
+    /* Check page count and type once more */
+    if ( (page->count_info & (PGC_count_mask | PGC_allocated)) !=
+         (2 | PGC_allocated) )
+        goto out_put;
+
+    if ( (page->u.inuse.type_info & PGT_type_mask) != PGT_none )
+        goto out_put;
+
     /* Decrement guest domain's ref count of the page */
     if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
         put_page(page);
@@ -751,14 +760,15 @@ int p2m_mem_paging_evict(struct domain *
     /* Clear content before returning the page to Xen */
     scrub_one_page(page);
 
-    /* Put the page back so it gets freed */
-    put_page(page);
-
     /* Track number of paged gfns */
     atomic_inc(&d->paged_pages);
 
     ret = 0;
 
+ out_put:
+    /* Put the page back so it gets freed */
+    put_page(page);
+
  out:
     p2m_unlock(p2m);
     return ret;
@@ -788,6 +798,7 @@ void p2m_mem_paging_populate(struct doma
     mem_event_request_t req;
     p2m_type_t p2mt;
     p2m_access_t a;
+    mfn_t mfn;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
     /* Check that there's space on the ring for this request */
@@ -799,21 +810,26 @@ void p2m_mem_paging_populate(struct doma
 
     /* Fix p2m mapping */
     p2m_lock(p2m);
-    p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
-    if ( p2mt == p2m_ram_paged )
+    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
+    /* Allow only nominated or evicted pages to enter page-in path */
+    if ( p2mt == p2m_ram_paging_out || p2mt == p2m_ram_paged )
     {
-        set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), 0, 
-                      p2m_ram_paging_in_start, a);
+        /* Evict will fail now, tag this request for pager */
+        if ( p2mt == p2m_ram_paging_out )
+            req.flags |= MEM_EVENT_FLAG_EVICT_FAIL;
+
+        set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_in_start, a);
         audit_p2m(p2m, 1);
     }
     p2m_unlock(p2m);
 
-    /* Pause domain */
-    if ( v->domain->domain_id == d->domain_id )
+    /* Pause domain if request came from guest and gfn has paging type */
+    if ( p2m_is_paging(p2mt) && v->domain->domain_id == d->domain_id )
     {
         vcpu_pause_nosync(v);
         req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
     }
+    /* No need to inform pager if the gfn is not in the page-out path */
     else if ( p2mt != p2m_ram_paging_out && p2mt != p2m_ram_paged )
     {
         /* gfn is already on its way back and vcpu is not paused */
@@ -834,20 +850,26 @@ int p2m_mem_paging_prep(struct domain *d
     struct page_info *page;
     p2m_type_t p2mt;
     p2m_access_t a;
+    mfn_t mfn;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
     int ret = -ENOMEM;
 
     p2m_lock(p2m);
 
-    p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
+    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
 
-    /* Get a free page */
-    page = alloc_domheap_page(p2m->domain, 0);
-    if ( unlikely(page == NULL) )
-        goto out;
+    /* Allocate a page if the gfn does not have one yet */
+    if ( !mfn_valid(mfn) )
+    {
+        /* Get a free page */
+        page = alloc_domheap_page(p2m->domain, 0);
+        if ( unlikely(page == NULL) )
+            goto out;
+        mfn = page_to_mfn(page);
+    }
 
     /* Fix p2m mapping */
-    set_p2m_entry(p2m, gfn, page_to_mfn(page), 0, p2m_ram_paging_in, a);
+    set_p2m_entry(p2m, gfn, mfn, 0, p2m_ram_paging_in, a);
     audit_p2m(p2m, 1);
 
     atomic_dec(&d->paged_pages);
diff -r 4b0907c6a08c -r bdd49540f1e1 xen/include/public/mem_event.h
--- a/xen/include/public/mem_event.h
+++ b/xen/include/public/mem_event.h
@@ -38,6 +38,7 @@
 /* Memory event flags */
 #define MEM_EVENT_FLAG_VCPU_PAUSED  (1 << 0)
 #define MEM_EVENT_FLAG_DROP_PAGE    (1 << 1)
+#define MEM_EVENT_FLAG_EVICT_FAIL   (1 << 2)
 
 /* Reasons for the memory event request */
 #define MEM_EVENT_REASON_UNKNOWN     0    /* typical reason */

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
