
To: Andrew Morton <akpm@xxxxxxxx>
Subject: [Xen-devel] [PATCH 11 of 13] Implement lazy MMU update hooks which are SMP safe for both direct and
From: Jeremy Fitzhardinge <jeremy@xxxxxxxxxxxxx>
Date: Tue, 01 Aug 2006 13:00:49 -0700
Cc: Virtualization <virtualization@xxxxxxxx>, Zachary Amsden <zach@xxxxxxxxxx>, Xen-devel <xen-devel@xxxxxxxxxxxxxxxxxxx>, Jeremy Fitzhardinge <jeremy@xxxxxxxxxxxxx>, Hollis Blanchard <hollisb@xxxxxxxxxx>, Rusty Russell <rusty@xxxxxxxxxxxxxxx>, Linux Kernel <linux-kernel@xxxxxxxxxxxxxxx>, Chris Wright <chrisw@xxxxxxxxxxxx>, Ian Pratt <ian.pratt@xxxxxxxxxxxxx>, "Eric W. Biederman" <ebiederm@xxxxxxxxxxxx>, Gerd Hoffmann <kraxel@xxxxxxx>, Christian Limpach <Christian.Limpach@xxxxxxxxxxxx>, Christoph Lameter <clameter@xxxxxxx>
Delivery-date: Tue, 01 Aug 2006 13:16:21 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
In-reply-to: <patchbomb.1154462438@ezr>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
5 files changed, 34 insertions(+)
include/asm-generic/pgtable.h |   20 ++++++++++++++++++++
mm/memory.c                   |    8 ++++++++
mm/mprotect.c                 |    2 ++
mm/mremap.c                   |    2 ++
mm/msync.c                    |    2 ++


Implement lazy MMU update hooks which are SMP safe for both direct and
shadow page tables.  The idea is that PTE updates and page invalidations
made while in lazy mode can be batched into a single hypercall.  We use
this in VMI for shadow page table synchronization, and it is a win.  It
can also be used by PPC and for direct page tables on Xen.
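
To make the hook contract concrete, here is a hypothetical sketch (not part
of this patch) of how an architecture might opt in, assuming a per-CPU lazy
flag and a flush_lazy_updates() helper (made up for illustration) that issues
a single hypercall for everything queued while the flag was set:

        #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

        static DEFINE_PER_CPU(int, lazy_mmu_mode);

        static inline void arch_enter_lazy_mmu_mode(void)
        {
                BUG_ON(__get_cpu_var(lazy_mmu_mode));   /* no nesting expected */
                __get_cpu_var(lazy_mmu_mode) = 1;       /* queue PTE updates from here on */
        }

        static inline void arch_leave_lazy_mmu_mode(void)
        {
                __get_cpu_var(lazy_mmu_mode) = 0;
                flush_lazy_updates();   /* hypothetical: one hypercall flushes the batch */
        }

Architectures which don't define __HAVE_ARCH_ENTER_LAZY_MMU_MODE get the
no-op defaults added to asm-generic/pgtable.h below, so they pay nothing.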

For SMP, the enter / leave must happen under protection of the page table
locks for the page tables being modified.  Otherwise you end up with stale
state in the batched hypercall, which other CPUs can race ahead of.  Doing
this under the protection of the locks guarantees that the synchronization
is correct, and also means that spurious faults generated during this
window by remote CPUs are handled properly, since the page fault handler
must re-check the PTE under protection of the same lock.
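
Condensed from the hunks below, the usage pattern in the callers is: take
the PTE lock, enter lazy mode, make the PTE updates, leave lazy mode, and
only then drop the lock.  A minimal sketch (the fill_prot_range() name and
body are illustrative, modelled on zeromap_pte_range()):

        static int fill_prot_range(struct mm_struct *mm, pmd_t *pmd,
                                   unsigned long addr, unsigned long end,
                                   pgprot_t prot)
        {
                spinlock_t *ptl;
                pte_t *pte;

                pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
                if (!pte)
                        return -ENOMEM;
                arch_enter_lazy_mmu_mode();     /* updates below may only be queued */
                do {
                        set_pte_at(mm, addr, pte,
                                   pte_wrprotect(mk_pte(ZERO_PAGE(addr), prot)));
                } while (pte++, addr += PAGE_SIZE, addr != end);
                arch_leave_lazy_mmu_mode();     /* flush while still holding ptl */
                pte_unmap_unlock(pte - 1, ptl);
                return 0;
        }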

Signed-off-by: Zachary Amsden <zach@xxxxxxxxxx>
Signed-off-by: Jeremy Fitzhardinge <jeremy@xxxxxxxxxxxxx>

===================================================================

diff -r 553154516a1b -r 398f8fd6b334 include/asm-generic/pgtable.h
--- a/include/asm-generic/pgtable.h     Tue Aug 01 01:32:00 2006 -0700
+++ b/include/asm-generic/pgtable.h     Tue Aug 01 01:32:01 2006 -0700
@@ -164,6 +164,26 @@ static inline void ptep_set_wrprotect(st
 #endif
 
 /*
+ * A facility to provide lazy MMU batching.  This allows PTE updates and
+ * page invalidations to be delayed until a call to leave lazy MMU mode
+ * is issued.  Some architectures may benefit from doing this, and it is
+ * beneficial for both shadow and direct mode hypervisors, which may batch
+ * the PTE updates which happen during this window.  Note that using this
+ * interface requires that read hazards be removed from the code.  A read
+ * hazard could occur in the direct mode hypervisor case, since the actual
+ * write to the page tables may not yet have taken place, so reads through
+ * a raw PTE pointer after it has been modified are not guaranteed to be
+ * up to date.  This mode can only be entered and left under the protection of
+ * the page table locks for all page tables which may be modified.  In the UP
+ * case, this is required so that preemption is disabled, and in the SMP case,
+ * it must synchronize the delayed page table writes properly on other CPUs.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+#define arch_enter_lazy_mmu_mode()     do {} while (0)
+#define arch_leave_lazy_mmu_mode()     do {} while (0)
+#endif
+
+/*
  * When walking page tables, get the address of the next boundary,
  * or the end address of the range if that comes earlier.  Although no
  * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
diff -r 553154516a1b -r 398f8fd6b334 mm/memory.c
--- a/mm/memory.c       Tue Aug 01 01:32:00 2006 -0700
+++ b/mm/memory.c       Tue Aug 01 01:32:01 2006 -0700
@@ -505,6 +505,7 @@ again:
        src_pte = pte_offset_map_nested(src_pmd, addr);
        src_ptl = pte_lockptr(src_mm, src_pmd);
        spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
+       arch_enter_lazy_mmu_mode();
 
        do {
                /*
@@ -526,6 +527,7 @@ again:
                progress += 8;
        } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
 
+       arch_leave_lazy_mmu_mode();
        spin_unlock(src_ptl);
        pte_unmap_nested(src_pte - 1);
        add_mm_rss(dst_mm, rss[0], rss[1]);
@@ -627,6 +629,7 @@ static unsigned long zap_pte_range(struc
        int anon_rss = 0;
 
        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+       arch_enter_lazy_mmu_mode();
        do {
                pte_t ptent = *pte;
                if (pte_none(ptent)) {
@@ -693,6 +696,7 @@ static unsigned long zap_pte_range(struc
        } while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));
 
        add_mm_rss(mm, file_rss, anon_rss);
+       arch_leave_lazy_mmu_mode();
        pte_unmap_unlock(pte - 1, ptl);
 
        return addr;
@@ -1108,6 +1112,7 @@ static int zeromap_pte_range(struct mm_s
        pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
        if (!pte)
                return -ENOMEM;
+       arch_enter_lazy_mmu_mode();
        do {
                struct page *page = ZERO_PAGE(addr);
                pte_t zero_pte = pte_wrprotect(mk_pte(page, prot));
@@ -1117,6 +1122,7 @@ static int zeromap_pte_range(struct mm_s
                BUG_ON(!pte_none(*pte));
                set_pte_at(mm, addr, pte, zero_pte);
        } while (pte++, addr += PAGE_SIZE, addr != end);
+       arch_leave_lazy_mmu_mode();
        pte_unmap_unlock(pte - 1, ptl);
        return 0;
 }
@@ -1269,11 +1275,13 @@ static int remap_pte_range(struct mm_str
        pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
        if (!pte)
                return -ENOMEM;
+       arch_enter_lazy_mmu_mode();
        do {
                BUG_ON(!pte_none(*pte));
                set_pte_at(mm, addr, pte, pfn_pte(pfn, prot));
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
+       arch_leave_lazy_mmu_mode();
        pte_unmap_unlock(pte - 1, ptl);
        return 0;
 }
diff -r 553154516a1b -r 398f8fd6b334 mm/mprotect.c
--- a/mm/mprotect.c     Tue Aug 01 01:32:00 2006 -0700
+++ b/mm/mprotect.c     Tue Aug 01 01:32:01 2006 -0700
@@ -33,6 +33,7 @@ static void change_pte_range(struct mm_s
        spinlock_t *ptl;
 
        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+       arch_enter_lazy_mmu_mode();
        do {
                oldpte = *pte;
                if (pte_present(oldpte)) {
@@ -62,6 +63,7 @@ static void change_pte_range(struct mm_s
                }
 
        } while (pte++, addr += PAGE_SIZE, addr != end);
+       arch_leave_lazy_mmu_mode();
        pte_unmap_unlock(pte - 1, ptl);
 }
 
diff -r 553154516a1b -r 398f8fd6b334 mm/mremap.c
--- a/mm/mremap.c       Tue Aug 01 01:32:00 2006 -0700
+++ b/mm/mremap.c       Tue Aug 01 01:32:01 2006 -0700
@@ -98,6 +98,7 @@ static void move_ptes(struct vm_area_str
        new_ptl = pte_lockptr(mm, new_pmd);
        if (new_ptl != old_ptl)
                spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
+       arch_enter_lazy_mmu_mode();
 
        for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
                                   new_pte++, new_addr += PAGE_SIZE) {
@@ -109,6 +110,7 @@ static void move_ptes(struct vm_area_str
                set_pte_at(mm, new_addr, new_pte, pte);
        }
 
+       arch_leave_lazy_mmu_mode();
        if (new_ptl != old_ptl)
                spin_unlock(new_ptl);
        pte_unmap_nested(new_pte - 1);
diff -r 553154516a1b -r 398f8fd6b334 mm/msync.c
--- a/mm/msync.c        Tue Aug 01 01:32:00 2006 -0700
+++ b/mm/msync.c        Tue Aug 01 01:32:01 2006 -0700
@@ -30,6 +30,7 @@ static unsigned long msync_pte_range(str
 
 again:
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+       arch_enter_lazy_mmu_mode();
        do {
                struct page *page;
 
@@ -51,6 +52,7 @@ again:
                        ret += set_page_dirty(page);
                progress += 3;
        } while (pte++, addr += PAGE_SIZE, addr != end);
+       arch_leave_lazy_mmu_mode();
        pte_unmap_unlock(pte - 1, ptl);
        cond_resched();
        if (addr != end)



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel