WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-devel

[Xen-devel] [PATCH 11/15] xen/mmu: tune pgtable alloc/release

To: Xen Devel <Xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH 11/15] xen/mmu: tune pgtable alloc/release
From: Jeremy Fitzhardinge <jeremy@xxxxxxxx>
Date: Mon, 20 Jun 2011 15:15:07 -0700
Cc: Jeremy Fitzhardinge <jeremy.fitzhardinge@xxxxxxxxxx>, Ingo Molnar <mingo@xxxxxxxxxx>, Linux Kernel Mailing List <linux-kernel@xxxxxxxxxxxxxxx>, Steven Rostedt <rostedt@xxxxxxxxxxx>, Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
Delivery-date: Mon, 20 Jun 2011 15:35:55 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <cover.1308607697.git.jeremy.fitzhardinge@xxxxxxxxxx>
In-reply-to: <cover.1308607697.git.jeremy.fitzhardinge@xxxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <cover.1308607697.git.jeremy.fitzhardinge@xxxxxxxxxx>
References: <cover.1308607697.git.jeremy.fitzhardinge@xxxxxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
From: Jeremy Fitzhardinge <jeremy.fitzhardinge@xxxxxxxxxx>

Make sure the fastpath code is inlined.  Batch the page permission change
and the pin/unpin, and make sure that it can be batched with any
adjacent set_pte/pmd/etc operations.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@xxxxxxxxxx>
---
 arch/x86/xen/mmu.c |   49 ++++++++++++++++++++++++++++++++++++++++++-------
 1 files changed, 42 insertions(+), 7 deletions(-)

diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 112259f..ebfdc3d 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1492,22 +1492,52 @@ static void __init xen_release_pmd_init(unsigned long pfn)
        make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
 }
 
+static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
+{
+       struct multicall_space mcs;
+       struct mmuext_op *op;
+
+       mcs = __xen_mc_entry(sizeof(*op));
+       op = mcs.args;
+       op->cmd = cmd;
+       op->arg1.mfn = pfn_to_mfn(pfn);
+
+       MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
+}
+
+static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
+{
+       struct multicall_space mcs;
+       unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
+
+       mcs = __xen_mc_entry(0);
+       MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
+                               pfn_pte(pfn, prot), 0);
+}
+
 /* This needs to make sure the new pte page is pinned iff its being
    attached to a pinned pagetable. */
-static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
+static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
+                                   unsigned level)
 {
-       struct page *page = pfn_to_page(pfn);
        int pinned = PagePinned(virt_to_page(mm->pgd));
  
        trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
 
        if (pinned) {
+               struct page *page = pfn_to_page(pfn);
+
                SetPagePinned(page);
 
                if (!PageHighMem(page)) {
-                       make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
+                       xen_mc_batch();
+
+                       __set_pfn_prot(pfn, PAGE_KERNEL_RO);
+
                        if (level == PT_PTE && USE_SPLIT_PTLOCKS)
-                               pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
+                               __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
+
+                       xen_mc_issue(PARAVIRT_LAZY_MMU);
                } else {
                        /* make sure there are no stray mappings of
                           this page */
@@ -1527,7 +1557,7 @@ static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
 }
 
 /* This should never happen until we're OK to use struct page */
-static void xen_release_ptpage(unsigned long pfn, unsigned level)
+static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
 {
        struct page *page = pfn_to_page(pfn);
        bool pinned = PagePinned(page);
@@ -1536,9 +1566,14 @@ static void xen_release_ptpage(unsigned long pfn, unsigned level)
 
        if (pinned) {
                if (!PageHighMem(page)) {
+                       xen_mc_batch();
+
                        if (level == PT_PTE && USE_SPLIT_PTLOCKS)
-                               pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
-                       make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
+                               __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
+
+                       __set_pfn_prot(pfn, PAGE_KERNEL);
+
+                       xen_mc_issue(PARAVIRT_LAZY_MMU);
                }
                ClearPagePinned(page);
        }
-- 
1.7.5.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel

[Prev in Thread] Current Thread [Next in Thread]