To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [linux-2.6.18-xen] linux/x86: batch hypercalls when pinning address spaces
From: "Xen patchbot-linux-2.6.18-xen" <patchbot-linux-2.6.18-xen@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 05 Oct 2007 13:40:10 -0700
Delivery-date: Fri, 05 Oct 2007 13:40:46 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir@xxxxxxxxxxxxx>
# Date 1191577913 -3600
# Node ID b0ec211da98add875d74f75a881fae0c6ac4f484
# Parent  6e26ffc60647bd7454d0a066a8ab63ef7f0123af
linux/x86: batch hypercalls when pinning address spaces
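
Instead of issuing one HYPERVISOR_update_va_mapping hypercall per page-table
page while (un)pinning an address space, queue the updates into a per-CPU
multicall_entry_t array and submit them PIN_BATCH at a time through
HYPERVISOR_multicall_check; the final update of the pgd itself carries
UVMF_TLB_FLUSH, which also allows mm_unpin() to drop its separate
xen_tlb_flush(). A minimal sketch of the batching pattern, condensed from
pgd_walk_set_prot()/mm_walk_set_prot() below (the helper name
queue_va_update is illustrative only, not part of the patch):

    #define PIN_BATCH 4
    static DEFINE_PER_CPU(multicall_entry_t[PIN_BATCH], pb_mcl);

    /* Queue one va->pte update in this CPU's batch; flush when full. */
    static unsigned int queue_va_update(unsigned long va, pte_t pte,
                                        unsigned int cpu, unsigned int seq)
    {
            /* Fill the next multicall slot instead of trapping now. */
            MULTI_update_va_mapping(per_cpu(pb_mcl, cpu) + seq, va, pte, 0);
            if (unlikely(++seq == PIN_BATCH)) {
                    /* Batch full: one hypercall performs all entries. */
                    if (HYPERVISOR_multicall_check(per_cpu(pb_mcl, cpu),
                                                   PIN_BATCH, NULL))
                            BUG();
                    seq = 0;
            }
            return seq;     /* caller threads seq through the walk */
    }

Callers bracket the page-table walk with get_cpu()/put_cpu() so the per-CPU
batch cannot change underneath them, and flush any remaining entries
(seq != 0) before releasing the CPU.
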
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
 arch/i386/mm/pgtable-xen.c    |   56 +++++++++++++++++++++-----------
 arch/x86_64/mm/pageattr-xen.c |   73 ++++++++++++++++++++++++------------------
 2 files changed, 80 insertions(+), 49 deletions(-)

diff -r 6e26ffc60647 -r b0ec211da98a arch/i386/mm/pgtable-xen.c
--- a/arch/i386/mm/pgtable-xen.c        Fri Oct 05 10:49:06 2007 +0100
+++ b/arch/i386/mm/pgtable-xen.c        Fri Oct 05 10:51:53 2007 +0100
@@ -552,10 +552,13 @@ static void _pin_lock(struct mm_struct *
 #define pin_lock(mm) _pin_lock(mm, 1)
 #define pin_unlock(mm) _pin_lock(mm, 0)
 
-static inline void pgd_walk_set_prot(struct page *page, pgprot_t flags)
+#define PIN_BATCH 4
+static DEFINE_PER_CPU(multicall_entry_t[PIN_BATCH], pb_mcl);
+
+static inline unsigned int pgd_walk_set_prot(struct page *page, pgprot_t flags,
+                                             unsigned int cpu, unsigned seq)
 {
        unsigned long pfn = page_to_pfn(page);
-       int rc;
 
        if (PageHighMem(page)) {
                if (pgprot_val(flags) & _PAGE_RW)
@@ -563,12 +566,18 @@ static inline void pgd_walk_set_prot(str
                else
                        set_bit(PG_pinned, &page->flags);
        } else {
-               rc = HYPERVISOR_update_va_mapping(
-                       (unsigned long)__va(pfn << PAGE_SHIFT),
-                       pfn_pte(pfn, flags), 0);
-               if (rc)
-                       BUG();
-       }
+               MULTI_update_va_mapping(per_cpu(pb_mcl, cpu) + seq,
+                               (unsigned long)__va(pfn << PAGE_SHIFT),
+                               pfn_pte(pfn, flags), 0);
+               if (unlikely(++seq == PIN_BATCH)) {
+                       if (unlikely(HYPERVISOR_multicall_check(per_cpu(pb_mcl, cpu),
+                                                               PIN_BATCH, NULL)))
+                               BUG();
+                       seq = 0;
+               }
+       }
+
+       return seq;
 }
 
 static void pgd_walk(pgd_t *pgd_base, pgprot_t flags)
@@ -576,37 +585,48 @@ static void pgd_walk(pgd_t *pgd_base, pg
        pgd_t *pgd = pgd_base;
        pud_t *pud;
        pmd_t *pmd;
-       int    g, u, m, rc;
+       int    g, u, m;
+       unsigned int cpu, seq;
 
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return;
 
-       for (g = 0; g < USER_PTRS_PER_PGD; g++, pgd++) {
+       cpu = get_cpu();
+
+       for (g = 0, seq = 0; g < USER_PTRS_PER_PGD; g++, pgd++) {
                if (pgd_none(*pgd))
                        continue;
                pud = pud_offset(pgd, 0);
                if (PTRS_PER_PUD > 1) /* not folded */
-                       pgd_walk_set_prot(virt_to_page(pud),flags);
+                       seq = pgd_walk_set_prot(virt_to_page(pud),flags,cpu,seq);
                for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
                        if (pud_none(*pud))
                                continue;
                        pmd = pmd_offset(pud, 0);
                        if (PTRS_PER_PMD > 1) /* not folded */
-                               pgd_walk_set_prot(virt_to_page(pmd),flags);
+                               seq = pgd_walk_set_prot(virt_to_page(pmd),flags,cpu,seq);
                        for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
                                if (pmd_none(*pmd))
                                        continue;
-                               pgd_walk_set_prot(pmd_page(*pmd),flags);
+                               seq = pgd_walk_set_prot(pmd_page(*pmd),flags,cpu,seq);
                        }
                }
        }
 
-       rc = HYPERVISOR_update_va_mapping(
-               (unsigned long)pgd_base,
-               pfn_pte(virt_to_phys(pgd_base)>>PAGE_SHIFT, flags),
-               UVMF_TLB_FLUSH);
-       if (rc)
+       if (likely(seq != 0)) {
+               MULTI_update_va_mapping(per_cpu(pb_mcl, cpu) + seq,
+                       (unsigned long)pgd_base,
+                       pfn_pte(virt_to_phys(pgd_base)>>PAGE_SHIFT, flags),
+                       UVMF_TLB_FLUSH);
+               if (unlikely(HYPERVISOR_multicall_check(per_cpu(pb_mcl, cpu),
+                                                       seq + 1, NULL)))
+                       BUG();
+       } else if (HYPERVISOR_update_va_mapping((unsigned long)pgd_base,
+                       pfn_pte(virt_to_phys(pgd_base)>>PAGE_SHIFT, flags),
+                       UVMF_TLB_FLUSH))
                BUG();
+
+       put_cpu();
 }
 
 static void __pgd_pin(pgd_t *pgd)
diff -r 6e26ffc60647 -r b0ec211da98a arch/x86_64/mm/pageattr-xen.c
--- a/arch/x86_64/mm/pageattr-xen.c     Fri Oct 05 10:49:06 2007 +0100
+++ b/arch/x86_64/mm/pageattr-xen.c     Fri Oct 05 10:51:53 2007 +0100
@@ -78,17 +78,26 @@ static void _pin_lock(struct mm_struct *
 #define pin_lock(mm) _pin_lock(mm, 1)
 #define pin_unlock(mm) _pin_lock(mm, 0)
 
-static inline void mm_walk_set_prot(void *pt, pgprot_t flags)
+#define PIN_BATCH 8
+static DEFINE_PER_CPU(multicall_entry_t[PIN_BATCH], pb_mcl);
+
+static inline unsigned int mm_walk_set_prot(void *pt, pgprot_t flags,
+                                            unsigned int cpu, unsigned int seq)
 {
        struct page *page = virt_to_page(pt);
        unsigned long pfn = page_to_pfn(page);
-       int rc;
-
-       rc = HYPERVISOR_update_va_mapping(
+
+       MULTI_update_va_mapping(per_cpu(pb_mcl, cpu) + seq,
                (unsigned long)__va(pfn << PAGE_SHIFT),
                pfn_pte(pfn, flags), 0);
-       if (rc)
-               BUG();
+       if (unlikely(++seq == PIN_BATCH)) {
+               if (unlikely(HYPERVISOR_multicall_check(per_cpu(pb_mcl, cpu),
+                                                       PIN_BATCH, NULL)))
+                       BUG();
+               seq = 0;
+       }
+
+       return seq;
 }
 
 static void mm_walk(struct mm_struct *mm, pgprot_t flags)
@@ -98,8 +107,12 @@ static void mm_walk(struct mm_struct *mm
        pmd_t       *pmd;
        pte_t       *pte;
        int          g,u,m;
+       unsigned int cpu, seq;
+       multicall_entry_t *mcl;
 
        pgd = mm->pgd;
+       cpu = get_cpu();
+
        /*
         * Cannot iterate up to USER_PTRS_PER_PGD as these pagetables may not
         * be the 'current' task's pagetables (e.g., current may be 32-bit,
@@ -107,26 +120,45 @@ static void mm_walk(struct mm_struct *mm
         * Subtracting 1 from TASK_SIZE64 means the loop limit is correct
         * regardless of whether TASK_SIZE64 is a multiple of PGDIR_SIZE.
         */
-       for (g = 0; g <= ((TASK_SIZE64-1) / PGDIR_SIZE); g++, pgd++) {
+       for (g = 0, seq = 0; g <= ((TASK_SIZE64-1) / PGDIR_SIZE); g++, pgd++) {
                if (pgd_none(*pgd))
                        continue;
                pud = pud_offset(pgd, 0);
                if (PTRS_PER_PUD > 1) /* not folded */ 
-                       mm_walk_set_prot(pud,flags);
+                       seq = mm_walk_set_prot(pud,flags,cpu,seq);
                for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
                        if (pud_none(*pud))
                                continue;
                        pmd = pmd_offset(pud, 0);
                        if (PTRS_PER_PMD > 1) /* not folded */ 
-                               mm_walk_set_prot(pmd,flags);
+                               seq = mm_walk_set_prot(pmd,flags,cpu,seq);
                        for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
                                if (pmd_none(*pmd))
                                        continue;
                                pte = pte_offset_kernel(pmd,0);
-                               mm_walk_set_prot(pte,flags);
+                               seq = mm_walk_set_prot(pte,flags,cpu,seq);
                        }
                }
        }
+
+       mcl = per_cpu(pb_mcl, cpu);
+       if (unlikely(seq > PIN_BATCH - 2)) {
+               if (unlikely(HYPERVISOR_multicall_check(mcl, seq, NULL)))
+                       BUG();
+               seq = 0;
+       }
+       MULTI_update_va_mapping(mcl + seq,
+              (unsigned long)__user_pgd(mm->pgd),
+              pfn_pte(virt_to_phys(__user_pgd(mm->pgd))>>PAGE_SHIFT, flags),
+              0);
+       MULTI_update_va_mapping(mcl + seq + 1,
+              (unsigned long)mm->pgd,
+              pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, flags),
+              UVMF_TLB_FLUSH);
+       if (unlikely(HYPERVISOR_multicall_check(mcl, seq + 2, NULL)))
+               BUG();
+
+       put_cpu();
 }
 
 void mm_pin(struct mm_struct *mm)
@@ -137,17 +169,6 @@ void mm_pin(struct mm_struct *mm)
        pin_lock(mm);
 
        mm_walk(mm, PAGE_KERNEL_RO);
-       if (HYPERVISOR_update_va_mapping(
-               (unsigned long)mm->pgd,
-               pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL_RO),
-               UVMF_TLB_FLUSH))
-               BUG();
-       if (HYPERVISOR_update_va_mapping(
-               (unsigned long)__user_pgd(mm->pgd),
-               pfn_pte(virt_to_phys(__user_pgd(mm->pgd))>>PAGE_SHIFT,
-                       PAGE_KERNEL_RO),
-               UVMF_TLB_FLUSH))
-               BUG();
        xen_pgd_pin(__pa(mm->pgd)); /* kernel */
        xen_pgd_pin(__pa(__user_pgd(mm->pgd))); /* user */
        mm->context.pinned = 1;
@@ -167,17 +188,7 @@ void mm_unpin(struct mm_struct *mm)
 
        xen_pgd_unpin(__pa(mm->pgd));
        xen_pgd_unpin(__pa(__user_pgd(mm->pgd)));
-       if (HYPERVISOR_update_va_mapping(
-               (unsigned long)mm->pgd,
-               pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL), 0))
-               BUG();
-       if (HYPERVISOR_update_va_mapping(
-               (unsigned long)__user_pgd(mm->pgd),
-               pfn_pte(virt_to_phys(__user_pgd(mm->pgd))>>PAGE_SHIFT,
-                       PAGE_KERNEL), 0))
-               BUG();
        mm_walk(mm, PAGE_KERNEL);
-       xen_tlb_flush();
        mm->context.pinned = 0;
        spin_lock(&mm_unpinned_lock);
        list_add(&mm->context.unpinned, &mm_unpinned);

