WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-changelog

[Xen-changelog] More cleanups to the pmd-shared patch.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] More cleanups to the pmd-shared patch.
From: Xen patchbot -unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Sat, 26 Nov 2005 14:08:08 +0000
Delivery-date: Sat, 26 Nov 2005 14:08:24 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 001ba14fbb1bcc91d4e02e70d5c64d661870c398
# Parent  b05e1c4bc31b17b4c4a5c8425f20576cf97e32a5
More cleanups to the pmd-shared patch.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>

diff -r b05e1c4bc31b -r 001ba14fbb1b 
linux-2.6-xen-sparse/arch/xen/i386/mm/pgtable.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/mm/pgtable.c   Sat Nov 26 09:43:27 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/pgtable.c   Sat Nov 26 10:32:57 2005
@@ -278,26 +278,22 @@
        unsigned long flags;
 
        if (PTRS_PER_PMD > 1) {
-#ifdef CONFIG_XEN
                /* Ensure pgd resides below 4GB. */
                int rc = xen_create_contiguous_region(
                        (unsigned long)pgd, 0, 32);
                BUG_ON(rc);
-#endif
                if (HAVE_SHARED_KERNEL_PMD)
                        memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
-                              swapper_pg_dir, sizeof(pgd_t));
+                              swapper_pg_dir + USER_PTRS_PER_PGD,
+                              (PTRS_PER_PGD - USER_PTRS_PER_PGD) * 
sizeof(pgd_t));
        } else {
-               if (!HAVE_SHARED_KERNEL_PMD)
-                       spin_lock_irqsave(&pgd_lock, flags);
+               spin_lock_irqsave(&pgd_lock, flags);
                memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
                       swapper_pg_dir + USER_PTRS_PER_PGD,
                       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
                memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
-               if (!HAVE_SHARED_KERNEL_PMD) {
-                       pgd_list_add(pgd);
-                       spin_unlock_irqrestore(&pgd_lock, flags);
-               }
+               pgd_list_add(pgd);
+               spin_unlock_irqrestore(&pgd_lock, flags);
        }
 }
 
@@ -305,9 +301,6 @@
 void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
 {
        unsigned long flags; /* can be called from interrupt context */
-
-       if (HAVE_SHARED_KERNEL_PMD)
-               return;
 
        spin_lock_irqsave(&pgd_lock, flags);
        pgd_list_del(pgd);
@@ -335,18 +328,24 @@
 
        if (!HAVE_SHARED_KERNEL_PMD) {
                unsigned long flags;
-               pgd_t *copy_pgd = pgd_offset_k(PAGE_OFFSET);
-               pud_t *copy_pud = pud_offset(copy_pgd, PAGE_OFFSET);
-               pmd_t *copy_pmd = pmd_offset(copy_pud, PAGE_OFFSET);
-               pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
-               ++i;
-               if (!pmd)
-                       goto out_oom;
+
+               for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
+                       pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
+                       if (!pmd)
+                               goto out_oom;
+                       set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd)));
+               }
 
                spin_lock_irqsave(&pgd_lock, flags);
-               memcpy(pmd, copy_pmd, PAGE_SIZE);
-               make_lowmem_page_readonly(pmd);
-               set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd)));
+               for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
+                       unsigned long v = (unsigned long)i << PGDIR_SHIFT;
+                       pgd_t *kpgd = pgd_offset_k(v);
+                       pud_t *kpud = pud_offset(kpgd, v);
+                       pmd_t *kpmd = pmd_offset(kpud, v);
+                       pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
+                       memcpy(pmd, kpmd, PAGE_SIZE);
+                       make_lowmem_page_readonly(pmd);
+               }
                pgd_list_add(pgd);
                spin_unlock_irqrestore(&pgd_lock, flags);
        }
@@ -374,13 +373,15 @@
                }
                if (!HAVE_SHARED_KERNEL_PMD) {
                        unsigned long flags;
-                       pmd_t *pmd = (void 
*)__va(pgd_val(pgd[USER_PTRS_PER_PGD])-1);
                        spin_lock_irqsave(&pgd_lock, flags);
                        pgd_list_del(pgd);
                        spin_unlock_irqrestore(&pgd_lock, flags);
-                       make_lowmem_page_writable(pmd);
-                       memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
-                       kmem_cache_free(pmd_cache, pmd);
+                       for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
+                               pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
+                               make_lowmem_page_writable(pmd);
+                               memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
+                               kmem_cache_free(pmd_cache, pmd);
+                       }
                }
        }
        /* in the non-PAE case, free_pgtables() clears user pgd entries */
diff -r b05e1c4bc31b -r 001ba14fbb1b patches/linux-2.6.12/pmd-shared.patch
--- a/patches/linux-2.6.12/pmd-shared.patch     Sat Nov 26 09:43:27 2005
+++ b/patches/linux-2.6.12/pmd-shared.patch     Sat Nov 26 10:32:57 2005
@@ -11,14 +11,20 @@
  
        spin_lock_irqsave(&pgd_lock, flags);
 diff -urNpP linux-2.6.12/arch/i386/mm/pgtable.c 
linux-2.6.12.new/arch/i386/mm/pgtable.c
---- linux-2.6.12/arch/i386/mm/pgtable.c        2005-11-24 21:51:49.000000000 
+0000
-+++ linux-2.6.12.new/arch/i386/mm/pgtable.c    2005-11-24 22:06:04.000000000 
+0000
-@@ -199,19 +199,22 @@ void pgd_ctor(void *pgd, kmem_cache_t *c
+--- linux-2.6.12/arch/i386/mm/pgtable.c        2005-11-26 09:55:10.000000000 
+0000
++++ linux-2.6.12.new/arch/i386/mm/pgtable.c    2005-11-26 10:20:36.000000000 
+0000
+@@ -199,19 +199,20 @@ void pgd_ctor(void *pgd, kmem_cache_t *c
  {
        unsigned long flags;
  
 -      if (PTRS_PER_PMD == 1)
--              spin_lock_irqsave(&pgd_lock, flags);
++      if (PTRS_PER_PMD > 1) {
++              if (HAVE_SHARED_KERNEL_PMD)
++                      memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
++                             swapper_pg_dir + USER_PTRS_PER_PGD,
++                             (PTRS_PER_PGD - USER_PTRS_PER_PGD) * 
sizeof(pgd_t));
++      } else {
+               spin_lock_irqsave(&pgd_lock, flags);
 -
 -      memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
 -                      swapper_pg_dir + USER_PTRS_PER_PGD,
@@ -30,53 +36,40 @@
 -      pgd_list_add(pgd);
 -      spin_unlock_irqrestore(&pgd_lock, flags);
 -      memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
-+      if (PTRS_PER_PMD > 1) {
-+              if (HAVE_SHARED_KERNEL_PMD)
-+                      memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
-+                             swapper_pg_dir, sizeof(pgd_t));
-+      } else {
-+              if (!HAVE_SHARED_KERNEL_PMD)
-+                      spin_lock_irqsave(&pgd_lock, flags);
 +              memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
 +                     swapper_pg_dir + USER_PTRS_PER_PGD,
 +                     (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
 +              memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
-+              if (!HAVE_SHARED_KERNEL_PMD) {
-+                      pgd_list_add(pgd);
-+                      spin_unlock_irqrestore(&pgd_lock, flags);
-+              }
++              pgd_list_add(pgd);
++              spin_unlock_irqrestore(&pgd_lock, flags);
 +      }
  }
  
  /* never called when PTRS_PER_PMD > 1 */
-@@ -219,6 +222,9 @@ void pgd_dtor(void *pgd, kmem_cache_t *c
- {
-       unsigned long flags; /* can be called from interrupt context */
- 
-+      if (HAVE_SHARED_KERNEL_PMD)
-+              return;
-+
-       spin_lock_irqsave(&pgd_lock, flags);
-       pgd_list_del(pgd);
-       spin_unlock_irqrestore(&pgd_lock, flags);
-@@ -238,6 +244,24 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
+@@ -238,6 +239,30 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
                        goto out_oom;
                set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
        }
 +
 +      if (!HAVE_SHARED_KERNEL_PMD) {
 +              unsigned long flags;
-+              pgd_t *copy_pgd = pgd_offset_k(PAGE_OFFSET);
-+              pud_t *copy_pud = pud_offset(copy_pgd, PAGE_OFFSET);
-+              pmd_t *copy_pmd = pmd_offset(copy_pud, PAGE_OFFSET);
-+              pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
-+                ++i;
-+              if (!pmd)
-+                      goto out_oom;
++
++              for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
++                      pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
++                      if (!pmd)
++                              goto out_oom;
++                      set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd)));
++              }
 +
 +              spin_lock_irqsave(&pgd_lock, flags);
-+              memcpy(pmd, copy_pmd, PAGE_SIZE);
-+              set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd)));
++              for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
++                      unsigned long v = (unsigned long)i << PGDIR_SHIFT;
++                      pgd_t *kpgd = pgd_offset_k(v);
++                      pud_t *kpud = pud_offset(kpgd, v);
++                      pmd_t *kpmd = pmd_offset(kpud, v);
++                      pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
++                      memcpy(pmd, kpmd, PAGE_SIZE);
++              }
 +              pgd_list_add(pgd);
 +              spin_unlock_irqrestore(&pgd_lock, flags);
 +      }
@@ -84,7 +77,7 @@
        return pgd;
  
  out_oom:
-@@ -252,9 +276,21 @@ void pgd_free(pgd_t *pgd)
+@@ -252,9 +277,23 @@ void pgd_free(pgd_t *pgd)
        int i;
  
        /* in the PAE case user pgd entries are overwritten before usage */
@@ -98,12 +91,14 @@
 +              }
 +              if (!HAVE_SHARED_KERNEL_PMD) {
 +                      unsigned long flags;
-+                      pmd_t *pmd = (void 
*)__va(pgd_val(pgd[USER_PTRS_PER_PGD])-1);
 +                      spin_lock_irqsave(&pgd_lock, flags);
 +                      pgd_list_del(pgd);
 +                      spin_unlock_irqrestore(&pgd_lock, flags);
-+                      memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
-+                      kmem_cache_free(pmd_cache, pmd);
++                      for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
++                              pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
++                              memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
++                              kmem_cache_free(pmd_cache, pmd);
++                      }
 +              }
 +      }
        /* in the non-PAE case, free_pgtables() clears user pgd entries */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

[Prev in Thread] Current Thread [Next in Thread]
  • [Xen-changelog] More cleanups to the pmd-shared patch., Xen patchbot -unstable <=