
[PATCH v3 07/11] mm/rmap: convert "enum rmap_level" to "enum pgtable_level"



Let's factor the level enum out into <linux/pgtable.h> as
"enum pgtable_level", and convert all checks for unsupported levels to
BUILD_BUG(). The code is written such that force-inlining lets the
compiler resolve the level at compile time and optimize out the
unsupported cases.

Signed-off-by: David Hildenbrand <david@xxxxxxxxxx>
---
 include/linux/pgtable.h |  8 ++++++
 include/linux/rmap.h    | 60 +++++++++++++++++++----------------------
 mm/rmap.c               | 56 +++++++++++++++++++++-----------------
 3 files changed, 66 insertions(+), 58 deletions(-)
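
Not part of the patch -- just an illustrative sketch (hypothetical demo_*
names, assuming <linux/build_bug.h>, <linux/compiler_types.h> and, after
this patch, <linux/pgtable.h>) of why BUILD_BUG() is safe in the default
cases: every caller passes a compile-time constant level into a
force-inlined helper, so the compiler prunes the unreachable switch arms
and the build only fails if an unsupported or non-constant level is ever
passed.

#include <linux/build_bug.h>
#include <linux/compiler_types.h>
#include <linux/pgtable.h>      /* enum pgtable_level, once this patch lands */

static __always_inline int demo_pages_per_entry(enum pgtable_level level)
{
        switch (level) {
        case PGTABLE_LEVEL_PTE:
                return 1;
        case PGTABLE_LEVEL_PMD:
        case PGTABLE_LEVEL_PUD:
                return 512;     /* placeholder; the real code uses HPAGE_*_NR */
        default:
                /*
                 * Provably dead for every constant caller, so the compiler
                 * discards it; a caller passing an unsupported or
                 * non-constant level breaks the build instead of warning
                 * at runtime.
                 */
                BUILD_BUG();
        }
        return 0;
}

/* Each wrapper passes a constant, yielding a fully specialized body. */
static inline int demo_pages_per_pte_entry(void)
{
        return demo_pages_per_entry(PGTABLE_LEVEL_PTE);
}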

diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 4c035637eeb77..bff5c4241bf2e 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1958,6 +1958,14 @@ static inline bool arch_has_pfn_modify_check(void)
 /* Page-Table Modification Mask */
 typedef unsigned int pgtbl_mod_mask;
 
+enum pgtable_level {
+       PGTABLE_LEVEL_PTE = 0,
+       PGTABLE_LEVEL_PMD,
+       PGTABLE_LEVEL_PUD,
+       PGTABLE_LEVEL_P4D,
+       PGTABLE_LEVEL_PGD,
+};
+
 #endif /* !__ASSEMBLY__ */
 
 #if !defined(MAX_POSSIBLE_PHYSMEM_BITS) && !defined(CONFIG_64BIT)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 6cd020eea37a2..9d40d127bdb78 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -394,18 +394,8 @@ typedef int __bitwise rmap_t;
 /* The anonymous (sub)page is exclusive to a single process. */
 #define RMAP_EXCLUSIVE         ((__force rmap_t)BIT(0))
 
-/*
- * Internally, we're using an enum to specify the granularity. We make the
- * compiler emit specialized code for each granularity.
- */
-enum rmap_level {
-       RMAP_LEVEL_PTE = 0,
-       RMAP_LEVEL_PMD,
-       RMAP_LEVEL_PUD,
-};
-
 static inline void __folio_rmap_sanity_checks(const struct folio *folio,
-               const struct page *page, int nr_pages, enum rmap_level level)
+               const struct page *page, int nr_pages, enum pgtable_level level)
 {
        /* hugetlb folios are handled separately. */
        VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
@@ -427,18 +417,18 @@ static inline void __folio_rmap_sanity_checks(const struct folio *folio,
        VM_WARN_ON_FOLIO(page_folio(page + nr_pages - 1) != folio, folio);
 
        switch (level) {
-       case RMAP_LEVEL_PTE:
+       case PGTABLE_LEVEL_PTE:
                break;
-       case RMAP_LEVEL_PMD:
+       case PGTABLE_LEVEL_PMD:
                /*
                 * We don't support folios larger than a single PMD yet. So
-                * when RMAP_LEVEL_PMD is set, we assume that we are creating
+                * when PGTABLE_LEVEL_PMD is set, we assume that we are creating
                 * a single "entire" mapping of the folio.
                 */
                VM_WARN_ON_FOLIO(folio_nr_pages(folio) != HPAGE_PMD_NR, folio);
                VM_WARN_ON_FOLIO(nr_pages != HPAGE_PMD_NR, folio);
                break;
-       case RMAP_LEVEL_PUD:
+       case PGTABLE_LEVEL_PUD:
                /*
                 * Assume that we are creating a single "entire" mapping of the
                 * folio.
@@ -447,7 +437,7 @@ static inline void __folio_rmap_sanity_checks(const struct folio *folio,
                VM_WARN_ON_FOLIO(nr_pages != HPAGE_PUD_NR, folio);
                break;
        default:
-               VM_WARN_ON_ONCE(true);
+               BUILD_BUG();
        }
 
        /*
@@ -567,14 +557,14 @@ static inline void hugetlb_remove_rmap(struct folio *folio)
 
 static __always_inline void __folio_dup_file_rmap(struct folio *folio,
                struct page *page, int nr_pages, struct vm_area_struct *dst_vma,
-               enum rmap_level level)
+               enum pgtable_level level)
 {
        const int orig_nr_pages = nr_pages;
 
        __folio_rmap_sanity_checks(folio, page, nr_pages, level);
 
        switch (level) {
-       case RMAP_LEVEL_PTE:
+       case PGTABLE_LEVEL_PTE:
                if (!folio_test_large(folio)) {
                        atomic_inc(&folio->_mapcount);
                        break;
@@ -587,11 +577,13 @@ static __always_inline void __folio_dup_file_rmap(struct folio *folio,
                }
                folio_add_large_mapcount(folio, orig_nr_pages, dst_vma);
                break;
-       case RMAP_LEVEL_PMD:
-       case RMAP_LEVEL_PUD:
+       case PGTABLE_LEVEL_PMD:
+       case PGTABLE_LEVEL_PUD:
                atomic_inc(&folio->_entire_mapcount);
                folio_inc_large_mapcount(folio, dst_vma);
                break;
+       default:
+               BUILD_BUG();
        }
 }
 
@@ -609,13 +601,13 @@ static __always_inline void __folio_dup_file_rmap(struct folio *folio,
 static inline void folio_dup_file_rmap_ptes(struct folio *folio,
                struct page *page, int nr_pages, struct vm_area_struct *dst_vma)
 {
-       __folio_dup_file_rmap(folio, page, nr_pages, dst_vma, RMAP_LEVEL_PTE);
+       __folio_dup_file_rmap(folio, page, nr_pages, dst_vma, PGTABLE_LEVEL_PTE);
 }
 
 static __always_inline void folio_dup_file_rmap_pte(struct folio *folio,
                struct page *page, struct vm_area_struct *dst_vma)
 {
-       __folio_dup_file_rmap(folio, page, 1, dst_vma, RMAP_LEVEL_PTE);
+       __folio_dup_file_rmap(folio, page, 1, dst_vma, PGTABLE_LEVEL_PTE);
 }
 
 /**
@@ -632,7 +624,7 @@ static inline void folio_dup_file_rmap_pmd(struct folio *folio,
                struct page *page, struct vm_area_struct *dst_vma)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-       __folio_dup_file_rmap(folio, page, HPAGE_PMD_NR, dst_vma, RMAP_LEVEL_PTE);
+       __folio_dup_file_rmap(folio, page, HPAGE_PMD_NR, dst_vma, PGTABLE_LEVEL_PTE);
 #else
        WARN_ON_ONCE(true);
 #endif
@@ -640,7 +632,7 @@ static inline void folio_dup_file_rmap_pmd(struct folio *folio,
 
 static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
                struct page *page, int nr_pages, struct vm_area_struct *dst_vma,
-               struct vm_area_struct *src_vma, enum rmap_level level)
+               struct vm_area_struct *src_vma, enum pgtable_level level)
 {
        const int orig_nr_pages = nr_pages;
        bool maybe_pinned;
@@ -665,7 +657,7 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
         * copying if the folio maybe pinned.
         */
        switch (level) {
-       case RMAP_LEVEL_PTE:
+       case PGTABLE_LEVEL_PTE:
                if (unlikely(maybe_pinned)) {
                        for (i = 0; i < nr_pages; i++)
                                if (PageAnonExclusive(page + i))
@@ -687,8 +679,8 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
                } while (page++, --nr_pages > 0);
                folio_add_large_mapcount(folio, orig_nr_pages, dst_vma);
                break;
-       case RMAP_LEVEL_PMD:
-       case RMAP_LEVEL_PUD:
+       case PGTABLE_LEVEL_PMD:
+       case PGTABLE_LEVEL_PUD:
                if (PageAnonExclusive(page)) {
                        if (unlikely(maybe_pinned))
                                return -EBUSY;
@@ -697,6 +689,8 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
                atomic_inc(&folio->_entire_mapcount);
                folio_inc_large_mapcount(folio, dst_vma);
                break;
+       default:
+               BUILD_BUG();
        }
        return 0;
 }
@@ -730,7 +724,7 @@ static inline int folio_try_dup_anon_rmap_ptes(struct folio *folio,
                struct vm_area_struct *src_vma)
 {
        return __folio_try_dup_anon_rmap(folio, page, nr_pages, dst_vma,
-                                        src_vma, RMAP_LEVEL_PTE);
+                                        src_vma, PGTABLE_LEVEL_PTE);
 }
 
 static __always_inline int folio_try_dup_anon_rmap_pte(struct folio *folio,
@@ -738,7 +732,7 @@ static __always_inline int folio_try_dup_anon_rmap_pte(struct folio *folio,
                struct vm_area_struct *src_vma)
 {
        return __folio_try_dup_anon_rmap(folio, page, 1, dst_vma, src_vma,
-                                        RMAP_LEVEL_PTE);
+                                        PGTABLE_LEVEL_PTE);
 }
 
 /**
@@ -770,7 +764,7 @@ static inline int folio_try_dup_anon_rmap_pmd(struct folio *folio,
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
        return __folio_try_dup_anon_rmap(folio, page, HPAGE_PMD_NR, dst_vma,
-                                        src_vma, RMAP_LEVEL_PMD);
+                                        src_vma, PGTABLE_LEVEL_PMD);
 #else
        WARN_ON_ONCE(true);
        return -EBUSY;
@@ -778,7 +772,7 @@ static inline int folio_try_dup_anon_rmap_pmd(struct folio *folio,
 }
 
 static __always_inline int __folio_try_share_anon_rmap(struct folio *folio,
-               struct page *page, int nr_pages, enum rmap_level level)
+               struct page *page, int nr_pages, enum pgtable_level level)
 {
        VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
        VM_WARN_ON_FOLIO(!PageAnonExclusive(page), folio);
@@ -873,7 +867,7 @@ static __always_inline int __folio_try_share_anon_rmap(struct folio *folio,
 static inline int folio_try_share_anon_rmap_pte(struct folio *folio,
                struct page *page)
 {
-       return __folio_try_share_anon_rmap(folio, page, 1, RMAP_LEVEL_PTE);
+       return __folio_try_share_anon_rmap(folio, page, 1, PGTABLE_LEVEL_PTE);
 }
 
 /**
@@ -904,7 +898,7 @@ static inline int folio_try_share_anon_rmap_pmd(struct folio *folio,
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
        return __folio_try_share_anon_rmap(folio, page, HPAGE_PMD_NR,
-                                          RMAP_LEVEL_PMD);
+                                          PGTABLE_LEVEL_PMD);
 #else
        WARN_ON_ONCE(true);
        return -EBUSY;
diff --git a/mm/rmap.c b/mm/rmap.c
index 84a8d8b02ef77..0e9c4041f8687 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1265,7 +1265,7 @@ static void __folio_mod_stat(struct folio *folio, int nr, int nr_pmdmapped)
 
 static __always_inline void __folio_add_rmap(struct folio *folio,
                struct page *page, int nr_pages, struct vm_area_struct *vma,
-               enum rmap_level level)
+               enum pgtable_level level)
 {
        atomic_t *mapped = &folio->_nr_pages_mapped;
        const int orig_nr_pages = nr_pages;
@@ -1274,7 +1274,7 @@ static __always_inline void __folio_add_rmap(struct folio *folio,
        __folio_rmap_sanity_checks(folio, page, nr_pages, level);
 
        switch (level) {
-       case RMAP_LEVEL_PTE:
+       case PGTABLE_LEVEL_PTE:
                if (!folio_test_large(folio)) {
                        nr = atomic_inc_and_test(&folio->_mapcount);
                        break;
@@ -1300,11 +1300,11 @@ static __always_inline void __folio_add_rmap(struct folio *folio,
 
                folio_add_large_mapcount(folio, orig_nr_pages, vma);
                break;
-       case RMAP_LEVEL_PMD:
-       case RMAP_LEVEL_PUD:
+       case PGTABLE_LEVEL_PMD:
+       case PGTABLE_LEVEL_PUD:
                first = atomic_inc_and_test(&folio->_entire_mapcount);
                if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
-                       if (level == RMAP_LEVEL_PMD && first)
+                       if (level == PGTABLE_LEVEL_PMD && first)
                                nr_pmdmapped = folio_large_nr_pages(folio);
                        nr = folio_inc_return_large_mapcount(folio, vma);
                        if (nr == 1)
@@ -1323,7 +1323,7 @@ static __always_inline void __folio_add_rmap(struct folio *folio,
                                 * We only track PMD mappings of PMD-sized
                                 * folios separately.
                                 */
-                               if (level == RMAP_LEVEL_PMD)
+                               if (level == PGTABLE_LEVEL_PMD)
                                        nr_pmdmapped = nr_pages;
                                nr = nr_pages - (nr & FOLIO_PAGES_MAPPED);
                                /* Raced ahead of a remove and another add? */
@@ -1336,6 +1336,8 @@ static __always_inline void __folio_add_rmap(struct folio *folio,
                }
                folio_inc_large_mapcount(folio, vma);
                break;
+       default:
+               BUILD_BUG();
        }
        __folio_mod_stat(folio, nr, nr_pmdmapped);
 }
@@ -1427,7 +1429,7 @@ static void __page_check_anon_rmap(const struct folio *folio,
 
 static __always_inline void __folio_add_anon_rmap(struct folio *folio,
                struct page *page, int nr_pages, struct vm_area_struct *vma,
-               unsigned long address, rmap_t flags, enum rmap_level level)
+               unsigned long address, rmap_t flags, enum pgtable_level level)
 {
        int i;
 
@@ -1440,20 +1442,22 @@ static __always_inline void __folio_add_anon_rmap(struct folio *folio,
 
        if (flags & RMAP_EXCLUSIVE) {
                switch (level) {
-               case RMAP_LEVEL_PTE:
+               case PGTABLE_LEVEL_PTE:
                        for (i = 0; i < nr_pages; i++)
                                SetPageAnonExclusive(page + i);
                        break;
-               case RMAP_LEVEL_PMD:
+               case PGTABLE_LEVEL_PMD:
                        SetPageAnonExclusive(page);
                        break;
-               case RMAP_LEVEL_PUD:
+               case PGTABLE_LEVEL_PUD:
                        /*
                         * Keep the compiler happy, we don't support anonymous
                         * PUD mappings.
                         */
                        WARN_ON_ONCE(1);
                        break;
+               default:
+                       BUILD_BUG();
                }
        }
 
@@ -1507,7 +1511,7 @@ void folio_add_anon_rmap_ptes(struct folio *folio, struct page *page,
                rmap_t flags)
 {
        __folio_add_anon_rmap(folio, page, nr_pages, vma, address, flags,
-                             RMAP_LEVEL_PTE);
+                             PGTABLE_LEVEL_PTE);
 }
 
 /**
@@ -1528,7 +1532,7 @@ void folio_add_anon_rmap_pmd(struct folio *folio, struct page *page,
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
        __folio_add_anon_rmap(folio, page, HPAGE_PMD_NR, vma, address, flags,
-                             RMAP_LEVEL_PMD);
+                             PGTABLE_LEVEL_PMD);
 #else
        WARN_ON_ONCE(true);
 #endif
@@ -1609,7 +1613,7 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
 
 static __always_inline void __folio_add_file_rmap(struct folio *folio,
                struct page *page, int nr_pages, struct vm_area_struct *vma,
-               enum rmap_level level)
+               enum pgtable_level level)
 {
        VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);
 
@@ -1634,7 +1638,7 @@ static __always_inline void __folio_add_file_rmap(struct folio *folio,
 void folio_add_file_rmap_ptes(struct folio *folio, struct page *page,
                int nr_pages, struct vm_area_struct *vma)
 {
-       __folio_add_file_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE);
+       __folio_add_file_rmap(folio, page, nr_pages, vma, PGTABLE_LEVEL_PTE);
 }
 
 /**
@@ -1651,7 +1655,7 @@ void folio_add_file_rmap_pmd(struct folio *folio, struct page *page,
                struct vm_area_struct *vma)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-       __folio_add_file_rmap(folio, page, HPAGE_PMD_NR, vma, RMAP_LEVEL_PMD);
+       __folio_add_file_rmap(folio, page, HPAGE_PMD_NR, vma, PGTABLE_LEVEL_PMD);
 #else
        WARN_ON_ONCE(true);
 #endif
@@ -1672,7 +1676,7 @@ void folio_add_file_rmap_pud(struct folio *folio, struct page *page,
 {
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
        defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
-       __folio_add_file_rmap(folio, page, HPAGE_PUD_NR, vma, RMAP_LEVEL_PUD);
+       __folio_add_file_rmap(folio, page, HPAGE_PUD_NR, vma, PGTABLE_LEVEL_PUD);
 #else
        WARN_ON_ONCE(true);
 #endif
@@ -1680,7 +1684,7 @@ void folio_add_file_rmap_pud(struct folio *folio, struct page *page,
 
 static __always_inline void __folio_remove_rmap(struct folio *folio,
                struct page *page, int nr_pages, struct vm_area_struct *vma,
-               enum rmap_level level)
+               enum pgtable_level level)
 {
        atomic_t *mapped = &folio->_nr_pages_mapped;
        int last = 0, nr = 0, nr_pmdmapped = 0;
@@ -1689,7 +1693,7 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
        __folio_rmap_sanity_checks(folio, page, nr_pages, level);
 
        switch (level) {
-       case RMAP_LEVEL_PTE:
+       case PGTABLE_LEVEL_PTE:
                if (!folio_test_large(folio)) {
                        nr = atomic_add_negative(-1, &folio->_mapcount);
                        break;
@@ -1719,11 +1723,11 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
 
                partially_mapped = nr && atomic_read(mapped);
                break;
-       case RMAP_LEVEL_PMD:
-       case RMAP_LEVEL_PUD:
+       case PGTABLE_LEVEL_PMD:
+       case PGTABLE_LEVEL_PUD:
                if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
                        last = atomic_add_negative(-1, &folio->_entire_mapcount);
-                       if (level == RMAP_LEVEL_PMD && last)
+                       if (level == PGTABLE_LEVEL_PMD && last)
                                nr_pmdmapped = folio_large_nr_pages(folio);
                        nr = folio_dec_return_large_mapcount(folio, vma);
                        if (!nr) {
@@ -1743,7 +1747,7 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
                        nr = atomic_sub_return_relaxed(ENTIRELY_MAPPED, mapped);
                        if (likely(nr < ENTIRELY_MAPPED)) {
                                nr_pages = folio_large_nr_pages(folio);
-                               if (level == RMAP_LEVEL_PMD)
+                               if (level == PGTABLE_LEVEL_PMD)
                                        nr_pmdmapped = nr_pages;
                                nr = nr_pages - (nr & FOLIO_PAGES_MAPPED);
                                /* Raced ahead of another remove and an add? */
@@ -1757,6 +1761,8 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
 
                partially_mapped = nr && nr < nr_pmdmapped;
                break;
+       default:
+               BUILD_BUG();
        }
 
        /*
@@ -1796,7 +1802,7 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
 void folio_remove_rmap_ptes(struct folio *folio, struct page *page,
                int nr_pages, struct vm_area_struct *vma)
 {
-       __folio_remove_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE);
+       __folio_remove_rmap(folio, page, nr_pages, vma, PGTABLE_LEVEL_PTE);
 }
 
 /**
@@ -1813,7 +1819,7 @@ void folio_remove_rmap_pmd(struct folio *folio, struct page *page,
                struct vm_area_struct *vma)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-       __folio_remove_rmap(folio, page, HPAGE_PMD_NR, vma, RMAP_LEVEL_PMD);
+       __folio_remove_rmap(folio, page, HPAGE_PMD_NR, vma, PGTABLE_LEVEL_PMD);
 #else
        WARN_ON_ONCE(true);
 #endif
@@ -1834,7 +1840,7 @@ void folio_remove_rmap_pud(struct folio *folio, struct page *page,
 {
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
        defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
-       __folio_remove_rmap(folio, page, HPAGE_PUD_NR, vma, RMAP_LEVEL_PUD);
+       __folio_remove_rmap(folio, page, HPAGE_PUD_NR, vma, PGTABLE_LEVEL_PUD);
 #else
        WARN_ON_ONCE(true);
 #endif
-- 
2.50.1