[PATCH v1 8/9] mm: introduce and use vm_normal_page_pud()
Let's introduce vm_normal_page_pud(), which ends up being fairly simple
because of our new common helpers and there not being a PUD-sized zero
folio.

Use vm_normal_page_pud() in folio_walk_start() to resolve a TODO,
structuring the code like the other (pmd/pte) cases.

Defer introducing vm_normal_folio_pud() until really used.

Signed-off-by: David Hildenbrand <david@xxxxxxxxxx>
---
 include/linux/mm.h |  2 ++
 mm/memory.c        | 27 +++++++++++++++++++++++++++
 mm/pagewalk.c      | 20 ++++++++++----------
 3 files changed, 39 insertions(+), 10 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 611f337cc36c9..6877c894fe526 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2347,6 +2347,8 @@ struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
 				  unsigned long addr, pmd_t pmd);
 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
 				pmd_t pmd);
+struct page *vm_normal_page_pud(struct vm_area_struct *vma, unsigned long addr,
+				pud_t pud);
 
 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 		  unsigned long size);
diff --git a/mm/memory.c b/mm/memory.c
index d5f80419989b9..f1834a19a2f1e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -802,6 +802,33 @@ struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
 		return page_folio(page);
 	return NULL;
 }
+
+/**
+ * vm_normal_page_pud() - Get the "struct page" associated with a PUD
+ * @vma: The VMA mapping the @pud.
+ * @addr: The address where the @pud is mapped.
+ * @pud: The PUD.
+ *
+ * Get the "struct page" associated with a PUD. See vm_normal_page_pfn()
+ * for details.
+ *
+ * Return: Returns the "struct page" if this is a "normal" mapping. Returns
+ *	   NULL if this is a "special" mapping.
+ */
+struct page *vm_normal_page_pud(struct vm_area_struct *vma,
+		unsigned long addr, pud_t pud)
+{
+	unsigned long pfn = pud_pfn(pud);
+
+	if (unlikely(pud_special(pud))) {
+		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
+			return NULL;
+
+		print_bad_page_map(vma, addr, pud_val(pud), NULL);
+		return NULL;
+	}
+	return vm_normal_page_pfn(vma, addr, pfn, pud_val(pud));
+}
 #endif
 
 /**
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 648038247a8d2..c6753d370ff4e 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -902,23 +902,23 @@ struct folio *folio_walk_start(struct folio_walk *fw,
 		fw->pudp = pudp;
 		fw->pud = pud;
 
-		/*
-		 * TODO: FW_MIGRATION support for PUD migration entries
-		 * once there are relevant users.
-		 */
-		if (!pud_present(pud) || pud_special(pud)) {
+		if (pud_none(pud)) {
 			spin_unlock(ptl);
 			goto not_found;
-		} else if (!pud_leaf(pud)) {
+		} else if (pud_present(pud) && !pud_leaf(pud)) {
 			spin_unlock(ptl);
 			goto pmd_table;
+		} else if (pud_present(pud)) {
+			page = vm_normal_page_pud(vma, addr, pud);
+			if (page)
+				goto found;
 		}
 		/*
-		 * TODO: vm_normal_page_pud() will be handy once we want to
-		 * support PUD mappings in VM_PFNMAP|VM_MIXEDMAP VMAs.
+		 * TODO: FW_MIGRATION support for PUD migration entries
+		 * once there are relevant users.
 		 */
-		page = pud_page(pud);
-		goto found;
+		spin_unlock(ptl);
+		goto not_found;
 	}
 
 pmd_table:
-- 
2.50.1
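For reference, here is how a caller outside this series might use the new
helper. This is an illustrative sketch only, not part of the patch:
pud_leaf_to_page() is a hypothetical wrapper name, and the locking simply
mirrors the folio_walk_start() hunk above.

#include <linux/mm.h>

static struct page *pud_leaf_to_page(struct vm_area_struct *vma,
		unsigned long addr, pud_t *pudp)
{
	struct page *page = NULL;
	spinlock_t *ptl;
	pud_t pud;

	/* Serialize against concurrent changes to this PUD entry. */
	ptl = pud_lock(vma->vm_mm, pudp);
	pud = pudp_get(pudp);

	/*
	 * Only a present PUD leaf maps a PFN directly. For "special"
	 * mappings (e.g. VM_PFNMAP without a backing "struct page"),
	 * vm_normal_page_pud() returns NULL.
	 */
	if (pud_present(pud) && pud_leaf(pud))
		page = vm_normal_page_pud(vma, addr, pud);

	spin_unlock(ptl);
	return page;
}

As with the pte/pmd variants, a NULL result does not indicate an error;
it simply means there is no "normal" page to hand out for this entry.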