[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [PATCH v2 8/9] mm: introduce and use vm_normal_page_pud()



On Thu, Jul 17, 2025 at 01:52:11PM +0200, David Hildenbrand wrote:
> Let's introduce vm_normal_page_pud(), which ends up being fairly simple
> because of our new common helpers and there not being a PUD-sized zero
> folio.
>
> Use vm_normal_page_pud() in folio_walk_start() to resolve a TODO,
> structuring the code like the other (pmd/pte) cases. Defer
> introducing vm_normal_folio_pud() until really used.

I mean fine :P but does anybody really use this?

>
> Reviewed-by: Oscar Salvador <osalvador@xxxxxxx>
> Signed-off-by: David Hildenbrand <david@xxxxxxxxxx>

Seems ok to me, so:

Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@xxxxxxxxxx>

> ---
>  include/linux/mm.h |  2 ++
>  mm/memory.c        | 27 +++++++++++++++++++++++++++
>  mm/pagewalk.c      | 20 ++++++++++----------
>  3 files changed, 39 insertions(+), 10 deletions(-)
>
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index abc47f1f307fb..0eb991262fbbf 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -2349,6 +2349,8 @@ struct folio *vm_normal_folio_pmd(struct vm_area_struct 
> *vma,
>                                 unsigned long addr, pmd_t pmd);
>  struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long 
> addr,
>                               pmd_t pmd);
> +struct page *vm_normal_page_pud(struct vm_area_struct *vma, unsigned long 
> addr,
> +             pud_t pud);
>
>  void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
>                 unsigned long size);
> diff --git a/mm/memory.c b/mm/memory.c
> index c43ae5e4d7644..00a0d7ae3ba4a 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -796,6 +796,33 @@ struct folio *vm_normal_folio_pmd(struct vm_area_struct 
> *vma,
>               return page_folio(page);
>       return NULL;
>  }
> +
> +/**
> + * vm_normal_page_pud() - Get the "struct page" associated with a PUD
> + * @vma: The VMA mapping the @pud.
> + * @addr: The address where the @pud is mapped.
> + * @pud: The PUD.
> + *
> + * Get the "struct page" associated with a PUD. See vm_normal_page_pfn()
> + * for details.
> + *
> + * Return: Returns the "struct page" if this is a "normal" mapping. Returns
> + *      NULL if this is a "special" mapping.
> + */
> +struct page *vm_normal_page_pud(struct vm_area_struct *vma,
> +             unsigned long addr, pud_t pud)
> +{
> +     unsigned long pfn = pud_pfn(pud);
> +
> +     if (unlikely(pud_special(pud))) {
> +             if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
> +                     return NULL;
> +
> +             print_bad_page_map(vma, addr, pud_val(pud), NULL);
> +             return NULL;
> +     }
> +     return vm_normal_page_pfn(vma, addr, pfn, pud_val(pud));
> +}
>  #endif
>
>  /**
> diff --git a/mm/pagewalk.c b/mm/pagewalk.c
> index 648038247a8d2..c6753d370ff4e 100644
> --- a/mm/pagewalk.c
> +++ b/mm/pagewalk.c
> @@ -902,23 +902,23 @@ struct folio *folio_walk_start(struct folio_walk *fw,
>               fw->pudp = pudp;
>               fw->pud = pud;
>
> -             /*
> -              * TODO: FW_MIGRATION support for PUD migration entries
> -              * once there are relevant users.
> -              */
> -             if (!pud_present(pud) || pud_special(pud)) {
> +             if (pud_none(pud)) {
>                       spin_unlock(ptl);
>                       goto not_found;
> -             } else if (!pud_leaf(pud)) {
> +             } else if (pud_present(pud) && !pud_leaf(pud)) {
>                       spin_unlock(ptl);
>                       goto pmd_table;
> +             } else if (pud_present(pud)) {
> +                     page = vm_normal_page_pud(vma, addr, pud);
> +                     if (page)
> +                             goto found;
>               }
>               /*
> -              * TODO: vm_normal_page_pud() will be handy once we want to
> -              * support PUD mappings in VM_PFNMAP|VM_MIXEDMAP VMAs.
> +              * TODO: FW_MIGRATION support for PUD migration entries
> +              * once there are relevant users.
>                */
> -             page = pud_page(pud);
> -             goto found;
> +             spin_unlock(ptl);
> +             goto not_found;
>       }
>
>  pmd_table:
> --
> 2.50.1
>



 


Rackspace

Lists.xenproject.org is hosted with Rackspace, which monitors our
servers 24x7x365 and backs them with Rackspace's Fanatical Support®.