
Re: [PATCH] xen: replace xen_remap() with memremap()



On Mon, 30 May 2022, Juergen Gross wrote:
> xen_remap() is used to establish mappings for frames not under direct
> control of the kernel: for Xenstore and console ring pages, and for
> grant pages of non-PV guests.
> 
> Today xen_remap() is defined to use ioremap() on x86 (doing uncached
> mappings), and ioremap_cache() on Arm (doing cached mappings).
> 
> Uncached mappings are bad for performance in those use cases, so they
> should be avoided where possible. Since none of the xen_remap() use
> cases requires an uncached mapping (the mapped area is always physical
> RAM), a mapping using the standard WB cache mode is fine.
> 
> In addition, sparse flags some of the xen_remap() use cases as not
> suitable for I/O memory, because the result is not annotated with the
> __iomem modifier. So eliminate xen_remap() completely and replace all
> use cases with memremap() specifying the MEMREMAP_WB caching mode.
> 
> xen_unmap() can be replaced with memunmap().
> 
> Reported-by: kernel test robot <lkp@xxxxxxxxx>
> Signed-off-by: Juergen Gross <jgross@xxxxxxxx>

Acked-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
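
For readers following along, a minimal sketch (not part of the patch; phys and
len are placeholder values for a RAM-backed page such as the Xenstore ring) of
the API difference the commit message describes: ioremap() hands back an
__iomem-annotated pointer that is uncached on x86 and that sparse expects to
be accessed through readl()/writel(), while memremap(..., MEMREMAP_WB) hands
back a plain, write-back-cached pointer that can be dereferenced directly,
which matches how these rings are actually used.

#include <linux/io.h>

/* Hypothetical helper, for illustration only. */
static void xen_remap_sketch(phys_addr_t phys, size_t len)
{
	void __iomem *io;
	void *wb;

	/* Old path (x86): uncached mapping, __iomem pointer, accessor needed. */
	io = ioremap(phys, len);
	if (io) {
		(void)readl(io);
		iounmap(io);
	}

	/* New path: write-back cached mapping, plain pointer, direct access. */
	wb = memremap(phys, len, MEMREMAP_WB);
	if (wb) {
		(void)*(u32 *)wb;
		memunmap(wb);
	}
}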


> ---
>  arch/x86/include/asm/xen/page.h   | 3 ---
>  drivers/tty/hvc/hvc_xen.c         | 2 +-
>  drivers/xen/grant-table.c         | 6 +++---
>  drivers/xen/xenbus/xenbus_probe.c | 8 ++++----
>  include/xen/arm/page.h            | 3 ---
>  5 files changed, 8 insertions(+), 14 deletions(-)
> 
> diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
> index 1fc67df50014..fa9ec20783fa 100644
> --- a/arch/x86/include/asm/xen/page.h
> +++ b/arch/x86/include/asm/xen/page.h
> @@ -347,9 +347,6 @@ unsigned long arbitrary_virt_to_mfn(void *vaddr);
>  void make_lowmem_page_readonly(void *vaddr);
>  void make_lowmem_page_readwrite(void *vaddr);
>  
> -#define xen_remap(cookie, size) ioremap((cookie), (size))
> -#define xen_unmap(cookie) iounmap((cookie))
> -
>  static inline bool xen_arch_need_swiotlb(struct device *dev,
>                                        phys_addr_t phys,
>                                        dma_addr_t dev_addr)
> diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
> index ebaf7500f48f..7c23112dc923 100644
> --- a/drivers/tty/hvc/hvc_xen.c
> +++ b/drivers/tty/hvc/hvc_xen.c
> @@ -253,7 +253,7 @@ static int xen_hvm_console_init(void)
>       if (r < 0 || v == 0)
>               goto err;
>       gfn = v;
> -     info->intf = xen_remap(gfn << XEN_PAGE_SHIFT, XEN_PAGE_SIZE);
> +     info->intf = memremap(gfn << XEN_PAGE_SHIFT, XEN_PAGE_SIZE, MEMREMAP_WB);
>       if (info->intf == NULL)
>               goto err;
>       info->vtermno = HVC_COOKIE;
> diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
> index 1a1aec0a88a1..2f4f0ed5d8f8 100644
> --- a/drivers/xen/grant-table.c
> +++ b/drivers/xen/grant-table.c
> @@ -632,7 +632,7 @@ int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
>       if (xen_auto_xlat_grant_frames.count)
>               return -EINVAL;
>  
> -     vaddr = xen_remap(addr, XEN_PAGE_SIZE * max_nr_gframes);
> +     vaddr = memremap(addr, XEN_PAGE_SIZE * max_nr_gframes, MEMREMAP_WB);
>       if (vaddr == NULL) {
>               pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
>                       &addr);
> @@ -640,7 +640,7 @@ int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
>       }
>       pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
>       if (!pfn) {
> -             xen_unmap(vaddr);
> +             memunmap(vaddr);
>               return -ENOMEM;
>       }
>       for (i = 0; i < max_nr_gframes; i++)
> @@ -659,7 +659,7 @@ void gnttab_free_auto_xlat_frames(void)
>       if (!xen_auto_xlat_grant_frames.count)
>               return;
>       kfree(xen_auto_xlat_grant_frames.pfn);
> -     xen_unmap(xen_auto_xlat_grant_frames.vaddr);
> +     memunmap(xen_auto_xlat_grant_frames.vaddr);
>  
>       xen_auto_xlat_grant_frames.pfn = NULL;
>       xen_auto_xlat_grant_frames.count = 0;
> diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
> index d367f2bd2b93..58b732dcbfb8 100644
> --- a/drivers/xen/xenbus/xenbus_probe.c
> +++ b/drivers/xen/xenbus/xenbus_probe.c
> @@ -752,8 +752,8 @@ static void xenbus_probe(void)
>       xenstored_ready = 1;
>  
>       if (!xen_store_interface) {
> -             xen_store_interface = xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
> -                                             XEN_PAGE_SIZE);
> +             xen_store_interface = memremap(xen_store_gfn << XEN_PAGE_SHIFT,
> +                                            XEN_PAGE_SIZE, MEMREMAP_WB);
>               /*
>                * Now it is safe to free the IRQ used for xenstore late
>                * initialization. No need to unbind: it is about to be
> @@ -1009,8 +1009,8 @@ static int __init xenbus_init(void)
>  #endif
>                       xen_store_gfn = (unsigned long)v;
>                       xen_store_interface =
> -                             xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
> -                                       XEN_PAGE_SIZE);
> +                             memremap(xen_store_gfn << XEN_PAGE_SHIFT,
> +                                      XEN_PAGE_SIZE, MEMREMAP_WB);
>                       if (xen_store_interface->connection != XENSTORE_CONNECTED)
>                               wait = true;
>               }
> diff --git a/include/xen/arm/page.h b/include/xen/arm/page.h
> index 7e199c6656b9..e5c84ff28c8b 100644
> --- a/include/xen/arm/page.h
> +++ b/include/xen/arm/page.h
> @@ -109,9 +109,6 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
>       return __set_phys_to_machine(pfn, mfn);
>  }
>  
> -#define xen_remap(cookie, size) ioremap_cache((cookie), (size))
> -#define xen_unmap(cookie) iounmap((cookie))
> -
>  bool xen_arch_need_swiotlb(struct device *dev,
>                          phys_addr_t phys,
>                          dma_addr_t dev_addr);
> -- 
> 2.35.3
> 



 

