
[Xen-devel] [PATCH 06/11] swiotlb-xen: always use dma-direct helpers to alloc coherent pages



x86 currently open-codes the allocation with __get_free_pages, but the
dma-direct helpers work just as well there, with the added benefit of
using the CMA pool if available.  The biggest advantage is of course
removing a pointless bit of architecture-specific code.
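
For reference, the dma-direct helpers that replace the removed wrappers
are declared in include/linux/dma-direct.h; at the time of this series
their signatures are roughly the following (shown here only for context,
not part of the patch):

        /* coherent allocation helpers implemented in kernel/dma/direct.c */
        void *dma_direct_alloc(struct device *dev, size_t size,
                        dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
        void dma_direct_free(struct device *dev, size_t size,
                        void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs);

They take the same arguments as xen_alloc_coherent_pages() and
xen_free_coherent_pages(), so the swiotlb-xen callers can switch over
without any other changes.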

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 arch/x86/include/asm/xen/page-coherent.h | 16 ----------------
 drivers/xen/swiotlb-xen.c                |  7 +++----
 include/xen/arm/page-coherent.h          | 12 ------------
 3 files changed, 3 insertions(+), 32 deletions(-)

diff --git a/arch/x86/include/asm/xen/page-coherent.h b/arch/x86/include/asm/xen/page-coherent.h
index 116777e7f387..8ee33c5edded 100644
--- a/arch/x86/include/asm/xen/page-coherent.h
+++ b/arch/x86/include/asm/xen/page-coherent.h
@@ -5,22 +5,6 @@
 #include <asm/page.h>
 #include <linux/dma-mapping.h>
 
-static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
-               dma_addr_t *dma_handle, gfp_t flags,
-               unsigned long attrs)
-{
-       void *vstart = (void*)__get_free_pages(flags, get_order(size));
-       *dma_handle = virt_to_phys(vstart);
-       return vstart;
-}
-
-static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
-               void *cpu_addr, dma_addr_t dma_handle,
-               unsigned long attrs)
-{
-       free_pages((unsigned long) cpu_addr, get_order(size));
-}
-
 static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
             dma_addr_t dev_addr, unsigned long offset, size_t size,
             enum dma_data_direction dir, unsigned long attrs) { }
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index b8808677ae1d..f9dd4cb6e4b3 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -299,8 +299,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
         * address. In fact on ARM virt_to_phys only works for kernel direct
         * mapped RAM memory. Also see comment below.
         */
-       ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);
-
+       ret = dma_direct_alloc(hwdev, size, dma_handle, flags, attrs);
        if (!ret)
                return ret;
 
@@ -319,7 +318,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
        else {
                if (xen_create_contiguous_region(phys, order,
                                                 fls64(dma_mask), dma_handle) != 0) {
-                       xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
+                       dma_direct_free(hwdev, size, ret, (dma_addr_t)phys, attrs);
                        return NULL;
                }
                SetPageXenRemapped(virt_to_page(ret));
@@ -351,7 +350,7 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
            TestClearPageXenRemapped(virt_to_page(vaddr)))
                xen_destroy_contiguous_region(phys, order);
 
-       xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
+       dma_direct_free(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
 }
 
 /*
diff --git a/include/xen/arm/page-coherent.h b/include/xen/arm/page-coherent.h
index da2cc09c8eda..4294a31305ca 100644
--- a/include/xen/arm/page-coherent.h
+++ b/include/xen/arm/page-coherent.h
@@ -16,18 +16,6 @@ void __xen_dma_sync_single_for_cpu(struct device *hwdev,
 void __xen_dma_sync_single_for_device(struct device *hwdev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir);
 
-static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
-               dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
-{
-       return dma_direct_alloc(hwdev, size, dma_handle, flags, attrs);
-}
-
-static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
-               void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
-{
-       dma_direct_free(hwdev, size, cpu_addr, dma_handle, attrs);
-}
-
 static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
-- 
2.20.1

