[PATCH 14/15] swiotlb: remove swiotlb_init_with_tbl and swiotlb_late_init_with_tbl



No users of swiotlb_init_with_tbl and swiotlb_late_init_with_tbl are left, so
remove them and fold the remaining logic into swiotlb_init_remap and
swiotlb_init_late.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 include/linux/swiotlb.h |  2 --
 kernel/dma/swiotlb.c    | 77 +++++++++++------------------------------
 2 files changed, 20 insertions(+), 59 deletions(-)
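
For reference, a minimal sketch of what a caller looks like after this change
(the swiotlb_init_remap(), swiotlb_init_late() and SWIOTLB_VERBOSE names come
from this series; the example_* names are made up): instead of allocating a
buffer and handing it to swiotlb_init_with_tbl(), an architecture passes a
remap callback and lets swiotlb allocate the buffer itself.  A negative return
from the callback makes swiotlb_init_remap() free the buffer and retry with a
smaller size, as the hunks below show; swiotlb_init_late() takes the same
callback for the late init path.

#include <linux/init.h>
#include <linux/swiotlb.h>

/* hypothetical arch hook, called on the freshly allocated bounce buffer */
static int __init example_swiotlb_remap(void *tlb, unsigned long nslabs)
{
	/* arch-specific fixup of the buffer would go here */
	return 0;	/* < 0: swiotlb_init_remap() frees the buffer and retries */
}

/* hypothetical arch init path */
void __init example_arch_mem_init(void)
{
	swiotlb_init_remap(true, SWIOTLB_VERBOSE, example_swiotlb_remap);
}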

diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 7b50c82f84ce9..7ed35dd3de6e7 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -34,13 +34,11 @@ struct scatterlist;
 /* default to 64MB */
 #define IO_TLB_DEFAULT_SIZE (64UL<<20)
 
-int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, unsigned int flags);
 unsigned long swiotlb_size_or_default(void);
 void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
        int (*remap)(void *tlb, unsigned long nslabs));
 int swiotlb_init_late(size_t size, gfp_t gfp_mask,
        int (*remap)(void *tlb, unsigned long nslabs));
-extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
 extern void __init swiotlb_update_mem_attributes(void);
 
 phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index d5fe8f5e08300..c54fc40ebb493 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -225,33 +225,6 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
        return;
 }
 
-int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs,
-               unsigned int flags)
-{
-       struct io_tlb_mem *mem = &io_tlb_default_mem;
-       size_t alloc_size;
-
-       if (swiotlb_force_disable)
-               return 0;
-
-       /* protect against double initialization */
-       if (WARN_ON_ONCE(mem->nslabs))
-               return -ENOMEM;
-
-       alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
-       mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
-       if (!mem->slots)
-               panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
-                     __func__, alloc_size, PAGE_SIZE);
-
-       swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false);
-       mem->force_bounce = flags & SWIOTLB_FORCE;
-
-       if (flags & SWIOTLB_VERBOSE)
-               swiotlb_print_info();
-       return 0;
-}
-
 /*
  * Statically reserve bounce buffer space and initialize bounce buffer data
  * structures for the software IO TLB used to implement the DMA API.
@@ -259,7 +232,9 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs,
 void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
                int (*remap)(void *tlb, unsigned long nslabs))
 {
+       struct io_tlb_mem *mem = &io_tlb_default_mem;
        unsigned long nslabs = default_nslabs;
+       size_t alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
        size_t bytes;
        void *tlb;
 
@@ -280,7 +255,8 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
        else
                tlb = memblock_alloc_low(bytes, PAGE_SIZE);
        if (!tlb)
-               goto fail;
+               panic("%s: failed to allocate tlb structure\n", __func__);
+
        if (remap && remap(tlb, nslabs) < 0) {
                memblock_free(tlb, PAGE_ALIGN(bytes));
 
@@ -290,14 +266,17 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
                              __func__, bytes);
                goto retry;
        }
-       if (swiotlb_init_with_tbl(tlb, default_nslabs, flags))
-               goto fail_free_mem;
-       return;
 
-fail_free_mem:
-       memblock_free(tlb, bytes);
-fail:
-       pr_warn("Cannot allocate buffer");
+       mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
+       if (!mem->slots)
+               panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
+                     __func__, alloc_size, PAGE_SIZE);
+
+       swiotlb_init_io_tlb_mem(mem, __pa(tlb), default_nslabs, false);
+       mem->force_bounce = flags & SWIOTLB_FORCE;
+
+       if (flags & SWIOTLB_VERBOSE)
+               swiotlb_print_info();
 }
 
 void __init swiotlb_init(bool addressing_limit, unsigned int flags)
@@ -313,6 +292,7 @@ void __init swiotlb_init(bool addressing_limit, unsigned int flags)
 int swiotlb_init_late(size_t size, gfp_t gfp_mask,
                int (*remap)(void *tlb, unsigned long nslabs))
 {
+       struct io_tlb_mem *mem = &io_tlb_default_mem;
        unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
        unsigned long bytes;
        unsigned char *vstart = NULL;
@@ -353,33 +333,16 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
                        return rc;
                goto retry;
        }
-       rc = swiotlb_late_init_with_tbl(vstart, nslabs);
-       if (rc)
-               free_pages((unsigned long)vstart, order);
-
-       return rc;
-}
-
-int
-swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
-{
-       struct io_tlb_mem *mem = &io_tlb_default_mem;
-       unsigned long bytes = nslabs << IO_TLB_SHIFT;
-
-       if (swiotlb_force_disable)
-               return 0;
-
-       /* protect against double initialization */
-       if (WARN_ON_ONCE(mem->nslabs))
-               return -ENOMEM;
 
        mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                get_order(array_size(sizeof(*mem->slots), nslabs)));
-       if (!mem->slots)
+       if (!mem->slots) {
+               free_pages((unsigned long)vstart, order);
                return -ENOMEM;
+       }
 
-       set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
-       swiotlb_init_io_tlb_mem(mem, virt_to_phys(tlb), nslabs, true);
+       set_memory_decrypted((unsigned long)vstart, bytes >> PAGE_SHIFT);
+       swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, true);
 
        swiotlb_print_info();
        return 0;
-- 
2.30.2