[PATCH 3/3] swiotlb: use the right nslabs-derived sizes in swiotlb_init_late

nslabs can shrink when allocations or the remap don't succeed, so make
sure to use it for all sizing.  To that end, remove the bytes value,
which can go stale, and replace it with local calculations and a
boolean that records whether the originally requested size could not
be allocated.

Fixes: 6424e31b1c05 ("swiotlb: remove swiotlb_init_with_tbl and swiotlb_init_late_with_tbl")
Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 kernel/dma/swiotlb.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)
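
A note for reviewers (not part of the commit message): the hazard this
fixes is a byte count derived from nslabs before the shrinking retry
loop, which keeps the originally requested size after the loop reduces
the order, while anything recomputed from nslabs tracks the actual
allocation.  A minimal userspace sketch of that pattern follows; the
constants are toy stand-ins chosen to mirror swiotlb's 2 KB slabs on
4K pages, not the kernel code itself:

	#include <stdio.h>

	#define IO_TLB_SHIFT	11	/* 2 KB slabs, as in swiotlb */
	#define SLABS_PER_PAGE	2	/* toy value for this demo */

	int main(void)
	{
		unsigned long order = 5;
		unsigned long nslabs = SLABS_PER_PAGE << order;
		unsigned long bytes = nslabs << IO_TLB_SHIFT; /* computed once */

		/* Simulate two failed allocations shrinking the order. */
		order -= 2;
		nslabs = SLABS_PER_PAGE << order;	/* kept in sync */

		printf("stale bytes:  %lu\n", bytes);			/* 131072 */
		printf("actual bytes: %lu\n", nslabs << IO_TLB_SHIFT);	/* 32768 */
		return 0;
	}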

diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 113e1e8aaca37..d6e62a6a42ceb 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -297,9 +297,9 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
 {
        struct io_tlb_mem *mem = &io_tlb_default_mem;
        unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
-       unsigned long bytes;
        unsigned char *vstart = NULL;
        unsigned int order;
+       bool retried = false;
        int rc = 0;
 
        if (swiotlb_force_disable)
@@ -308,7 +308,6 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
 retry:
        order = get_order(nslabs << IO_TLB_SHIFT);
        nslabs = SLABS_PER_PAGE << order;
-       bytes = nslabs << IO_TLB_SHIFT;
 
        while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
                vstart = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
@@ -316,16 +315,13 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
                if (vstart)
                        break;
                order--;
+               nslabs = SLABS_PER_PAGE << order;
+               retried = true;
        }
 
        if (!vstart)
                return -ENOMEM;
 
-       if (order != get_order(bytes)) {
-               pr_warn("only able to allocate %ld MB\n",
-                       (PAGE_SIZE << order) >> 20);
-               nslabs = SLABS_PER_PAGE << order;
-       }
        if (remap)
                rc = remap(vstart, nslabs);
        if (rc) {
@@ -334,9 +330,15 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
                nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
                if (nslabs < IO_TLB_MIN_SLABS)
                        return rc;
+               retried = true;
                goto retry;
        }
 
+       if (retried) {
+               pr_warn("only able to allocate %ld MB\n",
+                       (PAGE_SIZE << order) >> 20);
+       }
+
        mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                get_order(array_size(sizeof(*mem->slots), nslabs)));
        if (!mem->slots) {
@@ -344,7 +346,8 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
                return -ENOMEM;
        }
 
-       set_memory_decrypted((unsigned long)vstart, bytes >> PAGE_SHIFT);
+       set_memory_decrypted((unsigned long)vstart,
+                            (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
        swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, true);
 
        swiotlb_print_info();
-- 
2.30.2