[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH 03/14] swiotlb: move orig addr and size validation into swiotlb_bounce



Move the code to find and validate the original buffer address and size
from the callers into swiotlb_bounce.  This means a tiny bit of extra
work in the swiotlb_map path, but avoids code duplication and leads to
a better code structure.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 kernel/dma/swiotlb.c | 59 +++++++++++++++++---------------------------
 1 file changed, 23 insertions(+), 36 deletions(-)

diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 03aa614565e417..a9063092f6f566 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -460,12 +460,25 @@ void __init swiotlb_exit(void)
 /*
  * Bounce: copy the swiotlb buffer from or back to the original dma location
  */
-static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
-                          size_t size, enum dma_data_direction dir)
+static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size,
+               enum dma_data_direction dir)
 {
+       int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
+       size_t alloc_size = io_tlb_alloc_size[index];
+       phys_addr_t orig_addr = io_tlb_orig_addr[index];
        unsigned long pfn = PFN_DOWN(orig_addr);
        unsigned char *vaddr = phys_to_virt(tlb_addr);
 
+       if (orig_addr == INVALID_PHYS_ADDR)
+               return;
+
+       if (size > alloc_size) {
+               dev_WARN_ONCE(dev, 1,
+                       "Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n",
+                       alloc_size, size);
+               size = alloc_size;
+       }
+
        if (PageHighMem(pfn_to_page(pfn))) {
                /* The buffer does not have a mapping.  Map it in and copy */
                unsigned int offset = orig_addr & ~PAGE_MASK;
@@ -644,21 +657,10 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
        tlb_addr = slot_addr(io_tlb_start, index) + offset;
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
            (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
-               swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_TO_DEVICE);
+               swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE);
        return tlb_addr;
 }
 
-static void validate_sync_size_and_truncate(struct device *hwdev, size_t alloc_size, size_t *size)
-{
-       if (*size > alloc_size) {
-               /* Warn and truncate mapping_size */
-               dev_WARN_ONCE(hwdev, 1,
-                       "Attempt for buffer overflow. Original size: %zu. Mapping size: %zu.\n",
-                       alloc_size, *size);
-               *size = alloc_size;
-       }
-}
-
 /*
  * tlb_addr is the physical address of the bounce buffer to unmap.
  */
@@ -669,19 +671,15 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
        unsigned long flags;
        unsigned int offset = swiotlb_align_offset(hwdev, tlb_addr);
        int index = (tlb_addr - offset - io_tlb_start) >> IO_TLB_SHIFT;
-       phys_addr_t orig_addr = io_tlb_orig_addr[index];
-       size_t alloc_size = io_tlb_alloc_size[index];
-       int i, count, nslots = nr_slots(alloc_size + offset);
-
-       validate_sync_size_and_truncate(hwdev, alloc_size, &mapping_size);
+       int nslots = nr_slots(io_tlb_alloc_size[index] + offset);
+       int count, i;
 
        /*
         * First, sync the memory before unmapping the entry
         */
-       if (orig_addr != INVALID_PHYS_ADDR &&
-           !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
-           ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
-               swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_FROM_DEVICE);
+       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+           (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
+               swiotlb_bounce(hwdev, tlb_addr, mapping_size, DMA_FROM_DEVICE);
 
        /*
         * Return the buffer to the free list by setting the corresponding
@@ -721,27 +719,16 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
                             size_t size, enum dma_data_direction dir,
                             enum dma_sync_target target)
 {
-       int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
-       size_t alloc_size = io_tlb_alloc_size[index];
-       phys_addr_t orig_addr = io_tlb_orig_addr[index];
-
-       if (orig_addr == INVALID_PHYS_ADDR)
-               return;
-
-       validate_sync_size_and_truncate(hwdev, alloc_size, &size);
-
        switch (target) {
        case SYNC_FOR_CPU:
                if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
-                       swiotlb_bounce(orig_addr, tlb_addr,
-                                      size, DMA_FROM_DEVICE);
+                       swiotlb_bounce(hwdev, tlb_addr, size, DMA_FROM_DEVICE);
                else
                        BUG_ON(dir != DMA_TO_DEVICE);
                break;
        case SYNC_FOR_DEVICE:
                if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
-                       swiotlb_bounce(orig_addr, tlb_addr,
-                                      size, DMA_TO_DEVICE);
+                       swiotlb_bounce(hwdev, tlb_addr, size, DMA_TO_DEVICE);
                else
                        BUG_ON(dir != DMA_FROM_DEVICE);
                break;
-- 
2.29.2




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.