
[Xen-devel] [PATCH 04/15] [swiotlb] Search and replace s/io_tlb/iommu_sw->/



Convert all of the io_tlb_* global variables into fields of the
swiotlb engine structure referenced through the iommu_sw pointer.
We also fix the checkpatch.pl errors (mostly lines over 80
characters) that surfaced during this conversion.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
---
 lib/swiotlb.c |  204 +++++++++++++++++++++++++++++----------------------------
 1 files changed, 104 insertions(+), 100 deletions(-)
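
Note: the patch below only dereferences iommu_sw; the engine structure
itself is defined elsewhere in this series. As a reading aid, here is a
minimal sketch of the structure implied by the fields this patch uses,
with types inferred from their uses and from the io_tlb_* globals they
replace. This is an assumption, not the authoritative definition:

	/* Hypothetical sketch, reconstructed from usage in this patch only. */
	struct swiotlb_engine {
		unsigned long nslabs;	  /* slab count; bytes = nslabs << IO_TLB_SHIFT */
		char *start;		  /* start of the bounce-buffer aperture */
		char *end;		  /* start + (nslabs << IO_TLB_SHIFT) */
		unsigned int *list;	  /* contiguous free slabs starting at each slot */
		unsigned int index;	  /* slot where the next search begins */
		phys_addr_t *orig_addr;	  /* bounced-from physical address, per slab */
		unsigned long overflow;	  /* emergency buffer size, in bytes */
		void *overflow_buffer;	  /* emergency bounce buffer */
	};

The swiotlb_register_engine() symbol visible in the first hunk's context
suggests an engine registers itself with the swiotlb core, which then
publishes it through the iommu_sw pointer dereferenced throughout.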

diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index e84f269..3499001 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -176,14 +176,14 @@ EXPORT_SYMBOL(swiotlb_register_engine);
 
 void swiotlb_print_info(void)
 {
-       unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
+       unsigned long bytes = iommu_sw->nslabs << IO_TLB_SHIFT;
        phys_addr_t pstart, pend;
 
-       pstart = virt_to_phys(io_tlb_start);
-       pend = virt_to_phys(io_tlb_end);
+       pstart = virt_to_phys(iommu_sw->start);
+       pend = virt_to_phys(iommu_sw->end);
 
        printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n",
-              bytes >> 20, io_tlb_start, io_tlb_end);
+              bytes >> 20, iommu_sw->start, iommu_sw->end);
        printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n",
               (unsigned long long)pstart,
               (unsigned long long)pend);
@@ -198,37 +198,38 @@ swiotlb_init_with_default_size(size_t default_size, int verbose)
 {
        unsigned long i, bytes;
 
-       if (!io_tlb_nslabs) {
-               io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
-               io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+       if (!iommu_sw->nslabs) {
+               iommu_sw->nslabs = (default_size >> IO_TLB_SHIFT);
+               iommu_sw->nslabs = ALIGN(iommu_sw->nslabs, IO_TLB_SEGSIZE);
        }
 
-       bytes = io_tlb_nslabs << IO_TLB_SHIFT;
+       bytes = iommu_sw->nslabs << IO_TLB_SHIFT;
 
        /*
         * Get IO TLB memory from the low pages
         */
-       io_tlb_start = alloc_bootmem_low_pages(bytes);
-       if (!io_tlb_start)
+       iommu_sw->start = alloc_bootmem_low_pages(bytes);
+       if (!iommu_sw->start)
                panic("Cannot allocate SWIOTLB buffer");
-       io_tlb_end = io_tlb_start + bytes;
+       iommu_sw->end = iommu_sw->start + bytes;
 
        /*
         * Allocate and initialize the free list array.  This array is used
         * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
-        * between io_tlb_start and io_tlb_end.
+        * between iommu_sw->start and iommu_sw->end.
         */
-       io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
-       for (i = 0; i < io_tlb_nslabs; i++)
-               io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
-       io_tlb_index = 0;
-       io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t));
+       iommu_sw->list = alloc_bootmem(iommu_sw->nslabs * sizeof(int));
+       for (i = 0; i < iommu_sw->nslabs; i++)
+               iommu_sw->list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+       iommu_sw->index = 0;
+       iommu_sw->orig_addr = alloc_bootmem(iommu_sw->nslabs *
+                                           sizeof(phys_addr_t));
 
        /*
         * Get the overflow emergency buffer
         */
-       io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
-       if (!io_tlb_overflow_buffer)
+       iommu_sw->overflow_buffer = alloc_bootmem_low(iommu_sw->overflow);
+       if (!iommu_sw->overflow_buffer)
                panic("Cannot allocate SWIOTLB overflow buffer!\n");
        if (verbose)
                swiotlb_print_info();
@@ -248,70 +249,70 @@ swiotlb_init(int verbose)
 int
 swiotlb_late_init_with_default_size(size_t default_size)
 {
-       unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
+       unsigned long i, bytes, req_nslabs = iommu_sw->nslabs;
        unsigned int order;
 
-       if (!io_tlb_nslabs) {
-               io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
-               io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+       if (!iommu_sw->nslabs) {
+               iommu_sw->nslabs = (default_size >> IO_TLB_SHIFT);
+               iommu_sw->nslabs = ALIGN(iommu_sw->nslabs, IO_TLB_SEGSIZE);
        }
 
        /*
         * Get IO TLB memory from the low pages
         */
-       order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
-       io_tlb_nslabs = SLABS_PER_PAGE << order;
-       bytes = io_tlb_nslabs << IO_TLB_SHIFT;
+       order = get_order(iommu_sw->nslabs << IO_TLB_SHIFT);
+       iommu_sw->nslabs = SLABS_PER_PAGE << order;
+       bytes = iommu_sw->nslabs << IO_TLB_SHIFT;
 
        while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
-               io_tlb_start = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
-                                                       order);
-               if (io_tlb_start)
+               iommu_sw->start = (void *)__get_free_pages(GFP_DMA |
+                                                       __GFP_NOWARN, order);
+               if (iommu_sw->start)
                        break;
                order--;
        }
 
-       if (!io_tlb_start)
+       if (!iommu_sw->start)
                goto cleanup1;
 
        if (order != get_order(bytes)) {
                printk(KERN_WARNING "Warning: only able to allocate %ld MB "
                       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
-               io_tlb_nslabs = SLABS_PER_PAGE << order;
-               bytes = io_tlb_nslabs << IO_TLB_SHIFT;
+               iommu_sw->nslabs = SLABS_PER_PAGE << order;
+               bytes = iommu_sw->nslabs << IO_TLB_SHIFT;
        }
-       io_tlb_end = io_tlb_start + bytes;
-       memset(io_tlb_start, 0, bytes);
+       iommu_sw->end = iommu_sw->start + bytes;
+       memset(iommu_sw->start, 0, bytes);
 
        /*
         * Allocate and initialize the free list array.  This array is used
         * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
-        * between io_tlb_start and io_tlb_end.
+        * between iommu_sw->start and iommu_sw->end.
         */
-       io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
-                                     get_order(io_tlb_nslabs * sizeof(int)));
-       if (!io_tlb_list)
+       iommu_sw->list = (unsigned int *)__get_free_pages(GFP_KERNEL,
+                               get_order(iommu_sw->nslabs * sizeof(int)));
+       if (!iommu_sw->list)
                goto cleanup2;
 
-       for (i = 0; i < io_tlb_nslabs; i++)
-               io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
-       io_tlb_index = 0;
+       for (i = 0; i < iommu_sw->nslabs; i++)
+               iommu_sw->list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+       iommu_sw->index = 0;
 
-       io_tlb_orig_addr = (phys_addr_t *)
+       iommu_sw->orig_addr = (phys_addr_t *)
                __get_free_pages(GFP_KERNEL,
-                                get_order(io_tlb_nslabs *
+                                get_order(iommu_sw->nslabs *
                                           sizeof(phys_addr_t)));
-       if (!io_tlb_orig_addr)
+       if (!iommu_sw->orig_addr)
                goto cleanup3;
 
-       memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));
+       memset(iommu_sw->orig_addr, 0, iommu_sw->nslabs * sizeof(phys_addr_t));
 
        /*
         * Get the overflow emergency buffer
         */
-       io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
-                                                 get_order(io_tlb_overflow));
-       if (!io_tlb_overflow_buffer)
+       iommu_sw->overflow_buffer = (void *)__get_free_pages(GFP_DMA,
+                                               get_order(iommu_sw->overflow));
+       if (!iommu_sw->overflow_buffer)
                goto cleanup4;
 
        swiotlb_print_info();
@@ -321,52 +322,52 @@ swiotlb_late_init_with_default_size(size_t default_size)
        return 0;
 
 cleanup4:
-       free_pages((unsigned long)io_tlb_orig_addr,
-                  get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
-       io_tlb_orig_addr = NULL;
+       free_pages((unsigned long)iommu_sw->orig_addr,
+                  get_order(iommu_sw->nslabs * sizeof(phys_addr_t)));
+       iommu_sw->orig_addr = NULL;
 cleanup3:
-       free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
+       free_pages((unsigned long)iommu_sw->list, get_order(iommu_sw->nslabs *
                                                         sizeof(int)));
-       io_tlb_list = NULL;
+       iommu_sw->list = NULL;
 cleanup2:
-       io_tlb_end = NULL;
-       free_pages((unsigned long)io_tlb_start, order);
-       io_tlb_start = NULL;
+       iommu_sw->end = NULL;
+       free_pages((unsigned long)iommu_sw->start, order);
+       iommu_sw->start = NULL;
 cleanup1:
-       io_tlb_nslabs = req_nslabs;
+       iommu_sw->nslabs = req_nslabs;
        return -ENOMEM;
 }
 
 void __init swiotlb_free(void)
 {
-       if (!io_tlb_overflow_buffer)
+       if (!iommu_sw->overflow_buffer)
                return;
 
        if (late_alloc) {
-               free_pages((unsigned long)io_tlb_overflow_buffer,
-                          get_order(io_tlb_overflow));
-               free_pages((unsigned long)io_tlb_orig_addr,
-                          get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
-               free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
-                                                                sizeof(int)));
-               free_pages((unsigned long)io_tlb_start,
-                          get_order(io_tlb_nslabs << IO_TLB_SHIFT));
+               free_pages((unsigned long)iommu_sw->overflow_buffer,
+                          get_order(iommu_sw->overflow));
+               free_pages((unsigned long)iommu_sw->orig_addr,
+                          get_order(iommu_sw->nslabs * sizeof(phys_addr_t)));
+               free_pages((unsigned long)iommu_sw->list,
+                          get_order(iommu_sw->nslabs * sizeof(int)));
+               free_pages((unsigned long)iommu_sw->start,
+                          get_order(iommu_sw->nslabs << IO_TLB_SHIFT));
        } else {
-               free_bootmem_late(__pa(io_tlb_overflow_buffer),
-                                 io_tlb_overflow);
-               free_bootmem_late(__pa(io_tlb_orig_addr),
-                                 io_tlb_nslabs * sizeof(phys_addr_t));
-               free_bootmem_late(__pa(io_tlb_list),
-                                 io_tlb_nslabs * sizeof(int));
-               free_bootmem_late(__pa(io_tlb_start),
-                                 io_tlb_nslabs << IO_TLB_SHIFT);
+               free_bootmem_late(__pa(iommu_sw->overflow_buffer),
+                                 iommu_sw->overflow);
+               free_bootmem_late(__pa(iommu_sw->orig_addr),
+                                 iommu_sw->nslabs * sizeof(phys_addr_t));
+               free_bootmem_late(__pa(iommu_sw->list),
+                                 iommu_sw->nslabs * sizeof(int));
+               free_bootmem_late(__pa(iommu_sw->start),
+                                 iommu_sw->nslabs << IO_TLB_SHIFT);
        }
 }
 
 static int is_swiotlb_buffer(phys_addr_t paddr)
 {
-       return paddr >= virt_to_phys(io_tlb_start) &&
-               paddr < virt_to_phys(io_tlb_end);
+       return paddr >= virt_to_phys(iommu_sw->start) &&
+               paddr < virt_to_phys(iommu_sw->end);
 }
 
 /*
@@ -426,7 +427,7 @@ map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
        unsigned long max_slots;
 
        mask = dma_get_seg_boundary(hwdev);
-       start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask;
+       start_dma_addr = swiotlb_virt_to_bus(hwdev, iommu_sw->start) & mask;
 
        offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 
@@ -454,8 +455,8 @@ map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
         * request and allocate a buffer from that IO TLB pool.
         */
        spin_lock_irqsave(&io_tlb_lock, flags);
-       index = ALIGN(io_tlb_index, stride);
-       if (index >= io_tlb_nslabs)
+       index = ALIGN(iommu_sw->index, stride);
+       if (index >= iommu_sw->nslabs)
                index = 0;
        wrap = index;
 
@@ -463,7 +464,7 @@ map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
                while (iommu_is_span_boundary(index, nslots, offset_slots,
                                              max_slots)) {
                        index += stride;
-                       if (index >= io_tlb_nslabs)
+                       if (index >= iommu_sw->nslabs)
                                index = 0;
                        if (index == wrap)
                                goto not_found;
@@ -474,26 +475,27 @@ map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
                 * contiguous buffers, we allocate the buffers from that slot
                 * and mark the entries as '0' indicating unavailable.
                 */
-               if (io_tlb_list[index] >= nslots) {
+               if (iommu_sw->list[index] >= nslots) {
                        int count = 0;
 
                        for (i = index; i < (int) (index + nslots); i++)
-                               io_tlb_list[i] = 0;
-                       for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
-                               io_tlb_list[i] = ++count;
-                       dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
+                               iommu_sw->list[i] = 0;
+                       for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) !=
+                               IO_TLB_SEGSIZE - 1) && iommu_sw->list[i]; i--)
+                               iommu_sw->list[i] = ++count;
+                       dma_addr = iommu_sw->start + (index << IO_TLB_SHIFT);
 
                        /*
                         * Update the indices to avoid searching in the next
                         * round.
                         */
-                       io_tlb_index = ((index + nslots) < io_tlb_nslabs
+                       iommu_sw->index = ((index + nslots) < iommu_sw->nslabs
                                        ? (index + nslots) : 0);
 
                        goto found;
                }
                index += stride;
-               if (index >= io_tlb_nslabs)
+               if (index >= iommu_sw->nslabs)
                        index = 0;
        } while (index != wrap);
 
@@ -509,7 +511,7 @@ found:
         * needed.
         */
        for (i = 0; i < nslots; i++)
-               io_tlb_orig_addr[index+i] = phys + (i << IO_TLB_SHIFT);
+               iommu_sw->orig_addr[index+i] = phys + (i << IO_TLB_SHIFT);
        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
                swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
 
@@ -524,8 +526,8 @@ do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
 {
        unsigned long flags;
        int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-       int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
-       phys_addr_t phys = io_tlb_orig_addr[index];
+       int index = (dma_addr - iommu_sw->start) >> IO_TLB_SHIFT;
+       phys_addr_t phys = iommu_sw->orig_addr[index];
 
        /*
         * First, sync the memory before unmapping the entry
@@ -542,19 +544,20 @@ do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
        spin_lock_irqsave(&io_tlb_lock, flags);
        {
                count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
-                        io_tlb_list[index + nslots] : 0);
+                        iommu_sw->list[index + nslots] : 0);
                /*
                 * Step 1: return the slots to the free list, merging the
                 * slots with superceeding slots
                 */
                for (i = index + nslots - 1; i >= index; i--)
-                       io_tlb_list[i] = ++count;
+                       iommu_sw->list[i] = ++count;
                /*
                 * Step 2: merge the returned slots with the preceding slots,
                 * if available (non zero)
                 */
-               for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--)
-                       io_tlb_list[i] = ++count;
+               for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) !=
+                               IO_TLB_SEGSIZE - 1) && iommu_sw->list[i]; i--)
+                       iommu_sw->list[i] = ++count;
        }
        spin_unlock_irqrestore(&io_tlb_lock, flags);
 }
@@ -563,8 +566,8 @@ static void
 sync_single(struct device *hwdev, char *dma_addr, size_t size,
            int dir, int target)
 {
-       int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
-       phys_addr_t phys = io_tlb_orig_addr[index];
+       int index = (dma_addr - iommu_sw->start) >> IO_TLB_SHIFT;
+       phys_addr_t phys = iommu_sw->orig_addr[index];
 
        phys += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));
 
@@ -663,7 +666,7 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
        printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
               "device %s\n", size, dev ? dev_name(dev) : "?");
 
-       if (size <= io_tlb_overflow || !do_panic)
+       if (size <= iommu_sw->overflow || !do_panic)
                return;
 
        if (dir == DMA_BIDIRECTIONAL)
@@ -705,7 +708,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
        map = map_single(dev, phys, size, dir);
        if (!map) {
                swiotlb_full(dev, size, dir, 1);
-               map = io_tlb_overflow_buffer;
+               map = iommu_sw->overflow_buffer;
        }
 
        dev_addr = swiotlb_virt_to_bus(dev, map);
@@ -960,7 +963,8 @@ EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
 int
 swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
 {
-       return (dma_addr == swiotlb_virt_to_bus(hwdev, io_tlb_overflow_buffer));
+       return (dma_addr == swiotlb_virt_to_bus(hwdev,
+                                               iommu_sw->overflow_buffer));
 }
 EXPORT_SYMBOL(swiotlb_dma_mapping_error);
 
@@ -973,6 +977,6 @@ EXPORT_SYMBOL(swiotlb_dma_mapping_error);
 int
 swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
-       return swiotlb_virt_to_bus(hwdev, io_tlb_end - 1) <= mask;
+       return swiotlb_virt_to_bus(hwdev, iommu_sw->end - 1) <= mask;
 }
 EXPORT_SYMBOL(swiotlb_dma_supported);
-- 
1.6.2.5

