
[Xen-devel] [PATCH 10/15] [swiotlb] Replace the [phys, bus]->virt and virt->[bus, phys] functions with iommu_sw calls.



We route all of the address translation calls through the iommu_sw
functions instead of calling swiotlb_virt_to_bus(), phys_to_dma() and
dma_to_phys() directly.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
---
 lib/swiotlb.c |   26 +++++++++++++-------------
 1 files changed, 13 insertions(+), 13 deletions(-)
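For context, the iommu_sw calls used below assume an operations structure
along the following lines. This is only a sketch reconstructed from the call
sites in this patch; the struct layout and member ordering here are
hypothetical, and the real definition is introduced earlier in the series.

/*
 * Hypothetical sketch only -- member names inferred from the call sites
 * in this patch, not the actual definition from the series.
 */
#include <linux/types.h>	/* dma_addr_t, phys_addr_t, size_t */
#include <linux/device.h>	/* struct device */

struct iommu_sw {
	void	*start;			/* start of the bounce-buffer pool */
	void	*end;			/* one past the end of the pool */
	void	*overflow_buffer;	/* fallback buffer when the pool is exhausted */

	dma_addr_t	(*virt_to_bus)(struct device *hwdev, void *addr);
	dma_addr_t	(*phys_to_bus)(struct device *hwdev, phys_addr_t paddr);
	phys_addr_t	(*bus_to_phys)(struct device *hwdev, dma_addr_t dev_addr);
	int		(*is_swiotlb_buffer)(struct iommu_sw *iommu,
					     dma_addr_t dev_addr,
					     phys_addr_t paddr);
	int		(*dma_capable)(struct device *hwdev, dma_addr_t dev_addr,
				       phys_addr_t paddr, size_t size);
};

extern struct iommu_sw *iommu_sw;

Keeping the translations behind function pointers presumably lets a backend
such as Xen override only the address translation while reusing the generic
bounce-buffer logic in lib/swiotlb.c.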

diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 4da8151..075b56c 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -127,7 +127,7 @@ __setup("swiotlb=", setup_io_tlb_npages);
 
 /* Note that this doesn't work with highmem page */
 static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
-                                     volatile void *address)
+                                     void *address)
 {
        return phys_to_dma(hwdev, virt_to_phys(address));
 }
@@ -461,7 +461,7 @@ map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
        unsigned long max_slots;
 
        mask = dma_get_seg_boundary(hwdev);
-       start_dma_addr = swiotlb_virt_to_bus(hwdev, iommu_sw->start) & mask;
+       start_dma_addr = iommu_sw->virt_to_bus(hwdev, iommu_sw->start) & mask;
 
        offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 
@@ -636,7 +636,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                dma_mask = hwdev->coherent_dma_mask;
 
        ret = (void *)__get_free_pages(flags, order);
-       if (ret && swiotlb_virt_to_bus(hwdev, ret) + size - 1 > dma_mask) {
+       if (ret && iommu_sw->virt_to_bus(hwdev, ret) + size - 1 > dma_mask) {
                /*
                 * The allocated memory isn't reachable by the device.
                 */
@@ -655,7 +655,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
        }
 
        memset(ret, 0, size);
-       dev_addr = swiotlb_virt_to_bus(hwdev, ret);
+       dev_addr = iommu_sw->virt_to_bus(hwdev, ret);
 
        /* Confirm address can be DMA'd by device */
        if (dev_addr + size - 1 > dma_mask) {
@@ -676,7 +676,7 @@ void
 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
                      dma_addr_t dev_addr)
 {
-       phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
+       phys_addr_t paddr = iommu_sw->bus_to_phys(hwdev, dev_addr);
 
        WARN_ON(irqs_disabled());
        if (!iommu_sw->is_swiotlb_buffer(iommu_sw, dev_addr, paddr))
@@ -724,7 +724,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
                            struct dma_attrs *attrs)
 {
        phys_addr_t phys = page_to_phys(page) + offset;
-       dma_addr_t dev_addr = phys_to_dma(dev, phys);
+       dma_addr_t dev_addr = iommu_sw->phys_to_bus(dev, phys);
        void *map;
 
        BUG_ON(dir == DMA_NONE);
@@ -745,7 +745,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
                map = iommu_sw->overflow_buffer;
        }
 
-       dev_addr = swiotlb_virt_to_bus(dev, map);
+       dev_addr = iommu_sw->virt_to_bus(dev, map);
 
        /*
         * Ensure that the address returned is DMA'ble
@@ -768,7 +768,7 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page);
 static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
                         size_t size, int dir)
 {
-       phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
+       phys_addr_t paddr = iommu_sw->bus_to_phys(hwdev, dev_addr);
 
        BUG_ON(dir == DMA_NONE);
 
@@ -811,7 +811,7 @@ static void
 swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
                    size_t size, int dir, int target)
 {
-       phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
+       phys_addr_t paddr = iommu_sw->bus_to_phys(hwdev, dev_addr);
 
        BUG_ON(dir == DMA_NONE);
 
@@ -900,7 +900,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 
        for_each_sg(sgl, sg, nelems, i) {
                phys_addr_t paddr = sg_phys(sg);
-               dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);
+               dma_addr_t dev_addr = iommu_sw->phys_to_bus(hwdev, paddr);
 
                if (swiotlb_force ||
                    !iommu_sw->dma_capable(hwdev, dev_addr, paddr, sg->length)) {
@@ -915,7 +915,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
                                sgl[0].dma_length = 0;
                                return 0;
                        }
-                       sg->dma_address = swiotlb_virt_to_bus(hwdev, map);
+                       sg->dma_address = iommu_sw->virt_to_bus(hwdev, map);
                } else
                        sg->dma_address = dev_addr;
                sg->dma_length = sg->length;
@@ -997,7 +997,7 @@ EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
 int
 swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
 {
-       return (dma_addr == swiotlb_virt_to_bus(hwdev,
+       return (dma_addr == iommu_sw->virt_to_bus(hwdev,
                                                iommu_sw->overflow_buffer));
 }
 EXPORT_SYMBOL(swiotlb_dma_mapping_error);
@@ -1011,6 +1011,6 @@ EXPORT_SYMBOL(swiotlb_dma_mapping_error);
 int
 swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
-       return swiotlb_virt_to_bus(hwdev, iommu_sw->end - 1) <= mask;
+       return iommu_sw->virt_to_bus(hwdev, iommu_sw->end - 1) <= mask;
 }
 EXPORT_SYMBOL(swiotlb_dma_supported);
-- 
1.6.2.5

