
[Xen-devel] [PATCH 18/23] iommu/intel: small map_page cleanup



Pass the page + offset to the low-level __intel_map_single helper
(which gets renamed to __intel_map_page to fit the new calling
convention), as both callers already have the page at hand.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 drivers/iommu/intel-iommu.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
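
[Editorial sketch, not part of the patch.]  For readers skimming the
series, a minimal user-space sketch of the same pattern follows; the
demo_* names are made up for illustration and do not exist in the
kernel.  The point is simply that the page_to_phys(page) + offset
arithmetic moves out of both call sites and into the renamed helper.

/* Minimal user-space analogy of this refactor: the helper now takes
 * (page, offset) and does the address arithmetic itself, so neither
 * caller has to open-code it.  All names here are illustrative only. */
#include <stdint.h>
#include <stdio.h>

struct demo_page { uintptr_t phys; };          /* stand-in for struct page */

/* before: callers passed a precomputed physical address */
static uintptr_t demo_map_single(uintptr_t paddr, size_t size)
{
	printf("map %#lx (+%zu bytes)\n", (unsigned long)paddr, size);
	return paddr;
}

/* after: the helper takes page + offset, mirroring __intel_map_page() */
static uintptr_t demo_map_page(struct demo_page *page, unsigned long offset,
			       size_t size)
{
	uintptr_t paddr = page->phys + offset;  /* page_to_phys(page) + offset */
	return demo_map_single(paddr, size);    /* reuse the existing path */
}

int main(void)
{
	struct demo_page page = { .phys = 0x100000 };

	demo_map_page(&page, 0x80, 4096);       /* intel_map_page()-style call */
	demo_map_page(&page, 0, 4096);          /* intel_alloc_coherent()-style */
	return 0;
}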

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 41a4b8808802..66b4444398ae 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3597,9 +3597,11 @@ static int iommu_no_mapping(struct device *dev)
        return 0;
 }
 
-static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
-                                    size_t size, int dir, u64 dma_mask)
+static dma_addr_t __intel_map_page(struct device *dev, struct page *page,
+                                  unsigned long offset, size_t size, int dir,
+                                  u64 dma_mask)
 {
+       phys_addr_t paddr = page_to_phys(page) + offset;
        struct dmar_domain *domain;
        phys_addr_t start_paddr;
        unsigned long iova_pfn;
@@ -3661,8 +3663,7 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page,
                                 enum dma_data_direction dir,
                                 unsigned long attrs)
 {
-       return __intel_map_single(dev, page_to_phys(page) + offset, size,
-                                 dir, *dev->dma_mask);
+       return __intel_map_page(dev, page, offset, size, dir, *dev->dma_mask);
 }
 
 static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
@@ -3753,9 +3754,8 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
                return NULL;
        memset(page_address(page), 0, size);
 
-       *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
-                                        DMA_BIDIRECTIONAL,
-                                        dev->coherent_dma_mask);
+       *dma_handle = __intel_map_page(dev, page, 0, size, DMA_BIDIRECTIONAL,
+                                      dev->coherent_dma_mask);
        if (*dma_handle)
                return page_address(page);
        if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
-- 
2.19.1

