[PATCH 2/2] swiotlb-xen: override common mmap and get_sgtable dma ops

This commit fixes an incorrect conversion from cpu_addr to a page
address in cases where the virtual address was allocated through
xen_swiotlb_alloc_coherent() and may be mapped in the vmalloc range.
As a result, virt_to_page() cannot convert such an address properly
and returns a wrong page address.

We need to detect such cases and obtain the page address using
vmalloc_to_page() instead.
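
In code form the detection boils down to the following (a minimal
sketch; it mirrors the cpu_addr_to_page() helper added by the diff
below):

        /*
         * Addresses in the vmalloc range are not covered by the linear
         * mapping, so virt_to_page() would compute a bogus struct page
         * pointer for them; vmalloc_to_page() walks the page tables
         * instead. is_vmalloc_addr() comes from <linux/mm.h>,
         * vmalloc_to_page() from <linux/vmalloc.h>.
         */
        static struct page *cpu_addr_to_page(void *cpu_addr)
        {
                if (is_vmalloc_addr(cpu_addr))
                        return vmalloc_to_page(cpu_addr);
                return virt_to_page(cpu_addr);
        }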

The reference code for mmap() and get_sgtable() was copied from
kernel/dma/ops_helpers.c and modified to add the detection described
above.
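
For reference, the unmodified get_sgtable() helper in
kernel/dma/ops_helpers.c applies virt_to_page() directly to cpu_addr,
which is exactly what goes wrong for vmalloc addresses (simplified
sketch, not a verbatim copy of the upstream source):

        int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
                        void *cpu_addr, dma_addr_t dma_addr, size_t size,
                        unsigned long attrs)
        {
                /* Breaks when cpu_addr lives in the vmalloc area. */
                struct page *page = virt_to_page(cpu_addr);
                int ret;

                ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
                if (!ret)
                        sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
                return ret;
        }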

To simplify the code, a new cpu_addr_to_page() helper was added.

Signed-off-by: Roman Skakun <roman_skakun@xxxxxxxx>
Reviewed-by: Andrii Anisov <andrii_anisov@xxxxxxxx>
---
 drivers/xen/swiotlb-xen.c | 42 +++++++++++++++++++++++++++++++--------
 1 file changed, 34 insertions(+), 8 deletions(-)

diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 90bc5fc321bc..9331a8500547 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -118,6 +118,14 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
        return 0;
 }
 
+static struct page *cpu_addr_to_page(void *cpu_addr)
+{
+       if (is_vmalloc_addr(cpu_addr))
+               return vmalloc_to_page(cpu_addr);
+       else
+               return virt_to_page(cpu_addr);
+}
+
 static int
 xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
 {
@@ -337,7 +345,7 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
        int order = get_order(size);
        phys_addr_t phys;
        u64 dma_mask = DMA_BIT_MASK(32);
-       struct page *page;
+       struct page *page = cpu_addr_to_page(vaddr);
 
        if (hwdev && hwdev->coherent_dma_mask)
                dma_mask = hwdev->coherent_dma_mask;
@@ -349,11 +357,6 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
        /* Convert the size to actually allocated. */
        size = 1UL << (order + XEN_PAGE_SHIFT);
 
-       if (is_vmalloc_addr(vaddr))
-               page = vmalloc_to_page(vaddr);
-       else
-               page = virt_to_page(vaddr);
-
        if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
                     range_straddles_page_boundary(phys, size)) &&
            TestClearPageXenRemapped(page))
@@ -573,7 +576,23 @@ xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                     void *cpu_addr, dma_addr_t dma_addr, size_t size,
                     unsigned long attrs)
 {
-       return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+       unsigned long user_count = vma_pages(vma);
+       unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       unsigned long off = vma->vm_pgoff;
+       struct page *page = cpu_addr_to_page(cpu_addr);
+       int ret;
+
+       vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
+
+       if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
+               return ret;
+
+       if (off >= count || user_count > count - off)
+               return -ENXIO;
+
+       return remap_pfn_range(vma, vma->vm_start,
+                       page_to_pfn(page) + vma->vm_pgoff,
+                       user_count << PAGE_SHIFT, vma->vm_page_prot);
 }
 
 /*
@@ -585,7 +604,14 @@ xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
                        void *cpu_addr, dma_addr_t handle, size_t size,
                        unsigned long attrs)
 {
-       return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size, attrs);
+       struct page *page = cpu_addr_to_page(cpu_addr);
+       int ret;
+
+       ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+       if (!ret)
+               sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+
+       return ret;
 }
 
 const struct dma_map_ops xen_swiotlb_dma_ops = {
-- 
2.25.1