
[PATCH] swiotlb-xen: override common mmap and get_sgtable dma ops



This commit fixes an incorrect conversion from cpu_addr to a
page address. A virtual address allocated through
xen_swiotlb_alloc_coherent() may lie in the vmalloc range,
and virt_to_page() cannot convert such an address properly,
returning a wrong page address.

Detect such cases and obtain the page address with
vmalloc_to_page() instead.

The reference code was copied from kernel/dma/ops_helpers.c
and extended with the vmalloc detection described above.

Signed-off-by: Roman Skakun <roman_skakun@xxxxxxxx>
Reviewed-by: Andrii Anisov <andrii_anisov@xxxxxxxx>

---
Also, I noticed that the original common code does not check
that the memory region described by cpu_addr and size is
physically contiguous.

Does this mean that these functions may only be passed
physically contiguous memory? Is that correct?
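
As a side note, both new ops duplicate the same page lookup; it
could be factored into a small shared helper along these lines
(a minimal sketch, the helper name is just illustrative):

/*
 * Resolve the struct page behind a coherent allocation. Buffers
 * returned by xen_swiotlb_alloc_coherent() may live in the
 * vmalloc area, where virt_to_page() is not valid.
 */
static struct page *cpu_addr_to_page(void *cpu_addr)
{
        if (is_vmalloc_addr(cpu_addr))
                return vmalloc_to_page(cpu_addr);
        return virt_to_page(cpu_addr);
}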

Cheers!

---
 drivers/xen/swiotlb-xen.c | 52 ++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 50 insertions(+), 2 deletions(-)

diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 2b385c1b4a99..f99c98472927 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -563,6 +563,54 @@ xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
        return xen_virt_to_bus(hwdev, xen_io_tlb_end - 1) <= mask;
 }
 
+static int
+xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+               void *cpu_addr, dma_addr_t dma_addr, size_t size,
+               unsigned long attrs)
+{
+       unsigned long user_count = vma_pages(vma);
+       unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       unsigned long off = vma->vm_pgoff;
+       struct page *page;
+       int ret = -ENXIO;
+
+       if (is_vmalloc_addr(cpu_addr))
+               page = vmalloc_to_page(cpu_addr);
+       else
+               page = virt_to_page(cpu_addr);
+
+       vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
+
+       if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
+               return ret;
+
+       if (off >= count || user_count > count - off)
+               return -ENXIO;
+
+       return remap_pfn_range(vma, vma->vm_start,
+                       page_to_pfn(page) + vma->vm_pgoff,
+                       user_count << PAGE_SHIFT, vma->vm_page_prot);
+}
+
+static int
+xen_swiotlb_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+                void *cpu_addr, dma_addr_t dma_addr, size_t size,
+                unsigned long attrs)
+{
+       struct page *page;
+       int ret;
+
+       if (is_vmalloc_addr(cpu_addr))
+               page = vmalloc_to_page(cpu_addr);
+       else
+               page = virt_to_page(cpu_addr);
+
+       ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+       if (!ret)
+               sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+       return ret;
+}
+
 const struct dma_map_ops xen_swiotlb_dma_ops = {
        .alloc = xen_swiotlb_alloc_coherent,
        .free = xen_swiotlb_free_coherent,
@@ -575,8 +623,8 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
        .map_page = xen_swiotlb_map_page,
        .unmap_page = xen_swiotlb_unmap_page,
        .dma_supported = xen_swiotlb_dma_supported,
-       .mmap = dma_common_mmap,
-       .get_sgtable = dma_common_get_sgtable,
+       .mmap = xen_swiotlb_dma_mmap,
+       .get_sgtable = xen_swiotlb_dma_get_sgtable,
        .alloc_pages = dma_common_alloc_pages,
        .free_pages = dma_common_free_pages,
 };
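
For context, here is how a driver would typically reach these new
ops through the DMA API (a minimal sketch; foo_dev and its fields
are hypothetical, dma_mmap_coherent() is the standard entry point):

/* Map a coherent buffer to user space; on Xen this dispatches to
 * xen_swiotlb_dma_mmap() through the dma_map_ops table above. */
static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct foo_dev *fd = file->private_data;

        return dma_mmap_coherent(fd->dev, vma, fd->vaddr,
                                 fd->dma_handle,
                                 vma->vm_end - vma->vm_start);
}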
-- 
2.27.0