diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index d40e9e5fc..fb21b542c 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -29,13 +29,11 @@ unsigned long xen_get_swiotlb_free_pages(unsigned int order)
 
 	for_each_memblock(memory, reg) {
 		if (reg->base < (phys_addr_t)0xffffffff) {
-			if (IS_ENABLED(CONFIG_ZONE_DMA32))
-				flags |= __GFP_DMA32;
-			else
-				flags |= __GFP_DMA;
+			flags |= __GFP_DMA;
 			break;
 		}
 	}
+	printk("DEBUG %s %d flags=%x\n",__func__,__LINE__,flags);
 	return __get_free_pages(flags, order);
 }
 
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index b6d27762c..ff9d07e28 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -191,6 +191,7 @@ int __ref xen_swiotlb_init(int verbose, bool early)
 	 * IO TLB memory already allocated. Just use it.
 	 */
 	if (io_tlb_start != 0) {
+		start_dma_addr = io_tlb_start;
 		xen_io_tlb_start = phys_to_virt(io_tlb_start);
 		goto end;
 	}
@@ -224,6 +225,7 @@ int __ref xen_swiotlb_init(int verbose, bool early)
 		m_ret = XEN_SWIOTLB_ENOMEM;
 		goto error;
 	}
+printk("DEBUG %s %d start_virt=%lx order=%lu phys=%llx vmalloc?=%d\n",__func__,__LINE__,(unsigned long)xen_io_tlb_start, order, virt_to_phys(xen_io_tlb_start), is_vmalloc_addr(xen_io_tlb_start));
 	/*
 	 * And replace that memory with pages under 4GB.
 	 */
@@ -255,6 +257,7 @@ int __ref xen_swiotlb_init(int verbose, bool early)
 
 	if (!rc)
 		swiotlb_set_max_segment(PAGE_SIZE);
+printk("DEBUG %s %d start=%llx end=%llx rc=%d\n",__func__,__LINE__,start_dma_addr,start_dma_addr+bytes,rc);
 	return rc;
 error:
 	if (repeat--) {
@@ -324,6 +327,10 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		}
 		SetPageXenRemapped(virt_to_page(ret));
 	}
+if (!dma_capable(hwdev, *dma_handle, size, true))
+	printk("DEBUG1 %s %d phys=%llx dma=%llx dma_mask=%llx\n",__func__,__LINE__,phys,*dma_handle,dma_mask);
+if (dev_addr + size - 1 > dma_mask)
+	printk("DEBUG2 %s %d phys=%llx dma=%llx dma_mask=%llx\n",__func__,__LINE__,phys,*dma_handle,dma_mask);
 	memset(ret, 0, size);
 	return ret;
 }
@@ -335,6 +342,7 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 	int order = get_order(size);
 	phys_addr_t phys;
 	u64 dma_mask = DMA_BIT_MASK(32);
+	struct page *pg;
 
 	if (hwdev && hwdev->coherent_dma_mask)
 		dma_mask = hwdev->coherent_dma_mask;
@@ -345,12 +353,16 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 
 	/* Convert the size to actually allocated. */
 	size = 1UL << (order + XEN_PAGE_SHIFT);
-
+printk(KERN_CRIT "===1 before\n");
+	pg = is_vmalloc_addr(vaddr) ? vmalloc_to_page(vaddr) : virt_to_page(vaddr);
+printk(KERN_CRIT "==== middle\n");
+	BUG_ON(!pg);
+printk(KERN_CRIT "==== after\n");
 	if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
 		     range_straddles_page_boundary(phys, size)) &&
-	    TestClearPageXenRemapped(virt_to_page(vaddr)))
+	    TestClearPageXenRemapped(pg))
 		xen_destroy_contiguous_region(phys, order);
-
+printk(KERN_CRIT "==== done\n");
 	xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
 }
 
@@ -398,6 +410,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	 * Ensure that the address returned is DMA'ble
 	 */
 	if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
+printk("DEBUG3 %s %d phys=%llx dma=%llx\n",__func__,__LINE__,phys,dev_addr);
 		swiotlb_tbl_unmap_single(dev, map, size, size, dir,
 				attrs | DMA_ATTR_SKIP_CPU_SYNC);
 		return DMA_MAPPING_ERROR;
diff --git a/include/xen/arm/page-coherent.h b/include/xen/arm/page-coherent.h
index b9cc11e88..50c7a2e96 100644
--- a/include/xen/arm/page-coherent.h
+++ b/include/xen/arm/page-coherent.h
@@ -8,12 +8,17 @@ static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
 		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
 {
+	void *cpu_addr;
+
+	if (dma_alloc_from_dev_coherent(hwdev, size, dma_handle, &cpu_addr))
+		return cpu_addr;
 	return dma_direct_alloc(hwdev, size, dma_handle, flags, attrs);
 }
 
 static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
 		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
 {
+	if (dma_release_from_dev_coherent(hwdev, get_order(size), cpu_addr))
+		return;
 	dma_direct_free(hwdev, size, cpu_addr, dma_handle, attrs);
 }