# HG changeset patch
# User yamahata@xxxxxxxxxxxxx
# Date 1181547672 -32400
# Node ID d130a440cfbba669771da8b116f2d31b8da67ea3
# Parent b9f881233e6a426bc1a7fa4456761bebda704441
support dma tracking for swiotlb and xen_dma

PATCHNAME: dma_tracking_swiotlb_xen_dma

Signed-off-by: Isaku Yamahata

diff -r b9f881233e6a -r d130a440cfbb arch/ia64/xen/swiotlb.c
--- a/arch/ia64/xen/swiotlb.c	Tue Jun 05 22:43:38 2007 +0900
+++ b/arch/ia64/xen/swiotlb.c	Mon Jun 11 16:41:12 2007 +0900
@@ -33,6 +33,8 @@
 #include 
 
 #ifdef CONFIG_XEN
+#include 
+#include 
 /*
  * What DMA mask should Xen use to remap the bounce buffer pool? Most
  * reports seem to indicate 30 bits is sufficient, except maybe for old
@@ -597,7 +599,7 @@ dma_addr_t
 dma_addr_t
 swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
 {
-	unsigned long dev_addr = virt_to_bus(ptr);
+	unsigned long dev_addr = gnttab_dma_map_virt(ptr);
 	void *map;
 
 	BUG_ON(dir == DMA_NONE);
@@ -610,6 +612,7 @@ swiotlb_map_single(struct device *hwdev,
 	    !address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
 		return dev_addr;
 
+	__gnttab_dma_unmap_page(virt_to_page(ptr));
 	/*
 	 * Oh well, have to allocate and map a bounce buffer.
 	 */
@@ -672,8 +675,11 @@ swiotlb_unmap_single(struct device *hwde
 	BUG_ON(dir == DMA_NONE);
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
 		unmap_single(hwdev, dma_addr, size, dir);
-	else if (dir == DMA_FROM_DEVICE)
-		mark_clean(dma_addr, size);
+	else {
+		gnttab_dma_unmap_page(dev_addr);
+		if (dir == DMA_FROM_DEVICE)
+			mark_clean(dma_addr, size);
+	}
 }
 
 /*
@@ -774,9 +780,11 @@ swiotlb_map_sg(struct device *hwdev, str
 
 	for (i = 0; i < nelems; i++, sg++) {
 		addr = SG_ENT_VIRT_ADDRESS(sg);
-		dev_addr = virt_to_bus(addr);
+		dev_addr = gnttab_dma_map_virt(addr);
 		if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
-			void *map = map_single(hwdev, addr, sg->length, dir);
+			void *map;
+			gnttab_dma_unmap_page(dev_addr);
+			map = map_single(hwdev, addr, sg->length, dir);
 			sg->dma_address = virt_to_bus(map);
 			if (!map) {
 				/* Don't panic here, we expect map_sg users
@@ -808,8 +816,12 @@ swiotlb_unmap_sg(struct device *hwdev, s
 	for (i = 0; i < nelems; i++, sg++)
 		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
 			unmap_single(hwdev, (void *) bus_to_virt(sg->dma_address), sg->dma_length, dir);
-		else if (dir == DMA_FROM_DEVICE)
-			mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
+		else {
+			gnttab_dma_unmap_page(sg->dma_address);
+			if (dir == DMA_FROM_DEVICE)
+				mark_clean(SG_ENT_VIRT_ADDRESS(sg),
+					   sg->dma_length);
+		}
 }
 
 /*
diff -r b9f881233e6a -r d130a440cfbb arch/ia64/xen/xen_dma.c
--- a/arch/ia64/xen/xen_dma.c	Tue Jun 05 22:43:38 2007 +0900
+++ b/arch/ia64/xen/xen_dma.c	Mon Jun 11 16:41:12 2007 +0900
@@ -26,6 +26,8 @@
 #include 
 #include 
 #include 
+#include 
+#include 
 
 #define IOMMU_BUG_ON(test)					\
 do {								\
@@ -58,7 +60,7 @@ xen_map_sg(struct device *dev, struct sc
 	int i;
 
 	for (i = 0 ; i < nents ; i++) {
-		sg[i].dma_address = page_to_bus(sg[i].page) + sg[i].offset;
+		sg[i].dma_address = gnttab_dma_map_page(sg[i].page) + sg[i].offset;
 		sg[i].dma_length  = sg[i].length;
 
 		IOMMU_BUG_ON(address_needs_mapping(dev, sg[i].dma_address));
@@ -72,6 +74,9 @@ xen_unmap_sg(struct device *dev, struct 
 xen_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 	     int direction)
 {
+	int i;
+	for (i = 0; i < nents; i++)
+		__gnttab_dma_unmap_page(sg[i].page);
 }
 EXPORT_SYMBOL(xen_unmap_sg);
 
@@ -129,7 +134,7 @@ xen_map_single(struct device *dev, void 
 xen_map_single(struct device *dev, void *ptr, size_t size, int direction)
 {
-	dma_addr_t dma_addr = virt_to_bus(ptr);
+	dma_addr_t dma_addr = gnttab_dma_map_virt(ptr);
 
 	IOMMU_BUG_ON(range_straddles_page_boundary(ptr, size));
 	IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
 
@@ -142,5 +147,6 @@ xen_unmap_single(struct device *dev, dma
 xen_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		 int direction)
 {
+	gnttab_dma_unmap_page(dma_addr);
 }
 EXPORT_SYMBOL(xen_unmap_single);
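
Conceptually, the patch replaces every virt_to_bus()/page_to_bus() translation
handed to a device with a grant-table-aware mapping helper, and pairs each such
mapping with an unmap when the DMA completes. Below is a minimal sketch of that
discipline, assuming the gnttab_dma_map_virt()/gnttab_dma_map_page() and
gnttab_dma_unmap_page()/__gnttab_dma_unmap_page() helpers used above; the
*_sketch() helpers stand in for swiotlb internals (the bounce-buffer test and
map_single()/unmap_single()) and are hypothetical, not part of the patch.

/*
 * Sketch only: the real logic lives in swiotlb_map_single() and
 * swiotlb_unmap_single() in the diff above.
 */
static dma_addr_t map_one_sketch(struct device *hwdev, void *ptr,
				 size_t size, int dir)
{
	/* Track the page via the grant table and obtain its bus address. */
	dma_addr_t dev_addr = gnttab_dma_map_virt(ptr);

	if (!needs_bounce_sketch(hwdev, dev_addr))
		return dev_addr;	/* device can use the page directly */

	/*
	 * Bounce-buffer fallback: the device will be handed the bounce
	 * buffer's address, so drop the tracking on the original page now.
	 */
	__gnttab_dma_unmap_page(virt_to_page(ptr));
	return virt_to_bus(map_single_sketch(hwdev, ptr, size, dir));
}

static void unmap_one_sketch(struct device *hwdev, dma_addr_t dev_addr,
			     size_t size, int dir)
{
	if (is_bounce_buffer_sketch(dev_addr)) {
		unmap_single_sketch(hwdev, bus_to_virt(dev_addr), size, dir);
		return;
	}
	/* Directly mapped page: release the tracking taken at map time. */
	gnttab_dma_unmap_page(dev_addr);
}

The same pairing appears in xen_dma.c: xen_map_sg() and xen_map_single() now map
through gnttab_dma_map_page() and gnttab_dma_map_virt(), and the previously
empty xen_unmap_sg() and xen_unmap_single() release the tracking with
__gnttab_dma_unmap_page() and gnttab_dma_unmap_page().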