# HG changeset patch
# User yamahata@xxxxxxxxxxxxx
# Date 1181545129 -32400
# Node ID c50f01e58f9d6f2a54ce4e59bd053f97860fb9ec
# Parent  d130a440cfbba669771da8b116f2d31b8da67ea3
support dma tracking for sba_iommu.c
PATCHNAME: dma_tracking_sba_iommu_c

Signed-off-by: Isaku Yamahata

diff -r d130a440cfbb -r c50f01e58f9d arch/ia64/hp/common/sba_iommu.c
--- a/arch/ia64/hp/common/sba_iommu.c	Mon Jun 11 16:41:12 2007 +0900
+++ b/arch/ia64/hp/common/sba_iommu.c	Mon Jun 11 15:58:49 2007 +0900
@@ -42,6 +42,10 @@
 #include <asm/system.h>		/* wmb() */
 #include <asm/acpi-ext.h>
+#ifdef CONFIG_XEN
+#include <xen/gnttab.h>
+#include <asm/gnttab_dma.h>
+#endif
 
 #define PFX "IOC: "
 
 /*
@@ -901,7 +905,19 @@ sba_map_single(struct device *dev, void 
 	unsigned long flags;
 #endif
 #ifdef ALLOW_IOV_BYPASS
+#ifdef CONFIG_XEN
+	unsigned long pci_addr;
+	void* tmp_addr = addr - PAGE_SIZE;
+	size_t tmp_size = size + PAGE_SIZE;
+	do {
+		tmp_addr += PAGE_SIZE;
+		tmp_size -= PAGE_SIZE;
+		gnttab_dma_use_page(virt_to_page(tmp_addr));
+	} while (tmp_size > PAGE_SIZE);
+	pci_addr = virt_to_bus(addr);
+#else
 	unsigned long pci_addr = virt_to_bus(addr);
+#endif
 	ASSERT(to_pci_dev(dev)->dma_mask);
 
 	/*
@@ -994,6 +1010,48 @@ sba_mark_clean(struct ioc *ioc, dma_addr
 }
 #endif
 
+#ifdef CONFIG_XEN
+static void
+sba_gnttab_dma_unmap_page(struct ioc *ioc, dma_addr_t iova, size_t size)
+{
+	u32 iovp = (u32) SBA_IOVP(ioc,iova);
+	int off = PDIR_INDEX(iovp);
+	int i;
+	size_t step_size = max(PAGE_SIZE, iovp_size);
+
+	if (!is_running_on_xen())
+		return;
+
+	for (;;) {
+		size_t unmap_pages =
+			(min(step_size, size) + PAGE_SIZE - 1) / PAGE_SIZE;
+		dma_addr_t dma_address = ioc->pdir_base[off] &
+			~0xE000000000000FFFULL;
+		for (i = 0; i < unmap_pages; i++) {
+			gnttab_dma_unmap_page(dma_address);
+			dma_address += PAGE_SIZE;
+		}
+
+		if (size <= step_size)
+			break;
+		off += step_size / iovp_size;
+		size -= step_size;
+	}
+}
+
+static void
+sba_gnttab_dma_use_sg(struct scatterlist *sglist, int nents)
+{
+	int i;
+
+	if (!is_running_on_xen())
+		return;
+
+	for (i = 0; i < nents; i++)
+		gnttab_dma_use_page(sglist[i].page);
+}
+#endif
+
 /**
  * sba_unmap_single - unmap one IOVA and free resources
@@ -1003,7 +1061,11 @@ sba_mark_clean(struct ioc *ioc, dma_addr
  *
  * See Documentation/DMA-mapping.txt
  */
+#ifdef CONFIG_XEN
+static void __sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir, int gnttab_unmap)
+#else
 void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
+#endif
 {
 	struct ioc *ioc;
 #if DELAYED_RESOURCE_CNT > 0
@@ -1016,7 +1078,8 @@ void sba_unmap_single(struct device *dev
 	ASSERT(ioc);
 
 #ifdef ALLOW_IOV_BYPASS
-	if (likely((iova & ioc->imask) != ioc->ibase)) {
+	if (likely((iova & ioc->imask) != ioc->ibase) &&
+	    !range_straddles_page_boundary(bus_to_virt(iova), size)) {
 		/*
 		** Address does not fall w/in IOVA, must be bypassing
 		*/
@@ -1027,6 +1090,17 @@ void sba_unmap_single(struct device *dev
 			mark_clean(bus_to_virt(iova), size);
 		}
 #endif
+#ifdef CONFIG_XEN
+		if (gnttab_unmap) {
+			for (;;) {
+				gnttab_dma_unmap_page(iova);
+				if (size <= PAGE_SIZE)
+					break;
+				iova += PAGE_SIZE;
+				size -= PAGE_SIZE;
+			}
+		}
+#endif
 		return;
 	}
 #endif
@@ -1043,7 +1117,11 @@ void sba_unmap_single(struct device *dev
 	if (dir == DMA_FROM_DEVICE)
 		sba_mark_clean(ioc, iova, size);
 #endif
-
+#ifdef CONFIG_XEN
+	if (gnttab_unmap)
+		sba_gnttab_dma_unmap_page(ioc, iova, size);
+#endif
+
 #if DELAYED_RESOURCE_CNT > 0
 	spin_lock_irqsave(&ioc->saved_lock, flags);
 	d = &(ioc->saved[ioc->saved_cnt]);
@@ -1071,6 +1149,14 @@ void sba_unmap_single(struct device *dev
 #endif /* DELAYED_RESOURCE_CNT == 0 */
 }
 
+#ifdef CONFIG_XEN
+void
+sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
+{
+	__sba_unmap_single(dev, iova, size, dir, 1);
+}
+#endif
+
 /**
  * sba_alloc_coherent - allocate/map shared mem for DMA
@@ -1427,7 +1513,11 @@ int sba_map_sg(struct device *dev, struc
 	if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
 		for (sg = sglist ; filled < nents ; filled++, sg++){
 			sg->dma_length = sg->length;
+#ifdef CONFIG_XEN
+			sg->dma_address = gnttab_dma_map_page(sg->page) + sg->offset;
+#else
 			sg->dma_address = virt_to_bus(sba_sg_address(sg));
+#endif
 		}
 		return filled;
 	}
@@ -1450,6 +1540,10 @@ int sba_map_sg(struct device *dev, struc
 #endif
 
 	prefetch(ioc->res_hint);
+
+#ifdef CONFIG_XEN
+	sba_gnttab_dma_use_sg(sglist, nents);
+#endif
 
 	/*
 	** First coalesce the chunks and allocate I/O pdir space
@@ -1517,8 +1611,12 @@ void sba_unmap_sg (struct device *dev, s
 #endif
 
 	while (nents && sglist->dma_length) {
-
+#ifdef CONFIG_XEN
+		__sba_unmap_single(dev, sglist->dma_address, sglist->dma_length, dir, 0);
+		__gnttab_dma_unmap_page(sglist->page);
+#else
 		sba_unmap_single(dev, sglist->dma_address, sglist->dma_length, dir);
+#endif
 		sglist++;
 		nents--;
 	}