Index: 2005-12-16/arch/i386/kernel/pci-dma-xen.c
===================================================================
--- 2005-12-16.orig/arch/i386/kernel/pci-dma-xen.c	2005-12-23 14:42:54.992496344 +0100
+++ 2005-12-16/arch/i386/kernel/pci-dma-xen.c	2005-12-23 14:14:26.000000000 +0100
@@ -291,24 +291,6 @@ dma_unmap_single(struct device *dev, dma
 }
 EXPORT_SYMBOL(dma_unmap_single);
 
-void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-			enum dma_data_direction direction)
-{
-	if (swiotlb)
-		swiotlb_sync_single_for_cpu(dev, dma_handle, size, direction);
-}
-EXPORT_SYMBOL(dma_sync_single_for_cpu);
-
-void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
-			   enum dma_data_direction direction)
-{
-	if (swiotlb)
-		swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
-}
-EXPORT_SYMBOL(dma_sync_single_for_device);
-
 /*
  * Local variables:
  *  c-file-style: "linux"
Index: 2005-12-16/include/asm-i386/dma-mapping.h
===================================================================
--- 2005-12-16.orig/include/asm-i386/dma-mapping.h	2005-12-23 14:42:54.997495584 +0100
+++ 2005-12-16/include/asm-i386/dma-mapping.h	2005-12-23 15:08:26.393687976 +0100
@@ -6,6 +6,8 @@
 #include <asm/cache.h>
 #include <asm/io.h>
 #include <asm/scatterlist.h>
+#include <asm/swiotlb.h>
+#include <mach_dma_map.h>
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
@@ -16,73 +18,22 @@ void *dma_alloc_coherent(struct device *
 void dma_free_coherent(struct device *dev, size_t size,
 		       void *vaddr, dma_addr_t dma_handle);
 
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	flush_write_buffers();
-	return virt_to_phys(ptr);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		 enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-	   enum dma_data_direction direction)
-{
-	int i;
-
-	BUG_ON(direction == DMA_NONE);
-
-	for (i = 0; i < nents; i++ ) {
-		BUG_ON(!sg[i].page);
-
-		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
-	}
-
-	flush_write_buffers();
-	return nents;
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page, unsigned long offset,
-	     size_t size, enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	return page_to_phys(page) + offset;
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-}
-
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-	     enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-}
-
 static inline void
 dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
 			enum dma_data_direction direction)
 {
+	BUG_ON(direction == DMA_NONE);
+	if (swiotlb)
+		swiotlb_sync_single_for_cpu(dev, dma_handle, size, direction);
 }
 
 static inline void
 dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
 			   enum dma_data_direction direction)
 {
+	BUG_ON(direction == DMA_NONE);
+	if (swiotlb)
+		swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
 	flush_write_buffers();
 }
 
@@ -91,6 +42,9 @@ dma_sync_single_range_for_cpu(struct dev
 			      unsigned long offset, size_t size,
 			      enum dma_data_direction direction)
 {
+	BUG_ON(direction == DMA_NONE);
+	if (swiotlb)
+		swiotlb_sync_single_range_for_cpu(dev, dma_handle, offset, size, direction);
 }
 
 static inline void
@@ -98,6 +52,9 @@ dma_sync_single_range_for_device(struct
 			      unsigned long offset, size_t size,
 			      enum dma_data_direction direction)
 {
+	BUG_ON(direction == DMA_NONE);
+	if (swiotlb)
+		swiotlb_sync_single_range_for_device(dev, dma_handle, offset, size, direction);
 	flush_write_buffers();
 }
 
@@ -105,36 +62,22 @@ static inline void
 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
 		    enum dma_data_direction direction)
 {
+	BUG_ON(direction == DMA_NONE);
+	if (swiotlb)
+		swiotlb_sync_sg_for_cpu(dev, sg, nelems, direction);
 }
 
 static inline void
 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
 		       enum dma_data_direction direction)
 {
+	BUG_ON(direction == DMA_NONE);
+	if (swiotlb)
+		swiotlb_sync_sg_for_device(dev, sg, nelems, direction);
 	flush_write_buffers();
 }
 
 static inline int
-dma_mapping_error(dma_addr_t dma_addr)
-{
-	return 0;
-}
-
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
-	/*
-	 * we fall back to GFP_DMA when the mask isn't all 1s,
-	 * so we can't guarantee allocations that must be
-	 * within a tighter range than GFP_DMA..
-	 */
-	if(mask < 0x00ffffff)
-		return 0;
-
-	return 1;
-}
-
-static inline int
 dma_set_mask(struct device *dev, u64 mask)
 {
 	if(!dev->dma_mask || !dma_supported(dev, mask))
Index: 2005-12-16/include/asm-i386/mach-default/mach_dma_map.h
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ 2005-12-16/include/asm-i386/mach-default/mach_dma_map.h	2005-12-23 14:10:38.000000000 +0100
@@ -0,0 +1,80 @@
+#ifndef __ASM_MACH_DMA_MAP_H
+#define __ASM_MACH_DMA_MAP_H
+
+static inline dma_addr_t
+dma_map_single(struct device *dev, void *ptr, size_t size,
+	       enum dma_data_direction direction)
+{
+	BUG_ON(direction == DMA_NONE);
+	flush_write_buffers();
+	return virt_to_phys(ptr);
+}
+
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+		 enum dma_data_direction direction)
+{
+	BUG_ON(direction == DMA_NONE);
+}
+
+static inline int
+dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+	   enum dma_data_direction direction)
+{
+	int i;
+
+	BUG_ON(direction == DMA_NONE);
+
+	for (i = 0; i < nents; i++ ) {
+		BUG_ON(!sg[i].page);
+
+		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
+	}
+
+	flush_write_buffers();
+	return nents;
+}
+
+static inline void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+	     enum dma_data_direction direction)
+{
+	BUG_ON(direction == DMA_NONE);
+}
+
+static inline dma_addr_t
+dma_map_page(struct device *dev, struct page *page, unsigned long offset,
+	     size_t size, enum dma_data_direction direction)
+{
+	BUG_ON(direction == DMA_NONE);
+	return page_to_phys(page) + offset;
+}
+
+static inline void
+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+	       enum dma_data_direction direction)
+{
+	BUG_ON(direction == DMA_NONE);
+}
+
+static inline int
+dma_mapping_error(dma_addr_t dma_addr)
+{
+	return 0;
+}
+
+static inline int
+dma_supported(struct device *dev, u64 mask)
+{
+	/*
+	 * we fall back to GFP_DMA when the mask isn't all 1s,
+	 * so we can't guarantee allocations that must be
+	 * within a tighter range than GFP_DMA..
+	 */
+	if(mask < 0x00ffffff)
+		return 0;
+
+	return 1;
+}
+
+#endif /* __ASM_MACH_DMA_MAP_H */
Index: 2005-12-16/include/asm-i386/mach-xen/asm/dma-mapping.h
===================================================================
--- 2005-12-16.orig/include/asm-i386/mach-xen/asm/dma-mapping.h	2005-12-23 14:42:54.995495888 +0100
+++ /dev/null	1970-01-01 00:00:00.000000000 +0000
@@ -1,156 +0,0 @@
-#ifndef _ASM_I386_DMA_MAPPING_H
-#define _ASM_I386_DMA_MAPPING_H
-
-/*
- * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
- * documentation.
- */
-
-#include <linux/config.h>
-#include <linux/mm.h>
-#include <asm/cache.h>
-#include <asm/io.h>
-#include <asm/scatterlist.h>
-#include <asm/swiotlb.h>
-
-static inline int
-address_needs_mapping(struct device *hwdev, dma_addr_t addr)
-{
-	dma_addr_t mask = 0xffffffff;
-	/* If the device has a mask, use it, otherwise default to 32 bits */
-	if (hwdev && hwdev->dma_mask)
-		mask = *hwdev->dma_mask;
-	return (addr & ~mask) != 0;
-}
-
-static inline int
-range_straddles_page_boundary(void *p, size_t size)
-{
-	extern unsigned long *contiguous_bitmap;
-	return (((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
-		!test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap));
-}
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
-			 dma_addr_t *dma_handle, gfp_t flag);
-
-void dma_free_coherent(struct device *dev, size_t size,
-		       void *vaddr, dma_addr_t dma_handle);
-
-extern dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
-	       enum dma_data_direction direction);
-
-extern void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		 enum dma_data_direction direction);
-
-extern int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
-		      int nents, enum dma_data_direction direction);
-extern void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg,
-			 int nents, enum dma_data_direction direction);
-
-extern dma_addr_t
-dma_map_page(struct device *dev, struct page *page, unsigned long offset,
-	     size_t size, enum dma_data_direction direction);
-
-extern void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-	       enum dma_data_direction direction);
-
-extern void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-			enum dma_data_direction direction);
-
-extern void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
-			   enum dma_data_direction direction);
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-			      unsigned long offset, size_t size,
-			      enum dma_data_direction direction)
-{
-	dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-				 unsigned long offset, size_t size,
-				 enum dma_data_direction direction)
-{
-	dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-		    enum dma_data_direction direction)
-{
-	if (swiotlb)
-		swiotlb_sync_sg_for_cpu(dev,sg,nelems,direction);
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-		       enum dma_data_direction direction)
-{
-	if (swiotlb)
-		swiotlb_sync_sg_for_device(dev,sg,nelems,direction);
-	flush_write_buffers();
-}
-
-extern int
-dma_mapping_error(dma_addr_t dma_addr);
-
-extern int
-dma_supported(struct device *dev, u64 mask);
-
-static inline int
-dma_set_mask(struct device *dev, u64 mask)
-{
-	if(!dev->dma_mask || !dma_supported(dev, mask))
-		return -EIO;
-
-	*dev->dma_mask = mask;
-
-	return 0;
-}
-
-#ifdef __i386__
-static inline int
-dma_get_cache_alignment(void)
-{
-	/* no easy way to get cache size on all x86, so return the
-	 * maximum possible, to be safe */
-	return (1 << L1_CACHE_SHIFT_MAX);
-}
-#else
-extern int dma_get_cache_alignment(void);
-#endif
-
-#define dma_is_consistent(d)	(1)
-
-static inline void
-dma_cache_sync(void *vaddr, size_t size,
-	       enum dma_data_direction direction)
-{
-	flush_write_buffers();
-}
-
-#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
-extern int
-dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
-			    dma_addr_t device_addr, size_t size, int flags);
-
-extern void
-dma_release_declared_memory(struct device *dev);
-
-extern void *
-dma_mark_declared_memory_occupied(struct device *dev,
-				  dma_addr_t device_addr, size_t size);
-
-#endif
Index: 2005-12-16/include/asm-i386/mach-xen/mach_dma_map.h
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ 2005-12-16/include/asm-i386/mach-xen/mach_dma_map.h	2005-12-23 14:18:11.000000000 +0100
@@ -0,0 +1,47 @@
+#ifndef __ASM_MACH_DMA_MAP_H
+#define __ASM_MACH_DMA_MAP_H
+
+static inline int
+address_needs_mapping(struct device *hwdev, dma_addr_t addr)
+{
+	dma_addr_t mask = 0xffffffff;
+	/* If the device has a mask, use it, otherwise default to 32 bits */
+	if (hwdev && hwdev->dma_mask)
+		mask = *hwdev->dma_mask;
+	return (addr & ~mask) != 0;
+}
+
+static inline int
+range_straddles_page_boundary(void *p, size_t size)
+{
+	extern unsigned long *contiguous_bitmap;
+	return (((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
+		!test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap));
+}
+
+extern dma_addr_t
+dma_map_single(struct device *dev, void *ptr, size_t size,
+	       enum dma_data_direction direction);
+extern void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+		 enum dma_data_direction direction);
+
+extern int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
+		      int nents, enum dma_data_direction direction);
+extern void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg,
+			 int nents, enum dma_data_direction direction);
+
+extern dma_addr_t
+dma_map_page(struct device *dev, struct page *page, unsigned long offset,
+	     size_t size, enum dma_data_direction direction);
+extern void
+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+	       enum dma_data_direction direction);
+
+extern int
+dma_mapping_error(dma_addr_t dma_addr);
+
+extern int
+dma_supported(struct device *dev, u64 mask);
+
+#endif /* __ASM_MACH_DMA_MAP_H */
Index: 2005-12-16/include/asm-i386/swiotlb.h
===================================================================
--- 2005-12-16.orig/include/asm-i386/swiotlb.h	2005-12-23 09:46:58.000000000 +0100
+++ 2005-12-16/include/asm-i386/swiotlb.h	2005-12-23 15:08:00.359645752 +0100
@@ -40,7 +40,20 @@ extern int swiotlb;
 #else /* CONFIG_SWIOTLB */
 
 #define swiotlb 0
+#define swiotlb_sync_single_for_cpu(dev, addr, size, dir) \
+	((void)(dev),(void)(addr),(void)(size),(void)(dir))
+#define swiotlb_sync_single_for_device(dev, addr, size, dir) \
+	((void)(dev),(void)(addr),(void)(size),(void)(dir))
+#define swiotlb_sync_sg_for_cpu(dev, sg, nelems, dir) \
+	((void)(dev),(void)(sg),(void)(nelems),(void)(dir))
+#define swiotlb_sync_sg_for_device(dev, sg, nelems, dir) \
+	((void)(dev),(void)(sg),(void)(nelems),(void)(dir))
 
 #endif
 
+#define swiotlb_sync_single_range_for_cpu(dev, dma_handle, offset, size, direction) \
+	((void)(offset), swiotlb_sync_single_for_cpu(dev, dma_handle, size, direction))
+#define swiotlb_sync_single_range_for_device(dev, dma_handle, offset, size, direction) \
+	((void)(offset), swiotlb_sync_single_for_device(dev, dma_handle, size, direction))
+
 #endif /* _ASM_SWIOTLB_H */
Index: 2005-12-16/include/asm-x86_64/mach-xen/asm/dma-mapping.h
===================================================================
--- 2005-12-16.orig/include/asm-x86_64/mach-xen/asm/dma-mapping.h	2005-12-23 14:42:54.994496040 +0100
+++ 2005-12-16/include/asm-x86_64/mach-xen/asm/dma-mapping.h	2005-12-23 13:51:04.000000000 +0100
@@ -1 +1,146 @@
-#include <asm-i386/mach-xen/asm/dma-mapping.h>
+#ifndef _ASM_DMA_MAPPING_H
+#define _ASM_DMA_MAPPING_H
+
+/*
+ * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
+ * documentation.
+ */
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <asm/cache.h>
+#include <asm/io.h>
+#include <asm/scatterlist.h>
+#include <asm/swiotlb.h>
+
+static inline int
+address_needs_mapping(struct device *hwdev, dma_addr_t addr)
+{
+	dma_addr_t mask = 0xffffffff;
+	/* If the device has a mask, use it, otherwise default to 32 bits */
+	if (hwdev && hwdev->dma_mask)
+		mask = *hwdev->dma_mask;
+	return (addr & ~mask) != 0;
+}
+
+static inline int
+range_straddles_page_boundary(void *p, size_t size)
+{
+	extern unsigned long *contiguous_bitmap;
+	return (((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
+		!test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap));
+}
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+			 dma_addr_t *dma_handle, gfp_t flag);
+
+void dma_free_coherent(struct device *dev, size_t size,
+		       void *vaddr, dma_addr_t dma_handle);
+
+extern dma_addr_t
+dma_map_single(struct device *dev, void *ptr, size_t size,
+	       enum dma_data_direction direction);
+
+extern void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+		 enum dma_data_direction direction);
+
+extern int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
+		      int nents, enum dma_data_direction direction);
+extern void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg,
+			 int nents, enum dma_data_direction direction);
+
+extern dma_addr_t
+dma_map_page(struct device *dev, struct page *page, unsigned long offset,
+	     size_t size, enum dma_data_direction direction);
+
+extern void
+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+	       enum dma_data_direction direction);
+
+extern void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
+			enum dma_data_direction direction);
+
+extern void
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
+			   enum dma_data_direction direction);
+
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+			      unsigned long offset, size_t size,
+			      enum dma_data_direction direction)
+{
+	dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
+}
+
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+				 unsigned long offset, size_t size,
+				 enum dma_data_direction direction)
+{
+	dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+		    enum dma_data_direction direction)
+{
+	if (swiotlb)
+		swiotlb_sync_sg_for_cpu(dev,sg,nelems,direction);
+	flush_write_buffers();
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+		       enum dma_data_direction direction)
+{
+	if (swiotlb)
+		swiotlb_sync_sg_for_device(dev,sg,nelems,direction);
+	flush_write_buffers();
+}
+
+extern int
+dma_mapping_error(dma_addr_t dma_addr);
+
+extern int
+dma_supported(struct device *dev, u64 mask);
+
+static inline int
+dma_set_mask(struct device *dev, u64 mask)
+{
+	if(!dev->dma_mask || !dma_supported(dev, mask))
+		return -EIO;
+
+	*dev->dma_mask = mask;
+
+	return 0;
+}
+
+extern int dma_get_cache_alignment(void);
+
+#define dma_is_consistent(d)	(1)
+
+static inline void
+dma_cache_sync(void *vaddr, size_t size,
+	       enum dma_data_direction direction)
+{
+	flush_write_buffers();
+}
+
+#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+extern int
+dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+			    dma_addr_t device_addr, size_t size, int flags);
+
+extern void
+dma_release_declared_memory(struct device *dev);
+
+extern void *
+dma_mark_declared_memory_occupied(struct device *dev,
+				  dma_addr_t device_addr, size_t size);
+
+#endif
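
Note on the CONFIG_SWIOTLB=n stubs added to include/asm-i386/swiotlb.h above:
each stub expands to a comma expression of (void) casts, so every argument is
still evaluated and type-checked exactly once, call sites need no #ifdefs, and
because swiotlb is then the constant 0 the compiler drops the guarded call as
dead code. The range variants are defined outside the #ifdef so they are
available in both configurations. A minimal standalone sketch of the same
pattern, using hypothetical names (backend, backend_sync, dma_sync) that are
not part of the patch:

/* Build with ENABLE_BACKEND undefined to exercise the no-op path. */
#include <stdio.h>

#ifdef ENABLE_BACKEND
extern int backend;			/* nonzero if a backend is present */
void backend_sync(void *dev, unsigned long addr, unsigned long size);
#else
#define backend 0
/* No-op stub: the (void) casts keep the arguments evaluated and
 * type-checked without triggering unused-value warnings. */
#define backend_sync(dev, addr, size) \
	((void)(dev), (void)(addr), (void)(size))
#endif

static void dma_sync(void *dev, unsigned long addr, unsigned long size)
{
	if (backend)		/* constant 0 here: branch folds away */
		backend_sync(dev, addr, size);
	printf("synced %lu bytes at 0x%lx\n", size, addr);
}

int main(void)
{
	dma_sync(NULL, 0x1000UL, 256UL);
	return 0;
}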