# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1259762764 0
# Node ID 7e64a8e666b8326ee82ea2c2b60fb90fddd91af8
# Parent 97ab480f7e80a7de75d21ba1764942be473cd0dd
VT-d: get rid of hardcoded size in iommu_flush_cache_entry
Currently iommu_flush_cache_entry uses a fixed size of 8 bytes when
flushing the cache. However, it also needs to flush entries of other
sizes, e.g. struct root_entry, which is 16 bytes. This patch removes the
hardcoded size by adding a "size" parameter so that callers can flush
entries of different sizes.
Signed-off-by: Weidong Han <weidong.han@xxxxxxxxx>
xen-unstable changeset: 20557:04037c99b5f1
xen-unstable date: Wed Dec 02 08:48:36 2009 +0000
---
xen/drivers/passthrough/vtd/intremap.c | 4 ++--
xen/drivers/passthrough/vtd/iommu.c | 24 ++++++++++++------------
xen/drivers/passthrough/vtd/vtd.h | 2 +-
3 files changed, 15 insertions(+), 15 deletions(-)
diff -r 97ab480f7e80 -r 7e64a8e666b8 xen/drivers/passthrough/vtd/intremap.c
--- a/xen/drivers/passthrough/vtd/intremap.c Tue Dec 01 14:25:20 2009 +0000
+++ b/xen/drivers/passthrough/vtd/intremap.c Wed Dec 02 14:06:04 2009 +0000
@@ -212,7 +212,7 @@ static int ioapic_rte_to_remap_entry(str
}
memcpy(iremap_entry, &new_ire, sizeof(struct iremap_entry));
- iommu_flush_cache_entry(iremap_entry);
+ iommu_flush_cache_entry(iremap_entry, sizeof(struct iremap_entry));
iommu_flush_iec_index(iommu, 0, index);
invalidate_sync(iommu);
@@ -443,7 +443,7 @@ static int msi_msg_to_remap_entry(
remap_rte->data = 0;
memcpy(iremap_entry, &new_ire, sizeof(struct iremap_entry));
- iommu_flush_cache_entry(iremap_entry);
+ iommu_flush_cache_entry(iremap_entry, sizeof(struct iremap_entry));
iommu_flush_iec_index(iommu, 0, index);
invalidate_sync(iommu);
diff -r 97ab480f7e80 -r 7e64a8e666b8 xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c Tue Dec 01 14:25:20 2009 +0000
+++ b/xen/drivers/passthrough/vtd/iommu.c Wed Dec 02 14:06:04 2009 +0000
@@ -114,7 +114,7 @@ struct iommu_flush *iommu_get_flush(stru
static unsigned int clflush_size;
static int iommus_incoherent;
-static void __iommu_flush_cache(void *addr, int size)
+static void __iommu_flush_cache(void *addr, unsigned int size)
{
int i;
@@ -125,9 +125,9 @@ static void __iommu_flush_cache(void *ad
cacheline_flush((char *)addr + i);
}
-void iommu_flush_cache_entry(void *addr)
-{
- __iommu_flush_cache(addr, 8);
+void iommu_flush_cache_entry(void *addr, unsigned int size)
+{
+ __iommu_flush_cache(addr, size);
}
void iommu_flush_cache_page(void *addr, unsigned long npages)
@@ -155,7 +155,7 @@ static u64 bus_to_context_maddr(struct i
}
set_root_value(*root, maddr);
set_root_present(*root);
- iommu_flush_cache_entry(root);
+ iommu_flush_cache_entry(root, sizeof(struct root_entry));
}
maddr = (u64) get_context_addr(*root);
unmap_vtd_domain_page(root_entries);
@@ -200,7 +200,7 @@ static u64 addr_to_dma_page_maddr(struct
*/
dma_set_pte_readable(*pte);
dma_set_pte_writable(*pte);
- iommu_flush_cache_entry(pte);
+ iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
}
else
{
@@ -524,9 +524,9 @@ static void dma_pte_clear_one(struct dom
return;
}
- dma_clear_pte(*pte);
+ dma_clear_pte(*pte);
spin_unlock(&hd->mapping_lock);
- iommu_flush_cache_entry(pte);
+ iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
/* No need pcidevs_lock here since do that on assign/deassign device*/
for_each_drhd_unit ( drhd )
@@ -562,7 +562,7 @@ static void iommu_free_pagetable(u64 pt_
iommu_free_pagetable(dma_pte_addr(*pte), next_level);
dma_clear_pte(*pte);
- iommu_flush_cache_entry(pte);
+ iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
}
unmap_vtd_domain_page(pt_vaddr);
@@ -1083,7 +1083,7 @@ static int domain_context_mapping_one(
context_set_address_width(*context, agaw);
context_set_fault_enable(*context);
context_set_present(*context);
- iommu_flush_cache_entry(context);
+ iommu_flush_cache_entry(context, sizeof(struct context_entry));
spin_unlock(&iommu->lock);
/* Context entry was previously non-present (with domid 0). */
@@ -1307,7 +1307,7 @@ static int domain_context_unmap_one(
context_clear_present(*context);
context_clear_entry(*context);
- iommu_flush_cache_entry(context);
+ iommu_flush_cache_entry(context, sizeof(struct context_entry));
if ( iommu_flush_context_device(iommu, domain_iommu_domid(domain),
(((u16)bus) << 8) | devfn,
@@ -1479,7 +1479,7 @@ int intel_iommu_map_page(
if ( iommu_snoop )
dma_set_pte_snp(*pte);
- iommu_flush_cache_entry(pte);
+ iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
spin_unlock(&hd->mapping_lock);
unmap_vtd_domain_page(page);
diff -r 97ab480f7e80 -r 7e64a8e666b8 xen/drivers/passthrough/vtd/vtd.h
--- a/xen/drivers/passthrough/vtd/vtd.h Tue Dec 01 14:25:20 2009 +0000
+++ b/xen/drivers/passthrough/vtd/vtd.h Wed Dec 02 14:06:04 2009 +0000
@@ -106,7 +106,7 @@ void *map_vtd_domain_page(u64 maddr);
void *map_vtd_domain_page(u64 maddr);
void unmap_vtd_domain_page(void *va);
-void iommu_flush_cache_entry(void *addr);
+void iommu_flush_cache_entry(void *addr, unsigned int size);
void iommu_flush_cache_page(void *addr, unsigned long npages);
#endif // _VTD_H_
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|