[Xen-devel] [PATCH v5 4/7] VT-d: Refactor iommu_ops .iotlb_flush() and .iotlb_flush_all()

Pass a flag down to the .iotlb_flush() and .iotlb_flush_all() callbacks
indicating whether the pcidevs_lock is being held by the caller.

Signed-off-by: Quan Xu <quan.xu@xxxxxxxxx>
---
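[Note, not part of the commit message: a minimal, illustrative sketch of how a
caller is meant to choose the new 'lock' argument. The helper name and its
'locked' parameter are hypothetical and not part of this patch; only the
NONE_LOCK/PCIDEVS_LOCK constants and iommu_iotlb_flush() come from the patch.]

    /*
     * Illustrative only (not part of this patch): a hypothetical helper
     * showing how a caller reports its locking state via the new argument.
     */
    static int example_domain_flush(struct domain *d, unsigned long gfn,
                                    unsigned int page_count, bool_t locked)
    {
        /*
         * 'locked' reflects whether the caller already holds pcidevs_lock;
         * the flag only passes that fact down, it does not take the lock.
         */
        return iommu_iotlb_flush(d, gfn, page_count,
                                 locked ? PCIDEVS_LOCK : NONE_LOCK);
    }

In this patch the only call site that passes PCIDEVS_LOCK is
arch_iommu_populate_page_table(), which runs with pcidevs_lock held; all other
call sites pass NONE_LOCK.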
xen/arch/arm/p2m.c | 2 +-
xen/common/memory.c | 4 ++--
xen/drivers/passthrough/iommu.c | 9 +++++----
xen/drivers/passthrough/vtd/iommu.c | 5 +++--
xen/drivers/passthrough/x86/iommu.c | 2 +-
xen/include/xen/iommu.h | 17 +++++++++++++----
6 files changed, 25 insertions(+), 14 deletions(-)
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index e396c40..6eec959 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -1100,7 +1100,7 @@ tlbflush:
if ( flush )
{
flush_tlb_domain(d);
- iommu_iotlb_flush(d, sgfn, egfn - sgfn);
+ iommu_iotlb_flush(d, sgfn, egfn - sgfn, NONE_LOCK);
}
out:
diff --git a/xen/common/memory.c b/xen/common/memory.c
index c228d9f..e68c3dd 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -631,9 +631,9 @@ static int xenmem_add_to_physmap(struct domain *d,
if ( need_iommu(d) )
{
this_cpu(iommu_dont_flush_iotlb) = 0;
- rc = iommu_iotlb_flush(d, xatp->idx - done, done);
+ rc = iommu_iotlb_flush(d, xatp->idx - done, done, NONE_LOCK);
if ( !rc )
- rc = iommu_iotlb_flush(d, xatp->gpfn - done, done);
+ rc = iommu_iotlb_flush(d, xatp->gpfn - done, done, NONE_LOCK);
}
#endif
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index cdf8e9a..ebd6d47 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -270,24 +270,25 @@ static void iommu_free_pagetables(unsigned long unused)
cpumask_cycle(smp_processor_id(),
&cpu_online_map));
}
-int iommu_iotlb_flush(struct domain *d, unsigned long gfn, unsigned int page_count)
+int iommu_iotlb_flush(struct domain *d, unsigned long gfn,
+ unsigned int page_count, unsigned int lock)
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
if ( !iommu_enabled || !hd->platform_ops || !hd->platform_ops->iotlb_flush )
return 0;
- return hd->platform_ops->iotlb_flush(d, gfn, page_count);
+ return hd->platform_ops->iotlb_flush(d, gfn, page_count, lock);
}
-int iommu_iotlb_flush_all(struct domain *d)
+int iommu_iotlb_flush_all(struct domain *d, unsigned int lock)
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
if ( !iommu_enabled || !hd->platform_ops ||
!hd->platform_ops->iotlb_flush_all )
return 0;
- return hd->platform_ops->iotlb_flush_all(d);
+ return hd->platform_ops->iotlb_flush_all(d, lock);
}
int __init iommu_setup(void)
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index a780632..e8cbfdb 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -601,12 +601,13 @@ static int __intel_iommu_iotlb_flush(struct domain *d, unsigned long gfn,
return rc;
}
-static int intel_iommu_iotlb_flush(struct domain *d, unsigned long gfn, unsigned int page_count)
+static int intel_iommu_iotlb_flush(struct domain *d, unsigned long gfn,
+ unsigned int page_count, unsigned int lock)
{
return __intel_iommu_iotlb_flush(d, gfn, 1, page_count);
}
-static int intel_iommu_iotlb_flush_all(struct domain *d)
+static int intel_iommu_iotlb_flush_all(struct domain *d, unsigned int lock)
{
return __intel_iommu_iotlb_flush(d, 0, 0, 0);
}
diff --git a/xen/drivers/passthrough/x86/iommu.c b/xen/drivers/passthrough/x86/iommu.c
index 6674fb0..4bbf5f8 100644
--- a/xen/drivers/passthrough/x86/iommu.c
+++ b/xen/drivers/passthrough/x86/iommu.c
@@ -105,7 +105,7 @@ int arch_iommu_populate_page_table(struct domain *d)
if ( !rc )
{
- rc = iommu_iotlb_flush_all(d);
+ rc = iommu_iotlb_flush_all(d, PCIDEVS_LOCK);
if ( rc )
return rc;
}
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index f5b6f7e..f58e9d6 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -131,6 +131,13 @@ struct page_info;
* callback pair.
*/
typedef int iommu_grdm_t(xen_pfn_t start, xen_ulong_t nr, u32 id, void *ctxt);
+/*
+ * A flag indicates whether the lock is being held.
+ * NONE_LOCK - no lock is being held.
+ * PCIDEVS_LOCK - pcidevs_lock is being held.
+ */
+#define NONE_LOCK 0
+#define PCIDEVS_LOCK 1
struct iommu_ops {
int (*init)(struct domain *d);
@@ -161,8 +168,9 @@ struct iommu_ops {
void (*resume)(void);
void (*share_p2m)(struct domain *d);
int (*crash_shutdown)(void);
-    int (*iotlb_flush)(struct domain *d, unsigned long gfn, unsigned int page_count);
- int (*iotlb_flush_all)(struct domain *d);
+    int (*iotlb_flush)(struct domain *d, unsigned long gfn, unsigned int page_count,
+ unsigned int lock);
+ int (*iotlb_flush_all)(struct domain *d, unsigned int lock);
int (*get_reserved_device_memory)(iommu_grdm_t *, void *);
void (*dump_p2m_table)(struct domain *d);
};
@@ -182,8 +190,9 @@ int iommu_do_pci_domctl(struct xen_domctl *, struct domain *d,
int iommu_do_domctl(struct xen_domctl *, struct domain *d,
XEN_GUEST_HANDLE_PARAM(xen_domctl_t));
-int iommu_iotlb_flush(struct domain *d, unsigned long gfn, unsigned int page_count);
-int iommu_iotlb_flush_all(struct domain *d);
+int iommu_iotlb_flush(struct domain *d, unsigned long gfn, unsigned int page_count,
+ unsigned int lock);
+int iommu_iotlb_flush_all(struct domain *d, unsigned int lock);
/*
* The purpose of the iommu_dont_flush_iotlb optional cpu flag is to
--
1.9.1
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel