[Xen-devel] [PATCH v5 5/7] VT-d: Refactor iommu_ops .map_page() and unmap_page()

Refactor the iommu_ops .map_page() and .unmap_page() hooks to pass down a
flag indicating whether the pcidevs lock is being held, and adjust the
callers up the call trees accordingly.
Signed-off-by: Quan Xu <quan.xu@xxxxxxxxx>
---
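Note (illustration only, not part of the patch): below is a minimal,
self-contained C sketch of the locking convention the new 'lock' parameter
appears to encode -- callers that already hold the pcidevs lock pass
PCIDEVS_LOCK, everyone else passes NONE_LOCK, so the callee knows whether
it may take the lock itself. PCIDEVS_LOCK and NONE_LOCK mirror the names
used in the diff; their values, the mock lock state and the helpers
(mock_lock(), example_map_page(), ...) are hypothetical stand-ins, not
Xen code.

    /* Sketch of the lock-flag convention; all names here are mock-ups. */
    #include <assert.h>
    #include <stdio.h>

    #define NONE_LOCK    0u   /* caller does not hold the pcidevs lock */
    #define PCIDEVS_LOCK 1u   /* caller already holds the pcidevs lock */

    static int pcidevs_lock_held;          /* mock lock state */

    static void mock_lock(void)   { pcidevs_lock_held = 1; }
    static void mock_unlock(void) { pcidevs_lock_held = 0; }

    /* Modelled after the reworked map hook: honour the caller's lock state. */
    static int example_map_page(unsigned long gfn, unsigned long mfn,
                                unsigned int lock)
    {
        int took_lock = 0;

        if ( !(lock & PCIDEVS_LOCK) )
        {
            /* Caller does not hold the lock: take (and later drop) it here. */
            mock_lock();
            took_lock = 1;
        }
        else
            /* Caller promised the lock is held; do not take it again. */
            assert(pcidevs_lock_held);

        printf("map gfn %#lx -> mfn %#lx under lock\n", gfn, mfn);

        if ( took_lock )
            mock_unlock();

        return 0;
    }

    int main(void)
    {
        example_map_page(0x1000, 0x2000, NONE_LOCK);     /* callee locks */

        mock_lock();
        example_map_page(0x3000, 0x4000, PCIDEVS_LOCK);  /* caller already locked */
        mock_unlock();

        return 0;
    }

The point of threading the flag through every caller in the diff below is
presumably that a non-recursive lock must not be taken twice: a callee can
only decide whether to acquire it if it knows what its caller already did.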
xen/arch/x86/mm.c | 9 ++++++---
xen/arch/x86/mm/p2m-ept.c | 7 ++++---
xen/arch/x86/mm/p2m-pt.c | 7 ++++---
xen/arch/x86/mm/p2m.c | 24 +++++++++++++++---------
xen/arch/x86/x86_64/mm.c | 5 +++--
xen/common/grant_table.c | 11 +++++++----
xen/drivers/passthrough/amd/iommu_map.c | 7 ++++---
xen/drivers/passthrough/amd/pci_amd_iommu.c | 3 ++-
xen/drivers/passthrough/arm/smmu.c | 2 +-
xen/drivers/passthrough/iommu.c | 11 ++++++-----
xen/drivers/passthrough/vtd/iommu.c | 10 ++++++----
xen/drivers/passthrough/vtd/x86/vtd.c | 5 +++--
xen/drivers/passthrough/x86/iommu.c | 3 ++-
xen/include/asm-x86/hvm/svm/amd-iommu-proto.h | 4 ++--
xen/include/asm-x86/p2m.h | 6 ++++--
xen/include/xen/iommu.h | 8 ++++----
16 files changed, 73 insertions(+), 49 deletions(-)
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 1e50b94..f9030e5 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2443,14 +2443,17 @@ static int __get_page_type(struct page_info *page, unsigned long type,
if ( d && is_pv_domain(d) && unlikely(need_iommu(d)) )
{
if ( (x & PGT_type_mask) == PGT_writable_page )
- iommu_unmap_page(d, mfn_to_gmfn(d, page_to_mfn(page)));
+ iommu_unmap_page(d, mfn_to_gmfn(d, page_to_mfn(page)),
+ NONE_LOCK);
else if ( type == PGT_writable_page )
{
rc = iommu_map_page(d, mfn_to_gmfn(d, page_to_mfn(page)),
page_to_mfn(page),
- IOMMUF_readable|IOMMUF_writable);
+ IOMMUF_readable|IOMMUF_writable,
+ NONE_LOCK);
if ( rc )
- iommu_unmap_page(d, mfn_to_gmfn(d, page_to_mfn(page)));
+ iommu_unmap_page(d, mfn_to_gmfn(d, page_to_mfn(page)),
+ NONE_LOCK);
}
}
}
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 9e1f5c6..ecf7e67 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -835,16 +835,17 @@ out:
if ( iommu_flags )
for ( i = 0; i < (1 << order); i++ )
{
- rc = iommu_map_page(d, gfn + i, mfn_x(mfn) + i, iommu_flags);
+ rc = iommu_map_page(d, gfn + i, mfn_x(mfn) + i,
+ iommu_flags, NONE_LOCK);
if ( rc )
{
while ( i-- > 0 )
- iommu_unmap_page(d, gfn + i);
+ iommu_unmap_page(d, gfn + i, NONE_LOCK);
}
}
else
for ( i = 0; i < (1 << order); i++ )
- iommu_unmap_page(d, gfn + i);
+ iommu_unmap_page(d, gfn + i, NONE_LOCK);
}
}
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 942a11c..e73c0e8 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -677,16 +677,17 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
for ( i = 0; i < (1UL << page_order); i++ )
{
rc = iommu_map_page(p2m->domain, gfn + i, mfn_x(mfn) + i,
- iommu_pte_flags);
+ iommu_pte_flags, NONE_LOCK);
if ( rc )
{
while ( i-- > 0 )
- iommu_unmap_page(p2m->domain, gfn + i);
+ iommu_unmap_page(p2m->domain, gfn + i,
+ NONE_LOCK);
}
}
else
for ( i = 0; i < (1UL << page_order); i++ )
- iommu_unmap_page(p2m->domain, gfn + i);
+ iommu_unmap_page(p2m->domain, gfn + i, NONE_LOCK);
}
/*
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index c6b883d..76748d4 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -610,7 +610,7 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn, unsigned long mfn,
{
if ( need_iommu(p2m->domain) )
for ( i = 0; i < (1 << page_order); i++ )
- iommu_unmap_page(p2m->domain, mfn + i);
+ iommu_unmap_page(p2m->domain, mfn + i, NONE_LOCK);
return 0;
}
@@ -662,12 +662,13 @@ guest_physmap_add_entry(struct domain *d, unsigned long gfn,
{
for ( i = 0; i < (1 << page_order); i++ )
{
- rc = iommu_map_page(
- d, mfn + i, mfn + i, IOMMUF_readable|IOMMUF_writable);
+ rc = iommu_map_page(d, mfn + i, mfn + i,
+ IOMMUF_readable|IOMMUF_writable,
+ NONE_LOCK);
if ( rc != 0 )
{
while ( i-- > 0 )
- iommu_unmap_page(d, mfn + i);
+ iommu_unmap_page(d, mfn + i, NONE_LOCK);
return rc;
}
}
@@ -948,7 +949,8 @@ int set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
}
int set_identity_p2m_entry(struct domain *d, unsigned long gfn,
- p2m_access_t p2ma, unsigned int flag)
+ p2m_access_t p2ma, unsigned int flag,
+ unsigned int lock)
{
p2m_type_t p2mt;
p2m_access_t a;
@@ -960,7 +962,8 @@ int set_identity_p2m_entry(struct domain *d, unsigned long gfn,
{
if ( !need_iommu(d) )
return 0;
- return iommu_map_page(d, gfn, gfn, IOMMUF_readable|IOMMUF_writable);
+ return iommu_map_page(d, gfn, gfn, IOMMUF_readable|IOMMUF_writable,
+ lock);
}
gfn_lock(p2m, gfn, 0);
@@ -979,7 +982,9 @@ int set_identity_p2m_entry(struct domain *d, unsigned long gfn,
* RMRRs are correctly mapped with IOMMU.
*/
if ( is_hardware_domain(d) && !iommu_use_hap_pt(d) )
- ret = iommu_map_page(d, gfn, gfn, IOMMUF_readable|IOMMUF_writable);
+ ret = iommu_map_page(d, gfn, gfn,
+ IOMMUF_readable|IOMMUF_writable,
+ lock);
}
else
{
@@ -1032,7 +1037,8 @@ int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
return rc;
}
-int clear_identity_p2m_entry(struct domain *d, unsigned long gfn)
+int clear_identity_p2m_entry(struct domain *d, unsigned long gfn,
+ unsigned int lock)
{
p2m_type_t p2mt;
p2m_access_t a;
@@ -1044,7 +1050,7 @@ int clear_identity_p2m_entry(struct domain *d, unsigned long gfn)
{
if ( !need_iommu(d) )
return 0;
- return iommu_unmap_page(d, gfn);
+ return iommu_unmap_page(d, gfn, lock);
}
gfn_lock(p2m, gfn, 0);
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index d918002..ebd6fad 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -1433,12 +1433,13 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
if ( iommu_enabled && !iommu_passthrough && !need_iommu(hardware_domain) )
{
for ( i = spfn; i < epfn; i++ )
- if ( iommu_map_page(hardware_domain, i, i, IOMMUF_readable|IOMMUF_writable) )
+ if ( iommu_map_page(hardware_domain, i, i,
+ IOMMUF_readable|IOMMUF_writable, NONE_LOCK) )
break;
if ( i != epfn )
{
while (i-- > old_max)
- iommu_unmap_page(hardware_domain, i);
+ iommu_unmap_page(hardware_domain, i, NONE_LOCK);
goto destroy_m2p;
}
}
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index 1b9bd05..8bc233e 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -953,12 +953,14 @@ __gnttab_map_grant_ref(
{
if ( !(kind & MAPKIND_WRITE) )
err = iommu_map_page(ld, frame, frame,
- IOMMUF_readable|IOMMUF_writable);
+ IOMMUF_readable|IOMMUF_writable,
+ NONE_LOCK);
}
else if ( act_pin && !old_pin )
{
if ( !kind )
- err = iommu_map_page(ld, frame, frame, IOMMUF_readable);
+ err = iommu_map_page(ld, frame, frame,
+ IOMMUF_readable, NONE_LOCK);
}
if ( err )
{
@@ -1178,9 +1180,10 @@ __gnttab_unmap_common(
kind = mapkind(lgt, rd, op->frame);
if ( !kind )
- err = iommu_unmap_page(ld, op->frame);
+ err = iommu_unmap_page(ld, op->frame, NONE_LOCK);
else if ( !(kind & MAPKIND_WRITE) )
- err = iommu_map_page(ld, op->frame, op->frame, IOMMUF_readable);
+ err = iommu_map_page(ld, op->frame, op->frame,
+ IOMMUF_readable, NONE_LOCK);
double_gt_unlock(lgt, rgt);
diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
index 78862c9..523feec 100644
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -634,7 +634,7 @@ static int update_paging_mode(struct domain *d, unsigned long gfn)
}
int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
- unsigned int flags)
+ unsigned int flags, unsigned int lock)
{
bool_t need_flush = 0;
struct hvm_iommu *hd = domain_hvm_iommu(d);
@@ -714,7 +714,7 @@ out:
return 0;
}
-int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
+int amd_iommu_unmap_page(struct domain *d, unsigned long gfn, unsigned int lock)
{
unsigned long pt_mfn[7];
struct hvm_iommu *hd = domain_hvm_iommu(d);
@@ -777,7 +777,8 @@ int amd_iommu_reserve_domain_unity_map(struct domain *domain,
gfn = phys_addr >> PAGE_SHIFT;
for ( i = 0; i < npages; i++ )
{
- rt = amd_iommu_map_page(domain, gfn +i, gfn +i, flags);
+ rt = amd_iommu_map_page(domain, gfn +i, gfn +i, flags,
+ PCIDEVS_LOCK);
if ( rt != 0 )
return rt;
}
diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index 449de13..1655dd9 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -298,7 +298,8 @@ static int __hwdom_init amd_iommu_hwdom_init(struct domain *d)
*/
if ( mfn_valid(pfn) )
amd_iommu_map_page(d, pfn, pfn,
- IOMMUF_readable|IOMMUF_writable);
+ IOMMUF_readable|IOMMUF_writable,
+ PCIDEVS_LOCK);
if ( !(i & 0xfffff) )
process_pending_softirqs();
diff --git a/xen/drivers/passthrough/arm/smmu.c b/xen/drivers/passthrough/arm/smmu.c
index 155b7f3..f89ee1b 100644
--- a/xen/drivers/passthrough/arm/smmu.c
+++ b/xen/drivers/passthrough/arm/smmu.c
@@ -2781,7 +2781,7 @@ static int arm_smmu_map_page(struct domain *d, unsigned long gfn,
return guest_physmap_add_entry(d, gfn, mfn, 0, t);
}
-static int arm_smmu_unmap_page(struct domain *d, unsigned long gfn)
+static int arm_smmu_unmap_page(struct domain *d, unsigned long gfn, unsigned int lock)
{
/*
* This function should only be used by gnttab code when the domain
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index ebd6d47..8b5c0a3 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -172,7 +172,8 @@ int __hwdom_init iommu_hwdom_init(struct domain *d)
((page->u.inuse.type_info & PGT_type_mask)
== PGT_writable_page) )
mapping |= IOMMUF_writable;
- rc = hd->platform_ops->map_page(d, gfn, mfn, mapping);
+ rc = hd->platform_ops->map_page(d, gfn, mfn, mapping,
+ PCIDEVS_LOCK);
if ( rc )
return rc;
@@ -233,24 +234,24 @@ void iommu_domain_destroy(struct domain *d)
}
int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
- unsigned int flags)
+ unsigned int flags, unsigned int lock)
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
if ( !iommu_enabled || !hd->platform_ops )
return 0;
- return hd->platform_ops->map_page(d, gfn, mfn, flags);
+ return hd->platform_ops->map_page(d, gfn, mfn, flags, lock);
}
-int iommu_unmap_page(struct domain *d, unsigned long gfn)
+int iommu_unmap_page(struct domain *d, unsigned long gfn, unsigned int lock)
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
if ( !iommu_enabled || !hd->platform_ops )
return 0;
- return hd->platform_ops->unmap_page(d, gfn);
+ return hd->platform_ops->unmap_page(d, gfn, lock);
}
static void iommu_free_pagetables(unsigned long unused)
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index e8cbfdb..6696b16 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -1711,7 +1711,7 @@ static void iommu_domain_teardown(struct domain *d)
static int intel_iommu_map_page(
struct domain *d, unsigned long gfn, unsigned long mfn,
- unsigned int flags)
+ unsigned int flags, unsigned int lock)
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
struct dma_pte *page = NULL, *pte = NULL, old, new = { 0 };
@@ -1765,7 +1765,8 @@ static int intel_iommu_map_page(
return 0;
}
-static int intel_iommu_unmap_page(struct domain *d, unsigned long gfn)
+static int intel_iommu_unmap_page(struct domain *d, unsigned long gfn,
+ unsigned int lock)
{
/* Do nothing if hardware domain and iommu supports pass thru. */
if ( iommu_passthrough && is_hardware_domain(d) )
@@ -1865,7 +1866,7 @@ static int rmrr_identity_mapping(struct domain *d, bool_t map,
while ( base_pfn < end_pfn )
{
- if ( clear_identity_p2m_entry(d, base_pfn) )
+ if ( clear_identity_p2m_entry(d, base_pfn, PCIDEVS_LOCK) )
ret = -ENXIO;
base_pfn++;
}
@@ -1881,7 +1882,8 @@ static int rmrr_identity_mapping(struct domain *d, bool_t map,
while ( base_pfn < end_pfn )
{
- int err = set_identity_p2m_entry(d, base_pfn, p2m_access_rw, flag);
+ int err = set_identity_p2m_entry(d, base_pfn, p2m_access_rw, flag,
+ PCIDEVS_LOCK);
if ( err )
return err;
diff --git a/xen/drivers/passthrough/vtd/x86/vtd.c b/xen/drivers/passthrough/vtd/x86/vtd.c
index a19177c..0d4aea7 100644
--- a/xen/drivers/passthrough/vtd/x86/vtd.c
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c
@@ -143,11 +143,12 @@ int __hwdom_init vtd_set_hwdom_mapping(struct domain *d)
for ( j = 0; j < tmp; j++ )
{
rc = iommu_map_page(d, pfn * tmp + j, pfn * tmp + j,
- IOMMUF_readable|IOMMUF_writable);
+ IOMMUF_readable|IOMMUF_writable,
+ NONE_LOCK);
if ( rc )
{
while ( j-- > 0 )
- iommu_unmap_page(d, pfn * tmp + j);
+ iommu_unmap_page(d, pfn * tmp + j, NONE_LOCK);
break;
}
}
diff --git a/xen/drivers/passthrough/x86/iommu.c b/xen/drivers/passthrough/x86/iommu.c
index 4bbf5f8..9267a54 100644
--- a/xen/drivers/passthrough/x86/iommu.c
+++ b/xen/drivers/passthrough/x86/iommu.c
@@ -67,7 +67,8 @@ int arch_iommu_populate_page_table(struct domain *d)
BUG_ON(SHARED_M2P(gfn));
rc = hd->platform_ops->map_page(d, gfn, mfn,
IOMMUF_readable |
- IOMMUF_writable);
+ IOMMUF_writable,
+ PCIDEVS_LOCK);
}
if ( rc )
{
diff --git a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
index 4691f9b..9fc678a 100644
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
@@ -52,8 +52,8 @@ int amd_iommu_update_ivrs_mapping_acpi(void);
/* mapping functions */
int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
- unsigned int flags);
-int amd_iommu_unmap_page(struct domain *d, unsigned long gfn);
+ unsigned int flags, unsigned int lock);
+int amd_iommu_unmap_page(struct domain *d, unsigned long gfn, unsigned int lock);
u64 amd_iommu_get_next_table_from_pte(u32 *entry);
int amd_iommu_reserve_domain_unity_map(struct domain *domain,
u64 phys_addr, unsigned long size,
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 5e99ac6..3c12c95 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -562,8 +562,10 @@ int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
/* Set identity addresses in the p2m table (for pass-through) */
int set_identity_p2m_entry(struct domain *d, unsigned long gfn,
- p2m_access_t p2ma, unsigned int flag);
-int clear_identity_p2m_entry(struct domain *d, unsigned long gfn);
+ p2m_access_t p2ma, unsigned int flag,
+ unsigned int lock);
+int clear_identity_p2m_entry(struct domain *d, unsigned long gfn,
+ unsigned int lock);
/* Add foreign mapping to the guest's p2m table. */
int p2m_add_foreign(struct domain *tdom, unsigned long fgfn,
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index f58e9d6..27e0e23 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -75,8 +75,8 @@ void iommu_teardown(struct domain *d);
#define _IOMMUF_writable 1
#define IOMMUF_writable (1u<<_IOMMUF_writable)
int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
- unsigned int flags);
-int iommu_unmap_page(struct domain *d, unsigned long gfn);
+ unsigned int flags, unsigned int lock);
+int iommu_unmap_page(struct domain *d, unsigned long gfn, unsigned int lock);
enum iommu_feature
{
@@ -156,8 +156,8 @@ struct iommu_ops {
void (*teardown)(struct domain *d);
int (*map_page)(struct domain *d, unsigned long gfn, unsigned long mfn,
- unsigned int flags);
- int (*unmap_page)(struct domain *d, unsigned long gfn);
+ unsigned int flags, unsigned int lock);
+ int (*unmap_page)(struct domain *d, unsigned long gfn, unsigned int lock);
void (*free_page_table)(struct page_info *);
#ifdef CONFIG_X86
void (*update_ire_from_apic)(unsigned int apic, unsigned int reg, unsigned int value);
--
1.9.1