[Xen-devel] [PATCH v5 04/10] IOMMU: propagate IOMMU Device-TLB flush error up to IOMMU unmapping.



Propagate the IOMMU Device-TLB flush error up to IOMMU unmapping: dma_pte_clear_one() now returns the error from the IOTLB/Device-TLB flush instead of discarding it, and the unmap_page handlers are annotated __must_check so that callers cannot silently ignore the result.

Signed-off-by: Quan Xu <quan.xu@xxxxxxxxx>
Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>

CC: Stefano Stabellini <sstabellini@xxxxxxxxxx>
CC: Julien Grall <julien.grall@xxxxxxx>
CC: Kevin Tian <kevin.tian@xxxxxxxxx>
CC: Feng Wu <feng.wu@xxxxxxxxx>
CC: Jan Beulich <jbeulich@xxxxxxxx>
CC: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 xen/drivers/passthrough/arm/smmu.c            |  2 +-
 xen/drivers/passthrough/vtd/iommu.c           | 18 ++++++++++--------
 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h |  2 +-
 xen/include/xen/iommu.h                       |  2 +-
 4 files changed, 13 insertions(+), 11 deletions(-)
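
[Note, illustrative only and not part of this patch: with the unmap_page hook now returning int and annotated __must_check, callers are expected to act on the result rather than drop it. A minimal sketch of such a caller follows, assuming the existing dom_iommu()/platform_ops plumbing already used by the VT-d code below; the wrapper name my_iommu_unmap_page() and the log message are made up for illustration.]

/*
 * Sketch only: consume the error now returned by ->unmap_page()
 * instead of discarding it.  my_iommu_unmap_page() is a hypothetical
 * wrapper name, not an existing function.
 */
static int __must_check my_iommu_unmap_page(struct domain *d,
                                            unsigned long gfn)
{
    const struct domain_iommu *hd = dom_iommu(d);
    int rc;

    if ( !iommu_enabled || !hd->platform_ops )
        return 0;

    rc = hd->platform_ops->unmap_page(d, gfn);
    if ( unlikely(rc) )
        printk(XENLOG_G_ERR
               "d%d: IOMMU unmapping gfn %#lx failed: %d\n",
               d->domain_id, gfn, rc);

    return rc;
}

[Because the hook is __must_check, the compiler warns about any caller that still ignores the return value, so remaining callers are caught at build time.]
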

diff --git a/xen/drivers/passthrough/arm/smmu.c b/xen/drivers/passthrough/arm/smmu.c
index 54a03a6..1ce4ddf 100644
--- a/xen/drivers/passthrough/arm/smmu.c
+++ b/xen/drivers/passthrough/arm/smmu.c
@@ -2774,7 +2774,7 @@ static int arm_smmu_map_page(struct domain *d, unsigned long gfn,
        return guest_physmap_add_entry(d, gfn, mfn, 0, t);
 }
 
-static int arm_smmu_unmap_page(struct domain *d, unsigned long gfn)
+static int __must_check arm_smmu_unmap_page(struct domain *d, unsigned long gfn)
 {
        /*
         * This function should only be used by gnttab code when the domain
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 3ece815..29fb7fd 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -616,11 +616,12 @@ static void intel_iommu_iotlb_flush_all(struct domain *d)
 }
 
 /* clear one page's page table */
-static void dma_pte_clear_one(struct domain *domain, u64 addr)
+static int __must_check dma_pte_clear_one(struct domain *domain, u64 addr)
 {
     struct domain_iommu *hd = dom_iommu(domain);
     struct dma_pte *page = NULL, *pte = NULL;
     u64 pg_maddr;
+    int rc = 0;
 
     spin_lock(&hd->arch.mapping_lock);
     /* get last level pte */
@@ -628,7 +629,7 @@ static void dma_pte_clear_one(struct domain *domain, u64 addr)
     if ( pg_maddr == 0 )
     {
         spin_unlock(&hd->arch.mapping_lock);
-        return;
+        return 0;
     }
 
     page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
@@ -638,7 +639,7 @@ static void dma_pte_clear_one(struct domain *domain, u64 addr)
     {
         spin_unlock(&hd->arch.mapping_lock);
         unmap_vtd_domain_page(page);
-        return;
+        return 0;
     }
 
     dma_clear_pte(*pte);
@@ -646,9 +647,11 @@ static void dma_pte_clear_one(struct domain *domain, u64 addr)
     iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
 
     if ( !this_cpu(iommu_dont_flush_iotlb) )
-        __intel_iommu_iotlb_flush(domain, addr >> PAGE_SHIFT_4K, 1, 1);
+        rc = __intel_iommu_iotlb_flush(domain, addr >> PAGE_SHIFT_4K, 1, 1);
 
     unmap_vtd_domain_page(page);
+
+    return rc;
 }
 
 static void iommu_free_pagetable(u64 pt_maddr, int level)
@@ -1775,15 +1778,14 @@ static int intel_iommu_map_page(
     return 0;
 }
 
-static int intel_iommu_unmap_page(struct domain *d, unsigned long gfn)
+static int __must_check intel_iommu_unmap_page(struct domain *d,
+                                               unsigned long gfn)
 {
     /* Do nothing if hardware domain and iommu supports pass thru. */
     if ( iommu_passthrough && is_hardware_domain(d) )
         return 0;
 
-    dma_pte_clear_one(d, (paddr_t)gfn << PAGE_SHIFT_4K);
-
-    return 0;
+    return dma_pte_clear_one(d, (paddr_t)gfn << PAGE_SHIFT_4K);
 }
 
 int iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte,
diff --git a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
index 9c51172..57b6cc1 100644
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
@@ -53,7 +53,7 @@ int amd_iommu_update_ivrs_mapping_acpi(void);
 /* mapping functions */
 int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
                        unsigned int flags);
-int amd_iommu_unmap_page(struct domain *d, unsigned long gfn);
+int __must_check amd_iommu_unmap_page(struct domain *d, unsigned long gfn);
 u64 amd_iommu_get_next_table_from_pte(u32 *entry);
 int amd_iommu_reserve_domain_unity_map(struct domain *domain,
                                        u64 phys_addr, unsigned long size,
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index 19ba976..73a7f1e 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -168,7 +168,7 @@ struct iommu_ops {
     void (*teardown)(struct domain *d);
     int (*map_page)(struct domain *d, unsigned long gfn, unsigned long mfn,
                     unsigned int flags);
-    int (*unmap_page)(struct domain *d, unsigned long gfn);
+    int __must_check (*unmap_page)(struct domain *d, unsigned long gfn);
     void (*free_page_table)(struct page_info *);
 #ifdef CONFIG_X86
     void (*update_ire_from_apic)(unsigned int apic, unsigned int reg, unsigned int value);
-- 
1.9.1

