| 
    
 [Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] RE: [PATCH v4 20/21] VT-d: fold iommu_flush_iotlb{,_pages}()
 > From: Jan Beulich <jbeulich@xxxxxxxx>
> Sent: Monday, April 25, 2022 4:45 PM
> 
> With iommu_flush_iotlb_all() gone, iommu_flush_iotlb_pages() is merely a
> wrapper around the not otherwise called iommu_flush_iotlb(). Fold both
> functions.
> 
> No functional change intended.
> 
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: Kevin Tian <kevin.tian@xxxxxxxxx>
> ---
> v4: New.
> 
> --- a/xen/drivers/passthrough/vtd/iommu.c
> +++ b/xen/drivers/passthrough/vtd/iommu.c
> @@ -728,9 +728,9 @@ static int __must_check iommu_flush_all(
>      return rc;
>  }
> 
> -static int __must_check iommu_flush_iotlb(struct domain *d, dfn_t dfn,
> -                                          bool_t dma_old_pte_present,
> -                                          unsigned long page_count)
> +static int __must_check cf_check iommu_flush_iotlb(struct domain *d, dfn_t dfn,
> +                                                   unsigned long page_count,
> +                                                   unsigned int flush_flags)
>  {
>      struct domain_iommu *hd = dom_iommu(d);
>      struct acpi_drhd_unit *drhd;
> @@ -739,6 +739,17 @@ static int __must_check iommu_flush_iotl
>      int iommu_domid;
>      int ret = 0;
> 
> +    if ( flush_flags & IOMMU_FLUSHF_all )
> +    {
> +        dfn = INVALID_DFN;
> +        page_count = 0;
> +    }
> +    else
> +    {
> +        ASSERT(page_count && !dfn_eq(dfn, INVALID_DFN));
> +        ASSERT(flush_flags);
> +    }
> +
>      /*
>       * No need pcideves_lock here because we have flush
>       * when assign/deassign device
> @@ -765,7 +776,7 @@ static int __must_check iommu_flush_iotl
>              rc = iommu_flush_iotlb_psi(iommu, iommu_domid,
>                                         dfn_to_daddr(dfn),
>                                         get_order_from_pages(page_count),
> -                                       !dma_old_pte_present,
> +                                       !(flush_flags & IOMMU_FLUSHF_modified),
>                                         flush_dev_iotlb);
> 
>          if ( rc > 0 )
> @@ -777,25 +788,6 @@ static int __must_check iommu_flush_iotl
>      return ret;
>  }
> 
> -static int __must_check cf_check iommu_flush_iotlb_pages(
> -    struct domain *d, dfn_t dfn, unsigned long page_count,
> -    unsigned int flush_flags)
> -{
> -    if ( flush_flags & IOMMU_FLUSHF_all )
> -    {
> -        dfn = INVALID_DFN;
> -        page_count = 0;
> -    }
> -    else
> -    {
> -        ASSERT(page_count && !dfn_eq(dfn, INVALID_DFN));
> -        ASSERT(flush_flags);
> -    }
> -
> -    return iommu_flush_iotlb(d, dfn, flush_flags & IOMMU_FLUSHF_modified,
> -                             page_count);
> -}
> -
>  static void queue_free_pt(struct domain_iommu *hd, mfn_t mfn, unsigned int level)
>  {
>      if ( level > 1 )
> @@ -3254,7 +3246,7 @@ static const struct iommu_ops __initcons
>      .suspend = vtd_suspend,
>      .resume = vtd_resume,
>      .crash_shutdown = vtd_crash_shutdown,
> -    .iotlb_flush = iommu_flush_iotlb_pages,
> +    .iotlb_flush = iommu_flush_iotlb,
>      .get_reserved_device_memory = intel_iommu_get_reserved_device_memory,
>      .dump_page_tables = vtd_dump_page_tables,
>  };
 
  | 
  
![]()  | 
            
         Lists.xenproject.org is hosted with RackSpace, monitoring our  |