
RE: [PATCH v4 21/21] VT-d: fold dma_pte_clear_one() into its only caller


  • To: "Beulich, Jan" <JBeulich@xxxxxxxx>, "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: "Tian, Kevin" <kevin.tian@xxxxxxxxx>
  • Date: Wed, 27 Apr 2022 04:13:11 +0000
  • Accept-language: en-US
  • Cc: "Cooper, Andrew" <andrew.cooper3@xxxxxxxxxx>, Paul Durrant <paul@xxxxxxx>, Pau Monné, Roger <roger.pau@xxxxxxxxxx>
  • Delivery-date: Wed, 27 Apr 2022 04:13:26 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
  • Thread-index: AQHYWIDPpY/WdqPWHkWCZK6rwaQRsK0DKRsg
  • Thread-topic: [PATCH v4 21/21] VT-d: fold dma_pte_clear_one() into its only caller

> From: Jan Beulich <jbeulich@xxxxxxxx>
> Sent: Monday, April 25, 2022 4:45 PM
> 
> This way intel_iommu_unmap_page() ends up quite a bit more similar to
> intel_iommu_map_page().
> 
> No functional change intended.
> 
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

Reviewed-by: Kevin Tian <kevin.tian@xxxxxxxxx>
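
One side note on the error handling that stays identical across the move:
both the old and the new code treat an addr_to_dma_page_maddr() result below
PAGE_SIZE as "no table to walk", returning 0 when the value is zero and
-ENOMEM otherwise.  A minimal standalone sketch of that caller-side
convention (not Xen code; interpret() and the sample values are made up for
illustration only):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    /* Addresses below PAGE_SIZE can never be real page-table addresses,
     * so small values double as status codes, as in the quoted hunks. */
    static int interpret(uint64_t pg_maddr)
    {
        if ( pg_maddr < PAGE_SIZE )
            return pg_maddr ? -ENOMEM : 0;  /* nothing mapped, or allocation failed */
        return 1;                           /* a real table: go clear the PTE */
    }

    int main(void)
    {
        printf("%d %d %d\n", interpret(0), interpret(1),
               interpret(UINT64_C(0x12345000)));
        return 0;
    }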

> ---
> v4: New.
> 
> --- a/xen/drivers/passthrough/vtd/iommu.c
> +++ b/xen/drivers/passthrough/vtd/iommu.c
> @@ -806,75 +806,6 @@ static void queue_free_pt(struct domain_
>      iommu_queue_free_pgtable(hd, mfn_to_page(mfn));
>  }
> 
> -/* clear one page's page table */
> -static int dma_pte_clear_one(struct domain *domain, daddr_t addr,
> -                             unsigned int order,
> -                             unsigned int *flush_flags)
> -{
> -    struct domain_iommu *hd = dom_iommu(domain);
> -    struct dma_pte *page = NULL, *pte = NULL, old;
> -    u64 pg_maddr;
> -    unsigned int level = (order / LEVEL_STRIDE) + 1;
> -
> -    spin_lock(&hd->arch.mapping_lock);
> -    /* get target level pte */
> -    pg_maddr = addr_to_dma_page_maddr(domain, addr, level, flush_flags, false);
> -    if ( pg_maddr < PAGE_SIZE )
> -    {
> -        spin_unlock(&hd->arch.mapping_lock);
> -        return pg_maddr ? -ENOMEM : 0;
> -    }
> -
> -    page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
> -    pte = &page[address_level_offset(addr, level)];
> -
> -    if ( !dma_pte_present(*pte) )
> -    {
> -        spin_unlock(&hd->arch.mapping_lock);
> -        unmap_vtd_domain_page(page);
> -        return 0;
> -    }
> -
> -    old = *pte;
> -    dma_clear_pte(*pte);
> -    iommu_sync_cache(pte, sizeof(*pte));
> -
> -    while ( pt_update_contig_markers(&page->val,
> -                                     address_level_offset(addr, level),
> -                                     level, PTE_kind_null) &&
> -            ++level < min_pt_levels )
> -    {
> -        struct page_info *pg = maddr_to_page(pg_maddr);
> -
> -        unmap_vtd_domain_page(page);
> -
> -        pg_maddr = addr_to_dma_page_maddr(domain, addr, level, flush_flags,
> -                                          false);
> -        BUG_ON(pg_maddr < PAGE_SIZE);
> -
> -        page = map_vtd_domain_page(pg_maddr);
> -        pte = &page[address_level_offset(addr, level)];
> -        dma_clear_pte(*pte);
> -        iommu_sync_cache(pte, sizeof(*pte));
> -
> -        *flush_flags |= IOMMU_FLUSHF_all;
> -        iommu_queue_free_pgtable(hd, pg);
> -        perfc_incr(iommu_pt_coalesces);
> -    }
> -
> -    spin_unlock(&hd->arch.mapping_lock);
> -
> -    unmap_vtd_domain_page(page);
> -
> -    *flush_flags |= IOMMU_FLUSHF_modified;
> -
> -    if ( order && !dma_pte_superpage(old) )
> -        queue_free_pt(hd, maddr_to_mfn(dma_pte_addr(old)),
> -                      order / LEVEL_STRIDE);
> -
> -    return 0;
> -}
> -
>  static int iommu_set_root_entry(struct vtd_iommu *iommu)
>  {
>      u32 sts;
> @@ -2261,6 +2192,12 @@ static int __must_check cf_check intel_i
>  static int __must_check cf_check intel_iommu_unmap_page(
>      struct domain *d, dfn_t dfn, unsigned int order, unsigned int *flush_flags)
>  {
> +    struct domain_iommu *hd = dom_iommu(d);
> +    daddr_t addr = dfn_to_daddr(dfn);
> +    struct dma_pte *page = NULL, *pte = NULL, old;
> +    uint64_t pg_maddr;
> +    unsigned int level = (order / LEVEL_STRIDE) + 1;
> +
>      /* Do nothing if VT-d shares EPT page table */
>      if ( iommu_use_hap_pt(d) )
>          return 0;
> @@ -2269,7 +2206,62 @@ static int __must_check cf_check intel_i
>      if ( iommu_hwdom_passthrough && is_hardware_domain(d) )
>          return 0;
> 
> -    return dma_pte_clear_one(d, dfn_to_daddr(dfn), order, flush_flags);
> +    spin_lock(&hd->arch.mapping_lock);
> +    /* get target level pte */
> +    pg_maddr = addr_to_dma_page_maddr(d, addr, level, flush_flags, false);
> +    if ( pg_maddr < PAGE_SIZE )
> +    {
> +        spin_unlock(&hd->arch.mapping_lock);
> +        return pg_maddr ? -ENOMEM : 0;
> +    }
> +
> +    page = map_vtd_domain_page(pg_maddr);
> +    pte = &page[address_level_offset(addr, level)];
> +
> +    if ( !dma_pte_present(*pte) )
> +    {
> +        spin_unlock(&hd->arch.mapping_lock);
> +        unmap_vtd_domain_page(page);
> +        return 0;
> +    }
> +
> +    old = *pte;
> +    dma_clear_pte(*pte);
> +    iommu_sync_cache(pte, sizeof(*pte));
> +
> +    while ( pt_update_contig_markers(&page->val,
> +                                     address_level_offset(addr, level),
> +                                     level, PTE_kind_null) &&
> +            ++level < min_pt_levels )
> +    {
> +        struct page_info *pg = maddr_to_page(pg_maddr);
> +
> +        unmap_vtd_domain_page(page);
> +
> +        pg_maddr = addr_to_dma_page_maddr(d, addr, level, flush_flags, false);
> +        BUG_ON(pg_maddr < PAGE_SIZE);
> +
> +        page = map_vtd_domain_page(pg_maddr);
> +        pte = &page[address_level_offset(addr, level)];
> +        dma_clear_pte(*pte);
> +        iommu_sync_cache(pte, sizeof(*pte));
> +
> +        *flush_flags |= IOMMU_FLUSHF_all;
> +        iommu_queue_free_pgtable(hd, pg);
> +        perfc_incr(iommu_pt_coalesces);
> +    }
> +
> +    spin_unlock(&hd->arch.mapping_lock);
> +
> +    unmap_vtd_domain_page(page);
> +
> +    *flush_flags |= IOMMU_FLUSHF_modified;
> +
> +    if ( order && !dma_pte_superpage(old) )
> +        queue_free_pt(hd, maddr_to_mfn(dma_pte_addr(old)),
> +                      order / LEVEL_STRIDE);
> +
> +    return 0;
>  }
> 
>  static int cf_check intel_iommu_lookup_page(
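
For readers unfamiliar with the coalescing logic being moved: the while loop
carried over into intel_iommu_unmap_page() is a bottom-up coalescing walk.
Once the just-cleared PTE leaves its table with no present entries, that
table is freed and the entry referencing it one level up is cleared as well,
repeating until a still-populated table or the top of the hierarchy
(min_pt_levels) is reached; each freed level also widens the flush to
IOMMU_FLUSHF_all.  A rough standalone model of the walk follows.  It is not
Xen code, every name below is made up, and pt_is_empty() merely stands in
for the incremental bookkeeping that pt_update_contig_markers() does in the
real code:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define ENTRIES 512u                /* PTEs per VT-d page table */

    struct pt {
        uint64_t entry[ENTRIES];
        struct pt *parent;              /* table whose entry maps this one; NULL at top */
        unsigned int parent_slot;       /* index of that entry in the parent */
    };

    static bool pt_is_empty(const struct pt *t)
    {
        for ( unsigned int i = 0; i < ENTRIES; i++ )
            if ( t->entry[i] )
                return false;
        return true;
    }

    /* Clear one entry, then free and unlink every table that becomes empty,
     * walking towards the top of the hierarchy. */
    static void clear_and_coalesce(struct pt *t, unsigned int slot,
                                   unsigned int max_level)
    {
        unsigned int level = 1;

        t->entry[slot] = 0;                     /* dma_clear_pte() analogue */

        while ( pt_is_empty(t) && t->parent && ++level < max_level )
        {
            struct pt *freed = t;

            slot = t->parent_slot;
            t = t->parent;
            t->entry[slot] = 0;                 /* drop the reference above */
            free(freed);                        /* iommu_queue_free_pgtable() analogue */
        }
    }

    int main(void)
    {
        struct pt *top = calloc(1, sizeof(*top));
        struct pt *leaf = calloc(1, sizeof(*leaf));

        if ( !top || !leaf )
            return 1;

        /* A leaf table hanging off slot 7 of the top table, holding one PTE. */
        leaf->parent = top;
        leaf->parent_slot = 7;
        top->entry[7] = 1;
        leaf->entry[42] = 1;

        clear_and_coalesce(leaf, 42, 4);        /* frees leaf, clears top->entry[7] */

        free(top);
        return 0;
    }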