
Re: [Xen-devel] [PATCH v7 6/6] vtd: add lookup_page method to iommu_ops



> From: Paul Durrant [mailto:paul.durrant@xxxxxxxxxx]
> Sent: Wednesday, September 12, 2018 7:30 PM
> 
> This patch adds a new method to the VT-d IOMMU implementation to find
> the
> MFN currently mapped by the specified DFN along with a wrapper function
> in generic IOMMU code to call the implementation if it exists.
> 
> This patch also cleans up the initializers in intel_iommu_map_page() and
> uses array-style dereference there, for consistency. A missing check for
> shared EPT is also added to intel_iommu_unmap_page().

Then please split this into two patches.

> 
> NOTE: This patch only adds a Xen-internal interface. This will be used by
>       a subsequent patch.
>       Another subsequent patch will add similar functionality for AMD
>       IOMMUs.
> 
> Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
> ---
> Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
> Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
> Cc: Jan Beulich <jbeulich@xxxxxxxx>
> Cc: George Dunlap <george.dunlap@xxxxxxxxxx>
> 
> v7:
>  - Re-base and re-name BFN -> DFN.
>  - Add missing checks for shared EPT and iommu_passthrough.
>  - Remove unnecessary initializers and use array-style dereference.
>  - Drop Wei's R-b because of code churn.
> 
> v3:
>  - Addressed comments from George.
> 
> v2:
>  - Addressed some comments from Jan.
> ---
>  xen/drivers/passthrough/iommu.c     | 11 ++++++++
>  xen/drivers/passthrough/vtd/iommu.c | 52 +++++++++++++++++++++++++++++++++++--
>  xen/drivers/passthrough/vtd/iommu.h |  3 +++
>  xen/include/xen/iommu.h             |  4 +++
>  4 files changed, 68 insertions(+), 2 deletions(-)
> 
> diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
> index a16f1a0c66..52e3f500c7 100644
> --- a/xen/drivers/passthrough/iommu.c
> +++ b/xen/drivers/passthrough/iommu.c
> @@ -296,6 +296,17 @@ int iommu_unmap_page(struct domain *d, dfn_t dfn)
>      return rc;
>  }
> 
> +int iommu_lookup_page(struct domain *d, dfn_t dfn, mfn_t *mfn,
> +                      unsigned int *flags)
> +{
> +    const struct domain_iommu *hd = dom_iommu(d);
> +
> +    if ( !iommu_enabled || !hd->platform_ops )
> +        return -EOPNOTSUPP;
> +
> +    return hd->platform_ops->lookup_page(d, dfn, mfn, flags);
> +}
> +
>  static void iommu_free_pagetables(unsigned long unused)
>  {
>      do {
> diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
> index 0163bb949b..6622c2dd4c 100644
> --- a/xen/drivers/passthrough/vtd/iommu.c
> +++ b/xen/drivers/passthrough/vtd/iommu.c
> @@ -1770,7 +1770,7 @@ static int __must_check intel_iommu_map_page(struct domain *d,
>                                               unsigned int flags)
>  {
>      struct domain_iommu *hd = dom_iommu(d);
> -    struct dma_pte *page = NULL, *pte = NULL, old, new = { 0 };
> +    struct dma_pte *page, *pte, old, new = {};
>      u64 pg_maddr;
>      int rc = 0;
> 
> @@ -1790,9 +1790,11 @@ static int __must_check intel_iommu_map_page(struct domain *d,
>          spin_unlock(&hd->arch.mapping_lock);
>          return -ENOMEM;
>      }
> +
>      page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
> -    pte = page + (dfn_x(dfn) & LEVEL_MASK);
> +    pte = &page[dfn_x(dfn) & LEVEL_MASK];
>      old = *pte;
> +
>      dma_set_pte_addr(new, mfn_to_maddr(mfn));
>      dma_set_pte_prot(new,
>                       ((flags & IOMMUF_readable) ? DMA_PTE_READ  : 0) |
> @@ -1808,6 +1810,7 @@ static int __must_check intel_iommu_map_page(struct domain *d,
>          unmap_vtd_domain_page(page);
>          return 0;
>      }
> +
>      *pte = new;
> 
>      iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
> @@ -1823,6 +1826,10 @@ static int __must_check intel_iommu_map_page(struct domain *d,
>  static int __must_check intel_iommu_unmap_page(struct domain *d,
>                                                 dfn_t dfn)
>  {
> +    /* Do nothing if VT-d shares EPT page table */
> +    if ( iommu_use_hap_pt(d) )
> +        return 0;
> +
>      /* Do nothing if hardware domain and iommu supports pass thru. */
>      if ( iommu_passthrough && is_hardware_domain(d) )
>          return 0;
> @@ -1830,6 +1837,46 @@ static int __must_check intel_iommu_unmap_page(struct domain *d,
>      return dma_pte_clear_one(d, dfn_to_daddr(dfn));
>  }
> 
> +static int intel_iommu_lookup_page(struct domain *d, dfn_t dfn, mfn_t *mfn,
> +                                   unsigned int *flags)
> +{
> +    struct domain_iommu *hd = dom_iommu(d);
> +    struct dma_pte *page, val;
> +    u64 pg_maddr;
> +
> +    /* Fail if VT-d shares EPT page table */
> +    if ( iommu_use_hap_pt(d) )
> +        return -ENOENT;
> +
> +    /* Fail if hardware domain and iommu supports pass thru. */
> +    if ( iommu_passthrough && is_hardware_domain(d) )
> +        return -ENOENT;

Why fail instead of returning the dfn back as the mfn? Passthrough is just
one special translation mode in the IOMMU; it doesn't mean a lookup is not
possible.
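
Something along these lines (a minimal, untested sketch; it assumes the
identity mapping implies full read/write permissions) would keep the
lookup working for the passthrough case:

    /* Passthrough is an identity mapping, so hand the dfn straight back. */
    if ( iommu_passthrough && is_hardware_domain(d) )
    {
        *mfn = _mfn(dfn_x(dfn));
        *flags = IOMMUF_readable | IOMMUF_writable;
        return 0;
    }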

> +
> +    spin_lock(&hd->arch.mapping_lock);
> +
> +    pg_maddr = addr_to_dma_page_maddr(d, dfn_to_daddr(dfn), 0);
> +    if ( pg_maddr == 0 )
> +    {
> +        spin_unlock(&hd->arch.mapping_lock);
> +        return -ENOMEM;
> +    }
> +
> +    page = map_vtd_domain_page(pg_maddr);
> +    val = page[dfn_x(dfn) & LEVEL_MASK];
> +
> +    unmap_vtd_domain_page(page);
> +    spin_unlock(&hd->arch.mapping_lock);
> +
> +    if ( !dma_pte_present(val) )
> +        return -ENOENT;
> +
> +    *mfn = maddr_to_mfn(dma_pte_addr(val));
> +    *flags = dma_pte_read(val) ? IOMMUF_readable : 0;
> +    *flags |= dma_pte_write(val) ? IOMMUF_writable : 0;
> +
> +    return 0;
> +}
> +
>  int iommu_pte_flush(struct domain *d, uint64_t dfn, uint64_t *pte,
>                      int order, int present)
>  {
> @@ -2655,6 +2702,7 @@ const struct iommu_ops intel_iommu_ops = {
>      .teardown = iommu_domain_teardown,
>      .map_page = intel_iommu_map_page,
>      .unmap_page = intel_iommu_unmap_page,
> +    .lookup_page = intel_iommu_lookup_page,
>      .free_page_table = iommu_free_page_table,
>      .reassign_device = reassign_device_ownership,
>      .get_device_group_id = intel_iommu_group_id,
> diff --git a/xen/drivers/passthrough/vtd/iommu.h b/xen/drivers/passthrough/vtd/iommu.h
> index 72c1a2e3cd..47bdfcb5ea 100644
> --- a/xen/drivers/passthrough/vtd/iommu.h
> +++ b/xen/drivers/passthrough/vtd/iommu.h
> @@ -272,6 +272,9 @@ struct dma_pte {
>  #define dma_set_pte_prot(p, prot) do { \
>          (p).val = ((p).val & ~DMA_PTE_PROT) | ((prot) & DMA_PTE_PROT); \
>      } while (0)
> +#define dma_pte_prot(p) ((p).val & DMA_PTE_PROT)
> +#define dma_pte_read(p) (dma_pte_prot(p) & DMA_PTE_READ)
> +#define dma_pte_write(p) (dma_pte_prot(p) & DMA_PTE_WRITE)
>  #define dma_pte_addr(p) ((p).val & PADDR_MASK & PAGE_MASK_4K)
>  #define dma_set_pte_addr(p, addr) do {\
>              (p).val |= ((addr) & PAGE_MASK_4K); } while (0)
> diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
> index 9e0b4e8638..bebddc2db4 100644
> --- a/xen/include/xen/iommu.h
> +++ b/xen/include/xen/iommu.h
> @@ -100,6 +100,8 @@ void iommu_teardown(struct domain *d);
>  int __must_check iommu_map_page(struct domain *d, dfn_t dfn,
>                                  mfn_t mfn, unsigned int flags);
>  int __must_check iommu_unmap_page(struct domain *d, dfn_t dfn);
> +int __must_check iommu_lookup_page(struct domain *d, dfn_t dfn, mfn_t *mfn,
> +                                   unsigned int *flags);
> 
>  enum iommu_feature
>  {
> @@ -190,6 +192,8 @@ struct iommu_ops {
>      int __must_check (*map_page)(struct domain *d, dfn_t dfn, mfn_t mfn,
>                                   unsigned int flags);
>      int __must_check (*unmap_page)(struct domain *d, dfn_t dfn);
> +    int __must_check (*lookup_page)(struct domain *d, dfn_t dfn, mfn_t *mfn,
> +                                    unsigned int *flags);
>      void (*free_page_table)(struct page_info *);
>  #ifdef CONFIG_X86
>      void (*update_ire_from_apic)(unsigned int apic, unsigned int reg, unsigned int value);
> --
> 2.11.0

