
RE: [PATCH 7/9] VT-d: centralize mapping of QI entries


  • To: Jan Beulich <jbeulich@xxxxxxxx>, "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: "Tian, Kevin" <kevin.tian@xxxxxxxxx>
  • Date: Thu, 24 Jun 2021 05:31:55 +0000
  • Cc: Paul Durrant <paul@xxxxxxx>, "Cooper, Andrew" <andrew.cooper3@xxxxxxxxxx>
  • Delivery-date: Thu, 24 Jun 2021 05:32:04 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
  • Thread-topic: [PATCH 7/9] VT-d: centralize mapping of QI entries

> From: Jan Beulich <jbeulich@xxxxxxxx>
> Sent: Wednesday, June 9, 2021 5:30 PM
> 
> Introduce a helper function to reduce redundancy. Take the opportunity
> to express the logic without using the somewhat odd QINVAL_ENTRY_ORDER.
> Also take the opportunity to uniformly unmap after updating the queue tail
> and dropping the lock (as was previously done only by
> queue_invalidate_context_sync()).
> 
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

Reviewed-by: Kevin Tian <kevin.tian@xxxxxxxxx>
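
For completeness (and assuming I read the definitions right): a descriptor is
two u64 halves, i.e. sizeof(struct qinval_entry) == 16, so a 4k page holds
256 entries and QINVAL_ENTRY_ORDER == 8.  The new expressions are therefore
equivalent to the old ones, e.g. for index == 300:

    page offset: (300 >> 8) << 12  == 4096 == (300 * 16) & ~0xfff
    slot:        300 % (1 << 8)    == 44   == 300 % (4096 / 16)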

> ---
> I wonder though whether we wouldn't be better off permanently mapping
> the queue(s).
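
If that were pursued, a minimal sketch of the alternative might look like the
below.  This is only an illustration under assumptions not taken from the
patch: the queue is a single page, map_domain_page_global() is usable in this
context, and iommu->qinval_page is a hypothetical new field holding the
mapping.

    /* Hypothetical one-time mapping at queue setup (sketch, not the patch). */
    static int qinval_map_queue(struct vtd_iommu *iommu)
    {
        iommu->qinval_page =
            map_domain_page_global(maddr_to_mfn(iommu->qinval_maddr));

        return iommu->qinval_page ? 0 : -ENOMEM;
    }

    /* The per-entry lookup then needs no map/unmap on the fast path. */
    static struct qinval_entry *qi_entry(const struct vtd_iommu *iommu,
                                         unsigned int index)
    {
        struct qinval_entry *entries = iommu->qinval_page;

        return &entries[index % (PAGE_SIZE / sizeof(*entries))];
    }

    /* Teardown would pair this with unmap_domain_page_global(). */

Whether spending global mapping space on this is worthwhile is of course a
separate question.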
> 
> --- a/xen/drivers/passthrough/vtd/qinval.c
> +++ b/xen/drivers/passthrough/vtd/qinval.c
> @@ -69,6 +69,16 @@ static void qinval_update_qtail(struct v
>      dmar_writel(iommu->reg, DMAR_IQT_REG, val << QINVAL_INDEX_SHIFT);
>  }
> 
> +static struct qinval_entry *qi_map_entry(const struct vtd_iommu *iommu,
> +                                         unsigned int index)
> +{
> +    paddr_t base = iommu->qinval_maddr +
> +                   ((index * sizeof(struct qinval_entry)) & PAGE_MASK);
> +    struct qinval_entry *entries = map_vtd_domain_page(base);
> +
> +    return &entries[index % (PAGE_SIZE / sizeof(*entries))];
> +}
> +
>  static int __must_check queue_invalidate_context_sync(struct vtd_iommu *iommu,
>                                                        u16 did, u16 source_id,
>                                                        u8 function_mask,
> @@ -76,15 +86,11 @@ static int __must_check queue_invalidate
>  {
>      unsigned long flags;
>      unsigned int index;
> -    u64 entry_base;
> -    struct qinval_entry *qinval_entry, *qinval_entries;
> +    struct qinval_entry *qinval_entry;
> 
>      spin_lock_irqsave(&iommu->register_lock, flags);
>      index = qinval_next_index(iommu);
> -    entry_base = iommu->qinval_maddr +
> -                 ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
> -    qinval_entries = map_vtd_domain_page(entry_base);
> -    qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
> +    qinval_entry = qi_map_entry(iommu, index);
> 
>      qinval_entry->q.cc_inv_dsc.lo.type = TYPE_INVAL_CONTEXT;
>      qinval_entry->q.cc_inv_dsc.lo.granu = granu;
> @@ -98,7 +104,7 @@ static int __must_check queue_invalidate
>      qinval_update_qtail(iommu, index);
>      spin_unlock_irqrestore(&iommu->register_lock, flags);
> 
> -    unmap_vtd_domain_page(qinval_entries);
> +    unmap_vtd_domain_page(qinval_entry);
> 
>      return invalidate_sync(iommu);
>  }
> @@ -110,15 +116,11 @@ static int __must_check queue_invalidate
>  {
>      unsigned long flags;
>      unsigned int index;
> -    u64 entry_base;
> -    struct qinval_entry *qinval_entry, *qinval_entries;
> +    struct qinval_entry *qinval_entry;
> 
>      spin_lock_irqsave(&iommu->register_lock, flags);
>      index = qinval_next_index(iommu);
> -    entry_base = iommu->qinval_maddr +
> -                 ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
> -    qinval_entries = map_vtd_domain_page(entry_base);
> -    qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
> +    qinval_entry = qi_map_entry(iommu, index);
> 
>      qinval_entry->q.iotlb_inv_dsc.lo.type = TYPE_INVAL_IOTLB;
>      qinval_entry->q.iotlb_inv_dsc.lo.granu = granu;
> @@ -133,10 +135,11 @@ static int __must_check queue_invalidate
>      qinval_entry->q.iotlb_inv_dsc.hi.res_1 = 0;
>      qinval_entry->q.iotlb_inv_dsc.hi.addr = addr >> PAGE_SHIFT_4K;
> 
> -    unmap_vtd_domain_page(qinval_entries);
>      qinval_update_qtail(iommu, index);
>      spin_unlock_irqrestore(&iommu->register_lock, flags);
> 
> +    unmap_vtd_domain_page(qinval_entry);
> +
>      return invalidate_sync(iommu);
>  }
> 
> @@ -147,17 +150,13 @@ static int __must_check queue_invalidate
>      static DEFINE_PER_CPU(uint32_t, poll_slot);
>      unsigned int index;
>      unsigned long flags;
> -    u64 entry_base;
> -    struct qinval_entry *qinval_entry, *qinval_entries;
> +    struct qinval_entry *qinval_entry;
>      uint32_t *this_poll_slot = &this_cpu(poll_slot);
> 
>      spin_lock_irqsave(&iommu->register_lock, flags);
>      ACCESS_ONCE(*this_poll_slot) = QINVAL_STAT_INIT;
>      index = qinval_next_index(iommu);
> -    entry_base = iommu->qinval_maddr +
> -                 ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
> -    qinval_entries = map_vtd_domain_page(entry_base);
> -    qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
> +    qinval_entry = qi_map_entry(iommu, index);
> 
>      qinval_entry->q.inv_wait_dsc.lo.type = TYPE_INVAL_WAIT;
>      qinval_entry->q.inv_wait_dsc.lo.iflag = iflag;
> @@ -167,10 +166,11 @@ static int __must_check queue_invalidate
>      qinval_entry->q.inv_wait_dsc.lo.sdata = QINVAL_STAT_DONE;
>      qinval_entry->q.inv_wait_dsc.hi.saddr = virt_to_maddr(this_poll_slot);
> 
> -    unmap_vtd_domain_page(qinval_entries);
>      qinval_update_qtail(iommu, index);
>      spin_unlock_irqrestore(&iommu->register_lock, flags);
> 
> +    unmap_vtd_domain_page(qinval_entry);
> +
>      /* Now we don't support interrupt method */
>      if ( sw )
>      {
> @@ -246,16 +246,12 @@ int qinval_device_iotlb_sync(struct vtd_
>  {
>      unsigned long flags;
>      unsigned int index;
> -    u64 entry_base;
> -    struct qinval_entry *qinval_entry, *qinval_entries;
> +    struct qinval_entry *qinval_entry;
> 
>      ASSERT(pdev);
>      spin_lock_irqsave(&iommu->register_lock, flags);
>      index = qinval_next_index(iommu);
> -    entry_base = iommu->qinval_maddr +
> -                 ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
> -    qinval_entries = map_vtd_domain_page(entry_base);
> -    qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
> +    qinval_entry = qi_map_entry(iommu, index);
> 
>      qinval_entry->q.dev_iotlb_inv_dsc.lo.type = TYPE_INVAL_DEVICE_IOTLB;
>      qinval_entry->q.dev_iotlb_inv_dsc.lo.res_1 = 0;
> @@ -268,10 +264,11 @@ int qinval_device_iotlb_sync(struct vtd_
>      qinval_entry->q.dev_iotlb_inv_dsc.hi.res_1 = 0;
>      qinval_entry->q.dev_iotlb_inv_dsc.hi.addr = addr >> PAGE_SHIFT_4K;
> 
> -    unmap_vtd_domain_page(qinval_entries);
>      qinval_update_qtail(iommu, index);
>      spin_unlock_irqrestore(&iommu->register_lock, flags);
> 
> +    unmap_vtd_domain_page(qinval_entry);
> +
>      return dev_invalidate_sync(iommu, pdev, did);
>  }
> 
> @@ -280,16 +277,12 @@ static int __must_check queue_invalidate
>  {
>      unsigned long flags;
>      unsigned int index;
> -    u64 entry_base;
> -    struct qinval_entry *qinval_entry, *qinval_entries;
> +    struct qinval_entry *qinval_entry;
>      int ret;
> 
>      spin_lock_irqsave(&iommu->register_lock, flags);
>      index = qinval_next_index(iommu);
> -    entry_base = iommu->qinval_maddr +
> -                 ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
> -    qinval_entries = map_vtd_domain_page(entry_base);
> -    qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
> +    qinval_entry = qi_map_entry(iommu, index);
> 
>      qinval_entry->q.iec_inv_dsc.lo.type = TYPE_INVAL_IEC;
>      qinval_entry->q.iec_inv_dsc.lo.granu = granu;
> @@ -299,10 +292,11 @@ static int __must_check queue_invalidate
>      qinval_entry->q.iec_inv_dsc.lo.res_2 = 0;
>      qinval_entry->q.iec_inv_dsc.hi.res = 0;
> 
> -    unmap_vtd_domain_page(qinval_entries);
>      qinval_update_qtail(iommu, index);
>      spin_unlock_irqrestore(&iommu->register_lock, flags);
> 
> +    unmap_vtd_domain_page(qinval_entry);
> +
>      ret = invalidate_sync(iommu);
> 
>      /*


 

