
[PATCH 7/9] VT-d: centralize mapping of QI entries


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Jan Beulich <jbeulich@xxxxxxxx>
  • Date: Wed, 9 Jun 2021 11:29:31 +0200
  • Cc: Paul Durrant <paul@xxxxxxx>, Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Kevin Tian <kevin.tian@xxxxxxxxx>
  • Delivery-date: Wed, 09 Jun 2021 09:29:40 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Introduce a helper function to reduce redundancy. Take the opportunity
to express the logic without using the somewhat odd QINVAL_ENTRY_ORDER,
and to uniformly unmap after updating the queue tail and dropping the
lock (as was so far done only by queue_invalidate_context_sync()).

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
I wonder though whether we wouldn't be better off permanently mapping
the queue(s).
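
For reference, a minimal standalone sketch (not part of the patch) of the
index arithmetic involved: assuming 4KiB pages and 16-byte qinval_entry
descriptors, it checks that the page base / in-page slot computed the new
way in qi_map_entry() matches the old QINVAL_ENTRY_ORDER-based form. The
constants below are illustrative stand-ins for the Xen definitions.

/* Standalone illustration: compare old and new QI index arithmetic.
 * Assumes 4KiB pages and sizeof(struct qinval_entry) == 16. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT          12
#define PAGE_SIZE           (1UL << PAGE_SHIFT)
#define PAGE_MASK           (~(PAGE_SIZE - 1))
#define ENTRY_SIZE          16UL                 /* sizeof(struct qinval_entry) */
#define QINVAL_ENTRY_ORDER  (PAGE_SHIFT - 4)     /* log2(entries per page) */

int main(void)
{
    unsigned int index;

    for ( index = 0; index < 4 * (PAGE_SIZE / ENTRY_SIZE); ++index )
    {
        /* Old form: page number of the entry, shifted to a byte offset,
         * plus the slot within that page. */
        uint64_t old_base = ((uint64_t)index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT;
        unsigned int old_slot = index % (1 << QINVAL_ENTRY_ORDER);

        /* New form used by qi_map_entry(): byte offset of the entry masked
         * down to a page boundary, plus the slot within that page. */
        uint64_t new_base = (index * ENTRY_SIZE) & PAGE_MASK;
        unsigned int new_slot = index % (PAGE_SIZE / ENTRY_SIZE);

        assert(old_base == new_base && old_slot == new_slot);
    }

    printf("index arithmetic matches\n");
    return 0;
}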

--- a/xen/drivers/passthrough/vtd/qinval.c
+++ b/xen/drivers/passthrough/vtd/qinval.c
@@ -69,6 +69,16 @@ static void qinval_update_qtail(struct v
     dmar_writel(iommu->reg, DMAR_IQT_REG, val << QINVAL_INDEX_SHIFT);
 }
 
+static struct qinval_entry *qi_map_entry(const struct vtd_iommu *iommu,
+                                         unsigned int index)
+{
+    paddr_t base = iommu->qinval_maddr +
+                   ((index * sizeof(struct qinval_entry)) & PAGE_MASK);
+    struct qinval_entry *entries = map_vtd_domain_page(base);
+
+    return &entries[index % (PAGE_SIZE / sizeof(*entries))];
+}
+
 static int __must_check queue_invalidate_context_sync(struct vtd_iommu *iommu,
                                                       u16 did, u16 source_id,
                                                       u8 function_mask,
@@ -76,15 +86,11 @@ static int __must_check queue_invalidate
 {
     unsigned long flags;
     unsigned int index;
-    u64 entry_base;
-    struct qinval_entry *qinval_entry, *qinval_entries;
+    struct qinval_entry *qinval_entry;
 
     spin_lock_irqsave(&iommu->register_lock, flags);
     index = qinval_next_index(iommu);
-    entry_base = iommu->qinval_maddr +
-                 ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
-    qinval_entries = map_vtd_domain_page(entry_base);
-    qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
+    qinval_entry = qi_map_entry(iommu, index);
 
     qinval_entry->q.cc_inv_dsc.lo.type = TYPE_INVAL_CONTEXT;
     qinval_entry->q.cc_inv_dsc.lo.granu = granu;
@@ -98,7 +104,7 @@ static int __must_check queue_invalidate
     qinval_update_qtail(iommu, index);
     spin_unlock_irqrestore(&iommu->register_lock, flags);
 
-    unmap_vtd_domain_page(qinval_entries);
+    unmap_vtd_domain_page(qinval_entry);
 
     return invalidate_sync(iommu);
 }
@@ -110,15 +116,11 @@ static int __must_check queue_invalidate
 {
     unsigned long flags;
     unsigned int index;
-    u64 entry_base;
-    struct qinval_entry *qinval_entry, *qinval_entries;
+    struct qinval_entry *qinval_entry;
 
     spin_lock_irqsave(&iommu->register_lock, flags);
     index = qinval_next_index(iommu);
-    entry_base = iommu->qinval_maddr +
-                 ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
-    qinval_entries = map_vtd_domain_page(entry_base);
-    qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
+    qinval_entry = qi_map_entry(iommu, index);
 
     qinval_entry->q.iotlb_inv_dsc.lo.type = TYPE_INVAL_IOTLB;
     qinval_entry->q.iotlb_inv_dsc.lo.granu = granu;
@@ -133,10 +135,11 @@ static int __must_check queue_invalidate
     qinval_entry->q.iotlb_inv_dsc.hi.res_1 = 0;
     qinval_entry->q.iotlb_inv_dsc.hi.addr = addr >> PAGE_SHIFT_4K;
 
-    unmap_vtd_domain_page(qinval_entries);
     qinval_update_qtail(iommu, index);
     spin_unlock_irqrestore(&iommu->register_lock, flags);
 
+    unmap_vtd_domain_page(qinval_entry);
+
     return invalidate_sync(iommu);
 }
 
@@ -147,17 +150,13 @@ static int __must_check queue_invalidate
     static DEFINE_PER_CPU(uint32_t, poll_slot);
     unsigned int index;
     unsigned long flags;
-    u64 entry_base;
-    struct qinval_entry *qinval_entry, *qinval_entries;
+    struct qinval_entry *qinval_entry;
     uint32_t *this_poll_slot = &this_cpu(poll_slot);
 
     spin_lock_irqsave(&iommu->register_lock, flags);
     ACCESS_ONCE(*this_poll_slot) = QINVAL_STAT_INIT;
     index = qinval_next_index(iommu);
-    entry_base = iommu->qinval_maddr +
-                 ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
-    qinval_entries = map_vtd_domain_page(entry_base);
-    qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
+    qinval_entry = qi_map_entry(iommu, index);
 
     qinval_entry->q.inv_wait_dsc.lo.type = TYPE_INVAL_WAIT;
     qinval_entry->q.inv_wait_dsc.lo.iflag = iflag;
@@ -167,10 +166,11 @@ static int __must_check queue_invalidate
     qinval_entry->q.inv_wait_dsc.lo.sdata = QINVAL_STAT_DONE;
     qinval_entry->q.inv_wait_dsc.hi.saddr = virt_to_maddr(this_poll_slot);
 
-    unmap_vtd_domain_page(qinval_entries);
     qinval_update_qtail(iommu, index);
     spin_unlock_irqrestore(&iommu->register_lock, flags);
 
+    unmap_vtd_domain_page(qinval_entry);
+
     /* Now we don't support interrupt method */
     if ( sw )
     {
@@ -246,16 +246,12 @@ int qinval_device_iotlb_sync(struct vtd_
 {
     unsigned long flags;
     unsigned int index;
-    u64 entry_base;
-    struct qinval_entry *qinval_entry, *qinval_entries;
+    struct qinval_entry *qinval_entry;
 
     ASSERT(pdev);
     spin_lock_irqsave(&iommu->register_lock, flags);
     index = qinval_next_index(iommu);
-    entry_base = iommu->qinval_maddr +
-                 ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
-    qinval_entries = map_vtd_domain_page(entry_base);
-    qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
+    qinval_entry = qi_map_entry(iommu, index);
 
     qinval_entry->q.dev_iotlb_inv_dsc.lo.type = TYPE_INVAL_DEVICE_IOTLB;
     qinval_entry->q.dev_iotlb_inv_dsc.lo.res_1 = 0;
@@ -268,10 +264,11 @@ int qinval_device_iotlb_sync(struct vtd_
     qinval_entry->q.dev_iotlb_inv_dsc.hi.res_1 = 0;
     qinval_entry->q.dev_iotlb_inv_dsc.hi.addr = addr >> PAGE_SHIFT_4K;
 
-    unmap_vtd_domain_page(qinval_entries);
     qinval_update_qtail(iommu, index);
     spin_unlock_irqrestore(&iommu->register_lock, flags);
 
+    unmap_vtd_domain_page(qinval_entry);
+
     return dev_invalidate_sync(iommu, pdev, did);
 }
 
@@ -280,16 +277,12 @@ static int __must_check queue_invalidate
 {
     unsigned long flags;
     unsigned int index;
-    u64 entry_base;
-    struct qinval_entry *qinval_entry, *qinval_entries;
+    struct qinval_entry *qinval_entry;
     int ret;
 
     spin_lock_irqsave(&iommu->register_lock, flags);
     index = qinval_next_index(iommu);
-    entry_base = iommu->qinval_maddr +
-                 ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
-    qinval_entries = map_vtd_domain_page(entry_base);
-    qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
+    qinval_entry = qi_map_entry(iommu, index);
 
     qinval_entry->q.iec_inv_dsc.lo.type = TYPE_INVAL_IEC;
     qinval_entry->q.iec_inv_dsc.lo.granu = granu;
@@ -299,10 +292,11 @@ static int __must_check queue_invalidate
     qinval_entry->q.iec_inv_dsc.lo.res_2 = 0;
     qinval_entry->q.iec_inv_dsc.hi.res = 0;
 
-    unmap_vtd_domain_page(qinval_entries);
     qinval_update_qtail(iommu, index);
     spin_unlock_irqrestore(&iommu->register_lock, flags);
 
+    unmap_vtd_domain_page(qinval_entry);
+
     ret = invalidate_sync(iommu);
 
     /*