
[PATCH v4 08/21] AMD/IOMMU: walk trees upon page fault


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Jan Beulich <jbeulich@xxxxxxxx>
  • Date: Mon, 25 Apr 2022 10:36:42 +0200
  • Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Paul Durrant <paul@xxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>
  • Delivery-date: Mon, 25 Apr 2022 08:36:49 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

This is to aid diagnosing issues and largely matches VT-d's behavior.
Since I'm adding permissions output here as well, take the opportunity
to also display them in amd_dump_page_table_level().

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
Note: "largely matches VT-d's behavior" includes the lack of any locking
      here. Adding suitable locking may not be that easy, as we'd need
      to determine which domain's mapping lock to acquire in addition to
      the necessary IOMMU lock (for the device table access), and
      whether that domain actually still exists. The latter is because,
      if we really want to play safe here, imo we also need to account
      for the device table being potentially corrupted / stale.
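
A minimal sketch of the locking shape the note alludes to (illustrative
only, not part of the patch): walk_with_locking() is a made-up wrapper,
and the lock / field names (iommu->lock, dt[dev_id].domain_id,
dom_iommu(d)->arch.mapping_lock) are assumptions about the current code
rather than a tested change.

static void walk_with_locking(struct amd_iommu *iommu, unsigned int dev_id,
                              dfn_t dfn)
{
    const struct amd_iommu_dte *dt = iommu->dev_table.buffer;
    unsigned long flags;
    domid_t owner;
    struct domain *d;

    /* Read the DTE under the IOMMU lock, which covers device table updates. */
    spin_lock_irqsave(&iommu->lock, flags);
    owner = dt[dev_id].domain_id;
    spin_unlock_irqrestore(&iommu->lock, flags);

    /* The owning domain may be gone, or the DTE itself stale / corrupted. */
    d = rcu_lock_domain_by_id(owner);
    if ( !d )
        return;

    /* Serialise against concurrent map/unmap before walking the tables. */
    spin_lock(&dom_iommu(d)->arch.mapping_lock);
    amd_iommu_print_entries(iommu, dev_id, dfn);
    spin_unlock(&dom_iommu(d)->arch.mapping_lock);

    rcu_unlock_domain(d);
}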
---
v4: Re-base.
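
For reference, a stand-alone sketch (illustrative only, not part of the
patch) of the walk the new amd_iommu_print_entries() performs. The types,
the lookup_entry() stub and the numbers in main() are stand-ins; only the
9-bits-per-level index derivation (mirroring pfn_to_pde_idx()) and the
descent / termination rules follow the patch.

#include <inttypes.h>
#include <stdio.h>

struct fake_pte {
    uint64_t raw;                  /* full PTE value, as printed */
    unsigned int pr:1, ir:1, iw:1; /* present / IOMMU read / IOMMU write */
    unsigned int next_level:3;     /* 0 at a leaf (page) entry */
    unsigned long mfn;             /* frame of the next table or of the page */
};

/* Stub standing in for map_domain_page() plus the table indexing. */
static struct fake_pte lookup_entry(unsigned long table_mfn, unsigned int idx)
{
    (void)table_mfn; (void)idx;
    return (struct fake_pte){ 0 };      /* pretend "not present" */
}

static void walk(unsigned long root_mfn, unsigned int levels, unsigned long dfn)
{
    unsigned long pt_mfn = root_mfn;    /* pt_root from the DTE */
    unsigned int level = levels;        /* paging_mode from the DTE */

    while ( level )
    {
        /* 9 DFN bits select one of 512 entries per level. */
        unsigned int idx = (dfn >> (9 * (level - 1))) & 0x1ff;
        struct fake_pte pte = lookup_entry(pt_mfn, idx);

        printf("  L%u[%03x] = %" PRIx64 " %c%c\n", level, idx, pte.raw,
               pte.pr ? (pte.ir ? 'r' : '-') : 'n',  /* "np" = not present */
               pte.pr ? (pte.iw ? 'w' : '-') : 'p');

        if ( !pte.pr )                  /* fault explained: nothing mapped */
            break;
        if ( pte.next_level >= level )  /* malformed: level must decrease */
            break;

        pt_mfn = pte.mfn;               /* descend; ... */
        level = pte.next_level;         /* ... 0 terminates at a leaf */
    }
}

int main(void)
{
    walk(0xabcde, 3, 0x12345);          /* dummy root / levels / DFN */
    return 0;
}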

--- a/xen/drivers/passthrough/amd/iommu.h
+++ b/xen/drivers/passthrough/amd/iommu.h
@@ -259,6 +259,8 @@ int __must_check cf_check amd_iommu_flus
     struct domain *d, dfn_t dfn, unsigned long page_count,
     unsigned int flush_flags);
 int __must_check cf_check amd_iommu_flush_iotlb_all(struct domain *d);
+void amd_iommu_print_entries(const struct amd_iommu *iommu, unsigned int dev_id,
+                             dfn_t dfn);
 
 /* device table functions */
 int get_dma_requestor_id(uint16_t seg, uint16_t bdf);
--- a/xen/drivers/passthrough/amd/iommu_init.c
+++ b/xen/drivers/passthrough/amd/iommu_init.c
@@ -575,6 +575,9 @@ static void cf_check parse_event_log_ent
                (flags & 0x002) ? " NX" : "",
                (flags & 0x001) ? " GN" : "");
 
+        if ( iommu_verbose )
+            amd_iommu_print_entries(iommu, device_id, daddr_to_dfn(addr));
+
         for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
             if ( get_dma_requestor_id(iommu->seg, bdf) == device_id )
                 pci_check_disable_device(iommu->seg, PCI_BUS(bdf),
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -428,6 +428,50 @@ int cf_check amd_iommu_unmap_page(
     return 0;
 }
 
+void amd_iommu_print_entries(const struct amd_iommu *iommu, unsigned int dev_id,
+                             dfn_t dfn)
+{
+    mfn_t pt_mfn;
+    unsigned int level;
+    const struct amd_iommu_dte *dt = iommu->dev_table.buffer;
+
+    if ( !dt[dev_id].tv )
+    {
+        printk("%pp: no root\n", &PCI_SBDF(iommu->seg, dev_id));
+        return;
+    }
+
+    pt_mfn = _mfn(dt[dev_id].pt_root);
+    level = dt[dev_id].paging_mode;
+    printk("%pp root @ %"PRI_mfn" (%u levels) dfn=%"PRI_dfn"\n",
+           &PCI_SBDF(iommu->seg, dev_id), mfn_x(pt_mfn), level, dfn_x(dfn));
+
+    while ( level )
+    {
+        const union amd_iommu_pte *pt = map_domain_page(pt_mfn);
+        unsigned int idx = pfn_to_pde_idx(dfn_x(dfn), level);
+        union amd_iommu_pte pte = pt[idx];
+
+        unmap_domain_page(pt);
+
+        printk("  L%u[%03x] = %"PRIx64" %c%c\n", level, idx, pte.raw,
+               pte.pr ? pte.ir ? 'r' : '-' : 'n',
+               pte.pr ? pte.iw ? 'w' : '-' : 'p');
+
+        if ( !pte.pr )
+            break;
+
+        if ( pte.next_level >= level )
+        {
+            printk("  L%u[%03x]: next: %u\n", level, idx, pte.next_level);
+            break;
+        }
+
+        pt_mfn = _mfn(pte.mfn);
+        level = pte.next_level;
+    }
+}
+
 static unsigned long flush_count(unsigned long dfn, unsigned long page_count,
                                  unsigned int order)
 {
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -724,10 +724,11 @@ static void amd_dump_page_table_level(st
                 mfn_to_page(_mfn(pde->mfn)), pde->next_level,
                 address, indent + 1);
         else
-            printk("%*sdfn: %08lx  mfn: %08lx\n",
+            printk("%*sdfn: %08lx  mfn: %08lx  %c%c\n",
                    indent, "",
                    (unsigned long)PFN_DOWN(address),
-                   (unsigned long)PFN_DOWN(pfn_to_paddr(pde->mfn)));
+                   (unsigned long)PFN_DOWN(pfn_to_paddr(pde->mfn)),
+                   pde->ir ? 'r' : '-', pde->iw ? 'w' : '-');
     }
 
     unmap_domain_page(table_vaddr);
