
[Xen-devel] [PATCH 5 of 6] Add IOTLB invalidation command for AMD IOMMU



# HG changeset patch
# User Wei Wang <wei.wang2@xxxxxxx>
# Date 1319201423 -7200
# Node ID 6009089b0511c843f54435d02dcd72394d7e164f
# Parent  f2fbc041f4a710b66f98e76ad8905dcac7920c18
Add IOTLB invalidation command for AMD IOMMU

Implement the INVALIDATE_IOTLB_PAGES command and use it to flush the
IOTLBs of ATS-enabled devices, in addition to the IOMMU's own TLB,
whenever p2m mappings for a domain change.

Signed-off-by: Wei Wang <wei.wang2@xxxxxxx>

diff -r f2fbc041f4a7 -r 6009089b0511 xen/drivers/passthrough/amd/iommu_map.c
--- a/xen/drivers/passthrough/amd/iommu_map.c   Fri Oct 21 14:50:22 2011 +0200
+++ b/xen/drivers/passthrough/amd/iommu_map.c   Fri Oct 21 14:50:23 2011 +0200
@@ -128,6 +128,75 @@ static void invalidate_iommu_pages(struc
     send_iommu_command(iommu, cmd);
 }
 
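+/* Build and queue an INVALIDATE_IOTLB_PAGES command for the given device,
+ * covering 2^order pages starting at io_addr (or all pages when io_addr is
+ * INV_IOMMU_ALL_PAGES_ADDRESS). */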
+static void invalidate_iotlb_pages(struct amd_iommu *iommu,
+                                   u16 maxpend, u32 pasid, u16 queueid,
+                                   u64 io_addr, u16 dev_id, u16 order)
+{
+    u64 addr_lo, addr_hi;
+    u32 cmd[4], entry;
+    int sflag = 0;
+
+    ASSERT(order == 0 || order == 9 || order == 18);
+
+    if ( order || io_addr == INV_IOMMU_ALL_PAGES_ADDRESS )
+        sflag = 1;
+
+    /* If sflag == 1, the size of the invalidate command is determined
+     * by the first zero bit in the address starting from Address[12]. */
+    if ( order )
+    {
+        u64 mask = 1ULL << (order - 1 + PAGE_SHIFT);
+        io_addr &= ~mask;
+        io_addr |= mask - 1;
+    }
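+    /* Worked example: for order == 9 the mask is 1 << 20, so bit 20 is
+     * cleared and bits 19:0 are set (bits 11:0 are dropped when the
+     * address field is packed below); the first zero bit at or above
+     * Address[12] is bit 20, encoding a 2^21-byte (512-page) range. */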
+
+    addr_lo = io_addr & DMA_32BIT_MASK;
+    addr_hi = io_addr >> 32;
+
+    set_field_in_reg_u32(dev_id, 0,
+                         IOMMU_INV_IOTLB_PAGES_DEVICE_ID_MASK,
+                         IOMMU_INV_IOTLB_PAGES_DEVICE_ID_SHIFT, &entry);
+
+    set_field_in_reg_u32(maxpend, entry,
+                         IOMMU_INV_IOTLB_PAGES_MAXPEND_MASK,
+                         IOMMU_INV_IOTLB_PAGES_MAXPEND_SHIFT, &entry);
+
+    set_field_in_reg_u32(pasid & 0xff, entry,
+                         IOMMU_INV_IOTLB_PAGES_PASID1_MASK,
+                         IOMMU_INV_IOTLB_PAGES_PASID1_SHIFT, &entry);
+    cmd[0] = entry;
+
+    set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_IOTLB_PAGES, 0,
+                         IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT,
+                         &entry);
+
+    set_field_in_reg_u32(pasid >> 8, entry,
+                         IOMMU_INV_IOTLB_PAGES_PASID2_MASK, 
+                         IOMMU_INV_IOTLB_PAGES_PASID2_SHIFT,
+                         &entry);
+
+    set_field_in_reg_u32(queueid, entry,
+                         IOMMU_INV_IOTLB_PAGES_QUEUEID_MASK, 
+                         IOMMU_INV_IOTLB_PAGES_QUEUEID_SHIFT,
+                         &entry);
+    cmd[1] = entry;
+
+    set_field_in_reg_u32(sflag, 0,
+                         IOMMU_INV_IOTLB_PAGES_S_FLAG_MASK,
+                         IOMMU_INV_IOTLB_PAGES_S_FLAG_SHIFT, &entry);
+
+    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, entry,
+                         IOMMU_INV_IOTLB_PAGES_ADDR_LOW_MASK,
+                         IOMMU_INV_IOTLB_PAGES_ADDR_LOW_SHIFT, &entry);
+    cmd[2] = entry;
+
+    set_field_in_reg_u32((u32)addr_hi, 0,
+                         IOMMU_INV_IOTLB_PAGES_ADDR_HIGH_MASK,
+                         IOMMU_INV_IOTLB_PAGES_ADDR_HIGH_SHIFT, &entry);
+    cmd[3] = entry;
+
+    send_iommu_command(iommu, cmd);
+}
+
 void flush_command_buffer(struct amd_iommu *iommu)
 {
     u32 cmd[4], status;
@@ -896,6 +965,62 @@ int amd_iommu_reserve_domain_unity_map(s
     return 0;
 }
 
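+/* Issue an INVALIDATE_IOTLB_PAGES command for a single ATS-enabled device,
+ * covering 2^order pages starting at guest address gaddr. */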
+void amd_iommu_flush_iotlb(struct pci_ats_dev *pdev,
+                           uint64_t gaddr, unsigned int order)
+{
+    unsigned long flags;
+    struct amd_iommu *iommu;
+    unsigned int bdf, req_id, queueid, maxpend;
+
+    if ( !ats_enabled )
+        return;
+
+    bdf = (pdev->bus << 8) | pdev->devfn;
+    iommu = find_iommu_for_device(pdev->seg, bdf);
+
+    if ( !iommu )
+    {
+        AMD_IOMMU_DEBUG("%s: Failed to find iommu for device %04x:%02x:%02x.%u\n",
+                        __func__, pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
+                        PCI_FUNC(pdev->devfn));
+        return;
+    }
+
+    if ( !iommu->iotlb_support )
+        return;
+
+    req_id = get_dma_requestor_id(iommu->seg, bdf);
+    queueid = req_id;
+    maxpend = (pdev->ats_queue_depth + 32) & 0xff;
+
+    /* send INVALIDATE_IOTLB_PAGES command */
+    spin_lock_irqsave(&iommu->lock, flags);
+    invalidate_iotlb_pages(iommu, maxpend, 0, queueid,
+                           gaddr, req_id, order);
+    flush_command_buffer(iommu);
+    spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
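+/* Flush the IOTLBs of every ATS-enabled device assigned to the domain. */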
+void amd_iommu_flush_all_iotlbs(struct domain *d, 
+                                uint64_t gaddr, unsigned int order)
+{
+    struct pci_ats_dev *ats_pdev;
+    struct pci_dev *pdev;
+
+    if ( !ats_enabled )
+        return;
+
+    for_each_pdev( d, pdev )
+    {
+        ats_pdev = get_ats_device(pdev->seg, pdev->bus, pdev->devfn);
+        if ( ats_pdev == NULL )
+            continue;
+        if ( !pci_ats_enabled(ats_pdev->seg, ats_pdev->bus, ats_pdev->devfn) )
+            continue;
+
+        amd_iommu_flush_iotlb(ats_pdev, gaddr, order);
+    }
+}
 
 /* Flush iommu cache after p2m changes. */
 static void _amd_iommu_flush_pages(struct domain *d,
@@ -914,6 +1039,9 @@ static void _amd_iommu_flush_pages(struc
         flush_command_buffer(iommu);
         spin_unlock_irqrestore(&iommu->lock, flags);
     }
+
+    if ( ats_enabled )
+        amd_iommu_flush_all_iotlbs(d, gaddr, order);
 }
 
 void amd_iommu_flush_all_pages(struct domain *d)
diff -r f2fbc041f4a7 -r 6009089b0511 xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h      Fri Oct 21 14:50:22 2011 +0200
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h      Fri Oct 21 14:50:23 2011 +0200
@@ -233,6 +233,24 @@
 #define IOMMU_INV_INT_TABLE_DEVICE_ID_MASK   0x0000FFFF
 #define IOMMU_INV_INT_TABLE_DEVICE_ID_SHIFT  0
 
+/* INVALIDATE_IOTLB_PAGES command */
+#define IOMMU_INV_IOTLB_PAGES_MAXPEND_MASK          0xff000000
+#define IOMMU_INV_IOTLB_PAGES_MAXPEND_SHIFT         24
+#define IOMMU_INV_IOTLB_PAGES_PASID1_MASK           0x00ff0000
+#define IOMMU_INV_IOTLB_PAGES_PASID1_SHIFT          16
+#define IOMMU_INV_IOTLB_PAGES_PASID2_MASK           0x0fff0000
+#define IOMMU_INV_IOTLB_PAGES_PASID2_SHIFT          16
+#define IOMMU_INV_IOTLB_PAGES_QUEUEID_MASK          0x0000ffff
+#define IOMMU_INV_IOTLB_PAGES_QUEUEID_SHIFT         0
+#define IOMMU_INV_IOTLB_PAGES_DEVICE_ID_MASK        0x0000FFFF
+#define IOMMU_INV_IOTLB_PAGES_DEVICE_ID_SHIFT       0
+#define IOMMU_INV_IOTLB_PAGES_ADDR_LOW_MASK         0xFFFFF000
+#define IOMMU_INV_IOTLB_PAGES_ADDR_LOW_SHIFT        12
+#define IOMMU_INV_IOTLB_PAGES_ADDR_HIGH_MASK        0xFFFFFFFF
+#define IOMMU_INV_IOTLB_PAGES_ADDR_HIGH_SHIFT       0
+#define IOMMU_INV_IOTLB_PAGES_S_FLAG_MASK           0x00000001
+#define IOMMU_INV_IOTLB_PAGES_S_FLAG_SHIFT          0
+
 /* Event Log */
 #define IOMMU_EVENT_LOG_BASE_LOW_OFFSET                0x10
 #define IOMMU_EVENT_LOG_BASE_HIGH_OFFSET       0x14
diff -r f2fbc041f4a7 -r 6009089b0511 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h     Fri Oct 21 14:50:22 2011 +0200
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h     Fri Oct 21 14:50:23 2011 +0200
@@ -55,6 +55,8 @@ int amd_iommu_unmap_page(struct domain *
 void amd_iommu_flush_pages(struct domain *d, unsigned long gfn,
                            unsigned int order);
 void amd_iommu_flush_all_pages(struct domain *d);
+void amd_iommu_flush_iotlb(struct pci_ats_dev *pdev,
+                           uint64_t gaddr, unsigned int order);
 
 u64 amd_iommu_get_next_table_from_pte(u32 *entry);
 int amd_iommu_reserve_domain_unity_map(struct domain *domain,
@@ -165,4 +167,6 @@ static inline void __free_amd_iommu_tabl
     free_xenheap_pages(table, order);
 }
 
+extern bool_t ats_enabled;
+
 #endif /* _ASM_X86_64_AMD_IOMMU_PROTO_H */


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel