[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [Patch RFC 02/13] vt-d: Register MSI for async invalidation completion interrupt.



Signed-off-by: Quan Xu <quan.xu@xxxxxxxxx>
---
 xen/drivers/passthrough/vtd/iommu.c | 133 ++++++++++++++++++++++++++++++++++++
 xen/drivers/passthrough/vtd/iommu.h |  10 +++
 2 files changed, 143 insertions(+)

diff --git a/xen/drivers/passthrough/vtd/iommu.c 
b/xen/drivers/passthrough/vtd/iommu.c
index 17bfb76..db6e3a2 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -54,6 +54,7 @@ bool_t __read_mostly untrusted_msi;
 int nr_iommus;
 
 static struct tasklet vtd_fault_tasklet;
+static struct tasklet vtd_qi_tasklet;
 
 static int setup_hwdom_device(u8 devfn, struct pci_dev *);
 static void setup_hwdom_rmrr(struct domain *d);
@@ -1068,6 +1069,125 @@ static hw_irq_controller dma_msi_type = {
     .set_affinity = dma_msi_set_affinity,
 };
 
+/* IOMMU Queued Invalidation (QI). */
+static void _qi_msi_unmask(struct iommu *iommu)
+{
+    u32 sts;
+    unsigned long flags;
+
+    /* Clear IM bit of DMAR_IECTL_REG. */
+    spin_lock_irqsave(&iommu->register_lock, flags);
+    sts = dmar_readl(iommu->reg, DMAR_IECTL_REG);
+    sts &= ~DMA_IECTL_IM;
+    dmar_writel(iommu->reg, DMAR_IECTL_REG, sts);
+    spin_unlock_irqrestore(&iommu->register_lock, flags);
+}
+
+static void _qi_msi_mask(struct iommu *iommu)
+{
+    u32 sts;
+    unsigned long flags;
+
+    /* Set IM bit of DMAR_IECTL_REG. */
+    spin_lock_irqsave(&iommu->register_lock, flags);
+    sts = dmar_readl(iommu->reg, DMAR_IECTL_REG);
+    sts |= DMA_IECTL_IM;
+    dmar_writel(iommu->reg, DMAR_IECTL_REG, sts);
+    spin_unlock_irqrestore(&iommu->register_lock, flags);
+}
+
+static void _do_iommu_qi(struct iommu *iommu)
+{
+}
+
+static void do_iommu_qi_completion(unsigned long data)
+{
+    struct acpi_drhd_unit *drhd;
+
+    if ( list_empty(&acpi_drhd_units) )
+    {
+       dprintk(XENLOG_ERR VTDPREFIX, "IOMMU: no iommu devices.\n");
+       return;
+    }
+
+    for_each_drhd_unit( drhd )
+        _do_iommu_qi(drhd->iommu);
+}
+
+static void iommu_qi_completion(int irq, void *dev_id,
+                                struct cpu_user_regs *regs)
+{
+    tasklet_schedule(&vtd_qi_tasklet);
+}
+
+static void qi_msi_unmask(struct irq_desc *desc)
+{
+    _qi_msi_unmask(desc->action->dev_id);
+}
+
+static void qi_msi_mask(struct irq_desc *desc)
+{
+    _qi_msi_mask(desc->action->dev_id);
+}
+
+static unsigned int qi_msi_startup(struct irq_desc *desc)
+{
+    qi_msi_unmask(desc);
+    return 0;
+}
+
+static void qi_msi_ack(struct irq_desc *desc)
+{
+    irq_complete_move(desc);
+    qi_msi_mask(desc);
+    move_masked_irq(desc);
+}
+
+static void qi_msi_end(struct irq_desc *desc, u8 vector)
+{
+    ack_APIC_irq();
+}
+
+static void qi_msi_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
+{
+    struct msi_msg msg;
+    unsigned int dest;
+    unsigned long flags;
+    struct iommu *iommu = desc->action->dev_id;
+
+    dest = set_desc_affinity(desc, mask);
+    if ( dest == BAD_APICID )
+    {
+        dprintk(XENLOG_ERR VTDPREFIX,
+                "IOMMU: Set invalidation interrupt affinity error!\n");
+        return;
+    }
+
+    msi_compose_msg(desc->arch.vector, desc->arch.cpu_mask, &msg);
+    if ( x2apic_enabled )
+        msg.address_hi = dest & 0xFFFFFF00;
+    msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
+    msg.address_lo |= MSI_ADDR_DEST_ID(dest);
+    iommu->qi_msi.msg = msg;
+
+    spin_lock_irqsave(&iommu->register_lock, flags);
+    dmar_writel(iommu->reg, DMAR_IEDATA_REG, msg.data);
+    dmar_writel(iommu->reg, DMAR_IEADDR_REG, msg.address_lo);
+    dmar_writel(iommu->reg, DMAR_IEUADDR_REG, msg.address_hi);
+    spin_unlock_irqrestore(&iommu->register_lock, flags);
+}
+
+static hw_irq_controller qi_msi_type = {
+    .typename = "QI_MSI",
+    .startup = qi_msi_startup,
+    .shutdown = qi_msi_mask,
+    .enable = qi_msi_unmask,
+    .disable = qi_msi_mask,
+    .ack = qi_msi_ack,
+    .end = qi_msi_end,
+    .set_affinity = qi_msi_set_affinity,
+};
+
 static int __init iommu_set_interrupt(struct acpi_drhd_unit *drhd,
     hw_irq_controller *irq_ctrl, const char *devname, struct msi_desc *msi,
     void (*irq_handler)(int, void *, struct cpu_user_regs *))
@@ -1123,6 +1243,7 @@ int __init iommu_alloc(struct acpi_drhd_unit *drhd)
         return -ENOMEM;
 
     iommu->msi.irq = -1; /* No irq assigned yet. */
+    iommu->qi_msi.irq = -1; /* No irq assigned yet. */
 
     iommu->intel = alloc_intel_iommu();
     if ( iommu->intel == NULL )
@@ -1228,6 +1349,9 @@ void __init iommu_free(struct acpi_drhd_unit *drhd)
     free_intel_iommu(iommu->intel);
     if ( iommu->msi.irq >= 0 )
         destroy_irq(iommu->msi.irq);
+    if ( iommu->qi_msi.irq >= 0 )
+        destroy_irq(iommu->qi_msi.irq);
+
     xfree(iommu);
 }
 
@@ -1985,6 +2109,9 @@ static void adjust_irq_affinity(struct acpi_drhd_unit 
*drhd)
          cpumask_intersects(&node_to_cpumask(node), cpumask) )
         cpumask = &node_to_cpumask(node);
     dma_msi_set_affinity(irq_to_desc(drhd->iommu->msi.irq), cpumask);
+
+    if ( ats_enabled )
+        qi_msi_set_affinity(irq_to_desc(drhd->iommu->qi_msi.irq), cpumask);
 }
 
 int adjust_vtd_irq_affinities(void)
@@ -2183,6 +2310,11 @@ int __init intel_vtd_setup(void)
 
         ret = iommu_set_interrupt(drhd, &dma_msi_type, "dmar", 
&drhd->iommu->msi,
                                   iommu_page_fault);
+        if ( ats_enabled )
+            ret = iommu_set_interrupt(drhd, &qi_msi_type, "qi",
+                                      &drhd->iommu->qi_msi,
+                                      iommu_qi_completion);
+
         if ( ret )
         {
             dprintk(XENLOG_ERR VTDPREFIX, "IOMMU: interrupt setup failed\n");
@@ -2191,6 +2323,7 @@ int __init intel_vtd_setup(void)
     }
 
     softirq_tasklet_init(&vtd_fault_tasklet, do_iommu_page_fault, 0);
+    softirq_tasklet_init(&vtd_qi_tasklet, do_iommu_qi_completion, 0);
 
     if ( !iommu_qinval && iommu_intremap )
     {
diff --git a/xen/drivers/passthrough/vtd/iommu.h 
b/xen/drivers/passthrough/vtd/iommu.h
index ac71ed1..52d328f 100644
--- a/xen/drivers/passthrough/vtd/iommu.h
+++ b/xen/drivers/passthrough/vtd/iommu.h
@@ -47,6 +47,11 @@
 #define    DMAR_IQH_REG    0x80    /* invalidation queue head */
 #define    DMAR_IQT_REG    0x88    /* invalidation queue tail */
 #define    DMAR_IQA_REG    0x90    /* invalidation queue addr */
+#define    DMAR_IECTL_REG  0xA0    /* invalidation event control register */
+#define    DMAR_IEDATA_REG 0xA4    /* invalidation event data register */
+#define    DMAR_IEADDR_REG 0xA8    /* invalidation event address register */
+#define    DMAR_IEUADDR_REG 0xAC   /* invalidation event upper address 
register */
+#define    DMAR_ICS_REG    0x9C    /* invalidation completion status register 
*/
 #define    DMAR_IRTA_REG   0xB8    /* intr remap */
 
 #define OFFSET_STRIDE        (9)
@@ -165,6 +170,10 @@
 /* FECTL_REG */
 #define DMA_FECTL_IM (((u64)1) << 31)
 
+/* IECTL_REG */
+#define DMA_IECTL_IM (((u64)1) << 31)
+
+
 /* FSTS_REG */
 #define DMA_FSTS_PFO ((u64)1 << 0)
 #define DMA_FSTS_PPF ((u64)1 << 1)
@@ -515,6 +524,7 @@ struct iommu {
     spinlock_t register_lock; /* protect iommu register handling */
     u64 root_maddr; /* root entry machine address */
     struct msi_desc msi;
+    struct msi_desc qi_msi;
     struct intel_iommu *intel;
     unsigned long *domid_bitmap;  /* domain id bitmap */
     u16 *domid_map;               /* domain id mapping array */
-- 
1.8.3.2


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.