
[Xen-devel] [PATCH] [IOMMU] queued invalidation cleanup




This patch cleans up queued invalidation: it adds a wrap-around check when allocating a slot in the invalidation queue, replaces the single lock-protected polling status word with per-vCPU status slots, and makes other minor cleanups.
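The key change is treating the invalidation queue as a ring and refusing to allocate a slot when advancing the tail would catch up with the head. As a rough illustration, here is a minimal standalone sketch of that wrap check (QINVAL_ENTRY_NR follows the patch's value of 1 << (IQA_REG_QS + 8) = 256; passing head/tail as plain integers is a simplification of the real DMAR_IQH_REG/DMAR_IQT_REG reads):

    #include <stdio.h>

    /* 256 16-byte descriptors per 4K queue page, per the patch */
    #define QINVAL_ENTRY_NR (1 << 8)

    /*
     * Return the free slot at the tail, or -1 if advancing the tail
     * would collide with the head (queue full).  One slot is kept
     * unused so a full ring stays distinguishable from an empty one.
     */
    static int next_index(unsigned int head, unsigned int tail)
    {
        if ( (tail + 1) % QINVAL_ENTRY_NR == head )
            return -1;
        return tail;
    }

    int main(void)
    {
        printf("%d\n", next_index(0, 5));   /* 5: slot available */
        printf("%d\n", next_index(6, 5));   /* -1: ring is full */
        return 0;
    }

The per-vCPU qinval_poll_status[] array follows the same idea: since each vCPU now polls its own status word, the old qinval_poll_lock that serialised all waiters can be dropped.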

Signed-off-by: Zhai Edwin <edwin.zhai@xxxxxxxxx>
Reviewed-by: Allen Kay <allen.m.kay@xxxxxxxxx>


--
best rgds,
edwin

Index: hv/xen/drivers/passthrough/vtd/iommu.c
===================================================================
--- hv.orig/xen/drivers/passthrough/vtd/iommu.c
+++ hv/xen/drivers/passthrough/vtd/iommu.c
@@ -90,7 +90,6 @@ static struct intel_iommu *alloc_intel_i
     memset(intel, 0, sizeof(struct intel_iommu));
 
     spin_lock_init(&intel->qi_ctrl.qinval_lock);
-    spin_lock_init(&intel->qi_ctrl.qinval_poll_lock);
     spin_lock_init(&intel->ir_ctrl.iremap_lock);
 
     return intel;
Index: hv/xen/drivers/passthrough/vtd/iommu.h
===================================================================
--- hv.orig/xen/drivers/passthrough/vtd/iommu.h
+++ hv/xen/drivers/passthrough/vtd/iommu.h
@@ -392,14 +392,20 @@ struct qinval_entry {
     }q;
 };
 
-struct poll_info {
-    u64 saddr;
-    u32 udata;
-};
+/* Order of queue invalidation pages */
+#define IQA_REG_QS       0
+#define NUM_QINVAL_PAGES (1 << IQA_REG_QS)
+
+/* Each entry is 16 bytes */
+#define QINVAL_ENTRY_NR  (1 << (IQA_REG_QS + 8))
+
+/* Status data flag */
+#define QINVAL_STAT_INIT  0
+#define QINVAL_STAT_DONE  1
+
+/* Queue invalidation head/tail shift */
+#define QINVAL_INDEX_SHIFT 4
 
-#define NUM_QINVAL_PAGES 1
-#define IQA_REG_QS       0    // derived from NUM_QINVAL_PAGES per VT-d spec.
-#define QINVAL_ENTRY_NR (PAGE_SIZE_4K*NUM_QINVAL_PAGES/sizeof(struct qinval_entry))
 #define qinval_present(v) ((v).lo & 1)
 #define qinval_fault_disable(v) (((v).lo >> 1) & 1)
 
@@ -441,8 +447,7 @@ struct qi_ctrl {
     u64 qinval_maddr;  /* queue invalidation page machine address */
     int qinval_index;                    /* queue invalidation index */
     spinlock_t qinval_lock;      /* lock for queue invalidation page */
-    spinlock_t qinval_poll_lock; /* lock for queue invalidation poll addr */
-    volatile u32 qinval_poll_status;     /* used by poll methord to sync */
+    volatile u32 qinval_poll_status[MAX_VIRT_CPUS]; /* per-vCPU poll status */
 };
 
 struct ir_ctrl {
Index: hv/xen/drivers/passthrough/vtd/qinval.c
===================================================================
--- hv.orig/xen/drivers/passthrough/vtd/qinval.c
+++ hv/xen/drivers/passthrough/vtd/qinval.c
@@ -45,18 +45,29 @@ static void print_qi_regs(struct iommu *
 
 static int qinval_next_index(struct iommu *iommu)
 {
-    u64 val;
-    val = dmar_readq(iommu->reg, DMAR_IQT_REG);
-    return (val >> 4);
+    u64 tail, head;
+
+    tail = dmar_readq(iommu->reg, DMAR_IQT_REG);
+    tail >>= QINVAL_INDEX_SHIFT;
+
+    head = dmar_readq(iommu->reg, DMAR_IQH_REG);
+    head >>= QINVAL_INDEX_SHIFT;
+
+    /* Wrap check: the queue is full when tail + 1 catches up with head */
+    if ( (tail + 1) % QINVAL_ENTRY_NR == head )
+        return -1;
+
+    return tail;
 }
 
 static int qinval_update_qtail(struct iommu *iommu, int index)
 {
     u64 val;
 
-    /* Need an ASSERT to insure that we have got register lock */
-    val = (index < (QINVAL_ENTRY_NR-1)) ? (index + 1) : 0;
-    dmar_writeq(iommu->reg, DMAR_IQT_REG, (val << 4));
+    /* Must hold the register lock when updating the tail */
+    ASSERT( spin_is_locked(&iommu->register_lock) );
+    val = (index + 1) % QINVAL_ENTRY_NR;
+    dmar_writeq(iommu->reg, DMAR_IQT_REG, (val << QINVAL_INDEX_SHIFT));
     return 0;
 }
 
@@ -146,6 +157,11 @@ int queue_invalidate_iotlb(struct iommu
     spin_lock_irqsave(&iommu->register_lock, flags);
 
     index = qinval_next_index(iommu);
+    if ( index == -1 )
+    {
+        spin_unlock_irqrestore(&iommu->register_lock, flags);
+        return -EBUSY;
+    }
     ret = gen_iotlb_inv_dsc(iommu, index, granu, dr, dw, did,
                             am, ih, addr);
     ret |= qinval_update_qtail(iommu, index);
@@ -180,29 +196,36 @@ static int gen_wait_dsc(struct iommu *io
 }
 
 static int queue_invalidate_wait(struct iommu *iommu,
-    u8 iflag, u8 sw, u8 fn, u32 sdata, volatile u32 *saddr)
+    u8 iflag, u8 sw, u8 fn)
 {
-    unsigned long flags;
     s_time_t start_time;
+    volatile u32 *saddr;
     int index = -1;
     int ret = -1;
+    unsigned long flags;
     struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);
 
-    spin_lock_irqsave(&qi_ctrl->qinval_poll_lock, flags);
-    spin_lock(&iommu->register_lock);
+    spin_lock_irqsave(&iommu->register_lock, flags);
     index = qinval_next_index(iommu);
-    if ( *saddr == 1 )
-        *saddr = 0;
-    ret = gen_wait_dsc(iommu, index, iflag, sw, fn, sdata, saddr);
+    if ( index == -1 )
+    {
+        spin_unlock_irqrestore(&iommu->register_lock, flags);
+        return -EBUSY;
+    }
+    saddr = &qi_ctrl->qinval_poll_status[current->vcpu_id];
+
+    if ( *saddr == QINVAL_STAT_DONE )
+        *saddr = QINVAL_STAT_INIT;
+    ret = gen_wait_dsc(iommu, index, iflag, sw, fn, QINVAL_STAT_DONE, saddr);
     ret |= qinval_update_qtail(iommu, index);
-    spin_unlock(&iommu->register_lock);
+    spin_unlock_irqrestore(&iommu->register_lock, flags);
 
     /* Now we don't support interrupt method */
     if ( sw )
     {
         /* In case all wait descriptor writes to same addr with same data */
         start_time = NOW();
-        while ( *saddr != 1 )
+        while ( *saddr != QINVAL_STAT_DONE )
         {
             if ( NOW() > (start_time + DMAR_OPERATION_TIMEOUT) )
             {
@@ -212,7 +235,6 @@ static int queue_invalidate_wait(struct
             cpu_relax();
         }
     }
-    spin_unlock_irqrestore(&qi_ctrl->qinval_poll_lock, flags);
     return ret;
 }
 
@@ -223,8 +245,7 @@ int invalidate_sync(struct iommu *iommu)
 
     if ( qi_ctrl->qinval_maddr != 0 )
     {
-        ret = queue_invalidate_wait(iommu,
-            0, 1, 1, 1, &qi_ctrl->qinval_poll_status);
+        ret = queue_invalidate_wait(iommu, 0, 1, 1);
         return ret;
     }
     return 0;
@@ -269,6 +290,11 @@ int qinval_device_iotlb(struct iommu *io
 
     spin_lock_irqsave(&iommu->register_lock, flags);
     index = qinval_next_index(iommu);
+    if ( index == -1 )
+    {
+        spin_unlock_irqrestore(&iommu->register_lock, flags);
+        return -EBUSY;
+    }
     ret = gen_dev_iotlb_inv_dsc(iommu, index, max_invs_pend,
                                 sid, size, addr);
     ret |= qinval_update_qtail(iommu, index);
@@ -311,6 +337,11 @@ int queue_invalidate_iec(struct iommu *i
 
     spin_lock_irqsave(&iommu->register_lock, flags);
     index = qinval_next_index(iommu);
+    if ( index == -1 )
+    {
+        spin_unlock_irqrestore(&iommu->register_lock, flags);
+        return -EBUSY;
+    }
     ret = gen_iec_inv_dsc(iommu, index, granu, im, iidx);
     ret |= qinval_update_qtail(iommu, index);
     spin_unlock_irqrestore(&iommu->register_lock, flags);