To: Keir Fraser <keir.fraser@xxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH V2] passthrough: deliver IRQs even if VCPU#0 is halted
From: Kouya Shimura <kouya@xxxxxxxxxxxxxx>
Date: Fri, 10 Jul 2009 15:34:59 +0900
Cc: xen-devel@xxxxxxxxxxxxxxxxxxx
Essentially nothing has changed from my previous patch.
The differences are:
- split hvm_dirq_assist() for readability
- slightly more efficient handling of the MP race condition

The attached patch is not very readable on its own, so here is a
hand-made diff for human readers:
=======================================================================
 void hvm_dirq_assist(struct vcpu *v)
 {
     struct domain *d = v->domain;
     struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
-    unsigned int irq;
+    unsigned int irq, i, dirq_mask_size;
+    unsigned long mask;
 
-    if ( !iommu_enabled || (v->vcpu_id != 0) || (hvm_irq_dpci == NULL) )
+    if ( !iommu_enabled || (hvm_irq_dpci == NULL) )
         return;
 
-    for ( irq = find_first_bit(hvm_irq_dpci->dirq_mask, d->nr_pirqs);
-          irq < d->nr_pirqs;
-          irq = find_next_bit(hvm_irq_dpci->dirq_mask, d->nr_pirqs, irq + 1) )
-    {
-        if ( !test_and_clear_bit(irq, hvm_irq_dpci->dirq_mask) )
-            continue;
-
-       __hvm_dirq_assist(d, hvm_irq_dpci, irq);
-    }
+    dirq_mask_size = BITS_TO_LONGS(d->nr_pirqs);
+    for (i = 0; i < dirq_mask_size; i++)
+    {
+        mask = xchg(&hvm_irq_dpci->dirq_mask[i], 0L);
+
+        while ( mask != 0 )
+        {
+            irq = find_first_set_bit(mask);
+            mask &= ~(1UL << irq);
+            irq += (i * BITS_PER_LONG);
+            if ( irq < d->nr_pirqs )
+                __hvm_dirq_assist(d, hvm_irq_dpci, irq);
+        }
+    }
 }
=======================================================================

While d->nr_pirqs <= 64, dirq_mask_size is 1 (d->nr_pirqs is normally 32),
so the efficiency is not much worse than the old v->vcpu_id != 0 check.
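
For readers outside the Xen tree, below is a minimal standalone sketch of the
same word-at-a-time drain pattern. It is an illustration only, not Xen code:
GCC's __atomic_exchange_n stands in for xchg(), __builtin_ctzl for
find_first_set_bit(), and handle_irq()/NR_PIRQS are made-up stand-ins for
__hvm_dirq_assist() and d->nr_pirqs.
=======================================================================
#include <limits.h>
#include <stdio.h>

#define NR_PIRQS         32
#define BITS_PER_LONG    (CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long dirq_mask[BITS_TO_LONGS(NR_PIRQS)];

/* Stand-in for __hvm_dirq_assist(). */
static void handle_irq(unsigned int irq)
{
    printf("assist irq %u\n", irq);
}

static void drain_pending_irqs(void)
{
    unsigned int i, irq;
    unsigned long mask;

    for ( i = 0; i < BITS_TO_LONGS(NR_PIRQS); i++ )
    {
        /* Atomically snapshot this word of the bitmap and clear it. */
        mask = __atomic_exchange_n(&dirq_mask[i], 0UL, __ATOMIC_ACQ_REL);

        /* Walk the private snapshot bit by bit. */
        while ( mask != 0 )
        {
            irq = (unsigned int)__builtin_ctzl(mask);
            mask &= ~(1UL << irq);
            irq += i * BITS_PER_LONG;
            if ( irq < NR_PIRQS )
                handle_irq(irq);
        }
    }
}

int main(void)
{
    dirq_mask[0] |= (1UL << 3) | (1UL << 17);  /* pretend IRQs 3 and 17 fired */
    drain_pending_irqs();
    return 0;
}
=======================================================================
The atomic swap takes a private snapshot of each word, so a bit set
concurrently by another CPU stays in the bitmap and is handled on the
next kick; no pending IRQ is lost and no lock is needed on this path.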

Thanks,
Kouya


# HG changeset patch
# User Kouya Shimura <kouya@xxxxxxxxxxxxxx>
# Date 1247204881 -32400
# Node ID 7b1506cac37df608120ec665cef3dd3bb1ddc94a
# Parent  20743a0a4ac5c9969beebd7bd414ace7de030cfd
passthrough: deliver IRQs even if VCPU#0 is halted

Signed-off-by: Kouya Shimura <kouya@xxxxxxxxxxxxxx>

diff -r 20743a0a4ac5 -r 7b1506cac37d xen/drivers/passthrough/io.c
--- a/xen/drivers/passthrough/io.c      Thu Jul 09 17:06:40 2009 +0100
+++ b/xen/drivers/passthrough/io.c      Fri Jul 10 14:48:01 2009 +0900
@@ -362,6 +362,7 @@ int hvm_do_IRQ_dpci(struct domain *d, un
 int hvm_do_IRQ_dpci(struct domain *d, unsigned int mirq)
 {
     struct hvm_irq_dpci *dpci = domain_get_irq_dpci(d);
+    struct vcpu *v;
 
     ASSERT(spin_is_locked(&irq_desc[domain_irq_to_vector(d, mirq)].lock));
     if ( !iommu_enabled || (d == dom0) || !dpci ||
@@ -378,7 +379,14 @@ int hvm_do_IRQ_dpci(struct domain *d, un
     if ( pt_irq_need_timer(dpci->mirq[mirq].flags) )
         set_timer(&dpci->hvm_timer[domain_irq_to_vector(d, mirq)],
                   NOW() + PT_IRQ_TIME_OUT);
-    vcpu_kick(d->vcpu[0]);
+
+    /* Kick an online VCPU: usually VCPU#0, except in the kdump case. */
+    for_each_vcpu(d, v)
+        if ( !test_bit(_VPF_down, &v->pause_flags) )
+        {
+            vcpu_kick(v);
+            break;
+        }
 
     return 1;
 }
@@ -429,63 +437,76 @@ static int hvm_pci_msi_assert(struct dom
 }
 #endif
 
+static void __hvm_dirq_assist(
+    struct domain *d, struct hvm_irq_dpci *hvm_irq_dpci, unsigned int irq)
+{
+    uint32_t device, intx;
+    struct dev_intx_gsi_link *digl;
+
+    spin_lock(&d->event_lock);
+#ifdef SUPPORT_MSI_REMAPPING
+    if ( hvm_irq_dpci->mirq[irq].flags & HVM_IRQ_DPCI_GUEST_MSI )
+    {
+        hvm_pci_msi_assert(d, irq);
+        spin_unlock(&d->event_lock);
+        return;
+    }
+#endif
+    if ( pt_irq_need_timer(hvm_irq_dpci->mirq[irq].flags) )
+        stop_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)]);
+
+    list_for_each_entry ( digl, &hvm_irq_dpci->mirq[irq].digl_list, list )
+    {
+        device = digl->device;
+        intx = digl->intx;
+        hvm_pci_intx_assert(d, device, intx);
+        hvm_irq_dpci->mirq[irq].pending++;
+
+#ifdef SUPPORT_MSI_REMAPPING
+        if ( hvm_irq_dpci->mirq[irq].flags & HVM_IRQ_DPCI_TRANSLATE )
+        {
+            /* for translated MSI to INTx, eoi as early as possible */
+            __msi_pirq_eoi(d, irq);
+        }
+#endif
+    }
+
+    /*
+     * Set a timer to see if the guest can finish the interrupt or not. For
+     * example, the guest OS may unmask the PIC during boot, before the
+     * guest driver is loaded. hvm_pci_intx_assert() may succeed, but the
+     * guest will never deal with the irq, then the physical interrupt line
+     * will never be deasserted.
+     */
+    if ( pt_irq_need_timer(hvm_irq_dpci->mirq[irq].flags) )
+        set_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)],
+                  NOW() + PT_IRQ_TIME_OUT);
+    spin_unlock(&d->event_lock);
+}
+
 void hvm_dirq_assist(struct vcpu *v)
 {
-    unsigned int irq;
-    uint32_t device, intx;
     struct domain *d = v->domain;
     struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
-    struct dev_intx_gsi_link *digl;
+    unsigned int irq, i, dirq_mask_size;
+    unsigned long mask;
 
-    if ( !iommu_enabled || (v->vcpu_id != 0) || (hvm_irq_dpci == NULL) )
+    if ( !iommu_enabled || (hvm_irq_dpci == NULL) )
         return;
 
-    for ( irq = find_first_bit(hvm_irq_dpci->dirq_mask, d->nr_pirqs);
-          irq < d->nr_pirqs;
-          irq = find_next_bit(hvm_irq_dpci->dirq_mask, d->nr_pirqs, irq + 1) )
+    dirq_mask_size = BITS_TO_LONGS(d->nr_pirqs);
+    for (i = 0; i < dirq_mask_size; i++)
     {
-        if ( !test_and_clear_bit(irq, hvm_irq_dpci->dirq_mask) )
-            continue;
+        mask = xchg(&hvm_irq_dpci->dirq_mask[i], 0L);
 
-        spin_lock(&d->event_lock);
-#ifdef SUPPORT_MSI_REMAPPING
-        if ( hvm_irq_dpci->mirq[irq].flags & HVM_IRQ_DPCI_GUEST_MSI )
+        while ( mask != 0 )
         {
-            hvm_pci_msi_assert(d, irq);
-            spin_unlock(&d->event_lock);
-            continue;
+            irq = find_first_set_bit(mask);
+            mask &= ~(1UL << irq);
+            irq += (i * BITS_PER_LONG);
+            if ( irq < d->nr_pirqs )
+                __hvm_dirq_assist(d, hvm_irq_dpci, irq);
         }
-#endif
-        if ( pt_irq_need_timer(hvm_irq_dpci->mirq[irq].flags) )
-            stop_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)]);
-
-        list_for_each_entry ( digl, &hvm_irq_dpci->mirq[irq].digl_list, list )
-        {
-            device = digl->device;
-            intx = digl->intx;
-            hvm_pci_intx_assert(d, device, intx);
-            hvm_irq_dpci->mirq[irq].pending++;
-
-#ifdef SUPPORT_MSI_REMAPPING
-            if ( hvm_irq_dpci->mirq[irq].flags & HVM_IRQ_DPCI_TRANSLATE )
-            {
-            /* for translated MSI to INTx interrupt, eoi as early as possible */
-                __msi_pirq_eoi(d, irq);
-            }
-#endif
-        }
-
-        /*
-         * Set a timer to see if the guest can finish the interrupt or not. For
-         * example, the guest OS may unmask the PIC during boot, before the
-         * guest driver is loaded. hvm_pci_intx_assert() may succeed, but the
-         * guest will never deal with the irq, then the physical interrupt line
-         * will never be deasserted.
-         */
-        if ( pt_irq_need_timer(hvm_irq_dpci->mirq[irq].flags) )
-            set_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)],
-                      NOW() + PT_IRQ_TIME_OUT);
-        spin_unlock(&d->event_lock);
     }
 }
 
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel