To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] vt-d: Support intra-domain shared interrupt.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 09 Nov 2007 04:20:58 -0800
Delivery-date: Fri, 09 Nov 2007 05:30:40 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir@xxxxxxxxxxxxx>
# Date 1194448806 0
# Node ID 4fd6610949f1e3ee3286dead75411bfb1a916d8e
# Parent  644e7577f6ee00f746a63a63ca16284cc31f9ee8
vt-d: Support intra-domain shared interrupt.

Inter-domain interrupt sharing is already supported via the timeout
method, but intra-domain sharing, i.e. assigning multiple devices
that share a physical IRQ to the same domain, is not. This patch
implements intra-domain shared interrupt support. In addition, the
patch maps each PCI link to a guest device/INTx in
pt_irq_create_bind_vtd() instead of mapping the ISA IRQ directly,
because at that point the ISA IRQs obtained from pci_link are always 0.
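
For illustration only (this is not the patch code itself, and all names
are simplified): a minimal, self-contained C sketch of the idea. A
single machine IRQ keeps a list of guest (device, INTx, GSI) entries,
every entry is asserted when the IRQ fires, and the host line is only
EOIed once the guest has acknowledged each injection.

#include <stdio.h>
#include <stdlib.h>

struct dev_intx_gsi {
    struct dev_intx_gsi *next;      /* simplified list link */
    unsigned char device, intx, gsi;
};

struct mirq_mapping {
    int pending;                    /* injections still awaiting guest EOI */
    struct dev_intx_gsi *head;      /* guest device/INTx pairs on this IRQ */
};

/* Bind another device sharing the same machine IRQ to the domain. */
static void bind_shared(struct mirq_mapping *m, unsigned char dev,
                        unsigned char intx, unsigned char gsi)
{
    struct dev_intx_gsi *d = malloc(sizeof(*d));
    if ( !d )
        return;
    d->device = dev;
    d->intx = intx;
    d->gsi = gsi;
    d->next = m->head;
    m->head = d;
}

/* On a device interrupt, assert every guest INTx sharing this IRQ. */
static void dirq_assist(struct mirq_mapping *m)
{
    for ( struct dev_intx_gsi *d = m->head; d; d = d->next )
    {
        printf("assert guest device %u INTx %u (gsi %u)\n",
               (unsigned)d->device, (unsigned)d->intx, (unsigned)d->gsi);
        m->pending++;
    }
}

/* On guest EOI, only release the host line once all injections are done. */
static void dpci_eoi(struct mirq_mapping *m)
{
    if ( --m->pending == 0 )
        printf("all pending injections handled: EOI machine IRQ\n");
}

int main(void)
{
    struct mirq_mapping m = { 0, NULL };
    bind_shared(&m, 3, 0, 19);      /* two devices sharing one physical IRQ */
    bind_shared(&m, 4, 1, 20);
    dirq_assist(&m);                /* one host interrupt, two guest asserts */
    dpci_eoi(&m);
    dpci_eoi(&m);                   /* second EOI releases the machine IRQ */
    return 0;
}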

Note that assigning multiple devices to a guest that uses the PIC to
handle interrupts may fail, because different links can be routed to
the same IRQ.
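
For illustration only (again not part of the patch; the swizzle formula
and routing table below are assumptions, not values taken from Xen):
two devices whose INTx pins land on different links can still end up on
the same PIC IRQ once firmware routing is applied, which is why the
binding may fail.

#include <stdio.h>

/* Conventional "barber pole" INTx swizzle; the exact formula is assumed. */
static unsigned int intx_link(unsigned int device, unsigned int intx)
{
    return (device + intx) & 3;
}

int main(void)
{
    /* Hypothetical firmware routing: link 0 and link 2 share ISA IRQ 11. */
    unsigned int route[4] = { 11, 10, 11, 5 };

    unsigned int link_a = intx_link(2, 2);  /* device 2, INTC -> link 0 */
    unsigned int link_b = intx_link(1, 1);  /* device 1, INTB -> link 2 */

    printf("device 2 uses ISA IRQ %u, device 1 uses ISA IRQ %u\n",
           route[link_a], route[link_b]);
    if ( route[link_a] == route[link_b] )
        printf("different links collide on one PIC IRQ: binding may fail\n");
    return 0;
}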

Signed-off-by: Weidong Han <weidong.han@xxxxxxxxx>
---
 xen/arch/x86/hvm/irq.c        |   13 +---
 xen/arch/x86/hvm/vmx/intr.c   |   17 ++++-
 xen/arch/x86/hvm/vmx/vtd/io.c |  121 ++++++++++++++++++++++++++++--------------
 xen/include/asm-x86/hvm/irq.h |   29 +++++++---
 4 files changed, 119 insertions(+), 61 deletions(-)

diff -r 644e7577f6ee -r 4fd6610949f1 xen/arch/x86/hvm/irq.c
--- a/xen/arch/x86/hvm/irq.c    Wed Nov 07 14:53:32 2007 +0000
+++ b/xen/arch/x86/hvm/irq.c    Wed Nov 07 15:20:06 2007 +0000
@@ -192,15 +192,12 @@ void hvm_set_pci_link_route(struct domai
     hvm_irq->pci_link.route[link] = isa_irq;
 
     /* PCI pass-through fixup. */
-    if ( hvm_irq->dpci && hvm_irq->dpci->girq[old_isa_irq].valid )
-    {
-        uint32_t device = hvm_irq->dpci->girq[old_isa_irq].device;
-        uint32_t intx = hvm_irq->dpci->girq[old_isa_irq].intx;
-        if ( link == hvm_pci_intx_link(device, intx) )
-        {
-            hvm_irq->dpci->girq[isa_irq] = hvm_irq->dpci->girq[old_isa_irq];
+    if ( hvm_irq->dpci && hvm_irq->dpci->link[link].valid )
+    {
+        hvm_irq->dpci->girq[isa_irq] = hvm_irq->dpci->link[link];
+        if ( hvm_irq->dpci->girq[old_isa_irq].device ==
+             hvm_irq->dpci->link[link].device )
             hvm_irq->dpci->girq[old_isa_irq].valid = 0;
-        }
     }
 
     if ( hvm_irq->pci_link_assert_count[link] == 0 )
diff -r 644e7577f6ee -r 4fd6610949f1 xen/arch/x86/hvm/vmx/intr.c
--- a/xen/arch/x86/hvm/vmx/intr.c       Wed Nov 07 14:53:32 2007 +0000
+++ b/xen/arch/x86/hvm/vmx/intr.c       Wed Nov 07 15:20:06 2007 +0000
@@ -113,6 +113,7 @@ static void vmx_dirq_assist(struct vcpu 
     uint32_t device, intx;
     struct domain *d = v->domain;
     struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
+    struct dev_intx_gsi *dig;
 
     if ( !vtd_enabled || (v->vcpu_id != 0) || (hvm_irq_dpci == NULL) )
         return;
@@ -122,11 +123,17 @@ static void vmx_dirq_assist(struct vcpu 
           irq = find_next_bit(hvm_irq_dpci->dirq_mask, NR_IRQS, irq + 1) )
     {
         stop_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(irq)]);
-
-        test_and_clear_bit(irq, &hvm_irq_dpci->dirq_mask);
-        device = hvm_irq_dpci->mirq[irq].device;
-        intx = hvm_irq_dpci->mirq[irq].intx;
-        hvm_pci_intx_assert(d, device, intx);
+        clear_bit(irq, &hvm_irq_dpci->dirq_mask);
+
+        list_for_each_entry ( dig, &hvm_irq_dpci->mirq[irq].dig_list, list )
+        {
+            device = dig->device;
+            intx = dig->intx;
+            hvm_pci_intx_assert(d, device, intx);
+            spin_lock(&hvm_irq_dpci->dirq_lock);
+            hvm_irq_dpci->mirq[irq].pending++;
+            spin_unlock(&hvm_irq_dpci->dirq_lock);
+        }
 
         /*
          * Set a timer to see if the guest can finish the interrupt or not. For
diff -r 644e7577f6ee -r 4fd6610949f1 xen/arch/x86/hvm/vmx/vtd/io.c
--- a/xen/arch/x86/hvm/vmx/vtd/io.c     Wed Nov 07 14:53:32 2007 +0000
+++ b/xen/arch/x86/hvm/vmx/vtd/io.c     Wed Nov 07 15:20:06 2007 +0000
@@ -47,14 +47,27 @@
 
 static void pt_irq_time_out(void *data)
 {
-    struct hvm_irq_dpci_mapping *irq_map = data;
-    unsigned int guest_gsi, machine_gsi;
-    struct domain *d = irq_map->dom;
-
-    guest_gsi = irq_map->guest_gsi;
-    machine_gsi = d->arch.hvm_domain.irq.dpci->girq[guest_gsi].machine_gsi;
-    clear_bit(machine_gsi, d->arch.hvm_domain.irq.dpci->dirq_mask);
-    hvm_dpci_eoi(irq_map->dom, guest_gsi, NULL);
+    struct hvm_mirq_dpci_mapping *irq_map = data;
+    unsigned int guest_gsi, machine_gsi = 0;
+    struct hvm_irq_dpci *dpci = irq_map->dom->arch.hvm_domain.irq.dpci;
+    struct dev_intx_gsi *dig;
+    uint32_t device, intx;
+
+    list_for_each_entry ( dig, &irq_map->dig_list, list )
+    {
+        guest_gsi = dig->gsi;
+        machine_gsi = dpci->girq[guest_gsi].machine_gsi;
+        device = dig->device;
+        intx = dig->intx;
+        hvm_pci_intx_deassert(irq_map->dom, device, intx);
+    }
+
+    clear_bit(machine_gsi, dpci->dirq_mask);
+    stop_timer(&dpci->hvm_timer[irq_to_vector(machine_gsi)]);
+    spin_lock(&dpci->dirq_lock);
+    dpci->mirq[machine_gsi].pending = 0;
+    spin_unlock(&dpci->dirq_lock);
+    pirq_guest_eoi(irq_map->dom, machine_gsi);
 }
 
 int pt_irq_create_bind_vtd(
@@ -62,8 +75,8 @@ int pt_irq_create_bind_vtd(
 {
     struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
     uint32_t machine_gsi, guest_gsi;
-    uint32_t device, intx;
-    uint32_t link, isa_irq;
+    uint32_t device, intx, link;
+    struct dev_intx_gsi *dig;
 
     if ( hvm_irq_dpci == NULL )
     {
@@ -72,6 +85,9 @@ int pt_irq_create_bind_vtd(
             return -ENOMEM;
 
         memset(hvm_irq_dpci, 0, sizeof(*hvm_irq_dpci));
+        spin_lock_init(&hvm_irq_dpci->dirq_lock);
+        for ( int i = 0; i < NR_IRQS; i++ )
+            INIT_LIST_HEAD(&hvm_irq_dpci->mirq[i].dig_list);
 
         if ( cmpxchg((unsigned long *)&d->arch.hvm_domain.irq.dpci,
                      0, (unsigned long)hvm_irq_dpci) != 0 )
@@ -85,35 +101,42 @@ int pt_irq_create_bind_vtd(
     intx = pt_irq_bind->u.pci.intx;
     guest_gsi = hvm_pci_intx_gsi(device, intx);
     link = hvm_pci_intx_link(device, intx);
-    isa_irq = d->arch.hvm_domain.irq.pci_link.route[link];
-
-    hvm_irq_dpci->mirq[machine_gsi].valid = 1;
-    hvm_irq_dpci->mirq[machine_gsi].device = device;
-    hvm_irq_dpci->mirq[machine_gsi].intx = intx;
-    hvm_irq_dpci->mirq[machine_gsi].guest_gsi = guest_gsi;
-    hvm_irq_dpci->mirq[machine_gsi].dom = d;
-
+
+    dig = xmalloc(struct dev_intx_gsi);
+    if ( !dig )
+        return -ENOMEM;
+
+    dig->device = device;
+    dig->intx = intx;
+    dig->gsi = guest_gsi;
+    list_add_tail(&dig->list,
+                  &hvm_irq_dpci->mirq[machine_gsi].dig_list);
+ 
     hvm_irq_dpci->girq[guest_gsi].valid = 1;
     hvm_irq_dpci->girq[guest_gsi].device = device;
     hvm_irq_dpci->girq[guest_gsi].intx = intx;
     hvm_irq_dpci->girq[guest_gsi].machine_gsi = machine_gsi;
-    hvm_irq_dpci->girq[guest_gsi].dom = d;
-
-    hvm_irq_dpci->girq[isa_irq].valid = 1;
-    hvm_irq_dpci->girq[isa_irq].device = device;
-    hvm_irq_dpci->girq[isa_irq].intx = intx;
-    hvm_irq_dpci->girq[isa_irq].machine_gsi = machine_gsi;
-    hvm_irq_dpci->girq[isa_irq].dom = d;
-
-    init_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(machine_gsi)],
-               pt_irq_time_out, &hvm_irq_dpci->mirq[machine_gsi], 0);
-
-    /* Deal with GSI for legacy devices. */
-    pirq_guest_bind(d->vcpu[0], machine_gsi, BIND_PIRQ__WILL_SHARE);
-    gdprintk(XENLOG_ERR,
-             "XEN_DOMCTL_irq_mapping: m_irq = %x device = %x intx = %x\n",
+
+    hvm_irq_dpci->link[link].valid = 1;
+    hvm_irq_dpci->link[link].device = device;
+    hvm_irq_dpci->link[link].intx = intx;
+    hvm_irq_dpci->link[link].machine_gsi = machine_gsi;
+
+    /* Bind the same mirq once in the same domain */
+    if ( !hvm_irq_dpci->mirq[machine_gsi].valid )
+    {
+        hvm_irq_dpci->mirq[machine_gsi].valid = 1;
+        hvm_irq_dpci->mirq[machine_gsi].dom = d;
+
+        init_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(machine_gsi)],
+                   pt_irq_time_out, &hvm_irq_dpci->mirq[machine_gsi], 0);
+        /* Deal with gsi for legacy devices */
+        pirq_guest_bind(d->vcpu[0], machine_gsi, BIND_PIRQ__WILL_SHARE);
+    }
+
+    gdprintk(XENLOG_INFO,
+             "VT-d irq bind: m_irq = %x device = %x intx = %x\n",
              machine_gsi, device, intx);
-
     return 0;
 }
 
@@ -150,14 +173,22 @@ void hvm_dpci_eoi(struct domain *d, unsi
         return;
 
     machine_gsi = hvm_irq_dpci->girq[guest_gsi].machine_gsi;
-    stop_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(machine_gsi)]);
     device = hvm_irq_dpci->girq[guest_gsi].device;
     intx = hvm_irq_dpci->girq[guest_gsi].intx;
-    gdprintk(XENLOG_INFO, "hvm_dpci_eoi:: device %x intx %x\n",
-             device, intx);
     hvm_pci_intx_deassert(d, device, intx);
-    if ( (ent == NULL) || !ent->fields.mask )
-        pirq_guest_eoi(d, machine_gsi);
+ 
+    spin_lock(&hvm_irq_dpci->dirq_lock);
+    if ( --hvm_irq_dpci->mirq[machine_gsi].pending == 0 )
+    {
+        spin_unlock(&hvm_irq_dpci->dirq_lock);
+
+        gdprintk(XENLOG_INFO, "hvm_dpci_eoi:: mirq = %x\n", machine_gsi);
+        stop_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(machine_gsi)]);
+        if ( (ent == NULL) || !ent->fields.mask )
+            pirq_guest_eoi(d, machine_gsi);
+    }
+    else
+        spin_unlock(&hvm_irq_dpci->dirq_lock);
 }
 
 void iommu_domain_destroy(struct domain *d)
@@ -165,8 +196,9 @@ void iommu_domain_destroy(struct domain 
     struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
     uint32_t i;
     struct hvm_iommu *hd  = domain_hvm_iommu(d);
-    struct list_head *ioport_list, *tmp;
+    struct list_head *ioport_list, *dig_list, *tmp;
     struct g2m_ioport *ioport;
+    struct dev_intx_gsi *dig;
 
     if ( !vtd_enabled )
         return;
@@ -178,7 +210,16 @@ void iommu_domain_destroy(struct domain 
             {
                 pirq_guest_unbind(d, i);
                 kill_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(i)]);
+
+                list_for_each_safe ( dig_list, tmp,
+                                     &hvm_irq_dpci->mirq[i].dig_list )
+                {
+                    dig = list_entry(dig_list, struct dev_intx_gsi, list);
+                    list_del(&dig->list);
+                    xfree(dig);
+                }
             }
+
         d->arch.hvm_domain.irq.dpci = NULL;
         xfree(hvm_irq_dpci);
     }
diff -r 644e7577f6ee -r 4fd6610949f1 xen/include/asm-x86/hvm/irq.h
--- a/xen/include/asm-x86/hvm/irq.h     Wed Nov 07 14:53:32 2007 +0000
+++ b/xen/include/asm-x86/hvm/irq.h     Wed Nov 07 15:20:06 2007 +0000
@@ -30,22 +30,35 @@
 #include <asm/hvm/vioapic.h>
 #include <public/hvm/save.h>
 
-struct hvm_irq_dpci_mapping {
+struct dev_intx_gsi {
+    struct list_head list;
+    uint8_t device;
+    uint8_t intx;
+    uint8_t gsi;
+};
+
+struct hvm_mirq_dpci_mapping {
+    uint8_t valid;
+    int pending;
+    struct list_head dig_list;
+    struct domain *dom;
+};
+
+struct hvm_girq_dpci_mapping {
     uint8_t valid;
     uint8_t device;
     uint8_t intx;
-    struct domain *dom;
-    union {
-        uint8_t guest_gsi;
-        uint8_t machine_gsi;
-    };
+    uint8_t machine_gsi;
 };
 
 struct hvm_irq_dpci {
+    spinlock_t dirq_lock;
     /* Machine IRQ to guest device/intx mapping. */
-    struct hvm_irq_dpci_mapping mirq[NR_IRQS];
+    struct hvm_mirq_dpci_mapping mirq[NR_IRQS];
     /* Guest IRQ to guest device/intx mapping. */
-    struct hvm_irq_dpci_mapping girq[NR_IRQS];
+    struct hvm_girq_dpci_mapping girq[NR_IRQS];
+    /* Link to guest device/intx mapping. */
+    struct hvm_girq_dpci_mapping link[4];
     DECLARE_BITMAP(dirq_mask, NR_IRQS);
     struct timer hvm_timer[NR_IRQS];
 };

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
