[Xen-changelog] [xen-unstable] MSI 2/6: change the pirq to be per-domain

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] MSI 2/6: change the pirq to be per-domain
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 01 May 2008 03:00:15 -0700
Delivery-date: Thu, 01 May 2008 07:47:05 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1209634222 -3600
# Node ID 6ecbb00e58cd891fb3c26455bb096ed5fec0b0aa
# Parent  8bced3d8a90756540df18c356b9f1d66acac2b87
MSI 2/6: change the pirq to be per-domain

Signed-off-by: Jiang Yunhong <yunhong.jiang@xxxxxxxxx>
Signed-off-by: Shan Haitao <haitao.shan@xxxxxxxxx>
---
 tools/ioemu/hw/pass-through.c         |   14 +
 tools/libxc/xc_physdev.c              |   40 ++++
 tools/libxc/xc_private.h              |   30 +++
 tools/libxc/xenctrl.h                 |   11 +
 tools/python/xen/lowlevel/xc/xc.c     |   29 +++
 tools/python/xen/xend/server/irqif.py |    7 
 tools/python/xen/xend/server/pciif.py |    6 
 xen/arch/x86/domain.c                 |    2 
 xen/arch/x86/hvm/vmx/intr.c           |    4 
 xen/arch/x86/io_apic.c                |   19 ++
 xen/arch/x86/irq.c                    |   29 +--
 xen/arch/x86/physdev.c                |  302 +++++++++++++++++++++++++++++++++-
 xen/drivers/passthrough/io.c          |   12 -
 xen/drivers/passthrough/vtd/x86/vtd.c |    2 
 xen/include/asm-x86/domain.h          |    5 
 xen/include/asm-x86/irq.h             |    6 
 xen/include/asm-x86/pirq.h            |   11 +
 xen/include/public/physdev.h          |   27 +++
 18 files changed, 530 insertions(+), 26 deletions(-)
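
For reference, a minimal caller-side sketch of the new libxc interface added
below (the helper name example_map_gsi and the xc_handle/domid/gsi parameters
are placeholders supplied by the surrounding toolstack code; error handling is
illustrative only):

    #include <stdio.h>
    #include <xenctrl.h>

    /* Sketch: map a physical GSI into a per-domain pirq, then unmap it again.
     * xc_handle is an open xc interface handle, domid the target domain and
     * gsi the physical interrupt line -- all assumed to come from the caller. */
    static int example_map_gsi(int xc_handle, int domid, int gsi)
    {
        int pirq = gsi;     /* IN/OUT: pass a hint, read back the mapped pirq */
        int rc = xc_physdev_map_pirq(xc_handle, domid, MAP_PIRQ_TYPE_GSI,
                                     gsi, &pirq);
        if ( rc )
        {
            fprintf(stderr, "xc_physdev_map_pirq failed: rc=%d\n", rc);
            return rc;
        }

        /* ... the toolstack would now bind/grant 'pirq' to the guest ... */

        return xc_physdev_unmap_pirq(xc_handle, domid, pirq);
    }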

diff -r 8bced3d8a907 -r 6ecbb00e58cd tools/ioemu/hw/pass-through.c
--- a/tools/ioemu/hw/pass-through.c     Thu May 01 10:26:58 2008 +0100
+++ b/tools/ioemu/hw/pass-through.c     Thu May 01 10:30:22 2008 +0100
@@ -519,7 +519,21 @@ struct pt_dev * register_real_device(PCI
     e_intx = assigned_device->dev.config[0x3d]-1;
 
     if ( PT_MACHINE_IRQ_AUTO == machine_irq )
+    {
+        int pirq = pci_dev->irq;
+
         machine_irq = pci_dev->irq;
+        rc = xc_physdev_map_pirq(xc_handle, domid, MAP_PIRQ_TYPE_GSI,
+                                machine_irq, &pirq);
+
+        if ( rc )
+        {
+            /* TBD: unregister device in case of an error */
+            PT_LOG("Error: Mapping irq failed, rc = %d\n", rc);
+        }
+        else
+            machine_irq = pirq;
+    }
 
     /* bind machine_irq to device */
     if ( 0 != machine_irq )
diff -r 8bced3d8a907 -r 6ecbb00e58cd tools/libxc/xc_physdev.c
--- a/tools/libxc/xc_physdev.c  Thu May 01 10:26:58 2008 +0100
+++ b/tools/libxc/xc_physdev.c  Thu May 01 10:30:22 2008 +0100
@@ -19,3 +19,43 @@ int xc_physdev_pci_access_modify(int xc_
     errno = ENOSYS;
     return -1;
 }
+
+int xc_physdev_map_pirq(int xc_handle,
+                        int domid,
+                        int type,
+                        int index,
+                        int *pirq)
+{
+    int rc;
+    struct physdev_map_pirq map;
+
+    if ( !pirq )
+        return -EINVAL;
+
+    map.domid = domid;
+    map.type = type;
+    map.index = index;
+    map.pirq = *pirq;
+
+    rc = do_physdev_op(xc_handle, PHYSDEVOP_map_pirq, &map);
+
+    if ( !rc )
+        *pirq = map.pirq;
+
+    return rc;
+}
+
+int xc_physdev_unmap_pirq(int xc_handle,
+                          int domid,
+                          int pirq)
+{
+    int rc;
+    struct physdev_unmap_pirq unmap;
+
+    unmap.domid = domid;
+    unmap.pirq = pirq;
+
+    rc = do_physdev_op(xc_handle, PHYSDEVOP_unmap_pirq, &unmap);
+
+    return rc;
+}
diff -r 8bced3d8a907 -r 6ecbb00e58cd tools/libxc/xc_private.h
--- a/tools/libxc/xc_private.h  Thu May 01 10:26:58 2008 +0100
+++ b/tools/libxc/xc_private.h  Thu May 01 10:30:22 2008 +0100
@@ -24,10 +24,12 @@
 #define DECLARE_HYPERCALL privcmd_hypercall_t hypercall = { 0 }
 #define DECLARE_DOMCTL struct xen_domctl domctl = { 0 }
 #define DECLARE_SYSCTL struct xen_sysctl sysctl = { 0 }
+#define DECLARE_PHYSDEV_OP struct physdev_op physdev_op = { 0 }
 #else
 #define DECLARE_HYPERCALL privcmd_hypercall_t hypercall
 #define DECLARE_DOMCTL struct xen_domctl domctl
 #define DECLARE_SYSCTL struct xen_sysctl sysctl
+#define DECLARE_PHYSDEV_OP struct physdev_op physdev_op
 #endif
 
 #undef PAGE_SHIFT
@@ -94,6 +96,34 @@ static inline int do_xen_version(int xc_
     hypercall.arg[1] = (unsigned long) dest;
 
     return do_xen_hypercall(xc_handle, &hypercall);
+}
+
+static inline int do_physdev_op(int xc_handle, int cmd, void *op)
+{
+    int ret = -1;
+
+    DECLARE_HYPERCALL;
+    hypercall.op = __HYPERVISOR_physdev_op;
+    hypercall.arg[0] = (unsigned long) cmd;
+    hypercall.arg[1] = (unsigned long) op;
+
+    if ( lock_pages(op, sizeof(*op)) != 0 )
+    {
+        PERROR("Could not lock memory for Xen hypercall");
+        goto out1;
+    }
+
+    if ( (ret = do_xen_hypercall(xc_handle, &hypercall)) < 0 )
+    {
+        if ( errno == EACCES )
+            DPRINTF("physdev operation failed -- need to"
+                    " rebuild the user-space tool set?\n");
+    }
+
+    unlock_pages(op, sizeof(*op));
+
+out1:
+    return ret;
 }
 
 static inline int do_domctl(int xc_handle, struct xen_domctl *domctl)
diff -r 8bced3d8a907 -r 6ecbb00e58cd tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h     Thu May 01 10:26:58 2008 +0100
+++ b/tools/libxc/xenctrl.h     Thu May 01 10:30:22 2008 +0100
@@ -21,6 +21,7 @@
 #include <stdint.h>
 #include <xen/xen.h>
 #include <xen/domctl.h>
+#include <xen/physdev.h>
 #include <xen/sysctl.h>
 #include <xen/version.h>
 #include <xen/event_channel.h>
@@ -849,6 +850,16 @@ int xc_gnttab_set_max_grants(int xcg_han
 int xc_gnttab_set_max_grants(int xcg_handle,
                             uint32_t count);
 
+int xc_physdev_map_pirq(int xc_handle,
+                        int domid,
+                        int type,
+                        int index,
+                        int *pirq);
+
+int xc_physdev_unmap_pirq(int xc_handle,
+                          int domid,
+                          int pirq);
+
 int xc_hvm_set_pci_intx_level(
     int xc_handle, domid_t dom,
     uint8_t domain, uint8_t bus, uint8_t device, uint8_t intx,
diff -r 8bced3d8a907 -r 6ecbb00e58cd tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Thu May 01 10:26:58 2008 +0100
+++ b/tools/python/xen/lowlevel/xc/xc.c Thu May 01 10:30:22 2008 +0100
@@ -799,6 +799,26 @@ static PyObject *pyxc_evtchn_reset(XcObj
     return zero;
 }
 
+static PyObject *pyxc_physdev_map_pirq(PyObject *self,
+                                       PyObject *args,
+                                       PyObject *kwds)
+{
+    XcObject *xc = (XcObject *)self;
+    uint32_t dom;
+    int index, pirq, ret;
+
+    static char *kwd_list[] = {"domid", "index", "pirq", NULL};
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iii", kwd_list,
+                                      &dom, &index, &pirq) )
+        return NULL;
+    ret = xc_physdev_map_pirq(xc->xc_handle, dom, MAP_PIRQ_TYPE_GSI,
+                             index, &pirq);
+    if ( ret != 0 )
+          return pyxc_error_to_exception();
+    return PyLong_FromUnsignedLong(pirq);
+}
+
 static PyObject *pyxc_physdev_pci_access_modify(XcObject *self,
                                                 PyObject *args,
                                                 PyObject *kwds)
@@ -1588,6 +1608,15 @@ static PyMethodDef pyxc_methods[] = {
       METH_VARARGS | METH_KEYWORDS, "\n"
       "Reset all connections.\n"
       " dom [int]: Domain to reset.\n" },
+
+    { "physdev_map_pirq",
+      (PyCFunction)pyxc_physdev_map_pirq,
+      METH_VARARGS | METH_KEYWORDS, "\n"
+      "map physical irq to guest pirq.\n"
+      " dom     [int]:      Identifier of domain to map for.\n"
+      " index   [int]:      physical irq.\n"
+      " pirq    [int]:      guest pirq.\n"
+      "Returns: [long] value of the param.\n" },
 
     { "physdev_pci_access_modify",
       (PyCFunction)pyxc_physdev_pci_access_modify,
diff -r 8bced3d8a907 -r 6ecbb00e58cd tools/python/xen/xend/server/irqif.py
--- a/tools/python/xen/xend/server/irqif.py     Thu May 01 10:26:58 2008 +0100
+++ b/tools/python/xen/xend/server/irqif.py     Thu May 01 10:30:22 2008 +0100
@@ -69,5 +69,10 @@ class IRQController(DevController):
             #todo non-fatal
             raise VmError(
                 'irq: Failed to configure irq: %d' % (pirq))
-
+        rc = xc.physdev_map_pirq(domid = self.getDomid(),
+                                index = pirq,
+                                pirq  = pirq)
+        if rc < 0:
+            raise VmError(
+                'irq: Failed to map irq %x' % (pirq))
         return (None, {}, {})
diff -r 8bced3d8a907 -r 6ecbb00e58cd tools/python/xen/xend/server/pciif.py
--- a/tools/python/xen/xend/server/pciif.py     Thu May 01 10:26:58 2008 +0100
+++ b/tools/python/xen/xend/server/pciif.py     Thu May 01 10:30:22 2008 +0100
@@ -270,6 +270,12 @@ class PciController(DevController):
                     allow_access = True)
             if rc<0:
                 raise VmError(('pci: failed to configure I/O memory on device '+
+                            '%s - errno=%d')%(dev.name,rc))
+            rc = xc.physdev_map_pirq(domid = fe_domid,
+                                   index = dev.irq,
+                                   pirq  = dev.irq)
+            if rc < 0:
+                raise VmError(('pci: failed to map irq on device '+
                             '%s - errno=%d')%(dev.name,rc))
 
         if dev.irq>0:
diff -r 8bced3d8a907 -r 6ecbb00e58cd xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Thu May 01 10:26:58 2008 +0100
+++ b/xen/arch/x86/domain.c     Thu May 01 10:30:22 2008 +0100
@@ -525,6 +525,8 @@ int arch_domain_create(struct domain *d,
             goto fail;
     }
 
+    spin_lock_init(&d->arch.irq_lock);
+
     if ( is_hvm_domain(d) )
     {
         if ( (rc = hvm_domain_initialise(d)) != 0 )
diff -r 8bced3d8a907 -r 6ecbb00e58cd xen/arch/x86/hvm/vmx/intr.c
--- a/xen/arch/x86/hvm/vmx/intr.c       Thu May 01 10:26:58 2008 +0100
+++ b/xen/arch/x86/hvm/vmx/intr.c       Thu May 01 10:30:22 2008 +0100
@@ -121,7 +121,7 @@ static void vmx_dirq_assist(struct vcpu 
         if ( !test_and_clear_bit(irq, &hvm_irq_dpci->dirq_mask) )
             continue;
 
-        stop_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(irq)]);
+        stop_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)]);
 
         list_for_each_entry ( digl, &hvm_irq_dpci->mirq[irq].digl_list, list )
         {
@@ -140,7 +140,7 @@ static void vmx_dirq_assist(struct vcpu 
          * guest will never deal with the irq, then the physical interrupt line
          * will never be deasserted.
          */
-        set_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(irq)],
+        set_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)],
                   NOW() + PT_IRQ_TIME_OUT);
     }
 }
diff -r 8bced3d8a907 -r 6ecbb00e58cd xen/arch/x86/io_apic.c
--- a/xen/arch/x86/io_apic.c    Thu May 01 10:26:58 2008 +0100
+++ b/xen/arch/x86/io_apic.c    Thu May 01 10:30:22 2008 +0100
@@ -40,6 +40,25 @@
 
 int (*ioapic_renumber_irq)(int ioapic, int irq);
 atomic_t irq_mis_count;
+
+int msi_irq_enable = 0;
+boolean_param("msi_irq_enable", msi_irq_enable);
+
+int domain_irq_to_vector(struct domain *d, int irq)
+{
+    if ( !msi_irq_enable )
+        return irq_to_vector(irq);
+    else
+        return d->arch.pirq_vector[irq];
+}
+
+int domain_vector_to_irq(struct domain *d, int vector)
+{
+    if ( !msi_irq_enable )
+        return vector_to_irq(vector);
+    else
+        return d->arch.vector_pirq[vector];
+}
 
 /* Where if anywhere is the i8259 connect in external int mode */
 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
diff -r 8bced3d8a907 -r 6ecbb00e58cd xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c        Thu May 01 10:26:58 2008 +0100
+++ b/xen/arch/x86/irq.c        Thu May 01 10:30:22 2008 +0100
@@ -203,7 +203,6 @@ static DEFINE_PER_CPU(struct pending_eoi
 
 static void __do_IRQ_guest(int vector)
 {
-    unsigned int        irq = vector_to_irq(vector);
     irq_desc_t         *desc = &irq_desc[vector];
     irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
     struct domain      *d;
@@ -232,7 +231,9 @@ static void __do_IRQ_guest(int vector)
 
     for ( i = 0; i < action->nr_guests; i++ )
     {
+        unsigned int irq;
         d = action->guest[i];
+        irq = domain_vector_to_irq(d, vector);
         if ( (action->ack_type != ACKTYPE_NONE) &&
              !test_and_set_bit(irq, d->pirq_mask) )
             action->in_flight++;
@@ -305,8 +306,10 @@ static void __pirq_guest_eoi(struct doma
     irq_desc_t         *desc;
     irq_guest_action_t *action;
     cpumask_t           cpu_eoi_map;
-
-    desc   = &irq_desc[irq_to_vector(irq)];
+    int                 vector;
+
+    vector = domain_irq_to_vector(d, irq);
+    desc   = &irq_desc[vector];
     action = (irq_guest_action_t *)desc->action;
 
     spin_lock_irq(&desc->lock);
@@ -324,7 +327,7 @@ static void __pirq_guest_eoi(struct doma
     if ( action->ack_type == ACKTYPE_UNMASK )
     {
         ASSERT(cpus_empty(action->cpu_eoi_map));
-        desc->handler->end(irq_to_vector(irq));
+        desc->handler->end(vector);
         spin_unlock_irq(&desc->lock);
         return;
     }
@@ -375,12 +378,12 @@ int pirq_guest_unmask(struct domain *d)
 }
 
 extern int ioapic_ack_new;
-int pirq_acktype(int irq)
+int pirq_acktype(struct domain *d, int irq)
 {
     irq_desc_t  *desc;
     unsigned int vector;
 
-    vector = irq_to_vector(irq);
+    vector = domain_irq_to_vector(d, irq);
     if ( vector == 0 )
         return ACKTYPE_NONE;
 
@@ -421,7 +424,7 @@ int pirq_acktype(int irq)
     return 0;
 }
 
-int pirq_shared(int irq)
+int pirq_shared(struct domain *d, int irq)
 {
     unsigned int        vector;
     irq_desc_t         *desc;
@@ -429,7 +432,7 @@ int pirq_shared(int irq)
     unsigned long       flags;
     int                 shared;
 
-    vector = irq_to_vector(irq);
+    vector = domain_irq_to_vector(d, irq);
     if ( vector == 0 )
         return 0;
 
@@ -453,7 +456,7 @@ int pirq_guest_bind(struct vcpu *v, int 
     cpumask_t           cpumask = CPU_MASK_NONE;
 
  retry:
-    vector = irq_to_vector(irq);
+    vector = domain_irq_to_vector(v->domain, irq);
     if ( vector == 0 )
         return -EINVAL;
 
@@ -487,7 +490,7 @@ int pirq_guest_bind(struct vcpu *v, int 
         action->nr_guests   = 0;
         action->in_flight   = 0;
         action->shareable   = will_share;
-        action->ack_type    = pirq_acktype(irq);
+        action->ack_type    = pirq_acktype(v->domain, irq);
         cpus_clear(action->cpu_eoi_map);
 
         desc->depth = 0;
@@ -538,13 +541,15 @@ int pirq_guest_bind(struct vcpu *v, int 
 
 int pirq_guest_unbind(struct domain *d, int irq)
 {
-    unsigned int        vector = irq_to_vector(irq);
-    irq_desc_t         *desc = &irq_desc[vector];
+    unsigned int        vector;
+    irq_desc_t         *desc;
     irq_guest_action_t *action;
     cpumask_t           cpu_eoi_map;
     unsigned long       flags;
     int                 i;
 
+    vector = domain_irq_to_vector(d, irq);
+    desc = &irq_desc[vector];
     BUG_ON(vector == 0);
 
     spin_lock_irqsave(&desc->lock, flags);
diff -r 8bced3d8a907 -r 6ecbb00e58cd xen/arch/x86/physdev.c
--- a/xen/arch/x86/physdev.c    Thu May 01 10:26:58 2008 +0100
+++ b/xen/arch/x86/physdev.c    Thu May 01 10:30:22 2008 +0100
@@ -7,6 +7,7 @@
 #include <xen/irq.h>
 #include <xen/event.h>
 #include <xen/guest_access.h>
+#include <xen/iocap.h>
 #include <asm/current.h>
 #include <asm/hypercall.h>
 #include <public/xen.h>
@@ -24,6 +25,263 @@ ioapic_guest_write(
 ioapic_guest_write(
     unsigned long physbase, unsigned int reg, u32 pval);
 
+static int get_free_pirq(struct domain *d, int type, int index)
+{
+    int i;
+
+    if ( d == NULL )
+        return -EINVAL;
+
+    ASSERT(spin_is_locked(&d->arch.irq_lock));
+
+    if ( type == MAP_PIRQ_TYPE_GSI )
+    {
+        for ( i = 16; i < NR_PIRQS; i++ )
+            if ( !d->arch.pirq_vector[i] )
+                break;
+        if ( i == NR_PIRQS )
+            return -ENOSPC;
+    }
+    else
+    {
+        for ( i = NR_PIRQS - 1; i >= 16; i-- )
+            if ( !d->arch.pirq_vector[i] )
+                break;
+        if ( i == 16 )
+            return -ENOSPC;
+    }
+
+    return i;
+}
+
+/*
+ * Caller must hold the irq_lock.
+ */
+static int map_domain_pirq(struct domain *d, int pirq, int vector, int type)
+{
+    int ret = 0;
+    int old_vector, old_pirq;
+
+    if ( d == NULL )
+        return -EINVAL;
+
+    ASSERT(spin_is_locked(&d->arch.irq_lock));
+
+    if ( !IS_PRIV(current->domain) )
+        return -EPERM;
+
+    if ( pirq < 0 || pirq >= NR_PIRQS || vector < 0 || vector >= NR_VECTORS )
+    {
+        gdprintk(XENLOG_G_ERR,
+                 "invalid pirq %x or vector %x\n", pirq, vector);
+        return -EINVAL;
+    }
+
+    old_vector = d->arch.pirq_vector[pirq];
+    old_pirq = d->arch.vector_pirq[vector];
+
+    if ( (old_vector && (old_vector != vector) ) ||
+         (old_pirq && (old_pirq != pirq)) )
+    {
+        gdprintk(XENLOG_G_ERR, "remap pirq %x vector %x while not unmap\n",
+                 pirq, vector);
+        ret = -EINVAL;
+        goto done;
+    }
+
+    ret = irq_permit_access(d, pirq);
+    if ( ret )
+    {
+        gdprintk(XENLOG_G_ERR, "add irq permit access %x failed\n", pirq);
+        ret = -EINVAL;
+        goto done;
+    }
+
+    d->arch.pirq_vector[pirq] = vector;
+    d->arch.vector_pirq[vector] = pirq;
+
+done:
+    return ret;
+}
+
+/*
+ * The pirq should have been unbound before this call.
+ */
+static int unmap_domain_pirq(struct domain *d, int pirq)
+{
+    int ret = 0;
+    int vector;
+
+    if ( d == NULL || pirq < 0 || pirq > NR_PIRQS )
+        return -EINVAL;
+
+    if ( !IS_PRIV(current->domain) )
+        return -EINVAL;
+
+    ASSERT(spin_is_locked(&d->arch.irq_lock));
+
+    vector = d->arch.pirq_vector[pirq];
+
+    if ( !vector )
+    {
+        gdprintk(XENLOG_G_ERR, "domain %X: pirq %x not mapped still\n",
+                 d->domain_id, pirq);
+        ret = -EINVAL;
+    }
+    else
+        d->arch.pirq_vector[pirq] = d->arch.vector_pirq[vector] = 0;
+    ret = irq_deny_access(d, pirq);
+
+    if ( ret )
+        gdprintk(XENLOG_G_ERR, "deny irq %x access failed\n", pirq);
+
+    return ret;
+}
+
+extern int msi_irq_enable;
+static int physdev_map_pirq(struct physdev_map_pirq *map)
+{
+    struct domain *d;
+    int vector, pirq, ret = 0;
+    unsigned long flags;
+
+    /* If msi_irq_enable is not set, the map always succeeds. */
+    if ( !msi_irq_enable )
+        return 0;
+
+    if ( !IS_PRIV(current->domain) )
+        return -EPERM;
+
+    if ( !map )
+        return -EINVAL;
+
+    if ( map->domid == DOMID_SELF )
+        d = rcu_lock_domain(current->domain);
+    else
+        d = rcu_lock_domain_by_id(map->domid);
+
+    if ( d == NULL )
+    {
+        ret = -ESRCH;
+        goto free_domain;
+    }
+
+    switch ( map->type )
+    {
+        case MAP_PIRQ_TYPE_GSI:
+            if ( map->index >= NR_IRQS )
+            {
+                ret = -EINVAL;
+                gdprintk(XENLOG_G_ERR,
+                         "map invalid irq %x\n", map->index);
+                goto free_domain;
+            }
+            vector = IO_APIC_VECTOR(map->index);
+            if ( !vector )
+            {
+                ret = -EINVAL;
+                gdprintk(XENLOG_G_ERR,
+                         "map irq with no vector %x\n", map->index);
+                goto free_domain;
+            }
+            break;
+        case MAP_PIRQ_TYPE_MSI:
+            vector = map->index;
+            if ( vector < 0 || vector >= NR_VECTORS )
+            {
+                ret = -EINVAL;
+                gdprintk(XENLOG_G_ERR,
+                         "map_pirq with wrong vector %x\n", map->index);
+                goto free_domain;
+            }
+            break;
+        default:
+            ret = -EINVAL;
+            gdprintk(XENLOG_G_ERR, "wrong map_pirq type %x\n", map->type);
+            goto free_domain;
+            break;
+    }
+
+    spin_lock_irqsave(&d->arch.irq_lock, flags);
+    if ( map->pirq == -1 )
+    {
+        if ( d->arch.vector_pirq[vector] )
+        {
+            gdprintk(XENLOG_G_ERR, "%x %x mapped already%x\n",
+                                    map->index, map->pirq,
+                                    d->arch.vector_pirq[vector]);
+            pirq = d->arch.vector_pirq[vector];
+        }
+        else
+        {
+            pirq = get_free_pirq(d, map->type, map->index);
+            if ( pirq < 0 )
+            {
+                ret = pirq;
+                gdprintk(XENLOG_G_ERR, "No free pirq\n");
+                goto done;
+            }
+        }
+    }
+    else
+    {
+        if ( d->arch.vector_pirq[vector] &&
+             d->arch.vector_pirq[vector] != map->pirq )
+        {
+            gdprintk(XENLOG_G_ERR, "%x conflict with %x\n",
+              map->index, map->pirq);
+            ret = -EEXIST;
+            goto done;
+        }
+        else
+            pirq = map->pirq;
+    }
+
+    ret = map_domain_pirq(d, pirq, vector, map->type);
+
+    if ( !ret )
+        map->pirq = pirq;
+done:
+    spin_unlock_irqrestore(&d->arch.irq_lock, flags);
+free_domain:
+    rcu_unlock_domain(d);
+    return ret;
+}
+
+static int physdev_unmap_pirq(struct physdev_unmap_pirq *unmap)
+{
+    struct domain *d;
+    unsigned long flags;
+    int ret;
+
+    if ( !msi_irq_enable )
+        return 0;
+
+    if ( !IS_PRIV(current->domain) )
+        return -EPERM;
+
+    if ( !unmap )
+        return -EINVAL;
+
+    if ( unmap->domid == DOMID_SELF )
+        d = rcu_lock_domain(current->domain);
+    else
+        d = rcu_lock_domain_by_id(unmap->domid);
+
+    if ( d == NULL )
+    {
+        rcu_unlock_domain(d);
+        return -ESRCH;
+    }
+
+    spin_lock_irqsave(&d->arch.irq_lock, flags);
+    ret = unmap_domain_pirq(d, unmap->pirq);
+    spin_unlock_irqrestore(&d->arch.irq_lock, flags);
+    rcu_unlock_domain(d);
+
+    return ret;
+}
+
 ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg)
 {
     int irq;
@@ -57,11 +315,35 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
         if ( (irq < 0) || (irq >= NR_IRQS) )
             break;
         irq_status_query.flags = 0;
-        if ( pirq_acktype(irq) != 0 )
+        if ( pirq_acktype(v->domain, irq) != 0 )
             irq_status_query.flags |= XENIRQSTAT_needs_eoi;
-        if ( pirq_shared(irq) )
+        if ( pirq_shared(v->domain, irq) )
             irq_status_query.flags |= XENIRQSTAT_shared;
         ret = copy_to_guest(arg, &irq_status_query, 1) ? -EFAULT : 0;
+        break;
+    }
+
+    case PHYSDEVOP_map_pirq: {
+        struct physdev_map_pirq map;
+
+        ret = -EFAULT;
+        if ( copy_from_guest(&map, arg, 1) != 0 )
+            break;
+
+        ret = physdev_map_pirq(&map);
+        if ( copy_to_guest(arg, &map, 1) != 0 )
+            ret = -EFAULT;
+        break;
+    }
+
+    case PHYSDEVOP_unmap_pirq: {
+        struct physdev_unmap_pirq unmap;
+
+        ret = -EFAULT;
+        if ( copy_from_guest(&unmap, arg, 1) != 0 )
+            break;
+
+        ret = physdev_unmap_pirq(&unmap);
         break;
     }
 
@@ -99,6 +381,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
 
     case PHYSDEVOP_alloc_irq_vector: {
         struct physdev_irq irq_op;
+        unsigned long flags;
 
         ret = -EFAULT;
         if ( copy_from_guest(&irq_op, arg, 1) != 0 )
@@ -118,7 +401,20 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
             break;
 
         irq_op.vector = assign_irq_vector(irq);
-        ret = copy_to_guest(arg, &irq_op, 1) ? -EFAULT : 0;
+
+        ret = 0;
+
+        if ( msi_irq_enable )
+        {
+            spin_lock_irqsave(&dom0->arch.irq_lock, flags);
+            if ( irq != AUTO_ASSIGN )
+                ret = map_domain_pirq(dom0, irq_op.irq, irq_op.vector,
+                                     MAP_PIRQ_TYPE_GSI);
+            spin_unlock_irqrestore(&dom0->arch.irq_lock, flags);
+        }
+
+        if ( copy_to_guest(arg, &irq_op, 1) != 0 )
+            ret = -EFAULT;
         break;
     }
 
diff -r 8bced3d8a907 -r 6ecbb00e58cd xen/drivers/passthrough/io.c
--- a/xen/drivers/passthrough/io.c      Thu May 01 10:26:58 2008 +0100
+++ b/xen/drivers/passthrough/io.c      Thu May 01 10:30:22 2008 +0100
@@ -25,6 +25,7 @@ static void pt_irq_time_out(void *data)
 {
     struct hvm_mirq_dpci_mapping *irq_map = data;
     unsigned int guest_gsi, machine_gsi = 0;
+    int vector;
     struct hvm_irq_dpci *dpci = domain_get_irq_dpci(irq_map->dom);
     struct dev_intx_gsi_link *digl;
     uint32_t device, intx;
@@ -39,7 +40,8 @@ static void pt_irq_time_out(void *data)
     }
 
     clear_bit(machine_gsi, dpci->dirq_mask);
-    stop_timer(&dpci->hvm_timer[irq_to_vector(machine_gsi)]);
+    vector = domain_irq_to_vector(irq_map->dom, machine_gsi);
+    stop_timer(&dpci->hvm_timer[vector]);
     spin_lock(&dpci->dirq_lock);
     dpci->mirq[machine_gsi].pending = 0;
     spin_unlock(&dpci->dirq_lock);
@@ -98,7 +100,7 @@ int pt_irq_create_bind_vtd(
         hvm_irq_dpci->mirq[machine_gsi].valid = 1;
         hvm_irq_dpci->mirq[machine_gsi].dom = d;
 
-        init_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(machine_gsi)],
+        init_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, 
machine_gsi)],
                    pt_irq_time_out, &hvm_irq_dpci->mirq[machine_gsi], 0);
         /* Deal with gsi for legacy devices */
         pirq_guest_bind(d->vcpu[0], machine_gsi, BIND_PIRQ__WILL_SHARE);
@@ -157,7 +159,7 @@ int pt_irq_destroy_bind_vtd(
         if ( list_empty(&hvm_irq_dpci->mirq[machine_gsi].digl_list) )
         {
             pirq_guest_unbind(d, machine_gsi);
-            kill_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(machine_gsi)]);
+            kill_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, 
machine_gsi)]);
             hvm_irq_dpci->mirq[machine_gsi].dom   = NULL;
             hvm_irq_dpci->mirq[machine_gsi].valid = 0;
         }
@@ -185,7 +187,7 @@ int hvm_do_IRQ_dpci(struct domain *d, un
      * PIC) and we need to detect that.
      */
     set_bit(mirq, dpci->dirq_mask);
-    set_timer(&dpci->hvm_timer[irq_to_vector(mirq)],
+    set_timer(&dpci->hvm_timer[domain_irq_to_vector(d, mirq)],
               NOW() + PT_IRQ_TIME_OUT);
     vcpu_kick(d->vcpu[0]);
 
@@ -221,7 +223,7 @@ void hvm_dpci_eoi(struct domain *d, unsi
 
         gdprintk(XENLOG_INFO VTDPREFIX,
                  "hvm_dpci_eoi:: mirq = %x\n", machine_gsi);
-        stop_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(machine_gsi)]);
+        stop_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, 
machine_gsi)]);
         if ( (ent == NULL) || !ent->fields.mask )
             pirq_guest_eoi(d, machine_gsi);
     }
diff -r 8bced3d8a907 -r 6ecbb00e58cd xen/drivers/passthrough/vtd/x86/vtd.c
--- a/xen/drivers/passthrough/vtd/x86/vtd.c     Thu May 01 10:26:58 2008 +0100
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c     Thu May 01 10:30:22 2008 +0100
@@ -114,7 +114,7 @@ void hvm_dpci_isairq_eoi(struct domain *
                 if ( --dpci->mirq[i].pending == 0 )
                 {
                     spin_unlock(&dpci->dirq_lock);
-                    stop_timer(&dpci->hvm_timer[irq_to_vector(i)]);
+                    stop_timer(&dpci->hvm_timer[domain_irq_to_vector(d, i)]);
                     pirq_guest_eoi(d, i);
                 }
                 else
diff -r 8bced3d8a907 -r 6ecbb00e58cd xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h      Thu May 01 10:26:58 2008 +0100
+++ b/xen/include/asm-x86/domain.h      Thu May 01 10:30:22 2008 +0100
@@ -6,6 +6,7 @@
 #include <asm/hvm/vcpu.h>
 #include <asm/hvm/domain.h>
 #include <asm/e820.h>
+#include <asm/pirq.h>
 
 #define has_32bit_shinfo(d)    ((d)->arch.has_32bit_shinfo)
 #define is_pv_32bit_domain(d)  ((d)->arch.is_32bit_pv)
@@ -221,6 +222,10 @@ struct arch_domain
 
     /* Shadow translated domain: P2M mapping */
     pagetable_t phys_table;
+
+    spinlock_t irq_lock;
+    int vector_pirq[NR_VECTORS];
+    int pirq_vector[NR_PIRQS];
 
     /* Pseudophysical e820 map (XENMEM_memory_map).  */
     struct e820entry e820[3];
diff -r 8bced3d8a907 -r 6ecbb00e58cd xen/include/asm-x86/irq.h
--- a/xen/include/asm-x86/irq.h Thu May 01 10:26:58 2008 +0100
+++ b/xen/include/asm-x86/irq.h Thu May 01 10:30:22 2008 +0100
@@ -49,7 +49,9 @@ extern atomic_t irq_err_count;
 extern atomic_t irq_err_count;
 extern atomic_t irq_mis_count;
 
-int pirq_acktype(int irq);
-int pirq_shared(int irq);
+int pirq_acktype(struct domain *d, int irq);
+int pirq_shared(struct domain *d, int irq);
 
+extern int domain_irq_to_vector(struct domain *d, int irq);
+extern int domain_vector_to_irq(struct domain *d, int vector);
 #endif /* _ASM_HW_IRQ_H */
diff -r 8bced3d8a907 -r 6ecbb00e58cd xen/include/asm-x86/pirq.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-x86/pirq.h        Thu May 01 10:30:22 2008 +0100
@@ -0,0 +1,11 @@
+#ifndef __XEN_PIRQ_H
+#define __XEN_PIRQ_H
+
+#define PIRQ_BASE       0
+#define NR_PIRQS        256
+
+#define DYNIRQ_BASE     (PIRQ_BASE + NR_PIRQS)
+#define NR_DYNIRQS      256
+
+#endif /* __XEN_PIRQ_H */
+
diff -r 8bced3d8a907 -r 6ecbb00e58cd xen/include/public/physdev.h
--- a/xen/include/public/physdev.h      Thu May 01 10:26:58 2008 +0100
+++ b/xen/include/public/physdev.h      Thu May 01 10:30:22 2008 +0100
@@ -121,6 +121,33 @@ struct physdev_irq {
 };
 typedef struct physdev_irq physdev_irq_t;
 DEFINE_XEN_GUEST_HANDLE(physdev_irq_t);
+ 
+#define MAP_PIRQ_TYPE_MSI               0x0
+#define MAP_PIRQ_TYPE_GSI               0x1
+#define MAP_PIRQ_TYPE_UNKNOWN           0x2
+
+#define PHYSDEVOP_map_pirq               13
+struct physdev_map_pirq {
+    domid_t domid;
+    /* IN */
+    int type;
+    /* IN */
+    int index;
+    /* IN or OUT */
+    int pirq;
+};
+typedef struct physdev_map_pirq physdev_map_pirq_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_map_pirq_t);
+
+#define PHYSDEVOP_unmap_pirq             14
+struct physdev_unmap_pirq {
+    domid_t domid;
+    /* IN */
+    int pirq;
+};
+
+typedef struct physdev_unmap_pirq physdev_unmap_pirq_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_unmap_pirq_t);
 
 /*
  * Argument to physdev_op_compat() hypercall. Superceded by new physdev_op()

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

<Prev in Thread] Current Thread [Next in Thread>
  • [Xen-changelog] [xen-unstable] MSI 2/6: change the pirq to be per-domain, Xen patchbot-unstable <=