[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH] linux-2.6.18/PCI-MSI: pass segment information to Xen



Also don't pass PCI devices with a non-zero segment to the old-style
PCI device registration hypercall (converting that code to the new
call is relatively pointless, as one would at once need to change the
mechanism, since the place where the call currently happens doesn't
guarantee that the hypervisor gets notified of all devices, and that's
out of scope for the legacy tree here).

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/drivers/pci/msi-xen.c
+++ b/drivers/pci/msi-xen.c
@@ -25,6 +25,11 @@
 #include "msi.h"
 
 static int pci_msi_enable = 1;
+#if CONFIG_XEN_COMPAT < 0x040200
+static int pci_seg_supported = 1;
+#else
+#define pci_seg_supported 1
+#endif
 
 static struct msi_ops *msi_ops;
 
@@ -210,21 +215,34 @@ static int msi_map_pirq_to_vector(struct
                                  int entry_nr, u64 table_base)
 {
        struct physdev_map_pirq map_irq;
-       int rc;
+       int rc = -EINVAL;
        domid_t domid = DOMID_SELF;
 
        domid = msi_get_dev_owner(dev);
 
        map_irq.domid = domid;
-       map_irq.type = MAP_PIRQ_TYPE_MSI;
+       map_irq.type = MAP_PIRQ_TYPE_MSI_SEG;
        map_irq.index = -1;
        map_irq.pirq = pirq < 0 ? -1 : evtchn_get_xen_pirq(pirq);
-       map_irq.bus = dev->bus->number;
+       map_irq.bus = dev->bus->number | (pci_domain_nr(dev->bus) << 16);
        map_irq.devfn = dev->devfn;
        map_irq.entry_nr = entry_nr;
        map_irq.table_base = table_base;
 
-       if ((rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq)))
+       if (pci_seg_supported)
+               rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
+#if CONFIG_XEN_COMPAT < 0x040200
+       if (rc == -EINVAL && !pci_domain_nr(dev->bus)) {
+               map_irq.type = MAP_PIRQ_TYPE_MSI;
+               map_irq.index = -1;
+               map_irq.pirq = -1;
+               map_irq.bus = dev->bus->number;
+               rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
+               if (rc != -EINVAL)
+                       pci_seg_supported = false;
+       }
+#endif
+       if (rc)
                printk(KERN_WARNING "map irq failed\n");
 
        if (rc < 0)
--- a/drivers/xen/core/pci.c
+++ b/drivers/xen/core/pci.c
@@ -18,8 +18,10 @@ static int pci_bus_probe_wrapper(struct 
        struct physdev_manage_pci manage_pci;
        struct physdev_manage_pci_ext manage_pci_ext;
 
+       if (pci_domain_nr(pci_dev->bus))
+               r = -ENOSYS;
 #ifdef CONFIG_PCI_IOV
-       if (pci_dev->is_virtfn) {
+       else if (pci_dev->is_virtfn) {
                memset(&manage_pci_ext, 0, sizeof(manage_pci_ext));
                manage_pci_ext.bus = pci_dev->bus->number;
                manage_pci_ext.devfn = pci_dev->devfn;
@@ -28,9 +30,9 @@ static int pci_bus_probe_wrapper(struct 
                manage_pci_ext.physfn.devfn = pci_dev->physfn->devfn;
                r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
                                          &manage_pci_ext);
-       } else
+       }
 #endif
-       if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) {
+       else if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) {
                memset(&manage_pci_ext, 0, sizeof(manage_pci_ext));
                manage_pci_ext.bus = pci_dev->bus->number;
                manage_pci_ext.devfn = pci_dev->devfn;
@@ -54,6 +56,7 @@ static int pci_bus_remove_wrapper(struct
 {
        int r;
        struct pci_dev *pci_dev = to_pci_dev(dev);
+       int seg = pci_domain_nr(pci_dev->bus);
        struct physdev_manage_pci manage_pci;
        manage_pci.bus = pci_dev->bus->number;
        manage_pci.devfn = pci_dev->devfn;
@@ -60,6 +63,8 @@ static int pci_bus_remove_wrapper(struct
 
        r = pci_bus_remove(dev);
        /* dev and pci_dev are no longer valid!! */
+       if (seg)
+               return r;
 
        WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_remove,
                &manage_pci));




_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.