
[Xen-devel] [PATCH 1/2] xen/x86: Re-factor MSI/X config code from drivers/passthrough/pci.c into x86-specific files



This patch re-factors the MSI/MSI-X specific code out of
xen/drivers/passthrough/pci.c and moves it into x86-specific files.
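
The MSI-X bookkeeping that alloc_pdev() used to open-code is now reached
through an arch-provided helper. A rough sketch of the resulting interface,
mirroring the hunks below (the helper returns pdev on success and NULL on
allocation failure):

    /* Declared in xen/include/xen/pci.h, implemented in
     * xen/drivers/passthrough/x86/pci.c. */
    struct pci_dev *pci_alloc_msix(struct pci_dev *pdev, struct pci_seg *pseg,
                                   u8 bus, u8 devfn);

    /* Call site in alloc_pdev(), xen/drivers/passthrough/pci.c.
     * On failure the helper frees pdev itself, so the caller just bails out. */
    if ( !pci_alloc_msix(pdev, pseg, bus, devfn) )
        return NULL;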

Signed-off-by: Manish Jaggi <manish.jaggi@xxxxxxxxxxxxxxxxxx>
---
 xen/drivers/passthrough/pci.c        |  102 +-----------------------------
 xen/drivers/passthrough/x86/Makefile |    1 +
 xen/drivers/passthrough/x86/pci.c    |  115 ++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/msi.h            |    1 -
 xen/include/xen/pci.h                |   20 +++++-
 5 files changed, 137 insertions(+), 102 deletions(-)

diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
index ecd061e..004aba9 100644
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -23,7 +23,6 @@
 #include <xen/iommu.h>
 #include <xen/irq.h>
 #include <asm/hvm/iommu.h>
-#include <asm/hvm/irq.h>
 #include <xen/delay.h>
 #include <xen/keyhandler.h>
 #include <xen/event.h>
@@ -33,21 +32,6 @@
 #include <xen/softirq.h>
 #include <xen/tasklet.h>
 #include <xsm/xsm.h>
-#include <asm/msi.h>
-
-struct pci_seg {
-    struct list_head alldevs_list;
-    u16 nr;
-    unsigned long *ro_map;
-    /* bus2bridge_lock protects bus2bridge array */
-    spinlock_t bus2bridge_lock;
-#define MAX_BUSES 256
-    struct {
-        u8 map;
-        u8 bus;
-        u8 devfn;
-    } bus2bridge[MAX_BUSES];
-};
 
 spinlock_t pcidevs_lock = SPIN_LOCK_UNLOCKED;
 static struct radix_tree_root pci_segments;
@@ -282,22 +266,10 @@ static struct pci_dev *alloc_pdev(struct pci_seg *pseg, u8 bus, u8 devfn)
     *((u8*) &pdev->bus) = bus;
     *((u8*) &pdev->devfn) = devfn;
     pdev->domain = NULL;
-    INIT_LIST_HEAD(&pdev->msi_list);
-
-    if ( pci_find_cap_offset(pseg->nr, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
-                             PCI_CAP_ID_MSIX) )
+    if ( !pci_alloc_msix(pdev, pseg, bus, devfn) )
     {
-        struct arch_msix *msix = xzalloc(struct arch_msix);
-
-        if ( !msix )
-        {
-            xfree(pdev);
-            return NULL;
-        }
-        spin_lock_init(&msix->table_lock);
-        pdev->msix = msix;
+        return NULL;
     }
-
     list_add(&pdev->alldevs_list, &pseg->alldevs_list);
 
     /* update bus2bridge */
@@ -755,54 +727,6 @@ int pci_remove_device(u16 seg, u8 bus, u8 devfn)
     return ret;
 }
 
-static int pci_clean_dpci_irq(struct domain *d,
-                              struct hvm_pirq_dpci *pirq_dpci, void *arg)
-{
-    struct dev_intx_gsi_link *digl, *tmp;
-
-    pirq_guest_unbind(d, dpci_pirq(pirq_dpci));
-
-    if ( pt_irq_need_timer(pirq_dpci->flags) )
-        kill_timer(&pirq_dpci->timer);
-
-    list_for_each_entry_safe ( digl, tmp, &pirq_dpci->digl_list, list )
-    {
-        list_del(&digl->list);
-        xfree(digl);
-    }
-
-    return pt_pirq_softirq_active(pirq_dpci) ? -ERESTART : 0;
-}
-
-static int pci_clean_dpci_irqs(struct domain *d)
-{
-    struct hvm_irq_dpci *hvm_irq_dpci = NULL;
-
-    if ( !iommu_enabled )
-        return 0;
-
-    if ( !is_hvm_domain(d) )
-        return 0;
-
-    spin_lock(&d->event_lock);
-    hvm_irq_dpci = domain_get_irq_dpci(d);
-    if ( hvm_irq_dpci != NULL )
-    {
-        int ret = pt_pirq_iterate(d, pci_clean_dpci_irq, NULL);
-
-        if ( ret )
-        {
-            spin_unlock(&d->event_lock);
-            return ret;
-        }
-
-        d->arch.hvm_domain.irq.dpci = NULL;
-        free_hvm_irq_dpci(hvm_irq_dpci);
-    }
-    spin_unlock(&d->event_lock);
-    return 0;
-}
-
 int pci_release_devices(struct domain *d)
 {
     struct pci_dev *pdev;
@@ -1186,28 +1110,6 @@ bool_t pcie_aer_get_firmware_first(const struct pci_dev *pdev)
 }
 #endif
 
-static int _dump_pci_devices(struct pci_seg *pseg, void *arg)
-{
-    struct pci_dev *pdev;
-    struct msi_desc *msi;
-
-    printk("==== segment %04x ====\n", pseg->nr);
-
-    list_for_each_entry ( pdev, &pseg->alldevs_list, alldevs_list )
-    {
-        printk("%04x:%02x:%02x.%u - dom %-3d - node %-3d - MSIs < ",
-               pseg->nr, pdev->bus,
-               PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
-               pdev->domain ? pdev->domain->domain_id : -1,
-               (pdev->node != NUMA_NO_NODE) ? pdev->node : -1);
-        list_for_each_entry ( msi, &pdev->msi_list, list )
-               printk("%d ", msi->irq);
-        printk(">\n");
-    }
-
-    return 0;
-}
-
 static void dump_pci_devices(unsigned char ch)
 {
     printk("==== PCI devices ====\n");
diff --git a/xen/drivers/passthrough/x86/Makefile b/xen/drivers/passthrough/x86/Makefile
index a70cf94..a2bcf94 100644
--- a/xen/drivers/passthrough/x86/Makefile
+++ b/xen/drivers/passthrough/x86/Makefile
@@ -1,2 +1,3 @@
 obj-y += ats.o
 obj-y += iommu.o
+obj-$(HAS_PCI) += pci.o
diff --git a/xen/drivers/passthrough/x86/pci.c b/xen/drivers/passthrough/x86/pci.c
new file mode 100644
index 0000000..cf37b0a
--- /dev/null
+++ b/xen/drivers/passthrough/x86/pci.c
@@ -0,0 +1,115 @@
+/*
+ * x86 specific code for PCI MSI
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) 2015 Cavium Networks
+ *
+ * Author: Manish Jaggi <manish.jaggi@xxxxxxxxxx>
+ */
+#include <xen/pci.h>
+#include <xen/sched.h>
+#include <asm/hvm/irq.h>
+#include <asm/msi.h>
+
+int _dump_pci_devices(struct pci_seg *pseg, void *arg)
+{
+    struct pci_dev *pdev;
+    struct msi_desc *msi;
+
+    printk("==== segment %04x ====\n", pseg->nr);
+
+    list_for_each_entry ( pdev, &pseg->alldevs_list, alldevs_list )
+    {
+        printk("%04x:%02x:%02x.%u - dom %-3d - node %-3d - MSIs < ",
+               pseg->nr, pdev->bus,
+               PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
+               pdev->domain ? pdev->domain->domain_id : -1,
+               (pdev->node != NUMA_NO_NODE) ? pdev->node : -1);
+        list_for_each_entry ( msi, &pdev->msi_list, list )
+               printk("%d ", msi->irq);
+        printk(">\n");
+    }
+
+    return 0;
+}
+
+int pci_clean_dpci_irq(struct domain *d,
+                              struct hvm_pirq_dpci *pirq_dpci, void *arg)
+{
+    struct dev_intx_gsi_link *digl, *tmp;
+
+    pirq_guest_unbind(d, dpci_pirq(pirq_dpci));
+
+    if ( pt_irq_need_timer(pirq_dpci->flags) )
+        kill_timer(&pirq_dpci->timer);
+
+    list_for_each_entry_safe ( digl, tmp, &pirq_dpci->digl_list, list )
+    {
+        list_del(&digl->list);
+        xfree(digl);
+    }
+
+    return pt_pirq_softirq_active(pirq_dpci) ? -ERESTART : 0;
+}
+
+int pci_clean_dpci_irqs(struct domain *d)
+{
+    struct hvm_irq_dpci *hvm_irq_dpci = NULL;
+
+    if ( !iommu_enabled )
+        return 0;
+
+    if ( !is_hvm_domain(d) )
+        return 0;
+
+    spin_lock(&d->event_lock);
+    hvm_irq_dpci = domain_get_irq_dpci(d);
+    if ( hvm_irq_dpci != NULL )
+    {
+        int ret = pt_pirq_iterate(d, pci_clean_dpci_irq, NULL);
+
+        if ( ret )
+        {
+            spin_unlock(&d->event_lock);
+            return ret;
+        }
+
+        d->arch.hvm_domain.irq.dpci = NULL;
+        free_hvm_irq_dpci(hvm_irq_dpci);
+    }
+    spin_unlock(&d->event_lock);
+    return 0;
+}
+
+struct pci_dev *pci_alloc_msix(struct pci_dev *pdev, struct pci_seg *pseg, u8 bus, u8 devfn)
+{
+    INIT_LIST_HEAD(&pdev->msi_list);
+
+    if ( pci_find_cap_offset(pseg->nr, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
+                             PCI_CAP_ID_MSIX) )
+    {
+        struct arch_msix *msix = xzalloc(struct arch_msix);
+
+        if ( !msix )
+        {
+            xfree(pdev);
+            return NULL;
+        }
+        spin_lock_init(&msix->table_lock);
+        pdev->msix = msix;
+    }
+
+    return pdev;
+}
diff --git a/xen/include/asm-x86/msi.h b/xen/include/asm-x86/msi.h
index 4c62a3a..9b2b4a3 100644
--- a/xen/include/asm-x86/msi.h
+++ b/xen/include/asm-x86/msi.h
@@ -78,7 +78,6 @@ struct msi_desc;
 extern int pci_enable_msi(struct msi_info *msi, struct msi_desc **desc);
 extern void pci_disable_msi(struct msi_desc *desc);
 extern int pci_prepare_msix(u16 seg, u8 bus, u8 devfn, bool_t off);
-extern void pci_cleanup_msi(struct pci_dev *pdev);
 extern int setup_msi_irq(struct irq_desc *, struct msi_desc *);
 extern int __setup_msi_irq(struct irq_desc *, struct msi_desc *,
                            const struct hw_interrupt_type *);
diff --git a/xen/include/xen/pci.h b/xen/include/xen/pci.h
index 4377f3e..07e60fe 100644
--- a/xen/include/xen/pci.h
+++ b/xen/include/xen/pci.h
@@ -43,6 +43,20 @@ struct pci_dev_info {
     } physfn;
 };
 
+struct pci_seg {
+    struct list_head alldevs_list;
+    u16 nr;
+    unsigned long *ro_map;
+    /* bus2bridge_lock protects bus2bridge array */
+    spinlock_t bus2bridge_lock;
+#define MAX_BUSES 256
+    struct {
+        u8 map;
+        u8 bus;
+        u8 devfn;
+    } bus2bridge[MAX_BUSES];
+};
+
 struct pci_dev {
     struct list_head alldevs_list;
     struct list_head domain_list;
@@ -155,5 +169,9 @@ struct pirq;
 int msixtbl_pt_register(struct domain *, struct pirq *, uint64_t gtable);
 void msixtbl_pt_unregister(struct domain *, struct pirq *);
 void msixtbl_pt_cleanup(struct domain *d);
-
+struct pci_dev *pci_alloc_msix(struct pci_dev *pdev, struct pci_seg *pseg,
+                                u8 bus, u8 devfn);
+int pci_clean_dpci_irqs(struct domain *d);
+void pci_cleanup_msi(struct pci_dev *pdev);
+int _dump_pci_devices(struct pci_seg *pseg, void *arg);
 #endif /* __XEN_PCI_H__ */
--
1.7.9.5


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel