[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH v3 07/11] x86/dpci: move code


  • To: <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Roger Pau Monne <roger.pau@xxxxxxxxxx>
  • Date: Wed, 31 Mar 2021 12:32:59 +0200
  • Arc-authentication-results: i=1; mx.microsoft.com 1; spf=pass smtp.mailfrom=citrix.com; dmarc=pass action=none header.from=citrix.com; dkim=pass header.d=citrix.com; arc=none
  • Arc-message-signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com; s=arcselector9901; h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck; bh=ATUqzH1qQW24zHt8gWyGrgI3bjAA4vJXVL7l6nZ/QcQ=; b=i0zitUg0x3h3G4KEIQccro8tRY/Y13rHr3eqY7E0bfTha1jkjzm68N3owC5tRz4FxGDVdOhRAs+S4CHIkvY2dKipNWHBGEFhb5yKRacmAht4O4WJgXv7enJBkGCsneHVXHtaxgZdqcPFZ7u6FhhcI9sNlkLMQcBgtYA6nmRdGO4WdAFootLvzyIcAJvCD++exqYhPd4zt3C/65ZswDHZslZh3M0OjifEK0tzfQvmQhyKQzzznudziENJa1XyPoK41PisEpjdEwBtxbM7BJHU3sBithCy21urXzNEPtBtoSA4f5dUsZnIyD68EfWs4KnOl9L6UujWkLfeJnxJ0O3v8w==
  • Arc-seal: i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none; b=O6R1bRtfnaMG1VyI6gla0ZC2OzHY9+J0GJhXzmaZVnDwvyCumRpjoCwxecgK5cyRVoG5sEyakI0U/Uw3F8PLnKC6ZKToj023GioXMOeY4D68Kie9kqxSfcbfy/Wnw2wrHeSyA7umDsnWUN2fwREd17yTB+Mulyf375zHAMPuQYKRsCZrsfgY0IgwLdOWMhnn+0XIRBl6LpB2dU367l+jalr+l5c5I+32C4rMnOzb2cYWnHXXNDUtwbQ7PtJkC1qzvvrCSZzFYgwLB0EUtpCXeXDEAQt4HSeyzS3peC/miRSQRF5TkuoA+YLswwfZzsbdRuaB7GRTF7Q9E96FVyz3iQ==
  • Authentication-results: esa2.hc3370-68.iphmx.com; dkim=pass (signature verified) header.i=@citrix.onmicrosoft.com
  • Cc: Roger Pau Monne <roger.pau@xxxxxxxxxx>, Jan Beulich <jbeulich@xxxxxxxx>, Paul Durrant <paul@xxxxxxx>
  • Delivery-date: Wed, 31 Mar 2021 10:36:31 +0000
  • Ironport-hdrordr: A9a23:9NCh2KrpzMv1gt5H5O1PZCoaV5vxL9V00zAX/kB9WHVpW+SFis Gjm+ka3xfoiDAXHEotg8yEJbPoex7h3LRy5pQcOqrnYRn+tAKTXeRfxKbB4xmlIS3x8eZByb xtGpIVNPTcBUV35PyU3CCWCNAlqePozImNpcPzi0hgVhtrbaYI1XYaNi++HldtTAdLQboVfa D92uN9qzCteWsaY62AbxFoY8H5q8DWj5WjWBYaBnccgzWmty+i67LxDnGjr3Qjeg5IqI1CzU H11zbXy4/mmPG9xx/a2Qbonu5rseqk8PRvLoihjsAULx/llwqnYp9wMofywQwdkaWUx3sB1P XKvhc8L+R/gkmhAl2dkF/W9CTLlBYo9nP4xleTjRLY0LPEbQN/MeVtr8Z9UHLimi4dleA56o 1n9SalkqASKhX6kCH097HzJmlXv3vxm1UOu6oulXBFOLFuD4N5nMgk025+NowPJy7+4JBPKp gUMOjsoMxbdl6XdBnizw9S6e3pWnwyGyGPSVQZtvqU1CBLnGt4w1Fw/r1noks9
  • Ironport-sdr: WyPNu5IIhJ6yPNAk93xEijNLhrSkvJsFi68KvRM7XH5BvO/FoNqLy88K0wesJMofLw+OwMWiv0 fqQ2j+5QKHI0J28RD1mXb2u+wvB9ho+DRvMgSbXHpqO/D3M77KEvDV+15TvAAktxIeM+9G522y x2RmiKswoH+DPdFLSW82EtWrE4xNIiFQFmPPFV8ZEK8Jf2YvwJVZvbcDgP1hHni4eyEPDO0Cx2 zuB+R6bYEKdAy46HyZwx/ZS74YR9t5W1T31VWUAHFhnTXbU19rMdAufMn1D1zryctxr+0gSBY6 Izo=
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

This is code movement in order to simplify further changes.

No functional change intended.

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
---
Changes since v2:
 - Drop one of the leading underscores from __hvm_dpci_eoi.

Changes since v1:
 - New in this version.
---
 xen/drivers/passthrough/x86/hvm.c | 164 +++++++++++++++---------------
 1 file changed, 82 insertions(+), 82 deletions(-)

diff --git a/xen/drivers/passthrough/x86/hvm.c 
b/xen/drivers/passthrough/x86/hvm.c
index 2331af896d4..ecc7d66e600 100644
--- a/xen/drivers/passthrough/x86/hvm.c
+++ b/xen/drivers/passthrough/x86/hvm.c
@@ -205,6 +205,88 @@ static struct vcpu *vector_hashing_dest(const struct 
domain *d,
     return dest;
 }
 
+static void hvm_pirq_eoi(struct pirq *pirq)
+{
+    struct hvm_pirq_dpci *pirq_dpci;
+
+    if ( !pirq )
+    {
+        ASSERT_UNREACHABLE();
+        return;
+    }
+
+    pirq_dpci = pirq_dpci(pirq);
+
+    /*
+     * No need to get vector lock for timer
+     * since interrupt is still not EOIed
+     */
+    if ( --pirq_dpci->pending ||
+         /* When the interrupt source is MSI no Ack should be performed. */
+         (pirq_dpci->flags & HVM_IRQ_DPCI_TRANSLATE) )
+        return;
+
+    pirq_guest_eoi(pirq);
+}
+
+static void _hvm_dpci_eoi(struct domain *d,
+                          const struct hvm_girq_dpci_mapping *girq)
+{
+    struct pirq *pirq = pirq_info(d, girq->machine_gsi);
+
+    if ( !hvm_domain_use_pirq(d, pirq) )
+        hvm_pci_intx_deassert(d, girq->device, girq->intx);
+
+    hvm_pirq_eoi(pirq);
+}
+
+static void hvm_gsi_eoi(struct domain *d, unsigned int gsi)
+{
+    struct pirq *pirq = pirq_info(d, gsi);
+
+    /* Check if GSI is actually mapped. */
+    if ( !pirq_dpci(pirq) )
+        return;
+
+    hvm_gsi_deassert(d, gsi);
+    hvm_pirq_eoi(pirq);
+}
+
+void hvm_dpci_eoi(unsigned int guest_gsi)
+{
+    struct domain *d = current->domain;
+    const struct hvm_irq_dpci *hvm_irq_dpci;
+    const struct hvm_girq_dpci_mapping *girq;
+
+    if ( !is_iommu_enabled(d) )
+        return;
+
+    if ( is_hardware_domain(d) )
+    {
+        spin_lock(&d->event_lock);
+        hvm_gsi_eoi(d, guest_gsi);
+        goto unlock;
+    }
+
+    if ( guest_gsi < NR_ISAIRQS )
+    {
+        hvm_dpci_isairq_eoi(d, guest_gsi);
+        return;
+    }
+
+    spin_lock(&d->event_lock);
+    hvm_irq_dpci = domain_get_irq_dpci(d);
+
+    if ( !hvm_irq_dpci )
+        goto unlock;
+
+    list_for_each_entry ( girq, &hvm_irq_dpci->girq[guest_gsi], list )
+        _hvm_dpci_eoi(d, girq);
+
+unlock:
+    spin_unlock(&d->event_lock);
+}
+
 int pt_irq_create_bind(
     struct domain *d, const struct xen_domctl_bind_pt_irq *pt_irq_bind)
 {
@@ -860,88 +942,6 @@ static void hvm_dirq_assist(struct domain *d, struct 
hvm_pirq_dpci *pirq_dpci)
     spin_unlock(&d->event_lock);
 }
 
-static void hvm_pirq_eoi(struct pirq *pirq)
-{
-    struct hvm_pirq_dpci *pirq_dpci;
-
-    if ( !pirq )
-    {
-        ASSERT_UNREACHABLE();
-        return;
-    }
-
-    pirq_dpci = pirq_dpci(pirq);
-
-    /*
-     * No need to get vector lock for timer
-     * since interrupt is still not EOIed
-     */
-    if ( --pirq_dpci->pending ||
-         /* When the interrupt source is MSI no Ack should be performed. */
-         (pirq_dpci->flags & HVM_IRQ_DPCI_TRANSLATE) )
-        return;
-
-    pirq_guest_eoi(pirq);
-}
-
-static void __hvm_dpci_eoi(struct domain *d,
-                           const struct hvm_girq_dpci_mapping *girq)
-{
-    struct pirq *pirq = pirq_info(d, girq->machine_gsi);
-
-    if ( !hvm_domain_use_pirq(d, pirq) )
-        hvm_pci_intx_deassert(d, girq->device, girq->intx);
-
-    hvm_pirq_eoi(pirq);
-}
-
-static void hvm_gsi_eoi(struct domain *d, unsigned int gsi)
-{
-    struct pirq *pirq = pirq_info(d, gsi);
-
-    /* Check if GSI is actually mapped. */
-    if ( !pirq_dpci(pirq) )
-        return;
-
-    hvm_gsi_deassert(d, gsi);
-    hvm_pirq_eoi(pirq);
-}
-
-void hvm_dpci_eoi(unsigned int guest_gsi)
-{
-    struct domain *d = current->domain;
-    const struct hvm_irq_dpci *hvm_irq_dpci;
-    const struct hvm_girq_dpci_mapping *girq;
-
-    if ( !is_iommu_enabled(d) )
-        return;
-
-    if ( is_hardware_domain(d) )
-    {
-        spin_lock(&d->event_lock);
-        hvm_gsi_eoi(d, guest_gsi);
-        goto unlock;
-    }
-
-    if ( guest_gsi < NR_ISAIRQS )
-    {
-        hvm_dpci_isairq_eoi(d, guest_gsi);
-        return;
-    }
-
-    spin_lock(&d->event_lock);
-    hvm_irq_dpci = domain_get_irq_dpci(d);
-
-    if ( !hvm_irq_dpci )
-        goto unlock;
-
-    list_for_each_entry ( girq, &hvm_irq_dpci->girq[guest_gsi], list )
-        __hvm_dpci_eoi(d, girq);
-
-unlock:
-    spin_unlock(&d->event_lock);
-}
-
 static int pci_clean_dpci_irq(struct domain *d,
                               struct hvm_pirq_dpci *pirq_dpci, void *arg)
 {
-- 
2.30.1




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.