[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH v2 4/8] vpci: add a wait operation to the vpci vcpu pending actions


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Mykyta Poturai <Mykyta_Poturai@xxxxxxxx>
  • Date: Mon, 9 Mar 2026 11:08:26 +0000
  • Accept-language: en-US
  • Arc-authentication-results: i=1; mx.microsoft.com 1; spf=pass smtp.mailfrom=epam.com; dmarc=pass action=none header.from=epam.com; dkim=pass header.d=epam.com; arc=none
  • Arc-message-signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com; s=arcselector10001; h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1; bh=as8HXwLx1Zl9o2iQtPqGLoGRGBXmJbz5GSTKA8wbpEg=; b=txDinyKu++Xk3mujU58GFWCEgby8z98yN6d8TAyvbNE1DluG9ZTNoCltTH2DWv+PHcQ62LestJKNB3qdLvlMdqCptsomW54Vco0clgt3JnPZDXxU0GrgaIHSLL4NhEnm9Bzergq1d9FnAcdATILM0kEtMSXqvGVNyg4jJN3bZcXTA4IU1mf/msVWm4qoacZm5Y4b+mITmJdK+V587V7OVvkAyZUDepSQVTO/1VCzsrPrMKvUncMdtjRwuEfX3uZTKUad8LzONANuANdxd2NbATIpNbRJUzXTVCQfbj/qI6pbt6nRj38uGhbYiQeLdP6nHVF3p43zUs3AefCHwicTDQ==
  • Arc-seal: i=1; a=rsa-sha256; s=arcselector10001; d=microsoft.com; cv=none; b=JNvNmbgqDJK2L+vIoJyA85Uf7OEmPgIqHSQjq71ibsxBxaJV/uNZzMWDWczahzoMxf9+/T54BiUq4qYALg1OgRHUlT5tR90ligM7ibKaWvxouoX5ij49BkTi9ALDbmVTOCvbEcwBSmxU1UojQ+o0Nl6ukA0/NQ4qFI193HWJqeFSzKzhR3bBa5g3DLh8x5Sd0NN+NmRZsm6m+qF4wKuoI93E8clFIsJTqffgZodd8cCi7QSauJ1fqqsU/JKE84HeKtDwhCN0QWPhyN5uoqQ+9SKL+FrkNMboqZSPI7AJ698qX2CXTC05ZHhiTy4Rv6ogswmhPPmP8vwh4RYD32geyQ==
  • Authentication-results: dkim=none (message not signed) header.d=none;dmarc=none action=none header.from=epam.com;
  • Cc: Mykyta Poturai <Mykyta_Poturai@xxxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>, Stewart Hildebrand <stewart.hildebrand@xxxxxxx>
  • Delivery-date: Mon, 09 Mar 2026 11:08:42 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
  • Thread-index: AQHcr7UNjpbdOXJQP0qdir/zrd85oQ==
  • Thread-topic: [PATCH v2 4/8] vpci: add a wait operation to the vpci vcpu pending actions

This allows waiting on the vcpu until a specified deadline (in system
time) has passed. Once the wait has finished, a callback is executed.

Note that this is still not used, but introduced here in order to
reduce the complexity of the patches that actually make use of it.

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
Signed-off-by: Mykyta Poturai <mykyta_poturai@xxxxxxxx>
---
v1->v2:
* new patch
---
 xen/drivers/vpci/header.c | 125 ++++++++++++++++++++++----------------
 xen/include/xen/vpci.h    |  19 ++++++
 2 files changed, 90 insertions(+), 54 deletions(-)

diff --git a/xen/drivers/vpci/header.c b/xen/drivers/vpci/header.c
index cb64d9b9fc..284964f0d4 100644
--- a/xen/drivers/vpci/header.c
+++ b/xen/drivers/vpci/header.c
@@ -175,76 +175,92 @@ static void modify_decoding(const struct pci_dev *pdev, 
uint16_t cmd,
 
 bool vpci_process_pending(struct vcpu *v)
 {
-    const struct pci_dev *pdev = v->vpci.pdev;
-    struct vpci_header *header = NULL;
-    unsigned int i;
-
-    if ( !pdev )
-        return false;
-
-    read_lock(&v->domain->pci_lock);
-
-    if ( !pdev->vpci || (v->domain != pdev->domain) )
+    switch ( v->vpci.task )
     {
-        v->vpci.pdev = NULL;
-        read_unlock(&v->domain->pci_lock);
-        return false;
-    }
-
-    header = &pdev->vpci->header;
-    for ( i = 0; i < ARRAY_SIZE(header->bars); i++ )
+    case MODIFY_MEMORY:
     {
-        struct vpci_bar *bar = &header->bars[i];
-        struct rangeset *mem = v->vpci.bar_mem[i];
-        struct map_data data = {
-            .d = v->domain,
-            .map = v->vpci.cmd & PCI_COMMAND_MEMORY,
-            .bar = bar,
-        };
-        int rc;
+        const struct pci_dev *pdev = v->vpci.memory.pdev;
+        struct vpci_header *header = NULL;
+        unsigned int i;
 
-        if ( rangeset_is_empty(mem) )
-            continue;
+        if ( !pdev )
+            break;
 
-        rc = rangeset_consume_ranges(mem, map_range, &data);
+        read_lock(&v->domain->pci_lock);
 
-        if ( rc == -ERESTART )
+        if ( !pdev->vpci || (v->domain != pdev->domain) )
         {
+            v->vpci.memory.pdev = NULL;
             read_unlock(&v->domain->pci_lock);
-            return true;
+            break;
         }
 
-        if ( rc )
+        header = &pdev->vpci->header;
+        for ( i = 0; i < ARRAY_SIZE(header->bars); i++ )
         {
-            spin_lock(&pdev->vpci->lock);
-            /* Disable memory decoding unconditionally on failure. */
-            modify_decoding(pdev, v->vpci.cmd & ~PCI_COMMAND_MEMORY,
-                            false);
-            spin_unlock(&pdev->vpci->lock);
+            struct vpci_bar *bar = &header->bars[i];
+            struct rangeset *mem = v->vpci.bar_mem[i];
+            struct map_data data = {
+                .d = v->domain,
+                .map = v->vpci.memory.cmd & PCI_COMMAND_MEMORY,
+                .bar = bar,
+            };
+            int rc;
+
+            if ( rangeset_is_empty(mem) )
+                continue;
 
-            /* Clean all the rangesets */
-            for ( i = 0; i < ARRAY_SIZE(header->bars); i++ )
-                if ( !rangeset_is_empty(v->vpci.bar_mem[i]) )
-                     rangeset_purge(v->vpci.bar_mem[i]);
+            rc = rangeset_consume_ranges(mem, map_range, &data);
 
-            v->vpci.pdev = NULL;
+            if ( rc == -ERESTART )
+            {
+                read_unlock(&v->domain->pci_lock);
+                return true;
+            }
 
-            read_unlock(&v->domain->pci_lock);
+            if ( rc )
+            {
+                spin_lock(&pdev->vpci->lock);
+                /* Disable memory decoding unconditionally on failure. */
+                modify_decoding(pdev, v->vpci.memory.cmd & ~PCI_COMMAND_MEMORY,
+                                false);
+                spin_unlock(&pdev->vpci->lock);
+
+                /* Clean all the rangesets */
+                for ( i = 0; i < ARRAY_SIZE(header->bars); i++ )
+                    if ( !rangeset_is_empty(v->vpci.bar_mem[i]) )
+                        rangeset_purge(v->vpci.bar_mem[i]);
+
+                v->vpci.memory.pdev = NULL;
+
+                read_unlock(&v->domain->pci_lock);
 
-            if ( !is_hardware_domain(v->domain) )
-                domain_crash(v->domain);
+                if ( !is_hardware_domain(v->domain) )
+                    domain_crash(v->domain);
 
-            return false;
+                break;
+            }
         }
-    }
-    v->vpci.pdev = NULL;
+        v->vpci.memory.pdev = NULL;
 
-    spin_lock(&pdev->vpci->lock);
-    modify_decoding(pdev, v->vpci.cmd, v->vpci.rom_only);
-    spin_unlock(&pdev->vpci->lock);
+        spin_lock(&pdev->vpci->lock);
+        modify_decoding(pdev, v->vpci.memory.cmd, v->vpci.memory.rom_only);
+        spin_unlock(&pdev->vpci->lock);
 
-    read_unlock(&v->domain->pci_lock);
+        read_unlock(&v->domain->pci_lock);
+
+        break;
+    }
+    case WAIT:
+        if ( NOW() < v->vpci.wait.end )
+            return true;
+        v->vpci.wait.callback(v->vpci.wait.data);
+        break;
+    case NONE:
+        return false;
+    }
 
+    v->vpci.task = NONE;
     return false;
 }
 
@@ -295,9 +311,10 @@ static void defer_map(const struct pci_dev *pdev, uint16_t 
cmd, bool rom_only)
      * is mapped. This can lead to parallel mapping operations being
      * started for the same device if the domain is not well-behaved.
      */
-    curr->vpci.pdev = pdev;
-    curr->vpci.cmd = cmd;
-    curr->vpci.rom_only = rom_only;
+    curr->vpci.memory.pdev = pdev;
+    curr->vpci.memory.cmd = cmd;
+    curr->vpci.memory.rom_only = rom_only;
+    curr->vpci.task = MODIFY_MEMORY;
     /*
      * Raise a scheduler softirq in order to prevent the guest from resuming
      * execution with pending mapping operations, to trigger the invocation
diff --git a/xen/include/xen/vpci.h b/xen/include/xen/vpci.h
index fa654545e5..47cdb54d42 100644
--- a/xen/include/xen/vpci.h
+++ b/xen/include/xen/vpci.h
@@ -212,7 +212,26 @@ struct vpci_vcpu {
     /* Per-vcpu structure to store state while {un}mapping of PCI BARs. */
     const struct pci_dev *pdev;
 #ifdef __XEN__
+    enum {
+        NONE,
+        MODIFY_MEMORY,
+        WAIT,
+    } task;
     struct rangeset *bar_mem[PCI_HEADER_NORMAL_NR_BARS + 1];
+    union {
+        struct {
+            /* Store state while {un}mapping of PCI BARs. */
+            const struct pci_dev *pdev;
+            uint16_t cmd;
+            bool rom_only : 1;
+        } memory;
+        struct {
+            /* Store wait state. */
+            s_time_t end;
+            void (*callback)(void *);
+            void *data;
+        } wait;
+    };
 #endif
     uint16_t cmd;
     bool rom_only : 1;
-- 
2.51.2

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.