[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v4 01/13] x86/IRQ: deal with move-in-progress state in fixup_irqs()


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Jan Beulich <JBeulich@xxxxxxxx>
  • Date: Tue, 16 Jul 2019 07:37:02 +0000
  • Accept-language: en-US
  • Arc-authentication-results: i=1; mx.microsoft.com 1;spf=pass smtp.mailfrom=suse.com;dmarc=pass action=none header.from=suse.com;dkim=pass header.d=suse.com;arc=none
  • Arc-message-signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com; s=arcselector9901; h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck; bh=mBaV6+QYMIjSjouzjXOoqSe+yig3keAv8N89JGSor8Y=; b=XgxkDZslkH7MlTlhC4a2Y73/LzNZ1z2JEt1ZblzTO8Vc3DaK4mKtT9cjzwUl2Wbxz3r3inzY678MgMdJCU09du0IL6H0Wj07TZuyzQpBuqDn6IwRAA2AD9YyHU0DB2uwQx0hrOssxxyFHw3b7kNWyJJUGVexB++r45dWnHTiMhJZMBBBLYex7MaCrZElscoR+8H1wdDzG1gRAYysZ/JPRjfA1feTm23QdXXO8gzRbBfJl0h0+VPDNLLfi2383HD/rLkNgU8Cf+OlFq3ifMHMf3HsYADR4ENyKuf6Q3W92ptXkku0C/BGirIj76B+DuVrZXRcoAHOKNfN6Ei4E+hnQg==
  • Arc-seal: i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none; b=oZcj1s99Elkhn4RlaV5cbb1NDekNUg1YSz1XZ9yG7BIZATJFftfO/mKIs4EABw/6ciQFojY8ewv69hSVGNAotnMEqx+Hh5CiQ8Tw4VU9JA99VjlJrSqM3/R7+I0FBMMMUcykPotL1PkHeiY2JltaxZqprykL9NK0iet+VxD3p4T74mMGXe1GzXXt35getLVojEPMJmBMqs1k6jNsEA/97cSlKLl6HZ+UQeETJ8+uFdgwO+c6a5mON28+CveUoRHDaCPhMNcYk5cxXrFvSyi3214DUFAmEz9bOk6fT0l3Qj19eskv0HRPtd3wPq7sVS6n3M6Iqi5s1IFhGkCga6rJhw==
  • Authentication-results: spf=none (sender IP is ) smtp.mailfrom=JBeulich@xxxxxxxx;
  • Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Wei Liu <wl@xxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>
  • Delivery-date: Tue, 16 Jul 2019 07:37:24 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
  • Thread-index: AQHVO6lCxbMQu2mXuE+5o0875H2ReQ==
  • Thread-topic: [PATCH v4 01/13] x86/IRQ: deal with move-in-progress state in fixup_irqs()

The flag being set may prevent affinity changes, as these often imply
assignment of a new vector. When there's no possible destination left
for the IRQ, the clearing of the flag needs to happen right from
fixup_irqs().

Additionally _assign_irq_vector() needs to avoid setting the flag when
there's no online CPU left in what gets put into ->arch.old_cpu_mask.
The old vector can be released right away in this case.

Also extend the log message about broken affinity to include the new
affinity as well, making it possible to notice issues where affinity
changes have not actually taken place. Swap the if/else-if order there
at the same time to reduce the amount of conditions checked.

At the same time replace two open-coded instances with the new helper
function.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
v4: Use cpumask_bits() in printk() invocation. Re-base.
v3: Move release_old_vec() further up (so a later patch won't need to).
     Re-base.
v2: Add/use valid_irq_vector().
v1b: Also update vector_irq[] in the code added to fixup_irqs().

--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -99,6 +99,27 @@ void unlock_vector_lock(void)
      spin_unlock(&vector_lock);
  }
  
+static inline bool valid_irq_vector(unsigned int vector)
+{
+    return vector >= FIRST_DYNAMIC_VECTOR && vector <= LAST_HIPRIORITY_VECTOR;
+}
+
+static void release_old_vec(struct irq_desc *desc)
+{
+    unsigned int vector = desc->arch.old_vector;
+
+    desc->arch.old_vector = IRQ_VECTOR_UNASSIGNED;
+    cpumask_clear(desc->arch.old_cpu_mask);
+
+    if ( !valid_irq_vector(vector) )
+        ASSERT_UNREACHABLE();
+    else if ( desc->arch.used_vectors )
+    {
+        ASSERT(test_bit(vector, desc->arch.used_vectors));
+        clear_bit(vector, desc->arch.used_vectors);
+    }
+}
+
  static void _trace_irq_mask(uint32_t event, int irq, int vector,
                              const cpumask_t *mask)
  {
@@ -295,14 +316,7 @@ static void __clear_irq_vector(int irq)
          per_cpu(vector_irq, cpu)[old_vector] = ~irq;
      }
  
-    desc->arch.old_vector = IRQ_VECTOR_UNASSIGNED;
-    cpumask_clear(desc->arch.old_cpu_mask);
-
-    if ( desc->arch.used_vectors )
-    {
-        ASSERT(test_bit(old_vector, desc->arch.used_vectors));
-        clear_bit(old_vector, desc->arch.used_vectors);
-    }
+    release_old_vec(desc);
  
      desc->arch.move_in_progress = 0;
  }
@@ -527,12 +541,21 @@ next:
          /* Found one! */
          current_vector = vector;
          current_offset = offset;
-        if (old_vector > 0) {
-            desc->arch.move_in_progress = 1;
-            cpumask_copy(desc->arch.old_cpu_mask, desc->arch.cpu_mask);
+
+        if ( old_vector > 0 )
+        {
+            cpumask_and(desc->arch.old_cpu_mask, desc->arch.cpu_mask,
+                        &cpu_online_map);
              desc->arch.old_vector = desc->arch.vector;
+            if ( !cpumask_empty(desc->arch.old_cpu_mask) )
+                desc->arch.move_in_progress = 1;
+            else
+                /* This can happen while offlining a CPU. */
+                release_old_vec(desc);
          }
+
          trace_irq_mask(TRC_HW_IRQ_ASSIGN_VECTOR, irq, vector, &tmp_mask);
+
          for_each_cpu(new_cpu, &tmp_mask)
              per_cpu(vector_irq, new_cpu)[vector] = irq;
          desc->arch.vector = vector;
@@ -702,14 +725,8 @@ void irq_move_cleanup_interrupt(struct c
  
          if ( desc->arch.move_cleanup_count == 0 )
          {
-            desc->arch.old_vector = IRQ_VECTOR_UNASSIGNED;
-            cpumask_clear(desc->arch.old_cpu_mask);
-
-            if ( desc->arch.used_vectors )
-            {
-                ASSERT(test_bit(vector, desc->arch.used_vectors));
-                clear_bit(vector, desc->arch.used_vectors);
-            }
+            ASSERT(vector == desc->arch.old_vector);
+            release_old_vec(desc);
          }
  unlock:
          spin_unlock(&desc->lock);
@@ -2409,6 +2426,33 @@ void fixup_irqs(const cpumask_t *mask, b
              continue;
          }
  
+        /*
+         * In order for the affinity adjustment below to be successful, we
+         * need __assign_irq_vector() to succeed. This in particular means
+         * clearing desc->arch.move_in_progress if this would otherwise
+         * prevent the function from succeeding. Since there's no way for the
+         * flag to get cleared anymore when there's no possible destination
+         * left (the only possibility then would be the IRQs enabled window
+         * after this loop), there's then also no race with us doing it here.
+         *
+         * Therefore the logic here and there need to remain in sync.
+         */
+        if ( desc->arch.move_in_progress &&
+             !cpumask_intersects(mask, desc->arch.cpu_mask) )
+        {
+            unsigned int cpu;
+
+            cpumask_and(&affinity, desc->arch.old_cpu_mask, &cpu_online_map);
+
+            spin_lock(&vector_lock);
+            for_each_cpu(cpu, &affinity)
+                per_cpu(vector_irq, cpu)[desc->arch.old_vector] = ~irq;
+            spin_unlock(&vector_lock);
+
+            release_old_vec(desc);
+            desc->arch.move_in_progress = 0;
+        }
+
          cpumask_and(&affinity, &affinity, mask);
          if ( cpumask_empty(&affinity) )
          {
@@ -2427,15 +2471,18 @@ void fixup_irqs(const cpumask_t *mask, b
          if ( desc->handler->enable )
              desc->handler->enable(desc);
  
+        cpumask_copy(&affinity, desc->affinity);
+
          spin_unlock(&desc->lock);
  
          if ( !verbose )
              continue;
  
-        if ( break_affinity && set_affinity )
-            printk("Broke affinity for irq %i\n", irq);
-        else if ( !set_affinity )
-            printk("Cannot set affinity for irq %i\n", irq);
+        if ( !set_affinity )
+            printk("Cannot set affinity for IRQ%u\n", irq);
+        else if ( break_affinity )
+            printk("Broke affinity for IRQ%u, new: %*pb\n",
+                   irq, nr_cpu_ids, cpumask_bits(&affinity));
      }
  
      /* That doesn't seem sufficient.  Give it 1ms. */

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.