
[Xen-devel] [PATCH v4 12/13] x86/IRQ: eliminate some on-stack cpumask_t instances


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Jan Beulich <JBeulich@xxxxxxxx>
  • Date: Tue, 16 Jul 2019 07:44:26 +0000
  • Accept-language: en-US
  • Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Wei Liu <wl@xxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>
  • Delivery-date: Tue, 16 Jul 2019 07:48:08 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Use scratch_cpumask where possible, to avoid creating these possibly
large stack objects. We can't use it in _assign_irq_vector() and
set_desc_affinity(), as these get called in IRQ context.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
v4: Re-base over changes earlier in the series.
v3: New.
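
As background for the restriction noted in the description: there is a
single scratch_cpumask per CPU, with no nesting discipline, so it is only
safe for code that cannot be interrupted by another user of the same
mask. A minimal sketch of the hazard, not part of this patch (the
function and its consumer are illustrative placeholders):

    /*
     * Sketch: why this_cpu(scratch_cpumask) is off limits in functions
     * reachable from IRQ context, such as _assign_irq_vector() and
     * set_desc_affinity().
     */
    static void example_not_irq_safe(struct irq_desc *desc)
    {
        cpumask_t *mask = this_cpu(scratch_cpumask);

        cpumask_and(mask, desc->arch.cpu_mask, &cpu_online_map);
        /*
         * An interrupt taken here, whose handler also borrowed
         * this_cpu(scratch_cpumask), would overwrite the result of the
         * cpumask_and() above before the consumer below reads it.
         */
        send_IPI_mask(mask, desc->arch.vector);
    }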

--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -285,14 +285,15 @@ static void _clear_irq_vector(struct irq
  {
      unsigned int cpu, old_vector, irq = desc->irq;
      unsigned int vector = desc->arch.vector;
-    cpumask_t tmp_mask;
+    cpumask_t *tmp_mask = this_cpu(scratch_cpumask);
  
      BUG_ON(!valid_irq_vector(vector));
  
      /* Always clear desc->arch.vector */
-    cpumask_and(&tmp_mask, desc->arch.cpu_mask, &cpu_online_map);
+    cpumask_and(tmp_mask, desc->arch.cpu_mask, &cpu_online_map);
  
-    for_each_cpu(cpu, &tmp_mask) {
+    for_each_cpu(cpu, tmp_mask)
+    {
          ASSERT( per_cpu(vector_irq, cpu)[vector] == irq );
          per_cpu(vector_irq, cpu)[vector] = ~irq;
      }
@@ -308,16 +309,17 @@ static void _clear_irq_vector(struct irq
  
      desc->arch.used = IRQ_UNUSED;
  
-    trace_irq_mask(TRC_HW_IRQ_CLEAR_VECTOR, irq, vector, &tmp_mask);
+    trace_irq_mask(TRC_HW_IRQ_CLEAR_VECTOR, irq, vector, tmp_mask);
  
      if ( likely(!desc->arch.move_in_progress) )
          return;
  
      /* If we were in motion, also clear desc->arch.old_vector */
      old_vector = desc->arch.old_vector;
-    cpumask_and(&tmp_mask, desc->arch.old_cpu_mask, &cpu_online_map);
+    cpumask_and(tmp_mask, desc->arch.old_cpu_mask, &cpu_online_map);
  
-    for_each_cpu(cpu, &tmp_mask) {
+    for_each_cpu(cpu, tmp_mask)
+    {
          ASSERT( per_cpu(vector_irq, cpu)[old_vector] == irq );
          TRACE_3D(TRC_HW_IRQ_MOVE_FINISH, irq, old_vector, cpu);
          per_cpu(vector_irq, cpu)[old_vector] = ~irq;
@@ -1169,7 +1171,6 @@ static void irq_guest_eoi_timer_fn(void
      struct irq_desc *desc = data;
      unsigned int i, irq = desc - irq_desc;
      irq_guest_action_t *action;
-    cpumask_t cpu_eoi_map;
  
      spin_lock_irq(&desc->lock);
      
@@ -1206,14 +1207,18 @@ static void irq_guest_eoi_timer_fn(void
  
      switch ( action->ack_type )
      {
+        cpumask_t *cpu_eoi_map;
+
      case ACKTYPE_UNMASK:
          if ( desc->handler->end )
              desc->handler->end(desc, 0);
          break;
+
      case ACKTYPE_EOI:
-        cpumask_copy(&cpu_eoi_map, action->cpu_eoi_map);
+        cpu_eoi_map = this_cpu(scratch_cpumask);
+        cpumask_copy(cpu_eoi_map, action->cpu_eoi_map);
          spin_unlock_irq(&desc->lock);
-        on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 0);
+        on_selected_cpus(cpu_eoi_map, set_eoi_ready, desc, 0);
          return;
      }
  
@@ -2458,7 +2463,7 @@ void fixup_irqs(const cpumask_t *mask, b
      {
          bool break_affinity = false, set_affinity = true;
          unsigned int vector;
-        cpumask_t affinity;
+        cpumask_t *affinity = this_cpu(scratch_cpumask);
  
          if ( irq == 2 )
              continue;
@@ -2489,9 +2494,9 @@ void fixup_irqs(const cpumask_t *mask, b
          if ( desc->arch.move_cleanup_count )
          {
            /* The cleanup IPI may have got sent while we were still online. */
-            cpumask_andnot(&affinity, desc->arch.old_cpu_mask,
+            cpumask_andnot(affinity, desc->arch.old_cpu_mask,
                             &cpu_online_map);
-            desc->arch.move_cleanup_count -= cpumask_weight(&affinity);
+            desc->arch.move_cleanup_count -= cpumask_weight(affinity);
              if ( !desc->arch.move_cleanup_count )
                  release_old_vec(desc);
          }
@@ -2518,10 +2523,10 @@ void fixup_irqs(const cpumask_t *mask, b
          {
              unsigned int cpu;
  
-            cpumask_and(&affinity, desc->arch.old_cpu_mask, &cpu_online_map);
+            cpumask_and(affinity, desc->arch.old_cpu_mask, &cpu_online_map);
  
              spin_lock(&vector_lock);
-            for_each_cpu(cpu, &affinity)
+            for_each_cpu(cpu, affinity)
                  per_cpu(vector_irq, cpu)[desc->arch.old_vector] = ~irq;
              spin_unlock(&vector_lock);
  
@@ -2532,23 +2537,23 @@ void fixup_irqs(const cpumask_t *mask, b
          if ( !cpumask_intersects(mask, desc->affinity) )
          {
              break_affinity = true;
-            cpumask_setall(&affinity);
+            cpumask_setall(affinity);
          }
          else
-            cpumask_copy(&affinity, desc->affinity);
+            cpumask_copy(affinity, desc->affinity);
  
          if ( desc->handler->disable )
              desc->handler->disable(desc);
  
          if ( desc->handler->set_affinity )
-            desc->handler->set_affinity(desc, &affinity);
+            desc->handler->set_affinity(desc, affinity);
          else if ( !(warned++) )
              set_affinity = false;
  
          if ( desc->handler->enable )
              desc->handler->enable(desc);
  
-        cpumask_copy(&affinity, desc->affinity);
+        cpumask_copy(affinity, desc->affinity);
  
          spin_unlock(&desc->lock);
  
@@ -2559,7 +2564,7 @@ void fixup_irqs(const cpumask_t *mask, b
              printk("Cannot set affinity for IRQ%u\n", irq);
          else if ( break_affinity )
              printk("Broke affinity for IRQ%u, new: %*pb\n",
-                   irq, nr_cpu_ids, cpumask_bits(&affinity));
+                   irq, nr_cpu_ids, cpumask_bits(affinity));
      }
  
      /* That doesn't seem sufficient.  Give it 1ms. */
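
For scale: a cpumask_t is a fixed-size bitmap of NR_CPUS bits, so each
on-stack instance eliminated above frees NR_CPUS/8 bytes of stack, e.g.
512 bytes at NR_CPUS = 4096. The object borrowed instead is the
pre-existing per-CPU scratch mask, whose declaration is roughly of the
following shape (paraphrased, not quoted from the tree):

    /* One scratch mask per CPU; usable only outside IRQ context. */
    DECLARE_PER_CPU(cpumask_var_t, scratch_cpumask);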

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 

