
[RFC PATCH 01/10] sched: core: save IRQ state during locking


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Volodymyr Babchuk <Volodymyr_Babchuk@xxxxxxxx>
  • Date: Tue, 23 Feb 2021 02:34:55 +0000
  • Accept-language: en-US
  • Cc: Volodymyr Babchuk <Volodymyr_Babchuk@xxxxxxxx>, George Dunlap <george.dunlap@xxxxxxxxxx>, Dario Faggioli <dfaggioli@xxxxxxxx>
  • Delivery-date: Tue, 23 Feb 2021 02:35:17 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
  • Thread-index: AQHXCYx4NBzFu5keaEOeI3LkFrTKiA==
  • Thread-topic: [RFC PATCH 01/10] sched: core: save IRQ state during locking

With Xen preemption enabled, scheduler functions can be called with
IRQs disabled (for example, at the end of an IRQ handler), so the
scheduler code should save and restore the IRQ state rather than
unconditionally re-enabling interrupts on unlock.

Signed-off-by: Volodymyr Babchuk <volodymyr_babchuk@xxxxxxxx>
---
 xen/common/sched/core.c | 33 ++++++++++++++++++---------------
 1 file changed, 18 insertions(+), 15 deletions(-)
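
Note (not part of the patch): a minimal sketch of the locking pattern this
change moves to, using Xen's generic spinlock API rather than the
pcpu_schedule_* wrappers the patch itself touches. The _irq lock/unlock
variants unconditionally disable and then re-enable interrupts, which is only
safe when the caller is known to run with IRQs enabled; the
_irqsave/_irqrestore variants preserve whatever IRQ state the caller had. The
function and lock names below are illustrative only.

    #include <xen/spinlock.h>

    /* Sketch only: generic spinlock usage, not code from this patch. */
    static void example_irqsave_pattern(spinlock_t *lock)
    {
        unsigned long flags;

        /*
         * spin_lock_irqsave() records the current IRQ state in 'flags'
         * before disabling interrupts, so it is safe even if the caller
         * already has IRQs disabled (e.g. when entered from the tail of
         * an IRQ handler with preemption enabled).
         */
        spin_lock_irqsave(lock, flags);

        /* ... critical section ... */

        /*
         * spin_unlock_irqrestore() puts the IRQ state back exactly as it
         * was, instead of unconditionally re-enabling interrupts the way
         * spin_unlock_irq() does.
         */
        spin_unlock_irqrestore(lock, flags);
    }
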

diff --git a/xen/common/sched/core.c b/xen/common/sched/core.c
index 9745a77eee..7e075613d5 100644
--- a/xen/common/sched/core.c
+++ b/xen/common/sched/core.c
@@ -2470,7 +2470,8 @@ static struct vcpu *sched_force_context_switch(struct vcpu *vprev,
  * sched_res_rculock has been dropped.
  */
 static struct sched_unit *sched_wait_rendezvous_in(struct sched_unit *prev,
-                                                   spinlock_t **lock, int cpu,
+                                                   spinlock_t **lock,
+                                                   unsigned long *flags, int cpu,
                                                    s_time_t now)
 {
     struct sched_unit *next;
@@ -2500,7 +2501,7 @@ static struct sched_unit *sched_wait_rendezvous_in(struct sched_unit *prev,
                 prev->rendezvous_in_cnt++;
                 atomic_set(&prev->rendezvous_out_cnt, 0);
 
-                pcpu_schedule_unlock_irq(*lock, cpu);
+                pcpu_schedule_unlock_irqrestore(*lock, *flags, cpu);
 
                 sched_context_switch(vprev, v, false, now);
 
@@ -2530,7 +2531,7 @@ static struct sched_unit *sched_wait_rendezvous_in(struct sched_unit *prev,
             prev->rendezvous_in_cnt++;
             atomic_set(&prev->rendezvous_out_cnt, 0);
 
-            pcpu_schedule_unlock_irq(*lock, cpu);
+            pcpu_schedule_unlock_irqrestore(*lock, *flags, cpu);
 
             raise_softirq(SCHED_SLAVE_SOFTIRQ);
             sched_context_switch(vprev, vprev, false, now);
@@ -2538,11 +2539,11 @@ static struct sched_unit *sched_wait_rendezvous_in(struct sched_unit *prev,
             return NULL;         /* ARM only. */
         }
 
-        pcpu_schedule_unlock_irq(*lock, cpu);
+        pcpu_schedule_unlock_irqrestore(*lock, *flags, cpu);
 
         cpu_relax();
 
-        *lock = pcpu_schedule_lock_irq(cpu);
+        *lock = pcpu_schedule_lock_irqsave(cpu, flags);
 
         /*
          * Check for scheduling resource switched. This happens when we are
@@ -2557,7 +2558,7 @@ static struct sched_unit *sched_wait_rendezvous_in(struct sched_unit *prev,
             ASSERT(is_idle_unit(prev));
             atomic_set(&prev->next_task->rendezvous_out_cnt, 0);
             prev->rendezvous_in_cnt = 0;
-            pcpu_schedule_unlock_irq(*lock, cpu);
+            pcpu_schedule_unlock_irqrestore(*lock, *flags, cpu);
             rcu_read_unlock(&sched_res_rculock);
             return NULL;
         }
@@ -2574,12 +2575,13 @@ static void sched_slave(void)
     spinlock_t           *lock;
     bool                  do_softirq = false;
     unsigned int          cpu = smp_processor_id();
+    unsigned long         flags;
 
     ASSERT_NOT_IN_ATOMIC();
 
     rcu_read_lock(&sched_res_rculock);
 
-    lock = pcpu_schedule_lock_irq(cpu);
+    lock = pcpu_schedule_lock_irqsave(cpu, &flags);
 
     now = NOW();
 
@@ -2590,7 +2592,7 @@ static void sched_slave(void)
 
         if ( v )
         {
-            pcpu_schedule_unlock_irq(lock, cpu);
+            pcpu_schedule_unlock_irqrestore(lock, flags, cpu);
 
             sched_context_switch(vprev, v, false, now);
 
@@ -2602,7 +2604,7 @@ static void sched_slave(void)
 
     if ( !prev->rendezvous_in_cnt )
     {
-        pcpu_schedule_unlock_irq(lock, cpu);
+        pcpu_schedule_unlock_irqrestore(lock, flags, cpu);
 
         rcu_read_unlock(&sched_res_rculock);
 
@@ -2615,11 +2617,11 @@ static void sched_slave(void)
 
     stop_timer(&get_sched_res(cpu)->s_timer);
 
-    next = sched_wait_rendezvous_in(prev, &lock, cpu, now);
+    next = sched_wait_rendezvous_in(prev, &lock, &flags, cpu, now);
     if ( !next )
         return;
 
-    pcpu_schedule_unlock_irq(lock, cpu);
+    pcpu_schedule_unlock_irqrestore(lock, flags, cpu);
 
     sched_context_switch(vprev, sched_unit2vcpu_cpu(next, cpu),
                          is_idle_unit(next) && !is_idle_unit(prev), now);
@@ -2637,6 +2639,7 @@ static void schedule(void)
     s_time_t              now;
     struct sched_resource *sr;
     spinlock_t           *lock;
+    unsigned long         flags;
     int cpu = smp_processor_id();
     unsigned int          gran;
 
@@ -2646,7 +2649,7 @@ static void schedule(void)
 
     rcu_read_lock(&sched_res_rculock);
 
-    lock = pcpu_schedule_lock_irq(cpu);
+    lock = pcpu_schedule_lock_irqsave(cpu, &flags);
 
     sr = get_sched_res(cpu);
     gran = sr->granularity;
@@ -2657,7 +2660,7 @@ static void schedule(void)
          * We have a race: sched_slave() should be called, so raise a softirq
          * in order to re-enter schedule() later and call sched_slave() now.
          */
-        pcpu_schedule_unlock_irq(lock, cpu);
+        pcpu_schedule_unlock_irqrestore(lock, flags, cpu);
 
         rcu_read_unlock(&sched_res_rculock);
 
@@ -2676,7 +2679,7 @@ static void schedule(void)
         prev->rendezvous_in_cnt = gran;
         cpumask_andnot(mask, sr->cpus, cpumask_of(cpu));
         cpumask_raise_softirq(mask, SCHED_SLAVE_SOFTIRQ);
-        next = sched_wait_rendezvous_in(prev, &lock, cpu, now);
+        next = sched_wait_rendezvous_in(prev, &lock, &flags, cpu, now);
         if ( !next )
             return;
     }
@@ -2687,7 +2690,7 @@ static void schedule(void)
         atomic_set(&next->rendezvous_out_cnt, 0);
     }
 
-    pcpu_schedule_unlock_irq(lock, cpu);
+    pcpu_schedule_unlock_irqrestore(lock, flags, cpu);
 
     vnext = sched_unit2vcpu_cpu(next, cpu);
     sched_context_switch(vprev, vnext,
-- 
2.29.2



 

