
[PATCH] sched: print information about scheduler granularity



Currently it might not be obvious which scheduling granularity is being
used by the scheduler. Alleviate this by printing additional information
about the selected granularity. The messages now look like this:

1. boot
(XEN) [00089808f0ea7496] Using scheduler: SMP Credit Scheduler (credit) in core-scheduling mode

2. xl debug-keys r
(XEN) [   45.914314] Scheduler: SMP Credit Scheduler (credit) in 2-way core-scheduling mode
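
For reviewers' convenience, here is a small standalone sketch (plain C,
not part of the patch) that mimics the two snprintf() formats used by the
new sched_gran_str() helper below, showing the strings expected before
sched_granularity has been set (boot time) and after (runtime dump); the
"2-way" value is just an assumed example matching the message above:

/* Standalone illustration only -- not Xen code. */
#include <stdio.h>

int main(void)
{
    static const char *const modes[] = { "cpu", "core", "socket" };
    char str[20];
    unsigned int i;

    for ( i = 0; i < sizeof(modes) / sizeof(modes[0]); i++ )
    {
        /* At scheduler_init() time sched_granularity is still 0,
         * so only the granularity name is printed. */
        snprintf(str, sizeof(str), "%s", modes[i]);
        printf("boot: Using scheduler: ... in %s-scheduling mode\n", str);

        /* Later, e.g. with 2 CPUs per scheduling resource. */
        snprintf(str, sizeof(str), "%u-way %s", 2u, modes[i]);
        printf("dump: Scheduler: ... in %s-scheduling mode\n", str);
    }

    return 0;
}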

Signed-off-by: Sergey Dyasli <sergey.dyasli@xxxxxxxxxx>
---
CC: Juergen Gross <jgross@xxxxxxxx>
CC: Dario Faggioli <dfaggioli@xxxxxxxx>
CC: George Dunlap <george.dunlap@xxxxxxxxxx>
CC: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/common/sched/core.c    | 10 ++++++++--
 xen/common/sched/cpupool.c | 30 +++++++++++++++++++++++++++++-
 xen/common/sched/private.h |  2 ++
 3 files changed, 39 insertions(+), 3 deletions(-)

diff --git a/xen/common/sched/core.c b/xen/common/sched/core.c
index d4a6489929..b1b09a159b 100644
--- a/xen/common/sched/core.c
+++ b/xen/common/sched/core.c
@@ -2883,6 +2883,7 @@ void scheduler_enable(void)
 void __init scheduler_init(void)
 {
     struct domain *idle_domain;
+    char sched_gran[20];
     int i;
 
     scheduler_enable();
@@ -2937,7 +2938,9 @@ void __init scheduler_init(void)
         BUG();
     register_cpu_notifier(&cpu_schedule_nfb);
 
-    printk("Using scheduler: %s (%s)\n", ops.name, ops.opt_name);
+    printk("Using scheduler: %s (%s) in %s-scheduling mode\n",
+           ops.name, ops.opt_name,
+           sched_gran_str(sched_gran, sizeof(sched_gran)));
     if ( sched_init(&ops) )
         panic("scheduler returned error on init\n");
 
@@ -3267,6 +3270,7 @@ void schedule_dump(struct cpupool *c)
     unsigned int      i, j;
     struct scheduler *sched;
     cpumask_t        *cpus;
+    char              sched_gran[20];
 
     /* Locking, if necessary, must be handled within each scheduler */
 
@@ -3276,7 +3280,9 @@ void schedule_dump(struct cpupool *c)
     {
         sched = c->sched;
         cpus = c->res_valid;
-        printk("Scheduler: %s (%s)\n", sched->name, sched->opt_name);
+        printk("Scheduler: %s (%s) in %s-scheduling mode\n",
+               sched->name, sched->opt_name,
+               sched_gran_str(sched_gran, sizeof(sched_gran)));
         sched_dump_settings(sched);
     }
     else
diff --git a/xen/common/sched/cpupool.c b/xen/common/sched/cpupool.c
index d40345b585..a37b97f4c2 100644
--- a/xen/common/sched/cpupool.c
+++ b/xen/common/sched/cpupool.c
@@ -38,7 +38,35 @@ static cpumask_t cpupool_locked_cpus;
 static DEFINE_SPINLOCK(cpupool_lock);
 
 static enum sched_gran __read_mostly opt_sched_granularity = SCHED_GRAN_cpu;
-static unsigned int __read_mostly sched_granularity = 1;
+static unsigned int __read_mostly sched_granularity;
+
+char *sched_gran_str(char *str, size_t size)
+{
+    char *mode = "";
+
+    switch ( opt_sched_granularity )
+    {
+    case SCHED_GRAN_cpu:
+        mode = "cpu";
+        break;
+    case SCHED_GRAN_core:
+        mode = "core";
+        break;
+    case SCHED_GRAN_socket:
+        mode = "socket";
+        break;
+    default:
+        ASSERT_UNREACHABLE();
+        break;
+    }
+
+    if ( sched_granularity )
+        snprintf(str, size, "%u-way %s", sched_granularity, mode);
+    else
+        snprintf(str, size, "%s", mode);
+
+    return str;
+}
 
 #ifdef CONFIG_HAS_SCHED_GRANULARITY
 static int __init sched_select_granularity(const char *str)
diff --git a/xen/common/sched/private.h b/xen/common/sched/private.h
index 367811a12f..fd49f545cb 100644
--- a/xen/common/sched/private.h
+++ b/xen/common/sched/private.h
@@ -30,6 +30,8 @@ enum sched_gran {
     SCHED_GRAN_socket
 };
 
+char *sched_gran_str(char *str, size_t size);
+
 /*
  * In order to allow a scheduler to remap the lock->cpu mapping,
  * we have a per-cpu pointer, along with a pre-allocated set of
-- 
2.17.1