WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-changelog

[Xen-changelog] Merge

ChangeSet 1.1159.170.110, 2005/04/05 12:27:50+01:00, sd386@xxxxxxxxxxxxxxxxx

        Merge



 tools/libxc/Makefile                      |    1 
 tools/python/xen/xend/server/SrvDomain.py |   11 +++
 xen/common/schedule.c                     |   95 ++++++++++++++++++++++++++++--
 3 files changed, 102 insertions(+), 5 deletions(-)


diff -Nru a/tools/libxc/Makefile b/tools/libxc/Makefile
--- a/tools/libxc/Makefile      2005-05-09 14:06:27 -04:00
+++ b/tools/libxc/Makefile      2005-05-09 14:06:27 -04:00
@@ -16,6 +16,7 @@
 INCLUDES += -I $(XEN_LIBXUTIL)
 
 SRCS     :=
+SRCS     += xc_sedf.c
 SRCS     += xc_atropos.c
 SRCS     += xc_bvtsched.c
 SRCS     += xc_domain.c
diff -Nru a/tools/python/xen/xend/server/SrvDomain.py b/tools/python/xen/xend/server/SrvDomain.py
--- a/tools/python/xen/xend/server/SrvDomain.py 2005-05-09 14:06:27 -04:00
+++ b/tools/python/xen/xend/server/SrvDomain.py 2005-05-09 14:06:27 -04:00
@@ -116,6 +116,17 @@
                      ['xtratime', 'int']])
         val = fn(req.args, {'dom': self.dom.id})
         return val
+    
+    def op_cpu_sedf_set(self, op, req):
+        fn = FormFn(self.xd.domain_cpu_sedf_set,
+                    [['dom', 'str'],
+                     ['period', 'int'],
+                     ['slice', 'int'],
+                    ['latency', 'int'],
+                    ['extratime', 'int'],
+                    ['weight', 'int']])
+        val = fn(req.args, {'dom': self.dom.id})
+        return val
 
     def op_maxmem_set(self, op, req):
         fn = FormFn(self.xd.domain_maxmem_set,
diff -Nru a/xen/common/schedule.c b/xen/common/schedule.c
--- a/xen/common/schedule.c     2005-05-09 14:06:27 -04:00
+++ b/xen/common/schedule.c     2005-05-09 14:06:27 -04:00
@@ -34,6 +34,8 @@
 
 /*#define WAKE_HISTO*/
 /*#define BLOCKTIME_HISTO*/
+/*#define ADV_SCHED_HISTO*/
+//#include <xen/adv_sched_hist.h>
 
 #if defined(WAKE_HISTO)
 #define BUCKETS 31
@@ -72,10 +74,12 @@
 extern struct scheduler sched_bvt_def;
 extern struct scheduler sched_rrobin_def;
 extern struct scheduler sched_atropos_def;
+extern struct scheduler sched_sedf_def;
 static struct scheduler *schedulers[] = { 
     &sched_bvt_def,
     &sched_rrobin_def,
     &sched_atropos_def,
+    &sched_sedf_def,
     NULL
 };
 
@@ -192,6 +196,10 @@
 /* Block the currently-executing domain until a pertinent event occurs. */
 long do_block(void)
 {
+#ifdef ADV_SCHED_HISTO
+    adv_sched_hist_start(current->processor);
+#endif
+
     ASSERT(current->id != IDLE_DOMAIN_ID);
     current->shared_info->vcpu_data[0].evtchn_upcall_mask = 0;
     set_bit(DF_BLOCKED, &current->flags);
@@ -203,6 +211,10 @@
 /* Voluntarily yield the processor for this allocation. */
 static long do_yield(void)
 {
+#ifdef ADV_SCHED_HISTO
+    adv_sched_hist_start(current->processor);
+#endif
+    
     TRACE_2D(TRC_SCHED_YIELD, current->id, current);
     __enter_scheduler();
     return 0;
@@ -285,7 +297,7 @@
 
     if ( cmd->sched_id != ops.sched_id )
         return -EINVAL;
-
+    
     if ( cmd->direction != SCHED_INFO_PUT && cmd->direction != SCHED_INFO_GET )
         return -EINVAL;
 
@@ -319,8 +331,14 @@
     perfc_incrc(sched_run);
     
     spin_lock_irq(&schedule_data[cpu].schedule_lock);
- 
+
+#ifdef ADV_SCHED_HISTO
+    adv_sched_hist_from_stop(cpu);
+#endif
     now = NOW();
+#ifdef ADV_SCHED_HISTO
+    adv_sched_hist_start(cpu);
+#endif
 
     rem_ac_timer(&schedule_data[cpu].s_timer);
     
@@ -356,9 +374,12 @@
 
     spin_unlock_irq(&schedule_data[cpu].schedule_lock);
 
-    if ( unlikely(prev == next) )
+    if ( unlikely(prev == next) ) {
+#ifdef ADV_SCHED_HISTO
+        adv_sched_hist_to_stop(cpu);
+#endif
         return;
-    
+    }
     perfc_incrc(sched_ctx);
 
     cleanup_writable_pagetable(prev);
@@ -382,7 +403,6 @@
 #endif
 
     TRACE_2D(TRC_SCHED_SWITCH, next->id, next);
-
     switch_to(prev, next);
 
     /*
@@ -397,6 +417,10 @@
     if ( !is_idle_task(next) && update_dom_time(next) )
         send_guest_virq(next, VIRQ_TIMER);
 
+
+#ifdef ADV_SCHED_HISTO
+    adv_sched_hist_to_stop(cpu);
+#endif
     schedule_tail(next);
 
     BUG();
@@ -420,6 +444,10 @@
 /* The scheduler timer: force a run through the scheduler*/
 static void s_timer_fn(unsigned long unused)
 {
+#ifdef ADV_SCHED_HISTO
+    adv_sched_hist_start(current->processor);
+#endif
+
     TRACE_0D(TRC_SCHED_S_TIMER_FN);
     raise_softirq(SCHEDULE_SOFTIRQ);
     perfc_incrc(sched_irq);
@@ -560,6 +588,63 @@
             schedule_data[j].hist[i] = 0;
 }
 #else
+#if defined(ADV_SCHED_HISTO)
+void print_sched_histo(unsigned char key)
+{
+    int i, j, k,t;
+    printf("Hello!\n");
+    for ( k = 0; k < smp_num_cpus; k++ )
+    {
+        j = 0;
+       t = 0;
+        printf ("CPU[%02d]: scheduler latency histogram FROM (ms:[count])\n", k);
+        for ( i = 0; i < BUCKETS; i++ )
+        {
+            //if ( schedule_data[k].hist[i] != 0 )
+            {
+               t += schedule_data[k].from_hist[i];
+                if ( i < BUCKETS-1 )
+                    printk("%3d:[%7u]    ", i, schedule_data[k].from_hist[i]);
+                else
+                    printk(" >:[%7u]    ", schedule_data[k].from_hist[i]);
+                //if ( !(++j % 5) )
+                    printk("\n");
+            }
+        }
+        printk("\nTotal: %i\n",t);
+    }
+    for ( k = 0; k < smp_num_cpus; k++ )
+    {
+        j = 0; t = 0;
+        printf ("CPU[%02d]: scheduler latency histogram TO (ms:[count])\n", k);
+        for ( i = 0; i < BUCKETS; i++ )
+        {
+            //if ( schedule_data[k].hist[i] != 0 )
+            {
+               t += schedule_data[k].from_hist[i];
+                if ( i < BUCKETS-1 )
+                    printk("%3d:[%7u]    ", i, schedule_data[k].to_hist[i]);
+                else
+                    printk(" >:[%7u]    ", schedule_data[k].to_hist[i]);
+                //if ( !(++j % 5) )
+                    printk("\n");
+            }
+        }
+       printk("\nTotal: %i\n",t);
+    }
+      
+}
+void reset_sched_histo(unsigned char key)
+{
+    int i, j;
+    for ( j = 0; j < smp_num_cpus; j++ ) {
+        for ( i=0; i < BUCKETS; i++ ) 
+            schedule_data[j].to_hist[i] = schedule_data[j].from_hist[i] = 0;
+        schedule_data[j].save_tsc = 0;
+    }
+}
+#else
 void print_sched_histo(unsigned char key) { }
 void reset_sched_histo(unsigned char key) { }
+#endif
 #endif

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

[Prev in Thread] Current Thread [Next in Thread]