[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [RFC PATCH 2/2] xen: credit2: cached attribute for next runqueue entry



The patch introduces a cached attribute (next_elem) in csched2_runqueue_data
which keeps track of the next runqueue candidate, i.e. the vCPU with the
maximum credit score within the runqueue. This saves an unnecessary tree
traversal at scheduling time. The cached element is updated whenever a vcpu
is added to or removed from the runqueue.

Signed-off-by: Praveen Kumar <kpraveen.lkml@xxxxxxxxx>
---
Changed since v1:
 * Newly introduced patch
---
 xen/common/sched_credit2.c | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 2463a25f87..caa65f37c1 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -473,6 +473,7 @@ struct csched2_runqueue_data {
     spinlock_t lock;           /* Lock for this runqueue                     */
 
     struct rb_root runq;       /* Runqueue is an rbtree                      */
+    struct rb_node *next_elem; /* Cached entry to run next                   */
     int id;                    /* ID of this runqueue (-1 if invalid)        */
 
     int load;                  /* Instantaneous load (num of non-idle vcpus) */
@@ -1301,6 +1302,7 @@ runq_insert(const struct scheduler *ops, struct csched2_vcpu *svc)
 {
     unsigned int cpu = svc->vcpu->processor;
     struct rb_root *runq = &c2rqd(ops, cpu)->runq;
+    struct csched2_runqueue_data *rqd = svc->rqd;
     int pos = 0;
 
     ASSERT(spin_is_locked(per_cpu(schedule_data, cpu).schedule_lock));
@@ -1314,6 +1316,7 @@ runq_insert(const struct scheduler *ops, struct csched2_vcpu *svc)
     ASSERT(!(svc->flags & CSFLAG_scheduled));
 
     rb_runq_insert(runq, svc, &pos);
+    rqd->next_elem = rb_last(runq);
 
     if ( unlikely(tb_init_done) )
     {
@@ -1332,9 +1335,13 @@ runq_insert(const struct scheduler *ops, struct csched2_vcpu *svc)
 
 static inline void runq_remove(struct csched2_vcpu *svc)
 {
+    struct csched2_runqueue_data *rqd = svc->rqd;
+
     ASSERT(vcpu_on_runq(svc));
     rb_erase(&svc->runq_elem, &svc->rqd->runq);
     RB_CLEAR_NODE(&svc->runq_elem);
+
+    rqd->next_elem = rb_last(&rqd->runq);
 }
 
 void burn_credits(struct csched2_runqueue_data *rqd, struct csched2_vcpu *, s_time_t);
@@ -3230,9 +3237,7 @@ csched2_runtime(const struct scheduler *ops, int cpu,
     if ( ! RB_EMPTY_ROOT(runq) )
     {
         // Find the left most element, which is the most probable candidate
-        struct rb_node *node = rb_last(runq);
-
-        struct csched2_vcpu *swait = runq_elem(node);
+        struct csched2_vcpu *swait = runq_elem(rqd->next_elem);
         if ( ! is_idle_vcpu(swait->vcpu)
              && swait->credit > 0 )
         {
@@ -3372,7 +3377,7 @@ runq_candidate(struct csched2_runqueue_data *rqd,
         snext = csched2_vcpu(idle_vcpu[cpu]);
 
  check_runq:
-    for (iter = rb_last(&rqd->runq); iter != NULL; iter = rb_prev(iter)) {
+    for (iter = rqd->next_elem; iter != NULL; iter = rb_prev(iter)) {
         struct csched2_vcpu * svc = rb_entry(iter, struct csched2_vcpu, runq_elem);
 
         if ( unlikely(tb_init_done) )
-- 
2.13.7


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.