[Xen-devel] [PATCH V2 1/1] Improved RTDS scheduler
Budget replenishment is now handled by a dedicated timer that is
programmed to fire at the most imminent release time among all runnable
vcpus.
Changes since V1:
None
Signed-off-by: Tianyang Chen <tiche@xxxxxxxxxxxxxx>
Signed-off-by: Meng Xu <mengxu@xxxxxxxxxxxxx>
Signed-off-by: Dagaen Golomb <dgolomb@xxxxxxxxxxxxxx>
---
xen/common/sched_rt.c | 159 +++++++++++++++++++++++++++++--------------------
1 file changed, 95 insertions(+), 64 deletions(-)
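
To make the new flow easier to follow, below is a small self-contained
C model of the re-arming pattern this patch introduces. It is only an
illustrative sketch, not Xen code: toy_vcpu, replenish() and
next_firing() are made-up stand-ins for rt_vcpu, rt_update_deadline()
and the body of repl_handler(), and DEFAULT_PERIOD stands in for
RTDS_DEFAULT_PERIOD.

#include <stdint.h>
#include <stdio.h>

#define DEFAULT_PERIOD 5  /* stand-in for RTDS_DEFAULT_PERIOD */

struct toy_vcpu {
    int64_t period;        /* replenishment period */
    int64_t cur_deadline;  /* next release/deadline instant */
};

/* Stand-in for rt_update_deadline(): move the deadline past 'now'. */
static void replenish(struct toy_vcpu *v, int64_t now)
{
    while ( v->cur_deadline <= now )
        v->cur_deadline += v->period;
}

/*
 * Model of the handler body: replenish every overdue vcpu, track the
 * earliest updated release time, and return the instant at which to
 * re-arm the one-shot timer, falling back to now + DEFAULT_PERIOD if
 * no vcpu needed a replenishment.
 */
static int64_t next_firing(struct toy_vcpu *vcpus, int n, int64_t now)
{
    int64_t min_repl = INT64_MAX;
    int i;

    for ( i = 0; i < n; i++ )
    {
        if ( now >= vcpus[i].cur_deadline )
        {
            replenish(&vcpus[i], now);
            if ( vcpus[i].cur_deadline < min_repl )
                min_repl = vcpus[i].cur_deadline;
        }
    }
    return (min_repl == INT64_MAX) ? now + DEFAULT_PERIOD : min_repl;
}

int main(void)
{
    struct toy_vcpu vcpus[] = { { 4, 4 }, { 6, 6 } };
    int64_t now = 0;

    while ( now < 20 )  /* simulate a few firings of the timer */
    {
        now = next_firing(vcpus, 2, now);
        printf("timer fires at %lld\n", (long long)now);
    }
    return 0;
}

Compiled stand-alone, this prints firing instants 5, 8, 12, 16 and 20:
each firing replenishes the overdue vcpus and arms the one-shot timer
for the earliest updated release time, falling back to now +
DEFAULT_PERIOD when nothing was overdue.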
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 4372486..d522272 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -16,6 +16,7 @@
#include <xen/delay.h>
#include <xen/event.h>
#include <xen/time.h>
+#include <xen/timer.h>
#include <xen/perfc.h>
#include <xen/sched-if.h>
#include <xen/softirq.h>
@@ -147,6 +148,16 @@ static unsigned int nr_rt_ops;
* Global lock is referenced by schedule_data.schedule_lock from all
* physical cpus. It can be grabbed via vcpu_schedule_lock_irq()
*/
+
+/* dedicated timer for replenishment */
+static struct timer repl_timer;
+
+/* controls when to first start the timer */
+static int timer_started;
+
+/* handler for the replenishment timer */
+static void repl_handler(void *data);
+
struct rt_private {
spinlock_t lock; /* the global coarse grand lock */
struct list_head sdom; /* list of availalbe domains, used for dump */
@@ -426,6 +437,7 @@ __runq_insert(const struct scheduler *ops, struct rt_vcpu *svc)
static int
rt_init(struct scheduler *ops)
{
+ const int cpu = smp_processor_id();
struct rt_private *prv = xzalloc(struct rt_private);
printk("Initializing RTDS scheduler\n"
@@ -454,6 +466,8 @@ rt_init(struct scheduler *ops)
ops->sched_data = prv;
+ init_timer(&repl_timer, repl_handler, ops, 0);
+
return 0;
no_mem:
@@ -473,6 +487,9 @@ rt_deinit(const struct scheduler *ops)
xfree(_cpumask_scratch);
_cpumask_scratch = NULL;
}
+
+ kill_timer(&repl_timer);
+
xfree(prv);
}
@@ -635,6 +652,13 @@ rt_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
/* add rt_vcpu svc to scheduler-specific vcpu list of the dom */
list_add_tail(&svc->sdom_elem, &svc->sdom->vcpu);
+
+    if ( !timer_started )
+    {
+        /* the first vcpu starts the timer for the first time */
+        timer_started = 1;
+        set_timer(&repl_timer, svc->cur_deadline);
+    }
}
/*
@@ -792,44 +816,6 @@ __runq_pick(const struct scheduler *ops, const cpumask_t *mask)
}
/*
- * Update vcpu's budget and
- * sort runq by insert the modifed vcpu back to runq
- * lock is grabbed before calling this function
- */
-static void
-__repl_update(const struct scheduler *ops, s_time_t now)
-{
- struct list_head *runq = rt_runq(ops);
- struct list_head *depletedq = rt_depletedq(ops);
- struct list_head *iter;
- struct list_head *tmp;
- struct rt_vcpu *svc = NULL;
-
- list_for_each_safe(iter, tmp, runq)
- {
- svc = __q_elem(iter);
- if ( now < svc->cur_deadline )
- break;
-
- rt_update_deadline(now, svc);
- /* reinsert the vcpu if its deadline is updated */
- __q_remove(svc);
- __runq_insert(ops, svc);
- }
-
- list_for_each_safe(iter, tmp, depletedq)
- {
- svc = __q_elem(iter);
- if ( now >= svc->cur_deadline )
- {
- rt_update_deadline(now, svc);
- __q_remove(svc); /* remove from depleted queue */
- __runq_insert(ops, svc); /* add to runq */
- }
- }
-}
-
-/*
* schedule function for rt scheduler.
* The lock is already grabbed in schedule.c, no need to lock here
*/
@@ -848,7 +834,6 @@ rt_schedule(const struct scheduler *ops, s_time_t now, bool_t tasklet_work_sched
/* burn_budget would return for IDLE VCPU */
burn_budget(ops, scurr, now);
- __repl_update(ops, now);
if ( tasklet_work_scheduled )
{
@@ -889,7 +874,7 @@ rt_schedule(const struct scheduler *ops, s_time_t now, bool_t tasklet_work_sched
}
}
- ret.time = MIN(snext->budget, MAX_SCHEDULE); /* sched quantum */
+ ret.time = snext->budget; /* invoke the scheduler when snext's budget runs out */
ret.task = snext->vcpu;
/* TRACE */
@@ -1033,10 +1018,6 @@ rt_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
{
struct rt_vcpu * const svc = rt_vcpu(vc);
s_time_t now = NOW();
- struct rt_private *prv = rt_priv(ops);
- struct rt_vcpu *snext = NULL; /* highest priority on RunQ */
- struct rt_dom *sdom = NULL;
- cpumask_t *online;
BUG_ON( is_idle_vcpu(vc) );
@@ -1074,14 +1055,7 @@ rt_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
/* insert svc to runq/depletedq because svc is not in queue now */
__runq_insert(ops, svc);
- __repl_update(ops, now);
-
- ASSERT(!list_empty(&prv->sdom));
- sdom = list_entry(prv->sdom.next, struct rt_dom, sdom_elem);
- online = cpupool_scheduler_cpumask(sdom->dom->cpupool);
- snext = __runq_pick(ops, online); /* pick snext from ALL valid cpus */
-
- runq_tickle(ops, snext);
+ runq_tickle(ops, svc);
return;
}
@@ -1094,10 +1068,6 @@ static void
rt_context_saved(const struct scheduler *ops, struct vcpu *vc)
{
struct rt_vcpu *svc = rt_vcpu(vc);
- struct rt_vcpu *snext = NULL;
- struct rt_dom *sdom = NULL;
- struct rt_private *prv = rt_priv(ops);
- cpumask_t *online;
spinlock_t *lock = vcpu_schedule_lock_irq(vc);
clear_bit(__RTDS_scheduled, &svc->flags);
@@ -1108,15 +1078,8 @@ rt_context_saved(const struct scheduler *ops, struct vcpu *vc)
if ( test_and_clear_bit(__RTDS_delayed_runq_add, &svc->flags) &&
likely(vcpu_runnable(vc)) )
{
+ /* only insert the preempted vcpu back */
__runq_insert(ops, svc);
- __repl_update(ops, NOW());
-
- ASSERT(!list_empty(&prv->sdom));
- sdom = list_entry(prv->sdom.next, struct rt_dom, sdom_elem);
- online = cpupool_scheduler_cpumask(sdom->dom->cpupool);
- snext = __runq_pick(ops, online); /* pick snext from ALL cpus */
-
- runq_tickle(ops, snext);
}
out:
vcpu_schedule_unlock_irq(lock, vc);
@@ -1167,6 +1130,74 @@ rt_dom_cntl(
return rc;
}
+static void repl_handler(void *data)
+{
+    unsigned long flags;
+    s_time_t now = NOW();
+    s_time_t min_repl = LONG_MAX; /* max time used in comparison */
+    struct scheduler *ops = data;
+    struct rt_private *prv = rt_priv(ops);
+    struct list_head *runq = rt_runq(ops);
+    struct list_head *depletedq = rt_depletedq(ops);
+    struct list_head *iter;
+    struct list_head *tmp;
+    struct rt_vcpu *svc = NULL;
+
+    spin_lock_irqsave(&prv->lock, flags);
+
+    stop_timer(&repl_timer);
+
+    list_for_each_safe(iter, tmp, runq)
+    {
+        svc = __q_elem(iter);
+        if ( now < svc->cur_deadline )
+            break;
+        rt_update_deadline(now, svc);
+        /*
+         * Scan the runq to find the min release time;
+         * this happens when vcpus on the runq miss their deadlines.
+         */
+        if ( min_repl > svc->cur_deadline )
+        {
+            min_repl = svc->cur_deadline;
+        }
+        /* reinsert the vcpu if its deadline is updated */
+        __q_remove(svc);
+        __runq_insert(ops, svc);
+    }
+
+    list_for_each_safe(iter, tmp, depletedq)
+    {
+        svc = __q_elem(iter);
+        if ( now >= svc->cur_deadline )
+        {
+            rt_update_deadline(now, svc);
+
+            /* scan the depletedq to find the min release time */
+            if ( min_repl > svc->cur_deadline )
+            {
+                min_repl = svc->cur_deadline;
+            }
+            __q_remove(svc);
+            __runq_insert(ops, svc);
+            runq_tickle(ops, svc);
+        }
+    }
+
+    /*
+     * If the timer was triggered but none of the vcpus
+     * needed a replenishment, re-arm it one default
+     * period from now.
+     */
+    if ( min_repl == LONG_MAX )
+    {
+        set_timer(&repl_timer, now + RTDS_DEFAULT_PERIOD);
+    }
+    else
+    {
+        /* reprogram the timer for the most imminent release time */
+        set_timer(&repl_timer, min_repl);
+    }
+    spin_unlock_irqrestore(&prv->lock, flags);
+}
+
static struct rt_private _rt_priv;
const struct scheduler sched_rtds_def = {
--
1.7.9.5