
Re: [Xen-devel] [PATCH v3 1/4] xen: add real time scheduler rtds



On Sun, 2014-09-14 at 17:37 -0400, Meng Xu wrote:

> This is an experimental scheduler.
> 
> Signed-off-by: Meng Xu <mengxu@xxxxxxxxxxxxx>
> Signed-off-by: Sisu Xi <xisisu@xxxxxxxxx>
>
So, I just finished looking at this patch too, and it really looks fine
this time. Thanks Meng and Sisu for the good work!

I'm leaving a few comments below, but they really are nothing more than
nits, nothing that requires resending, IMO.

That being said, I'd like to quickly test running Xen with this
scheduler in a few configurations, but I can't access my test boxes
until the afternoon.

I'll give it a go in no more than a few hours and, if everything goes
fine as I think it will, I'll reply again with the proper tag.

> --- /dev/null
> +++ b/xen/common/sched_rt.c

> +/*
> + * Init/Free related code
> + */
> +static int
> +rt_init(struct scheduler *ops)
> +{
> +    struct rt_private *prv = xzalloc(struct rt_private);
> +
> +    printk("Initializing RTDS scheduler\n" \
> +           "WARNING: This is experimental software in development.\n" \
> +           "Use at your own risk.\n");
>
I don't think you need the '\' in this case...
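
I mean, adjacent string literals are concatenated by the compiler
anyway, so something like this (just a sketch) would do:

    printk("Initializing RTDS scheduler\n"
           "WARNING: This is experimental software in development.\n"
           "Use at your own risk.\n");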

> +
> +    if ( prv == NULL )
> +        return -ENOMEM;
> +
> +    spin_lock_init(&prv->lock);
> +    INIT_LIST_HEAD(&prv->sdom);
> +    INIT_LIST_HEAD(&prv->runq);
> +    INIT_LIST_HEAD(&prv->depletedq);
> +
> +    cpumask_clear(&prv->tickled);
> +
> +    ops->sched_data = prv;
> +
> +    return 0;
> +}

> +/*
> + * Update vcpu's budget and sort runq by insert the modifed vcpu back to runq
> + * lock is grabbed before calling this function
> + */
> +static void
> +__repl_update(const struct scheduler *ops, s_time_t now)
> +{
> +    struct list_head *runq = rt_runq(ops);
> +    struct list_head *depletedq = rt_depletedq(ops);
> +    struct list_head *iter;
> +    struct list_head *tmp;
> +    struct rt_vcpu *svc = NULL;
> +
> +    list_for_each_safe(iter, tmp, runq)
> +    {
> +        svc = __q_elem(iter);
> +
> +        if ( now >= svc->cur_deadline )
> +        {
> +            rt_update_deadline(now, svc);
> +            /* reinsert the vcpu if its deadline is updated */
> +            __q_remove(svc);
> +            __runq_insert(ops, svc);
> +        }
> +        else
> +            break;
>
Just from an aesthetic perspective, I think I'd have inverted the
condition and, hence, the two branches (i.e., "if ( < ) break; else {}").

But, please, *do* *not* *resend* only for the sake of this!! :-D :-D
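
Just to show what I mean, the loop body would become something like
this (again, only a sketch):

    if ( now < svc->cur_deadline )
        break;

    rt_update_deadline(now, svc);
    /* reinsert the vcpu if its deadline is updated */
    __q_remove(svc);
    __runq_insert(ops, svc);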

> +    }
> +
> +    list_for_each_safe(iter, tmp, depletedq)
> +    {
> +        svc = __q_elem(iter);
> +        if ( now >= svc->cur_deadline )
> +        {
> +            rt_update_deadline(now, svc);
> +            __q_remove(svc); /* remove from depleted queue */
> +            __runq_insert(ops, svc); /* add to runq */
> +        }
> +    }
> +}

> +/*
> + * Pick a cpu where to run a vcpu, possibly kicking out the vcpu running 
> there
> + * Called by wake() and context_saved()
> + * We have a running candidate here, the kick logic is:
> + * Among all the cpus that are within the cpu affinity
> + * 1) if the new->cpu is idle, kick it. This could benefit cache hit
> + * 2) if there are any idle vcpu, kick it.                                   
> + * 3) now all pcpus are busy, among all the running vcpus, pick lowest 
> priority one
>
Long line.

> + *    if snext has higher priority, kick it.
> + *
> + * TODO:
> + * 1) what if these two vcpus belongs to the same domain?
> + *    replace a vcpu belonging to the same domain introduces more overhead
> + *
> + * lock is grabbed before calling this function
> + */
> +static void
> +runq_tickle(const struct scheduler *ops, struct rt_vcpu *new)
> +{
> +    struct rt_private *prv = rt_priv(ops);
> +    struct rt_vcpu *latest_deadline_vcpu = NULL;    /* lowest priority 
> scheduled */
> +    struct rt_vcpu *iter_svc;
> +    struct vcpu *iter_vc;
> +    int cpu = 0, cpu_to_tickle = 0;
> +    cpumask_t not_tickled;
> +    cpumask_t *online;
> +
> +    if ( new == NULL || is_idle_vcpu(new->vcpu) )
> +        return;
> +
> +    online = cpupool_scheduler_cpumask(new->vcpu->domain->cpupool);
> +    cpumask_and(&not_tickled, online, new->vcpu->cpu_hard_affinity);
> +    cpumask_andnot(&not_tickled, &not_tickled, &prv->tickled);
> +
> +    /* 1) if new's previous cpu is idle, kick it for cache benefit */
> +    if ( is_idle_vcpu(curr_on_cpu(new->vcpu->processor)) )
> +    {
> +        cpu_to_tickle = new->vcpu->processor;
> +        goto out;
> +    }
> +
> +    /* 2) if there are any idle pcpu, kick it */
> +    /* The same loop also find the one with lowest priority */
> +    for_each_cpu(cpu, &not_tickled)
> +    {
> +        iter_vc = curr_on_cpu(cpu);
> +        if ( is_idle_vcpu(iter_vc) )
> +        {
> +            cpu_to_tickle = cpu;
> +            goto out;
> +        }
> +        iter_svc = rt_vcpu(iter_vc);
> +        if ( latest_deadline_vcpu == NULL ||
> +             iter_svc->cur_deadline > latest_deadline_vcpu->cur_deadline )
> +            latest_deadline_vcpu = iter_svc;
> +    }
> +
> +    /* 3) candicate has higher priority, kick out the lowest priority vcpu */
> +    if ( latest_deadline_vcpu != NULL && new->cur_deadline < 
> latest_deadline_vcpu->cur_deadline )
>
Long line again.
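
E.g., just wrapping the condition would be enough:

    if ( latest_deadline_vcpu != NULL &&
         new->cur_deadline < latest_deadline_vcpu->cur_deadline )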

> +    {
> +        cpu_to_tickle = latest_deadline_vcpu->vcpu->processor;
> +        goto out;
> +    }
> +
> +out:
> +    /* TRACE */
> +    {
> +        struct {
> +            unsigned cpu:8, pad:24;
> +        } d;
> +        d.cpu = cpu_to_tickle;
> +        d.pad = 0;
> +        trace_var(TRC_RTDS_TICKLE, 0,
> +                  sizeof(d),
> +                  (unsigned char *)&d);
> +    }
> +
> +    cpumask_set_cpu(cpu_to_tickle, &prv->tickled);
> +    cpu_raise_softirq(cpu_to_tickle, SCHEDULE_SOFTIRQ);
> +    return;
> +}
> +
> +/*
> + * Should always wake up runnable vcpu, put it back to RunQ.
> + * Check priority to raise interrupt
> + * The lock is already grabbed in schedule.c, no need to lock here
> + * TODO: what if these two vcpus belongs to the same domain?
> + */
> +static void
> +rt_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
> +{
> +    struct rt_vcpu * const svc = rt_vcpu(vc);
> +    s_time_t now = NOW();
> +    struct rt_private *prv = rt_priv(ops);
> +    struct rt_vcpu *snext = NULL;        /* highest priority on RunQ */
> +    struct rt_dom *sdom = NULL;
> +    cpumask_t *online;
> +
> +    BUG_ON( is_idle_vcpu(vc) );
> +
> +    if ( unlikely(curr_on_cpu(vc->processor) == vc) )
> +        return;
> +
> +    /* on RunQ/DepletedQ, just update info is ok */
> +    if ( unlikely(__vcpu_on_q(svc)) )
> +        return;
> +
How frequently does it happen that we wake up a running, or an already
woken and queued, vcpu? That should not happen too often, right?

Again, this is fine as it is right now, and there's nothing you should
do. However, and I'm just mentioning it here in order not to forget
about it :-P, in future we may want to add at least a trace point here.
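
Something modeled on the TRACE block you already have in runq_tickle()
would probably be enough. Just a sketch, and TRC_RTDS_VCPU_WAKE_ONQ is a
made-up event ID (it would need to be defined alongside the other
TRC_RTDS_* values):

    /* vcpu is already queued: record the event and bail, as before */
    if ( unlikely(__vcpu_on_q(svc)) )
    {
        struct {
            unsigned vcpu:16, dom:16;
        } d;
        d.dom = vc->domain->domain_id;
        d.vcpu = vc->vcpu_id;
        trace_var(TRC_RTDS_VCPU_WAKE_ONQ, 0, sizeof(d),
                  (unsigned char *)&d);
        return;
    }

The same could be done for the curr_on_cpu() check just above it.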

Regards,
Dario

-- 
<<This happens because I choose it to happen!>> (Raistlin Majere)
-----------------------------------------------------------------
Dario Faggioli, Ph.D, http://about.me/dario.faggioli
Senior Software Engineer, Citrix Systems R&D Ltd., Cambridge (UK)

