[Xen-devel] [RFC PATCH v1 6/8] xen: sched: Credit2 group-scheduling: selecting next vcpu to run
When choosing which vcpu to run next, on a CPU which is in a group where
other vcpus are running already, only consider vcpus of the same domain
(as the vcpus that are already running!).
This is as easy as, in runq_candidate(), while traversing the runqueue,
skipping the vcpus that do not satisfy the group-scheduling constraints.
And now that such constraints are actually enforced, also add an ASSERT()
that checks that we really respect them.
Signed-off-by: Dario Faggioli <dfaggioli@xxxxxxxx>
---
Cc: George Dunlap <george.dunlap@xxxxxxxxxx>
---
TODO:
- Consider better the interactions between group-scheduling and
soft-affinity (in runq_candidate() @3481);
---
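For reviewers who want to eyeball the new constraint outside of the
scheduler, below is a minimal, self-contained sketch of the skip condition
that the runq_candidate() hunk enforces. The grpsched_must_skip() helper,
the stand-in structs and the example states are mine, not Xen code; they
only mirror the boolean logic of the patch:

#include <stdbool.h>
#include <stdio.h>

struct sdom { int id; };                      /* stand-in for a domain */

struct grpsched_data {                        /* stand-in for csched2_grpsched_data */
    const struct sdom *sdom;                  /* domain running in the group (NULL: idle) */
    unsigned int nr_running;                  /* how many of its vcpus are running */
};

/*
 * Mirror of the new filter in runq_candidate(): the candidate vcpu, owned
 * by csdom, must be skipped if the group is already running a different
 * domain, unless the group's only running vcpu is on this very CPU (so the
 * candidate would just preempt it), i.e. curr_sdom != NULL.
 */
static bool grpsched_must_skip(bool grpsched_enabled,
                               const struct grpsched_data *gscd,
                               const struct sdom *csdom,
                               const struct sdom *curr_sdom)
{
    return grpsched_enabled &&
           gscd->sdom != NULL && gscd->sdom != csdom &&
           !(gscd->nr_running == 1 && curr_sdom != NULL);
}

int main(void)
{
    struct sdom d1 = { 1 }, d2 = { 2 };
    struct grpsched_data busy = { &d1, 2 };   /* two d1 vcpus running in the group */
    struct grpsched_data lone = { &d1, 1 };   /* one d1 vcpu running, on this very CPU */
    struct grpsched_data idle = { NULL, 0 };  /* whole coscheduling group idle */

    printf("%d\n", grpsched_must_skip(true, &busy, &d2, &d1));  /* 1: skip d2's vcpu */
    printf("%d\n", grpsched_must_skip(true, &lone, &d2, &d1));  /* 0: d2 may preempt d1 */
    printf("%d\n", grpsched_must_skip(true, &idle, &d2, NULL)); /* 0: group is idle */
    return 0;
}
---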
xen/common/sched_credit2.c | 44 +++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 43 insertions(+), 1 deletion(-)
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index b11713e244..052e050394 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -3414,7 +3414,7 @@ csched2_runtime(const struct scheduler *ops, int cpu,
/*
* Find a candidate.
*/
-static struct csched2_vcpu *
+static noinline struct csched2_vcpu *
runq_candidate(struct csched2_runqueue_data *rqd,
struct csched2_vcpu *scurr,
int cpu, s_time_t now,
@@ -3423,8 +3423,19 @@ runq_candidate(struct csched2_runqueue_data *rqd,
struct list_head *iter, *temp;
struct csched2_vcpu *snext = NULL;
struct csched2_private *prv = csched2_priv(per_cpu(scheduler, cpu));
+ struct csched2_grpsched_data *gscd = c2gscd(cpu);
bool yield = false, soft_aff_preempt = false;
+ /*
+ * Some more sanity checking. With group scheduling enabled, either:
+ * - the whole coscheduling group is currently idle. Or,
+ * - this CPU is currently idle. Or,
+ * - this CPU is running a vcpu from the same domain as all the
+ * other ones that are running in the group (if any).
+ */
+ ASSERT(!grpsched_enabled() || gscd->sdom == NULL ||
+ scurr->sdom == NULL || gscd->sdom == scurr->sdom);
+
*skipped = 0;
if ( unlikely(is_idle_vcpu(scurr->vcpu)) )
@@ -3473,6 +3484,8 @@ runq_candidate(struct csched2_runqueue_data *rqd,
{
cpumask_t *online = cpupool_domain_cpumask(scurr->vcpu->domain);
+ /* XXX deal with grpsched_enabled() == true */
+
/* Ok, is any of the pcpus in scurr soft-affinity idle? */
cpumask_and(cpumask_scratch, cpumask_scratch, &rqd->idle);
cpumask_andnot(cpumask_scratch, cpumask_scratch, &rqd->tickled);
@@ -3528,6 +3541,23 @@ runq_candidate(struct csched2_runqueue_data *rqd,
continue;
}
+ /*
+ * If group scheduling is enabled, only consider svc if:
+ * - the whole group is idle. Or,
+ * - one or more other svc->sdom's vcpus are running already in the
+ * pCPUs of the coscheduling group. Or,
+ * - there is only one vcpu running in the whole coscheduling group,
+ * and it is running here on this CPU (and svc would preempt it).
+ */
+ if ( grpsched_enabled() &&
+ gscd->sdom != NULL && gscd->sdom != svc->sdom &&
+ !(gscd->nr_running == 1 && scurr->sdom != NULL) )
+ {
+ ASSERT(gscd->nr_running != 0);
+ (*skipped)++;
+ continue;
+ }
+
/*
* If a vcpu is meant to be picked up by another processor, and such
* processor has not scheduled yet, leave it in the runqueue for him.
@@ -3715,6 +3745,18 @@ csched2_schedule(
runq_remove(snext);
__set_bit(__CSFLAG_scheduled, &snext->flags);
+ /*
+ * If group scheduling is enabled, and we're switching to
+ * a non-idle vcpu, either:
+ * - snext is from the domain already running in the group, or
+ * - the whole coscheduling group was idle, or
+ * - there was only 1 vcpu running in the whole coscheduling group,
+ * and it was running on this CPU (i.e., this CPU was not idle).
+ */
+ ASSERT(!grpsched_enabled() || gscd->sdom == snext->sdom ||
+ (gscd->nr_running == 0 && gscd->sdom == NULL) ||
+ (gscd->nr_running == 1 && !is_idle_vcpu(scurr->vcpu)));
+
/* Track which domain is running in the coscheduling group */
gscd->sdom = snext->sdom;
if ( is_idle_vcpu(scurr->vcpu) )