
[Xen-devel] [PATCH RFC 12/49] xen/sched: introduce struct sched_resource



Add a scheduling abstraction layer between physical processors and the
schedulers by introducing a struct sched_resource. Each scheduler item
that is running is active on such a scheduler resource. For the time
being there is one struct sched_resource per cpu, but in future there
might be only one per core or per socket.
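
The pattern applied throughout the schedulers below is that every place
assigning v->processor now also updates the item's resource pointer. As
a minimal sketch of that invariant (the helper sched_set_res() is
hypothetical and not part of this patch; each call site open-codes the
two assignments instead):

    /*
     * Illustrative only: keep a scheduler item's resource in sync
     * with the cpu its vcpu has just been (re)assigned to.
     */
    static inline void sched_set_res(struct sched_item *item,
                                     unsigned int cpu)
    {
        item->vcpu->processor = cpu;
        item->res = per_cpu(sched_res, cpu);
    }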

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
 xen/common/sched_credit.c  |  2 ++
 xen/common/sched_credit2.c |  7 +++++++
 xen/common/sched_null.c    |  3 +++
 xen/common/sched_rt.c      |  2 ++
 xen/common/schedule.c      | 18 ++++++++++++++++++
 xen/include/xen/sched-if.h |  6 ++++++
 6 files changed, 38 insertions(+)

diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index cb8e167fc9..fc068a1c5f 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -1040,6 +1040,7 @@ csched_item_insert(const struct scheduler *ops, struct sched_item *item)
     lock = vcpu_schedule_lock_irq(vc);
 
     vc->processor = csched_cpu_pick(ops, item);
+    item->res = per_cpu(sched_res, vc->processor);
 
     spin_unlock_irq(lock);
 
@@ -1675,6 +1676,7 @@ csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
             WARN_ON(vc->is_urgent);
             runq_remove(speer);
             vc->processor = cpu;
+            vc->sched_item->res = per_cpu(sched_res, cpu);
             /*
              * speer will start executing directly on cpu, without having to
              * go through runq_insert(). So we must update the runnable count
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 9c052c24a7..614d71d948 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -2519,6 +2519,7 @@ static void migrate(const struct scheduler *ops,
                     &trqd->active);
         svc->vcpu->processor = cpumask_cycle(trqd->pick_bias,
                                              cpumask_scratch_cpu(cpu));
+        svc->vcpu->sched_item->res = per_cpu(sched_res, svc->vcpu->processor);
         trqd->pick_bias = svc->vcpu->processor;
         ASSERT(svc->vcpu->processor < nr_cpu_ids);
 
@@ -2774,6 +2775,7 @@ csched2_item_migrate(
         }
         _runq_deassign(svc);
         vc->processor = new_cpu;
+        item->res = per_cpu(sched_res, new_cpu);
         return;
     }
 
@@ -2794,7 +2796,10 @@ csched2_item_migrate(
     if ( trqd != svc->rqd )
         migrate(ops, svc, trqd, now);
     else
+    {
         vc->processor = new_cpu;
+        item->res = per_cpu(sched_res, new_cpu);
+    }
 }
 
 static int
@@ -3119,6 +3124,7 @@ csched2_item_insert(const struct scheduler *ops, struct sched_item *item)
     lock = vcpu_schedule_lock_irq(vc);
 
     vc->processor = csched2_cpu_pick(ops, item);
+    item->res = per_cpu(sched_res, vc->processor);
 
     spin_unlock_irq(lock);
 
@@ -3596,6 +3602,7 @@ csched2_schedule(
         {
             snext->credit += CSCHED2_MIGRATE_COMPENSATION;
             snext->vcpu->processor = cpu;
+            snext->vcpu->sched_item->res = per_cpu(sched_res, cpu);
             SCHED_STAT_CRANK(migrated);
             ret.migrated = 1;
         }
diff --git a/xen/common/sched_null.c b/xen/common/sched_null.c
index eb51ddbccb..114b32e2e1 100644
--- a/xen/common/sched_null.c
+++ b/xen/common/sched_null.c
@@ -343,6 +343,7 @@ static void vcpu_assign(struct null_private *prv, struct vcpu *v,
 {
     per_cpu(npc, cpu).vcpu = v;
     v->processor = cpu;
+    v->sched_item->res = per_cpu(sched_res, cpu);
     cpumask_clear_cpu(cpu, &prv->cpus_free);
 
     dprintk(XENLOG_G_INFO, "%d <-- %pv\n", cpu, v);
@@ -429,6 +430,7 @@ static void null_item_insert(const struct scheduler *ops,
  retry:
 
     cpu = v->processor = pick_cpu(prv, v);
+    item->res = per_cpu(sched_res, cpu);
 
     spin_unlock(lock);
 
@@ -675,6 +677,7 @@ static void null_item_migrate(const struct scheduler *ops,
      * by this, will be fixed-up during resume.
      */
     v->processor = new_cpu;
+    item->res = per_cpu(sched_res, new_cpu);
 }
 
 #ifndef NDEBUG
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index c830aac92f..44b86fc08d 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -902,6 +902,7 @@ rt_item_insert(const struct scheduler *ops, struct sched_item *item)
 
     /* This is safe because vc isn't yet being scheduled */
     vc->processor = rt_cpu_pick(ops, item);
+    item->res = per_cpu(sched_res, vc->processor);
 
     lock = vcpu_schedule_lock_irq(vc);
 
@@ -1132,6 +1133,7 @@ rt_schedule(const struct scheduler *ops, s_time_t now, bool_t tasklet_work_sched
         if ( snext->vcpu->processor != cpu )
         {
             snext->vcpu->processor = cpu;
+            snext->vcpu->sched_item->res = per_cpu(sched_res, cpu);
             ret.migrated = 1;
         }
         ret.time = snext->cur_budget; /* invoke the scheduler next time */
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index e9d91d29cc..db297f6144 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -63,6 +63,7 @@ static void poll_timer_fn(void *data);
 /* This is global for now so that private implementations can reach it */
 DEFINE_PER_CPU(struct schedule_data, schedule_data);
 DEFINE_PER_CPU(struct scheduler *, scheduler);
+DEFINE_PER_CPU(struct sched_resource *, sched_res);
 
 /* Scratch space for cpumasks. */
 DEFINE_PER_CPU(cpumask_t, cpumask_scratch);
@@ -309,6 +310,7 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
     if ( (item = sched_alloc_item(v)) == NULL )
         return 1;
 
+    item->res = per_cpu(sched_res, processor);
     /* Initialise the per-vcpu timers. */
     init_timer(&v->periodic_timer, vcpu_periodic_timer_fn,
                v, v->processor);
@@ -423,6 +425,7 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
         sched_set_affinity(v, &cpumask_all, &cpumask_all);
 
         v->processor = new_p;
+        v->sched_item->res = per_cpu(sched_res, new_p);
         /*
          * With v->processor modified we must not
          * - make any further changes assuming we hold the scheduler lock,
@@ -613,7 +616,10 @@ static void vcpu_move_locked(struct vcpu *v, unsigned int new_cpu)
     if ( vcpu_scheduler(v)->migrate )
         SCHED_OP(vcpu_scheduler(v), migrate, v->sched_item, new_cpu);
     else
+    {
         v->processor = new_cpu;
+        v->sched_item->res = per_cpu(sched_res, new_cpu);
+    }
 }
 
 /*
@@ -794,9 +800,11 @@ void restore_vcpu_affinity(struct domain *d)
         }
 
         v->processor = cpumask_any(cpumask_scratch_cpu(cpu));
+        v->sched_item->res = per_cpu(sched_res, v->processor);
 
         lock = vcpu_schedule_lock_irq(v);
         v->processor = SCHED_OP(vcpu_scheduler(v), pick_cpu, v->sched_item);
+        v->sched_item->res = per_cpu(sched_res, v->processor);
         spin_unlock_irq(lock);
 
         if ( old_cpu != v->processor )
@@ -1635,6 +1643,13 @@ static int cpu_schedule_up(unsigned int cpu)
 {
     struct schedule_data *sd = &per_cpu(schedule_data, cpu);
     void *sched_priv;
+    struct sched_resource *res;
+
+    res = xmalloc(struct sched_resource);
+    if ( res == NULL )
+        return -ENOMEM;
+    res->processor = cpu;
+    per_cpu(sched_res, cpu) = res;
 
     per_cpu(scheduler, cpu) = &ops;
     spin_lock_init(&sd->_lock);
@@ -1699,6 +1714,9 @@ static void cpu_schedule_down(unsigned int cpu)
     sd->sched_priv = NULL;
 
     kill_timer(&sd->s_timer);
+
+    xfree(per_cpu(sched_res, cpu));
+    per_cpu(sched_res, cpu) = NULL;
 }
 
 static int cpu_schedule_callback(
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 4caade5b8b..43235951a3 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -44,14 +44,20 @@ struct schedule_data {
 
 #define curr_on_cpu(c)    (per_cpu(schedule_data, c).curr)
 
+struct sched_resource {
+    unsigned     processor;
+};
+
 DECLARE_PER_CPU(struct schedule_data, schedule_data);
 DECLARE_PER_CPU(struct scheduler *, scheduler);
 DECLARE_PER_CPU(struct cpupool *, cpupool);
+DECLARE_PER_CPU(struct sched_resource *, sched_res);
 
 struct sched_item {
     struct vcpu           *vcpu;
     void                  *priv;      /* scheduler private data */
     struct sched_item     *next_in_list;
+    struct sched_resource *res;
 };
 
 #define for_each_sched_item(d, e)                                         \
-- 
2.16.4

