
[Xen-devel] [PATCH v2 9/9] xen/sched: add const qualifier where appropriate



Make use of the const qualifier more often in scheduling code: pointers
(and pointed-to data) which are only ever read are marked const, letting
the compiler catch unintended modifications.

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
Reviewed-by: Dario Faggioli <dfaggioli@xxxxxxxx>
---
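Note for reviewers (not part of the patch): a minimal standalone sketch
of the pattern being applied throughout, using simplified stand-ins for
the Xen structs. Pointers that are only read through become
const-qualified, so the compiler rejects accidental writes, both for
parameters of dump/query-style helpers and for getter return types such
as cpupool_valid_cpus():

#include <stdio.h>

struct cpupool {
    int id;
    unsigned long cpu_valid;          /* stand-in for cpumask_t */
};

struct domain {
    int domain_id;
    const struct cpupool *cpupool;
};

/* Read-only getter: parameter and return type are const-qualified,
 * mirroring cpupool_valid_cpus() in this patch. */
static const unsigned long *pool_valid_cpus(const struct cpupool *pool)
{
    return &pool->cpu_valid;
}

/* A dump-style helper only inspects its argument, so it takes a const
 * pointer, like csched_dump_unit() after this patch. */
static void dump_domain(const struct domain *d)
{
    printf("domain %d in pool %d\n", d->domain_id,
           d->cpupool ? d->cpupool->id : -1);
    /* d->domain_id = 0;  <- would now fail to compile */
}

int main(void)
{
    struct cpupool pool = { .id = 0, .cpu_valid = 0xf };
    struct domain dom = { .domain_id = 1, .cpupool = &pool };

    dump_domain(&dom);
    printf("valid cpus: %#lx\n", *pool_valid_cpus(&pool));
    return 0;
}
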
 xen/common/sched/arinc653.c |  4 ++--
 xen/common/sched/core.c     | 25 +++++++++++-----------
 xen/common/sched/cpupool.c  |  2 +-
 xen/common/sched/credit.c   | 44 ++++++++++++++++++++------------------
 xen/common/sched/credit2.c  | 52 +++++++++++++++++++++++----------------------
 xen/common/sched/null.c     | 17 ++++++++-------
 xen/common/sched/rt.c       | 32 ++++++++++++++--------------
 xen/include/xen/sched.h     |  9 ++++----
 8 files changed, 96 insertions(+), 89 deletions(-)

diff --git a/xen/common/sched/arinc653.c b/xen/common/sched/arinc653.c
index bce8021e3f..5421918221 100644
--- a/xen/common/sched/arinc653.c
+++ b/xen/common/sched/arinc653.c
@@ -608,7 +608,7 @@ static struct sched_resource *
 a653sched_pick_resource(const struct scheduler *ops,
                         const struct sched_unit *unit)
 {
-    cpumask_t *online;
+    const cpumask_t *online;
     unsigned int cpu;
 
     /*
@@ -639,7 +639,7 @@ a653_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                   void *pdata, void *vdata)
 {
     struct sched_resource *sr = get_sched_res(cpu);
-    arinc653_unit_t *svc = vdata;
+    const arinc653_unit_t *svc = vdata;
 
     ASSERT(!pdata && svc && is_idle_unit(svc->unit));
 
diff --git a/xen/common/sched/core.c b/xen/common/sched/core.c
index d32b9b1baa..944164d78a 100644
--- a/xen/common/sched/core.c
+++ b/xen/common/sched/core.c
@@ -175,7 +175,7 @@ static inline struct scheduler *dom_scheduler(const struct domain *d)
 
 static inline struct scheduler *unit_scheduler(const struct sched_unit *unit)
 {
-    struct domain *d = unit->domain;
+    const struct domain *d = unit->domain;
 
     if ( likely(d->cpupool != NULL) )
         return d->cpupool->sched;
@@ -202,7 +202,7 @@ static inline struct scheduler *vcpu_scheduler(const struct vcpu *v)
 }
 #define VCPU2ONLINE(_v) cpupool_domain_master_cpumask((_v)->domain)
 
-static inline void trace_runstate_change(struct vcpu *v, int new_state)
+static inline void trace_runstate_change(const struct vcpu *v, int new_state)
 {
     struct { uint32_t vcpu:16, domain:16; } d;
     uint32_t event;
@@ -220,7 +220,7 @@ static inline void trace_runstate_change(struct vcpu *v, int new_state)
     __trace_var(event, 1/*tsc*/, sizeof(d), &d);
 }
 
-static inline void trace_continue_running(struct vcpu *v)
+static inline void trace_continue_running(const struct vcpu *v)
 {
     struct { uint32_t vcpu:16, domain:16; } d;
 
@@ -302,7 +302,8 @@ void sched_guest_idle(void (*idle) (void), unsigned int cpu)
     atomic_dec(&per_cpu(sched_urgent_count, cpu));
 }
 
-void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate)
+void vcpu_runstate_get(const struct vcpu *v,
+                       struct vcpu_runstate_info *runstate)
 {
     spinlock_t *lock;
     s_time_t delta;
@@ -324,7 +325,7 @@ void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate)
 uint64_t get_cpu_idle_time(unsigned int cpu)
 {
     struct vcpu_runstate_info state = { 0 };
-    struct vcpu *v = idle_vcpu[cpu];
+    const struct vcpu *v = idle_vcpu[cpu];
 
     if ( cpu_online(cpu) && v )
         vcpu_runstate_get(v, &state);
@@ -392,7 +393,7 @@ static void sched_free_unit_mem(struct sched_unit *unit)
 
 static void sched_free_unit(struct sched_unit *unit, struct vcpu *v)
 {
-    struct vcpu *vunit;
+    const struct vcpu *vunit;
     unsigned int cnt = 0;
 
     /* Don't count to be released vcpu, might be not in vcpu list yet. */
@@ -522,7 +523,7 @@ static unsigned int sched_select_initial_cpu(const struct vcpu *v)
 
 int sched_init_vcpu(struct vcpu *v)
 {
-    struct domain *d = v->domain;
+    const struct domain *d = v->domain;
     struct sched_unit *unit;
     unsigned int processor;
 
@@ -913,7 +914,7 @@ static void sched_unit_move_locked(struct sched_unit *unit,
                                    unsigned int new_cpu)
 {
     unsigned int old_cpu = unit->res->master_cpu;
-    struct vcpu *v;
+    const struct vcpu *v;
 
     rcu_read_lock(&sched_res_rculock);
 
@@ -1090,7 +1091,7 @@ static bool sched_check_affinity_broken(const struct sched_unit *unit)
     return false;
 }
 
-static void sched_reset_affinity_broken(struct sched_unit *unit)
+static void sched_reset_affinity_broken(const struct sched_unit *unit)
 {
     struct vcpu *v;
 
@@ -1176,7 +1177,7 @@ void restore_vcpu_affinity(struct domain *d)
 int cpu_disable_scheduler(unsigned int cpu)
 {
     struct domain *d;
-    struct cpupool *c;
+    const struct cpupool *c;
     cpumask_t online_affinity;
     int ret = 0;
 
@@ -1251,8 +1252,8 @@ out:
 static int cpu_disable_scheduler_check(unsigned int cpu)
 {
     struct domain *d;
-    struct vcpu *v;
-    struct cpupool *c;
+    const struct vcpu *v;
+    const struct cpupool *c;
 
     c = get_sched_res(cpu)->cpupool;
     if ( c == NULL )
diff --git a/xen/common/sched/cpupool.c b/xen/common/sched/cpupool.c
index c1396cfff4..28d5143e37 100644
--- a/xen/common/sched/cpupool.c
+++ b/xen/common/sched/cpupool.c
@@ -881,7 +881,7 @@ int cpupool_get_id(const struct domain *d)
     return d->cpupool ? d->cpupool->cpupool_id : CPUPOOLID_NONE;
 }
 
-cpumask_t *cpupool_valid_cpus(struct cpupool *pool)
+const cpumask_t *cpupool_valid_cpus(const struct cpupool *pool)
 {
     return pool->cpu_valid;
 }
diff --git a/xen/common/sched/credit.c b/xen/common/sched/credit.c
index a75efbd43d..cdda6fa09b 100644
--- a/xen/common/sched/credit.c
+++ b/xen/common/sched/credit.c
@@ -233,7 +233,7 @@ static void csched_tick(void *_cpu);
 static void csched_acct(void *dummy);
 
 static inline int
-__unit_on_runq(struct csched_unit *svc)
+__unit_on_runq(const struct csched_unit *svc)
 {
     return !list_empty(&svc->runq_elem);
 }
@@ -349,11 +349,11 @@ boolean_param("tickle_one_idle_cpu", opt_tickle_one_idle);
 
 DEFINE_PER_CPU(unsigned int, last_tickle_cpu);
 
-static inline void __runq_tickle(struct csched_unit *new)
+static inline void __runq_tickle(const struct csched_unit *new)
 {
     unsigned int cpu = sched_unit_master(new->unit);
-    struct sched_resource *sr = get_sched_res(cpu);
-    struct sched_unit *unit = new->unit;
+    const struct sched_resource *sr = get_sched_res(cpu);
+    const struct sched_unit *unit = new->unit;
     struct csched_unit * const cur = CSCHED_UNIT(curr_on_cpu(cpu));
     struct csched_private *prv = CSCHED_PRIV(sr->scheduler);
     cpumask_t mask, idle_mask, *online;
@@ -509,7 +509,7 @@ static inline void __runq_tickle(struct csched_unit *new)
 static void
 csched_free_pdata(const struct scheduler *ops, void *pcpu, int cpu)
 {
-    struct csched_private *prv = CSCHED_PRIV(ops);
+    const struct csched_private *prv = CSCHED_PRIV(ops);
 
     /*
      * pcpu either points to a valid struct csched_pcpu, or is NULL, if we're
@@ -652,7 +652,7 @@ csched_switch_sched(struct scheduler *new_ops, unsigned int cpu,
 
 #ifndef NDEBUG
 static inline void
-__csched_unit_check(struct sched_unit *unit)
+__csched_unit_check(const struct sched_unit *unit)
 {
     struct csched_unit * const svc = CSCHED_UNIT(unit);
     struct csched_dom * const sdom = svc->sdom;
@@ -700,8 +700,8 @@ __csched_vcpu_is_cache_hot(const struct csched_private *prv,
 
 static inline int
 __csched_unit_is_migrateable(const struct csched_private *prv,
-                             struct sched_unit *unit,
-                             int dest_cpu, cpumask_t *mask)
+                             const struct sched_unit *unit,
+                             int dest_cpu, const cpumask_t *mask)
 {
     const struct csched_unit *svc = CSCHED_UNIT(unit);
     /*
@@ -725,7 +725,7 @@ _csched_cpu_pick(const struct scheduler *ops, const struct sched_unit *unit,
     /* We must always use cpu's scratch space */
     cpumask_t *cpus = cpumask_scratch_cpu(cpu);
     cpumask_t idlers;
-    cpumask_t *online = cpupool_domain_master_cpumask(unit->domain);
+    const cpumask_t *online = cpupool_domain_master_cpumask(unit->domain);
     struct csched_pcpu *spc = NULL;
     int balance_step;
 
@@ -932,7 +932,7 @@ csched_unit_acct(struct csched_private *prv, unsigned int cpu)
 {
     struct sched_unit *currunit = current->sched_unit;
     struct csched_unit * const svc = CSCHED_UNIT(currunit);
-    struct sched_resource *sr = get_sched_res(cpu);
+    const struct sched_resource *sr = get_sched_res(cpu);
     const struct scheduler *ops = sr->scheduler;
 
     ASSERT( sched_unit_master(currunit) == cpu );
@@ -1084,7 +1084,7 @@ csched_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct csched_unit * const svc = CSCHED_UNIT(unit);
     unsigned int cpu = sched_unit_master(unit);
-    struct sched_resource *sr = get_sched_res(cpu);
+    const struct sched_resource *sr = get_sched_res(cpu);
 
     SCHED_STAT_CRANK(unit_sleep);
 
@@ -1577,7 +1577,7 @@ static void
 csched_tick(void *_cpu)
 {
     unsigned int cpu = (unsigned long)_cpu;
-    struct sched_resource *sr = get_sched_res(cpu);
+    const struct sched_resource *sr = get_sched_res(cpu);
     struct csched_pcpu *spc = CSCHED_PCPU(cpu);
     struct csched_private *prv = CSCHED_PRIV(sr->scheduler);
 
@@ -1604,7 +1604,7 @@ csched_tick(void *_cpu)
 static struct csched_unit *
 csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
 {
-    struct sched_resource *sr = get_sched_res(cpu);
+    const struct sched_resource *sr = get_sched_res(cpu);
     const struct csched_private * const prv = CSCHED_PRIV(sr->scheduler);
     const struct csched_pcpu * const peer_pcpu = CSCHED_PCPU(peer_cpu);
     struct csched_unit *speer;
@@ -1681,10 +1681,10 @@ static struct csched_unit *
 csched_load_balance(struct csched_private *prv, int cpu,
     struct csched_unit *snext, bool *stolen)
 {
-    struct cpupool *c = get_sched_res(cpu)->cpupool;
+    const struct cpupool *c = get_sched_res(cpu)->cpupool;
     struct csched_unit *speer;
     cpumask_t workers;
-    cpumask_t *online = c->res_valid;
+    const cpumask_t *online = c->res_valid;
     int peer_cpu, first_cpu, peer_node, bstep;
     int node = cpu_to_node(cpu);
 
@@ -2008,7 +2008,7 @@ out:
 }
 
 static void
-csched_dump_unit(struct csched_unit *svc)
+csched_dump_unit(const struct csched_unit *svc)
 {
     struct csched_dom * const sdom = svc->sdom;
 
@@ -2041,10 +2041,11 @@ csched_dump_unit(struct csched_unit *svc)
 static void
 csched_dump_pcpu(const struct scheduler *ops, int cpu)
 {
-    struct list_head *runq, *iter;
+    const struct list_head *runq;
+    struct list_head *iter;
     struct csched_private *prv = CSCHED_PRIV(ops);
-    struct csched_pcpu *spc;
-    struct csched_unit *svc;
+    const struct csched_pcpu *spc;
+    const struct csched_unit *svc;
     spinlock_t *lock;
     unsigned long flags;
     int loop;
@@ -2132,12 +2133,13 @@ csched_dump(const struct scheduler *ops)
     loop = 0;
     list_for_each( iter_sdom, &prv->active_sdom )
     {
-        struct csched_dom *sdom;
+        const struct csched_dom *sdom;
+
         sdom = list_entry(iter_sdom, struct csched_dom, active_sdom_elem);
 
         list_for_each( iter_svc, &sdom->active_unit )
         {
-            struct csched_unit *svc;
+            const struct csched_unit *svc;
             spinlock_t *lock;
 
             svc = list_entry(iter_svc, struct csched_unit, active_unit_elem);
diff --git a/xen/common/sched/credit2.c b/xen/common/sched/credit2.c
index 849d254e04..256c1c01fc 100644
--- a/xen/common/sched/credit2.c
+++ b/xen/common/sched/credit2.c
@@ -692,7 +692,7 @@ void smt_idle_mask_clear(unsigned int cpu, cpumask_t *mask)
  */
 static int get_fallback_cpu(struct csched2_unit *svc)
 {
-    struct sched_unit *unit = svc->unit;
+    const struct sched_unit *unit = svc->unit;
     unsigned int bs;
 
     SCHED_STAT_CRANK(need_fallback_cpu);
@@ -774,7 +774,7 @@ static int get_fallback_cpu(struct csched2_unit *svc)
  *
  * FIXME: Do pre-calculated division?
  */
-static void t2c_update(struct csched2_runqueue_data *rqd, s_time_t time,
+static void t2c_update(const struct csched2_runqueue_data *rqd, s_time_t time,
                           struct csched2_unit *svc)
 {
     uint64_t val = time * rqd->max_weight + svc->residual;
@@ -783,7 +783,8 @@ static void t2c_update(struct csched2_runqueue_data *rqd, s_time_t time,
     svc->credit -= val;
 }
 
-static s_time_t c2t(struct csched2_runqueue_data *rqd, s_time_t credit, struct csched2_unit *svc)
+static s_time_t c2t(const struct csched2_runqueue_data *rqd, s_time_t credit,
+                    const struct csched2_unit *svc)
 {
     return credit * svc->weight / rqd->max_weight;
 }
@@ -792,7 +793,7 @@ static s_time_t c2t(struct csched2_runqueue_data *rqd, s_time_t credit, struct c
  * Runqueue related code.
  */
 
-static inline int unit_on_runq(struct csched2_unit *svc)
+static inline int unit_on_runq(const struct csched2_unit *svc)
 {
     return !list_empty(&svc->runq_elem);
 }
@@ -849,9 +850,9 @@ static inline bool same_core(unsigned int cpua, unsigned int cpub)
 }
 
 static unsigned int
-cpu_to_runqueue(struct csched2_private *prv, unsigned int cpu)
+cpu_to_runqueue(const struct csched2_private *prv, unsigned int cpu)
 {
-    struct csched2_runqueue_data *rqd;
+    const struct csched2_runqueue_data *rqd;
     unsigned int rqi;
 
     for ( rqi = 0; rqi < nr_cpu_ids; rqi++ )
@@ -917,7 +918,7 @@ static void update_max_weight(struct csched2_runqueue_data *rqd, int new_weight,
 
         list_for_each( iter, &rqd->svc )
         {
-            struct csched2_unit * svc = list_entry(iter, struct csched2_unit, rqd_elem);
+            const struct csched2_unit * svc = list_entry(iter, struct csched2_unit, rqd_elem);
 
             if ( svc->weight > max_weight )
                 max_weight = svc->weight;
@@ -970,7 +971,7 @@ _runq_assign(struct csched2_unit *svc, struct csched2_runqueue_data *rqd)
 }
 
 static void
-runq_assign(const struct scheduler *ops, struct sched_unit *unit)
+runq_assign(const struct scheduler *ops, const struct sched_unit *unit)
 {
     struct csched2_unit *svc = unit->priv;
 
@@ -997,7 +998,7 @@ _runq_deassign(struct csched2_unit *svc)
 }
 
 static void
-runq_deassign(const struct scheduler *ops, struct sched_unit *unit)
+runq_deassign(const struct scheduler *ops, const struct sched_unit *unit)
 {
     struct csched2_unit *svc = unit->priv;
 
@@ -1203,7 +1204,7 @@ static void
 update_svc_load(const struct scheduler *ops,
                 struct csched2_unit *svc, int change, s_time_t now)
 {
-    struct csched2_private *prv = csched2_priv(ops);
+    const struct csched2_private *prv = csched2_priv(ops);
     s_time_t delta, unit_load;
     unsigned int P, W;
 
@@ -1362,11 +1363,11 @@ static inline bool is_preemptable(const struct csched2_unit *svc,
  * Within the same class, the highest difference of credit.
  */
 static s_time_t tickle_score(const struct scheduler *ops, s_time_t now,
-                             struct csched2_unit *new, unsigned int cpu)
+                             const struct csched2_unit *new, unsigned int cpu)
 {
     struct csched2_runqueue_data *rqd = c2rqd(ops, cpu);
     struct csched2_unit * cur = csched2_unit(curr_on_cpu(cpu));
-    struct csched2_private *prv = csched2_priv(ops);
+    const struct csched2_private *prv = csched2_priv(ops);
     s_time_t score;
 
     /*
@@ -1441,7 +1442,7 @@ runq_tickle(const struct scheduler *ops, struct csched2_unit *new, s_time_t now)
     struct sched_unit *unit = new->unit;
     unsigned int bs, cpu = sched_unit_master(unit);
     struct csched2_runqueue_data *rqd = c2rqd(ops, cpu);
-    cpumask_t *online = cpupool_domain_master_cpumask(unit->domain);
+    const cpumask_t *online = cpupool_domain_master_cpumask(unit->domain);
     cpumask_t mask;
 
     ASSERT(new->rqd == rqd);
@@ -2005,7 +2006,7 @@ static void replenish_domain_budget(void* data)
 
 #ifndef NDEBUG
 static inline void
-csched2_unit_check(struct sched_unit *unit)
+csched2_unit_check(const struct sched_unit *unit)
 {
     struct csched2_unit * const svc = csched2_unit(unit);
     struct csched2_dom * const sdom = svc->sdom;
@@ -2541,8 +2542,8 @@ static void migrate(const struct scheduler *ops,
  *  - svc is not already flagged to migrate,
  *  - if svc is allowed to run on at least one of the pcpus of rqd.
  */
-static bool unit_is_migrateable(struct csched2_unit *svc,
-                                  struct csched2_runqueue_data *rqd)
+static bool unit_is_migrateable(const struct csched2_unit *svc,
+                                const struct csched2_runqueue_data *rqd)
 {
     struct sched_unit *unit = svc->unit;
     int cpu = sched_unit_master(unit);
@@ -3076,7 +3077,7 @@ csched2_free_domdata(const struct scheduler *ops, void *data)
 static void
 csched2_unit_insert(const struct scheduler *ops, struct sched_unit *unit)
 {
-    struct csched2_unit *svc = unit->priv;
+    const struct csched2_unit *svc = unit->priv;
     struct csched2_dom * const sdom = svc->sdom;
     spinlock_t *lock;
 
@@ -3142,7 +3143,7 @@ csched2_runtime(const struct scheduler *ops, int cpu,
     int rt_credit; /* Proposed runtime measured in credits */
     struct csched2_runqueue_data *rqd = c2rqd(ops, cpu);
     struct list_head *runq = &rqd->runq;
-    struct csched2_private *prv = csched2_priv(ops);
+    const struct csched2_private *prv = csched2_priv(ops);
 
     /*
      * If we're idle, just stay so. Others (or external events)
@@ -3239,7 +3240,7 @@ runq_candidate(struct csched2_runqueue_data *rqd,
                unsigned int *skipped)
 {
     struct list_head *iter, *temp;
-    struct sched_resource *sr = get_sched_res(cpu);
+    const struct sched_resource *sr = get_sched_res(cpu);
     struct csched2_unit *snext = NULL;
     struct csched2_private *prv = csched2_priv(sr->scheduler);
     bool yield = false, soft_aff_preempt = false;
@@ -3603,7 +3604,8 @@ static void csched2_schedule(
 }
 
 static void
-csched2_dump_unit(struct csched2_private *prv, struct csched2_unit *svc)
+csched2_dump_unit(const struct csched2_private *prv,
+                  const struct csched2_unit *svc)
 {
     printk("[%i.%i] flags=%x cpu=%i",
             svc->unit->domain->domain_id,
@@ -3626,8 +3628,8 @@ csched2_dump_unit(struct csched2_private *prv, struct csched2_unit *svc)
 static inline void
 dump_pcpu(const struct scheduler *ops, int cpu)
 {
-    struct csched2_private *prv = csched2_priv(ops);
-    struct csched2_unit *svc;
+    const struct csched2_private *prv = csched2_priv(ops);
+    const struct csched2_unit *svc;
 
     printk("CPU[%02d] runq=%d, sibling={%*pbl}, core={%*pbl}\n",
            cpu, c2r(cpu),
@@ -3695,8 +3697,8 @@ csched2_dump(const struct scheduler *ops)
     loop = 0;
     list_for_each( iter_sdom, &prv->sdom )
     {
-        struct csched2_dom *sdom;
-        struct sched_unit *unit;
+        const struct csched2_dom *sdom;
+        const struct sched_unit *unit;
 
         sdom = list_entry(iter_sdom, struct csched2_dom, sdom_elem);
 
@@ -3737,7 +3739,7 @@ csched2_dump(const struct scheduler *ops)
         printk("RUNQ:\n");
         list_for_each( iter, runq )
         {
-            struct csched2_unit *svc = runq_elem(iter);
+            const struct csched2_unit *svc = runq_elem(iter);
 
             if ( svc )
             {
diff --git a/xen/common/sched/null.c b/xen/common/sched/null.c
index 3161ac2e62..8c3101649d 100644
--- a/xen/common/sched/null.c
+++ b/xen/common/sched/null.c
@@ -278,12 +278,12 @@ static void null_free_domdata(const struct scheduler *ops, void *data)
  * So this is not part of any hot path.
  */
 static struct sched_resource *
-pick_res(struct null_private *prv, const struct sched_unit *unit)
+pick_res(const struct null_private *prv, const struct sched_unit *unit)
 {
     unsigned int bs;
     unsigned int cpu = sched_unit_master(unit), new_cpu;
-    cpumask_t *cpus = cpupool_domain_master_cpumask(unit->domain);
-    struct null_pcpu *npc = get_sched_res(cpu)->sched_priv;
+    const cpumask_t *cpus = cpupool_domain_master_cpumask(unit->domain);
+    const struct null_pcpu *npc = get_sched_res(cpu)->sched_priv;
 
     ASSERT(spin_is_locked(get_sched_res(cpu)->schedule_lock));
 
@@ -375,7 +375,7 @@ static void unit_assign(struct null_private *prv, struct sched_unit *unit,
 }
 
 /* Returns true if a cpu was tickled */
-static bool unit_deassign(struct null_private *prv, struct sched_unit *unit)
+static bool unit_deassign(struct null_private *prv, const struct sched_unit *unit)
 {
     unsigned int bs;
     unsigned int cpu = sched_unit_master(unit);
@@ -441,7 +441,7 @@ static spinlock_t *null_switch_sched(struct scheduler *new_ops,
 {
     struct sched_resource *sr = get_sched_res(cpu);
     struct null_private *prv = null_priv(new_ops);
-    struct null_unit *nvc = vdata;
+    const struct null_unit *nvc = vdata;
 
     ASSERT(nvc && is_idle_unit(nvc->unit));
 
@@ -940,7 +940,8 @@ static void null_schedule(const struct scheduler *ops, struct sched_unit *prev,
     prev->next_task->migrated = false;
 }
 
-static inline void dump_unit(struct null_private *prv, struct null_unit *nvc)
+static inline void dump_unit(const struct null_private *prv,
+                             const struct null_unit *nvc)
 {
     printk("[%i.%i] pcpu=%d", nvc->unit->domain->domain_id,
             nvc->unit->unit_id, list_empty(&nvc->waitq_elem) ?
@@ -950,8 +951,8 @@ static inline void dump_unit(struct null_private *prv, struct null_unit *nvc)
 static void null_dump_pcpu(const struct scheduler *ops, int cpu)
 {
     struct null_private *prv = null_priv(ops);
-    struct null_pcpu *npc = get_sched_res(cpu)->sched_priv;
-    struct null_unit *nvc;
+    const struct null_pcpu *npc = get_sched_res(cpu)->sched_priv;
+    const struct null_unit *nvc;
     spinlock_t *lock;
     unsigned long flags;
 
diff --git a/xen/common/sched/rt.c b/xen/common/sched/rt.c
index 6dc4b6a6e5..f90a605643 100644
--- a/xen/common/sched/rt.c
+++ b/xen/common/sched/rt.c
@@ -352,7 +352,7 @@ static void
 rt_dump_pcpu(const struct scheduler *ops, int cpu)
 {
     struct rt_private *prv = rt_priv(ops);
-    struct rt_unit *svc;
+    const struct rt_unit *svc;
     unsigned long flags;
 
     spin_lock_irqsave(&prv->lock, flags);
@@ -371,8 +371,8 @@ rt_dump(const struct scheduler *ops)
 {
     struct list_head *runq, *depletedq, *replq, *iter;
     struct rt_private *prv = rt_priv(ops);
-    struct rt_unit *svc;
-    struct rt_dom *sdom;
+    const struct rt_unit *svc;
+    const struct rt_dom *sdom;
     unsigned long flags;
 
     spin_lock_irqsave(&prv->lock, flags);
@@ -408,7 +408,7 @@ rt_dump(const struct scheduler *ops)
     printk("Domain info:\n");
     list_for_each ( iter, &prv->sdom )
     {
-        struct sched_unit *unit;
+        const struct sched_unit *unit;
 
         sdom = list_entry(iter, struct rt_dom, sdom_elem);
         printk("\tdomain: %d\n", sdom->dom->domain_id);
@@ -509,7 +509,7 @@ deadline_queue_insert(struct rt_unit * (*qelem)(struct list_head *),
 
     list_for_each ( iter, queue )
     {
-        struct rt_unit * iter_svc = (*qelem)(iter);
+        const struct rt_unit * iter_svc = (*qelem)(iter);
         if ( compare_unit_priority(svc, iter_svc) > 0 )
             break;
         first = false;
@@ -547,7 +547,7 @@ replq_remove(const struct scheduler *ops, struct rt_unit *svc)
          */
         if ( !list_empty(replq) )
         {
-            struct rt_unit *svc_next = replq_elem(replq->next);
+            const struct rt_unit *svc_next = replq_elem(replq->next);
             set_timer(&prv->repl_timer, svc_next->cur_deadline);
         }
         else
@@ -604,7 +604,7 @@ static void
 replq_reinsert(const struct scheduler *ops, struct rt_unit *svc)
 {
     struct list_head *replq = rt_replq(ops);
-    struct rt_unit *rearm_svc = svc;
+    const struct rt_unit *rearm_svc = svc;
     bool rearm = false;
 
     ASSERT( unit_on_replq(svc) );
@@ -640,7 +640,7 @@ static struct sched_resource *
 rt_res_pick_locked(const struct sched_unit *unit, unsigned int locked_cpu)
 {
     cpumask_t *cpus = cpumask_scratch_cpu(locked_cpu);
-    cpumask_t *online;
+    const cpumask_t *online;
     int cpu;
 
     online = cpupool_domain_master_cpumask(unit->domain);
@@ -1028,7 +1028,7 @@ runq_pick(const struct scheduler *ops, const cpumask_t *mask, unsigned int cpu)
     struct rt_unit *svc = NULL;
     struct rt_unit *iter_svc = NULL;
     cpumask_t *cpu_common = cpumask_scratch_cpu(cpu);
-    cpumask_t *online;
+    const cpumask_t *online;
 
     list_for_each ( iter, runq )
     {
@@ -1197,15 +1197,15 @@ rt_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
  * lock is grabbed before calling this function
  */
 static void
-runq_tickle(const struct scheduler *ops, struct rt_unit *new)
+runq_tickle(const struct scheduler *ops, const struct rt_unit *new)
 {
     struct rt_private *prv = rt_priv(ops);
-    struct rt_unit *latest_deadline_unit = NULL; /* lowest priority */
-    struct rt_unit *iter_svc;
-    struct sched_unit *iter_unit;
+    const struct rt_unit *latest_deadline_unit = NULL; /* lowest priority */
+    const struct rt_unit *iter_svc;
+    const struct sched_unit *iter_unit;
     int cpu = 0, cpu_to_tickle = 0;
     cpumask_t *not_tickled = cpumask_scratch_cpu(smp_processor_id());
-    cpumask_t *online;
+    const cpumask_t *online;
 
     if ( new == NULL || is_idle_unit(new->unit) )
         return;
@@ -1379,7 +1379,7 @@ rt_dom_cntl(
 {
     struct rt_private *prv = rt_priv(ops);
     struct rt_unit *svc;
-    struct sched_unit *unit;
+    const struct sched_unit *unit;
     unsigned long flags;
     int rc = 0;
     struct xen_domctl_schedparam_vcpu local_sched;
@@ -1484,7 +1484,7 @@ rt_dom_cntl(
  */
 static void repl_timer_handler(void *data){
     s_time_t now;
-    struct scheduler *ops = data;
+    const struct scheduler *ops = data;
     struct rt_private *prv = rt_priv(ops);
     struct list_head *replq = rt_replq(ops);
     struct list_head *runq = rt_runq(ops);
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index cf7aa39844..7c5c437247 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -773,7 +773,7 @@ static inline void hypercall_cancel_continuation(struct vcpu *v)
 extern struct domain *domain_list;
 
 /* Caller must hold the domlist_read_lock or domlist_update_lock. */
-static inline struct domain *first_domain_in_cpupool( struct cpupool *c)
+static inline struct domain *first_domain_in_cpupool(const struct cpupool *c)
 {
     struct domain *d;
     for (d = rcu_dereference(domain_list); d && d->cpupool != c;
@@ -781,7 +781,7 @@ static inline struct domain *first_domain_in_cpupool( struct cpupool *c)
     return d;
 }
 static inline struct domain *next_domain_in_cpupool(
-    struct domain *d, struct cpupool *c)
+    struct domain *d, const struct cpupool *c)
 {
     for (d = rcu_dereference(d->next_in_list); d && d->cpupool != c;
          d = rcu_dereference(d->next_in_list));
@@ -925,7 +925,8 @@ void restore_vcpu_affinity(struct domain *d);
 int vcpu_affinity_domctl(struct domain *d, uint32_t cmd,
                          struct xen_domctl_vcpuaffinity *vcpuaff);
 
-void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);
+void vcpu_runstate_get(const struct vcpu *v,
+                       struct vcpu_runstate_info *runstate);
 uint64_t get_cpu_idle_time(unsigned int cpu);
 void sched_guest_idle(void (*idle) (void), unsigned int cpu);
 void scheduler_enable(void);
@@ -1056,7 +1057,7 @@ extern enum cpufreq_controller {
 int cpupool_move_domain(struct domain *d, struct cpupool *c);
 int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op);
 int cpupool_get_id(const struct domain *d);
-cpumask_t *cpupool_valid_cpus(struct cpupool *pool);
+const cpumask_t *cpupool_valid_cpus(const struct cpupool *pool);
 extern void dump_runq(unsigned char key);
 
 void arch_do_physinfo(struct xen_sysctl_physinfo *pi);
-- 
2.16.4

