# HG changeset patch
# User Juergen Gross
# Date 1361967550 -3600
# Node ID 550a413da1ac4333d4ecd8033d92893b96d5d853
# Parent  1d8c65aee03eaf15ce8ee50deb781b4308302b77
Rename credit2 names to csched2_*

Functions, variables, structures and macros in the credit2 scheduler
partially share their names with the credit scheduler, which makes it
hard to find the correct symbols in backtraces or with cscope.

Rename all csched_*/CSCHED_* identifiers in credit2 to
csched2_*/CSCHED2_*.

Signed-off-by: Juergen Gross

diff -r 1d8c65aee03e -r 550a413da1ac xen/common/sched_credit2.c
--- a/xen/common/sched_credit2.c	Tue Feb 26 10:12:46 2013 +0000
+++ b/xen/common/sched_credit2.c	Wed Feb 27 13:19:10 2013 +0100
@@ -3,7 +3,7 @@
  * (C) 2009 - George Dunlap - Citrix Systems R&D UK, Ltd
  ****************************************************************************
  *
- * File: common/csched_credit2.c
+ * File: common/sched_credit2.c
  * Author: George Dunlap
  *
  * Description: Credit-based SMP CPU scheduler
@@ -108,29 +108,29 @@
  * Basic constants
  */
 /* Default weight: How much a new domain starts with */
-#define CSCHED_DEFAULT_WEIGHT 256
+#define CSCHED2_DEFAULT_WEIGHT 256
 /* Min timer: Minimum length a timer will be set, to
  * achieve efficiency */
-#define CSCHED_MIN_TIMER MICROSECS(500)
+#define CSCHED2_MIN_TIMER MICROSECS(500)
 /* Amount of credit VMs begin with, and are reset to.
  * ATM, set so that highest-weight VMs can only run for 10ms
  * before a reset event. */
-#define CSCHED_CREDIT_INIT MILLISECS(10)
+#define CSCHED2_CREDIT_INIT MILLISECS(10)
 /* Carryover: How much "extra" credit may be carried over after
  * a reset. */
-#define CSCHED_CARRYOVER_MAX CSCHED_MIN_TIMER
+#define CSCHED2_CARRYOVER_MAX CSCHED2_MIN_TIMER
 /* Stickiness: Cross-L2 migration resistance.  Should be less than
  * MIN_TIMER. */
-#define CSCHED_MIGRATE_RESIST ((opt_migrate_resist)*MICROSECS(1))
+#define CSCHED2_MIGRATE_RESIST ((opt_migrate_resist)*MICROSECS(1))
 /* How much to "compensate" a vcpu for L2 migration */
-#define CSCHED_MIGRATE_COMPENSATION MICROSECS(50)
+#define CSCHED2_MIGRATE_COMPENSATION MICROSECS(50)
 /* Reset: Value below which credit will be reset. */
-#define CSCHED_CREDIT_RESET 0
+#define CSCHED2_CREDIT_RESET 0
 /* Max timer: Maximum time a guest can be run for. */
-#define CSCHED_MAX_TIMER MILLISECS(2)
+#define CSCHED2_MAX_TIMER MILLISECS(2)
 
-#define CSCHED_IDLE_CREDIT (-(1<<30))
+#define CSCHED2_IDLE_CREDIT (-(1<<30))
 
 /*
  * Flags
@@ -138,8 +138,8 @@
 /* CSFLAG_scheduled: Is this vcpu either running on, or context-switching off,
  * a physical cpu?
  * + Accessed only with runqueue lock held
- * + Set when chosen as next in csched_schedule().
- * + Cleared after context switch has been saved in csched_context_saved()
+ * + Set when chosen as next in csched2_schedule().
+ * + Cleared after context switch has been saved in csched2_context_saved()
 * + Checked in vcpu_wake to see if we can add to the runqueue, or if we should
  *   set CSFLAG_delayed_runq_add
  * + Checked to be false in runq_insert.
@@ -148,9 +148,9 @@
 #define CSFLAG_scheduled (1<<__CSFLAG_scheduled)
 /* CSFLAG_delayed_runq_add: Do we need to add this to the runqueue once it's done
  * being context switched out?
- * + Set when scheduling out in csched_schedule() if prev is runnable
- * + Set in csched_vcpu_wake if it finds CSFLAG_scheduled set
- * + Read in csched_context_saved().  If set, it adds prev to the runqueue and
+ * + Set when scheduling out in csched2_schedule() if prev is runnable
+ * + Set in csched2_vcpu_wake if it finds CSFLAG_scheduled set
+ * + Read in csched2_context_saved().  If set, it adds prev to the runqueue and
  *   clears the bit.
  */
 #define __CSFLAG_delayed_runq_add 2
@@ -169,14 +169,14 @@ integer_param("sched_credit2_migrate_res
 /*
  * Useful macros
  */
-#define CSCHED_PRIV(_ops) \
-    ((struct csched_private *)((_ops)->sched_data))
-#define CSCHED_VCPU(_vcpu) ((struct csched_vcpu *) (_vcpu)->sched_priv)
-#define CSCHED_DOM(_dom) ((struct csched_dom *) (_dom)->sched_priv)
+#define CSCHED2_PRIV(_ops) \
+    ((struct csched2_private *)((_ops)->sched_data))
+#define CSCHED2_VCPU(_vcpu) ((struct csched2_vcpu *) (_vcpu)->sched_priv)
+#define CSCHED2_DOM(_dom) ((struct csched2_dom *) (_dom)->sched_priv)
 /* CPU to runq_id macro */
-#define c2r(_ops, _cpu) (CSCHED_PRIV(_ops)->runq_map[(_cpu)])
+#define c2r(_ops, _cpu) (CSCHED2_PRIV(_ops)->runq_map[(_cpu)])
 /* CPU to runqueue struct macro */
-#define RQD(_ops, _cpu) (&CSCHED_PRIV(_ops)->rqd[c2r(_ops, _cpu)])
+#define RQD(_ops, _cpu) (&CSCHED2_PRIV(_ops)->rqd[c2r(_ops, _cpu)])
 
 /*
  * Shifts for load average.
@@ -197,7 +197,7 @@ integer_param("credit2_balance_over", op
 /*
  * Per-runqueue data
  */
-struct csched_runqueue_data {
+struct csched2_runqueue_data {
     int id;
 
     spinlock_t lock;      /* Lock for this runqueue. */
@@ -218,7 +218,7 @@ struct csched_runqueue_data {
 /*
  * System-wide private data
  */
-struct csched_private {
+struct csched2_private {
     spinlock_t lock;
     cpumask_t initialized; /* CPU is initialized for this pool */
 
@@ -226,7 +226,7 @@ struct csched_private {
     int runq_map[NR_CPUS];
     cpumask_t active_queues; /* Queues which may have active cpus */
-    struct csched_runqueue_data rqd[NR_CPUS];
+    struct csched2_runqueue_data rqd[NR_CPUS];
 
     int load_window_shift;
 };
@@ -234,14 +234,14 @@ struct csched_private {
 /*
  * Virtual CPU
  */
-struct csched_vcpu {
+struct csched2_vcpu {
     struct list_head rqd_elem;  /* On the runqueue data list */
     struct list_head sdom_elem; /* On the domain vcpu list */
     struct list_head runq_elem; /* On the runqueue */
-    struct csched_runqueue_data *rqd; /* Up-pointer to the runqueue */
+    struct csched2_runqueue_data *rqd; /* Up-pointer to the runqueue */
 
     /* Up-pointers */
-    struct csched_dom *sdom;
+    struct csched2_dom *sdom;
     struct vcpu *vcpu;
 
     int weight;
@@ -254,13 +254,13 @@ struct csched_vcpu {
     s_time_t load_last_update;  /* Last time average was updated */
     s_time_t avgload;           /* Decaying queue load */
 
-    struct csched_runqueue_data *migrate_rqd; /* Pre-determined rqd to which to migrate */
+    struct csched2_runqueue_data *migrate_rqd; /* Pre-determined rqd to which to migrate */
 };
 
 /*
  * Domain
  */
-struct csched_dom {
+struct csched2_dom {
     struct list_head vcpu;
     struct list_head sdom_elem;
     struct domain *dom;
@@ -273,12 +273,12 @@
  * Time-to-credit, credit-to-time.
 * FIXME: Do pre-calculated division?
 */
-static s_time_t t2c(struct csched_runqueue_data *rqd, s_time_t time, struct csched_vcpu *svc)
+static s_time_t t2c(struct csched2_runqueue_data *rqd, s_time_t time, struct csched2_vcpu *svc)
 {
     return time * rqd->max_weight / svc->weight;
 }
 
-static s_time_t c2t(struct csched_runqueue_data *rqd, s_time_t credit, struct csched_vcpu *svc)
+static s_time_t c2t(struct csched2_runqueue_data *rqd, s_time_t credit, struct csched2_vcpu *svc)
 {
     return credit * svc->weight / rqd->max_weight;
 }
@@ -288,22 +288,22 @@ static s_time_t c2t(struct csched_runque
  */
 static /*inline*/ int
-__vcpu_on_runq(struct csched_vcpu *svc)
+__vcpu_on_runq(struct csched2_vcpu *svc)
 {
     return !list_empty(&svc->runq_elem);
 }
 
-static /*inline*/ struct csched_vcpu *
+static /*inline*/ struct csched2_vcpu *
 __runq_elem(struct list_head *elem)
 {
-    return list_entry(elem, struct csched_vcpu, runq_elem);
+    return list_entry(elem, struct csched2_vcpu, runq_elem);
 }
 
 static void
 __update_runq_load(const struct scheduler *ops,
-                   struct csched_runqueue_data *rqd, int change, s_time_t now)
+                   struct csched2_runqueue_data *rqd, int change, s_time_t now)
 {
-    struct csched_private *prv = CSCHED_PRIV(ops);
+    struct csched2_private *prv = CSCHED2_PRIV(ops);
     s_time_t delta=-1;
 
     now >>= LOADAVG_GRANULARITY_SHIFT;
@@ -345,9 +345,9 @@ __update_runq_load(const struct schedule
 
 static void
 __update_svc_load(const struct scheduler *ops,
-                  struct csched_vcpu *svc, int change, s_time_t now)
+                  struct csched2_vcpu *svc, int change, s_time_t now)
 {
-    struct csched_private *prv = CSCHED_PRIV(ops);
+    struct csched2_private *prv = CSCHED2_PRIV(ops);
     s_time_t delta=-1;
     int vcpu_load;
@@ -390,8 +390,8 @@ __update_svc_load(const struct scheduler
 
 static void
 update_load(const struct scheduler *ops,
-            struct csched_runqueue_data *rqd,
-            struct csched_vcpu *svc, int change, s_time_t now)
+            struct csched2_runqueue_data *rqd,
+            struct csched2_vcpu *svc, int change, s_time_t now)
 {
     __update_runq_load(ops, rqd, change, now);
     if ( svc )
@@ -399,7 +399,7 @@ update_load(const struct scheduler *ops,
 }
 
 static int
-__runq_insert(struct list_head *runq, struct csched_vcpu *svc)
+__runq_insert(struct list_head *runq, struct csched2_vcpu *svc)
 {
     struct list_head *iter;
     int pos = 0;
@@ -416,7 +416,7 @@ __runq_insert(struct list_head *runq, st
 
     list_for_each( iter, runq )
     {
-        struct csched_vcpu * iter_svc = __runq_elem(iter);
+        struct csched2_vcpu * iter_svc = __runq_elem(iter);
 
         if ( svc->credit > iter_svc->credit )
         {
@@ -435,7 +435,7 @@ __runq_insert(struct list_head *runq, st
 }
 
 static void
-runq_insert(const struct scheduler *ops, unsigned int cpu, struct csched_vcpu *svc)
+runq_insert(const struct scheduler *ops, unsigned int cpu, struct csched2_vcpu *svc)
 {
     struct list_head * runq = &RQD(ops, cpu)->runq;
     int pos = 0;
@@ -464,24 +464,24 @@ runq_insert(const struct scheduler *ops,
 }
 
 static inline void
-__runq_remove(struct csched_vcpu *svc)
+__runq_remove(struct csched2_vcpu *svc)
 {
     BUG_ON( !__vcpu_on_runq(svc) );
     list_del_init(&svc->runq_elem);
 }
 
-void burn_credits(struct csched_runqueue_data *rqd, struct csched_vcpu *, s_time_t);
+void burn_credits(struct csched2_runqueue_data *rqd, struct csched2_vcpu *, s_time_t);
 
 /* Check to see if the item on the runqueue is higher priority than what's
  * currently running; if so, wake up the processor */
 static /*inline*/ void
-runq_tickle(const struct scheduler *ops, unsigned int cpu, struct csched_vcpu *new, s_time_t now)
+runq_tickle(const struct scheduler *ops, unsigned int cpu, struct csched2_vcpu *new, s_time_t now)
 {
     int i, ipid=-1;
     s_time_t lowest=(1<<30);
-    struct csched_runqueue_data *rqd = RQD(ops, cpu);
+    struct csched2_runqueue_data *rqd = RQD(ops, cpu);
     cpumask_t mask;
-    struct csched_vcpu * cur;
+    struct csched2_vcpu * cur;
 
     d2printk("rqt d%dv%d cd%dv%d\n",
              new->vcpu->domain->domain_id,
@@ -493,7 +493,7 @@ runq_tickle(const struct scheduler *ops,
     BUG_ON(new->rqd != rqd);
 
     /* Look at the cpu it's running on first */
-    cur = CSCHED_VCPU(per_cpu(schedule_data, cpu).curr);
+    cur = CSCHED2_VCPU(per_cpu(schedule_data, cpu).curr);
     burn_credits(rqd, cur, now);
 
     if ( cur->credit < new->credit )
@@ -519,13 +519,13 @@ runq_tickle(const struct scheduler *ops,
 
     for_each_cpu(i, &mask)
     {
-        struct csched_vcpu * cur;
+        struct csched2_vcpu * cur;
 
         /* Already looked at this one above */
         if ( i == cpu )
             continue;
 
-        cur = CSCHED_VCPU(per_cpu(schedule_data, i).curr);
+        cur = CSCHED2_VCPU(per_cpu(schedule_data, i).curr);
 
         BUG_ON(is_idle_vcpu(cur->vcpu));
@@ -554,7 +554,7 @@ runq_tickle(const struct scheduler *ops,
 
     /* Only switch to another processor if the credit difference is greater
      * than the migrate resistance */
-    if ( ipid == -1 || lowest + CSCHED_MIGRATE_RESIST > new->credit )
+    if ( ipid == -1 || lowest + CSCHED2_MIGRATE_RESIST > new->credit )
         goto no_tickle;
 
 tickle:
@@ -581,12 +581,12 @@ no_tickle:
  */
 static void reset_credit(const struct scheduler *ops, int cpu, s_time_t now)
 {
-    struct csched_runqueue_data *rqd = RQD(ops, cpu);
+    struct csched2_runqueue_data *rqd = RQD(ops, cpu);
     struct list_head *iter;
 
     list_for_each( iter, &rqd->svc )
     {
-        struct csched_vcpu * svc = list_entry(iter, struct csched_vcpu, rqd_elem);
+        struct csched2_vcpu * svc = list_entry(iter, struct csched2_vcpu, rqd_elem);
 
         int start_credit;
@@ -596,10 +596,10 @@ static void reset_credit(const struct sc
         start_credit = svc->credit;
 
         /* "Clip" credits to max carryover */
-        if ( svc->credit > CSCHED_CARRYOVER_MAX )
-            svc->credit = CSCHED_CARRYOVER_MAX;
+        if ( svc->credit > CSCHED2_CARRYOVER_MAX )
+            svc->credit = CSCHED2_CARRYOVER_MAX;
         /* And add INIT */
-        svc->credit += CSCHED_CREDIT_INIT;
+        svc->credit += CSCHED2_CREDIT_INIT;
         svc->start_time = now;
 
         /* TRACE */ {
@@ -620,16 +620,16 @@ static void reset_credit(const struct sc
     /* No need to resort runqueue, as everyone's order should be the same. */
 }
 
-void burn_credits(struct csched_runqueue_data *rqd, struct csched_vcpu *svc, s_time_t now)
+void burn_credits(struct csched2_runqueue_data *rqd, struct csched2_vcpu *svc, s_time_t now)
 {
     s_time_t delta;
 
     /* Assert svc is current */
-    ASSERT(svc==CSCHED_VCPU(per_cpu(schedule_data, svc->vcpu->processor).curr));
+    ASSERT(svc==CSCHED2_VCPU(per_cpu(schedule_data, svc->vcpu->processor).curr));
 
     if ( is_idle_vcpu(svc->vcpu) )
     {
-        BUG_ON(svc->credit != CSCHED_IDLE_CREDIT);
+        BUG_ON(svc->credit != CSCHED2_IDLE_CREDIT);
         return;
     }
@@ -667,7 +667,7 @@ void burn_credits(struct csched_runqueue
 }
 
 /* Find the domain with the highest weight. */
-void update_max_weight(struct csched_runqueue_data *rqd, int new_weight, int old_weight)
+void update_max_weight(struct csched2_runqueue_data *rqd, int new_weight, int old_weight)
 {
     /* Try to avoid brute-force search:
      * - If new_weight is larger, max_weight <- new_weight
@@ -687,7 +687,7 @@ void update_max_weight(struct csched_run
 
     list_for_each( iter, &rqd->svc )
     {
-        struct csched_vcpu * svc = list_entry(iter, struct csched_vcpu, rqd_elem);
+        struct csched2_vcpu * svc = list_entry(iter, struct csched2_vcpu, rqd_elem);
 
         if ( svc->weight > max_weight )
             max_weight = svc->weight;
@@ -700,13 +700,13 @@ void update_max_weight(struct csched_run
 
 #ifndef NDEBUG
 static /*inline*/ void
-__csched_vcpu_check(struct vcpu *vc)
+__csched2_vcpu_check(struct vcpu *vc)
 {
-    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
-    struct csched_dom * const sdom = svc->sdom;
+    struct csched2_vcpu * const svc = CSCHED2_VCPU(vc);
+    struct csched2_dom * const sdom = svc->sdom;
 
     BUG_ON( svc->vcpu != vc );
-    BUG_ON( sdom != CSCHED_DOM(vc->domain) );
+    BUG_ON( sdom != CSCHED2_DOM(vc->domain) );
     if ( sdom )
     {
         BUG_ON( is_idle_vcpu(vc) );
@@ -717,18 +717,18 @@ __csched_vcpu_check(struct vcpu *vc)
         BUG_ON( !is_idle_vcpu(vc) );
     }
 }
-#define CSCHED_VCPU_CHECK(_vc) (__csched_vcpu_check(_vc))
+#define CSCHED2_VCPU_CHECK(_vc) (__csched2_vcpu_check(_vc))
 #else
-#define CSCHED_VCPU_CHECK(_vc)
+#define CSCHED2_VCPU_CHECK(_vc)
 #endif
 
 static void *
-csched_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd)
+csched2_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd)
 {
-    struct csched_vcpu *svc;
+    struct csched2_vcpu *svc;
 
     /* Allocate per-VCPU info */
-    svc = xzalloc(struct csched_vcpu);
+    svc = xzalloc(struct csched2_vcpu);
     if ( svc == NULL )
         return NULL;
@@ -744,16 +744,16 @@ csched_alloc_vdata(const struct schedule
     {
         BUG_ON( svc->sdom == NULL );
 
-        svc->credit = CSCHED_CREDIT_INIT;
+        svc->credit = CSCHED2_CREDIT_INIT;
         svc->weight = svc->sdom->weight;
         /* Starting load of 50% */
-        svc->avgload = 1ULL << (CSCHED_PRIV(ops)->load_window_shift - 1);
+        svc->avgload = 1ULL << (CSCHED2_PRIV(ops)->load_window_shift - 1);
         svc->load_last_update = NOW();
     }
     else
     {
         BUG_ON( svc->sdom != NULL );
-        svc->credit = CSCHED_IDLE_CREDIT;
+        svc->credit = CSCHED2_IDLE_CREDIT;
         svc->weight = 0;
     }
@@ -764,7 +764,7 @@ csched_alloc_vdata(const struct schedule
 
 /* Add and remove from runqueue assignment (not active run queue) */
 static void
-__runq_assign(struct csched_vcpu *svc, struct csched_runqueue_data *rqd)
+__runq_assign(struct csched2_vcpu *svc, struct csched2_runqueue_data *rqd)
 {
     svc->rqd = rqd;
@@ -794,7 +794,7 @@ static void
 static void
 runq_assign(const struct scheduler *ops, struct vcpu *vc)
 {
-    struct csched_vcpu *svc = vc->sched_priv;
+    struct csched2_vcpu *svc = vc->sched_priv;
 
     BUG_ON(svc->rqd != NULL);
@@ -802,7 +802,7 @@ runq_assign(const struct scheduler *ops,
 }
 
 static void
-__runq_deassign(struct csched_vcpu *svc)
+__runq_deassign(struct csched2_vcpu *svc)
 {
     BUG_ON(__vcpu_on_runq(svc));
     BUG_ON(test_bit(__CSFLAG_scheduled, &svc->flags));
@@ -819,7 +819,7 @@ static void
 static void
 runq_deassign(const struct scheduler *ops, struct vcpu *vc)
 {
-    struct csched_vcpu *svc = vc->sched_priv;
+    struct csched2_vcpu *svc = vc->sched_priv;
 
     BUG_ON(svc->rqd != RQD(ops, vc->processor));
@@ -827,11 +827,11 @@ runq_deassign(const struct scheduler *op
 }
 
 static void
-csched_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
+csched2_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
 {
-    struct csched_vcpu *svc = vc->sched_priv;
+    struct csched2_vcpu *svc = vc->sched_priv;
     struct domain * const dom = vc->domain;
-    struct csched_dom * const sdom = svc->sdom;
+    struct csched2_dom * const sdom = svc->sdom;
 
     printk("%s: Inserting d%dv%d\n",
            __func__, dom->domain_id, vc->vcpu_id);
@@ -854,22 +854,22 @@ csched_vcpu_insert(const struct schedule
         sdom->nr_vcpus++;
     }
 
-    CSCHED_VCPU_CHECK(vc);
+    CSCHED2_VCPU_CHECK(vc);
 }
 
 static void
-csched_free_vdata(const struct scheduler *ops, void *priv)
+csched2_free_vdata(const struct scheduler *ops, void *priv)
 {
-    struct csched_vcpu *svc = priv;
+    struct csched2_vcpu *svc = priv;
 
     xfree(svc);
 }
 
 static void
-csched_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
+csched2_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
 {
-    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
-    struct csched_dom * const sdom = svc->sdom;
+    struct csched2_vcpu * const svc = CSCHED2_VCPU(vc);
+    struct csched2_dom * const sdom = svc->sdom;
 
     BUG_ON( sdom == NULL );
     BUG_ON( !list_empty(&svc->runq_elem) );
@@ -894,9 +894,9 @@ csched_vcpu_remove(const struct schedule
 }
 
 static void
-csched_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc)
+csched2_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc)
 {
-    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
+    struct csched2_vcpu * const svc = CSCHED2_VCPU(vc);
 
     BUG_ON( is_idle_vcpu(vc) );
@@ -913,9 +913,9 @@ csched_vcpu_sleep(const struct scheduler
 }
 
 static void
-csched_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
+csched2_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
 {
-    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
+    struct csched2_vcpu * const svc = CSCHED2_VCPU(vc);
     s_time_t now = 0;
 
     /* Schedule lock should be held at this point. */
@@ -966,9 +966,9 @@ out:
 }
 
 static void
-csched_context_saved(const struct scheduler *ops, struct vcpu *vc)
+csched2_context_saved(const struct scheduler *ops, struct vcpu *vc)
 {
-    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
+    struct csched2_vcpu * const svc = CSCHED2_VCPU(vc);
     s_time_t now = NOW();
 
     vcpu_schedule_lock_irq(vc);
@@ -1004,9 +1004,9 @@ static int
 static int
 choose_cpu(const struct scheduler *ops, struct vcpu *vc)
 {
-    struct csched_private *prv = CSCHED_PRIV(ops);
+    struct csched2_private *prv = CSCHED2_PRIV(ops);
     int i, min_rqi = -1, new_cpu;
-    struct csched_vcpu *svc = CSCHED_VCPU(vc);
+    struct csched2_vcpu *svc = CSCHED2_VCPU(vc);
     s_time_t min_avgload;
 
     BUG_ON(cpumask_empty(&prv->active_queues));
@@ -1063,7 +1063,7 @@ choose_cpu(const struct scheduler *ops,
     /* Find the runqueue with the lowest instantaneous load */
     for_each_cpu(i, &prv->active_queues)
     {
-        struct csched_runqueue_data *rqd;
+        struct csched2_runqueue_data *rqd;
         s_time_t rqd_avgload;
 
         rqd = prv->rqd + i;
@@ -1112,15 +1112,15 @@ typedef struct {
 typedef struct {
     /* NB: Modified by consider() */
     s_time_t load_delta;
-    struct csched_vcpu * best_push_svc, *best_pull_svc;
+    struct csched2_vcpu * best_push_svc, *best_pull_svc;
     /* NB: Read by consider() */
-    struct csched_runqueue_data *lrqd;
-    struct csched_runqueue_data *orqd;
+    struct csched2_runqueue_data *lrqd;
+    struct csched2_runqueue_data *orqd;
 } balance_state_t;
 
 static void consider(balance_state_t *st,
-                     struct csched_vcpu *push_svc,
-                     struct csched_vcpu *pull_svc)
+                     struct csched2_vcpu *push_svc,
+                     struct csched2_vcpu *pull_svc)
 {
     s_time_t l_load, o_load, delta;
@@ -1153,8 +1153,8 @@ static void consider(balance_state_t *st
 
 void migrate(const struct scheduler *ops,
-             struct csched_vcpu *svc,
-             struct csched_runqueue_data *trqd,
+             struct csched2_vcpu *svc,
+             struct csched2_runqueue_data *trqd,
              s_time_t now)
 {
     if ( test_bit(__CSFLAG_scheduled, &svc->flags) )
@@ -1193,7 +1193,7 @@ void migrate(const struct scheduler *ops
 static void balance_load(const struct scheduler *ops, int cpu, s_time_t now)
 {
-    struct csched_private *prv = CSCHED_PRIV(ops);
+    struct csched2_private *prv = CSCHED2_PRIV(ops);
     int i, max_delta_rqi = -1;
     struct list_head *push_iter, *pull_iter;
@@ -1293,7 +1293,7 @@ retry:
     list_for_each( push_iter, &st.lrqd->svc )
     {
         int inner_load_updated = 0;
-        struct csched_vcpu * push_svc = list_entry(push_iter, struct csched_vcpu, rqd_elem);
+        struct csched2_vcpu * push_svc = list_entry(push_iter, struct csched2_vcpu, rqd_elem);
 
         __update_svc_load(ops, push_svc, 0, now);
@@ -1303,7 +1303,7 @@ retry:
 
         list_for_each( pull_iter, &st.orqd->svc )
         {
-            struct csched_vcpu * pull_svc = list_entry(pull_iter, struct csched_vcpu, rqd_elem);
+            struct csched2_vcpu * pull_svc = list_entry(pull_iter, struct csched2_vcpu, rqd_elem);
 
             if ( ! inner_load_updated )
             {
@@ -1325,7 +1325,7 @@ retry:
 
     list_for_each( pull_iter, &st.orqd->svc )
     {
-        struct csched_vcpu * pull_svc = list_entry(pull_iter, struct csched_vcpu, rqd_elem);
+        struct csched2_vcpu * pull_svc = list_entry(pull_iter, struct csched2_vcpu, rqd_elem);
 
         /* Skip this one if it's already been flagged to migrate */
         if ( test_bit(__CSFLAG_runq_migrate_request, &pull_svc->flags) )
@@ -1349,7 +1349,7 @@ out:
 }
 
 static int
-csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc)
+csched2_cpu_pick(const struct scheduler *ops, struct vcpu *vc)
 {
     int new_cpu;
@@ -1359,14 +1359,14 @@ csched_cpu_pick(const struct scheduler *
 }
 
 static void
-csched_vcpu_migrate(
+csched2_vcpu_migrate(
     const struct scheduler *ops, struct vcpu *vc, unsigned int new_cpu)
 {
-    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
-    struct csched_runqueue_data *trqd;
+    struct csched2_vcpu * const svc = CSCHED2_VCPU(vc);
+    struct csched2_runqueue_data *trqd;
 
     /* Check if new_cpu is valid */
-    BUG_ON(!cpumask_test_cpu(new_cpu, &CSCHED_PRIV(ops)->initialized));
+    BUG_ON(!cpumask_test_cpu(new_cpu, &CSCHED2_PRIV(ops)->initialized));
 
     trqd = RQD(ops, new_cpu);
@@ -1375,16 +1375,16 @@ csched_vcpu_migrate(
 }
 
 static int
-csched_dom_cntl(
+csched2_dom_cntl(
     const struct scheduler *ops,
     struct domain *d,
     struct xen_domctl_scheduler_op *op)
 {
-    struct csched_dom * const sdom = CSCHED_DOM(d);
-    struct csched_private *prv = CSCHED_PRIV(ops);
+    struct csched2_dom * const sdom = CSCHED2_DOM(d);
+    struct csched2_private *prv = CSCHED2_PRIV(ops);
     unsigned long flags;
 
-    /* Must hold csched_priv lock to read and update sdom,
+    /* Must hold csched2_priv lock to read and update sdom,
      * runq lock to update csvcs. */
     spin_lock_irqsave(&prv->lock, flags);
@@ -1408,10 +1408,10 @@ csched_dom_cntl(
             /* Update weights for vcpus, and max_weight for runqueues on which they reside */
             list_for_each ( iter, &sdom->vcpu )
             {
-                struct csched_vcpu *svc = list_entry(iter, struct csched_vcpu, sdom_elem);
+                struct csched2_vcpu *svc = list_entry(iter, struct csched2_vcpu, sdom_elem);
 
                 /* NB: Locking order is important here.  Because we grab this lock here, we
-                 * must never lock csched_priv.lock if we're holding a runqueue lock.
+                 * must never lock csched2_priv.lock if we're holding a runqueue lock.
                  * Also, calling vcpu_schedule_lock() is enough, since IRQs have already
                 * been disabled.
                 */
                vcpu_schedule_lock(svc->vcpu);
@@ -1432,12 +1432,12 @@ csched_dom_cntl(
 }
 
 static void *
-csched_alloc_domdata(const struct scheduler *ops, struct domain *dom)
+csched2_alloc_domdata(const struct scheduler *ops, struct domain *dom)
 {
-    struct csched_dom *sdom;
+    struct csched2_dom *sdom;
     int flags;
 
-    sdom = xzalloc(struct csched_dom);
+    sdom = xzalloc(struct csched2_dom);
     if ( sdom == NULL )
         return NULL;
@@ -1445,29 +1445,29 @@ csched_alloc_domdata(const struct schedu
     INIT_LIST_HEAD(&sdom->vcpu);
     INIT_LIST_HEAD(&sdom->sdom_elem);
     sdom->dom = dom;
-    sdom->weight = CSCHED_DEFAULT_WEIGHT;
+    sdom->weight = CSCHED2_DEFAULT_WEIGHT;
     sdom->nr_vcpus = 0;
 
-    spin_lock_irqsave(&CSCHED_PRIV(ops)->lock, flags);
+    spin_lock_irqsave(&CSCHED2_PRIV(ops)->lock, flags);
 
-    list_add_tail(&sdom->sdom_elem, &CSCHED_PRIV(ops)->sdom);
+    list_add_tail(&sdom->sdom_elem, &CSCHED2_PRIV(ops)->sdom);
 
-    spin_unlock_irqrestore(&CSCHED_PRIV(ops)->lock, flags);
+    spin_unlock_irqrestore(&CSCHED2_PRIV(ops)->lock, flags);
 
     return (void *)sdom;
 }
 
 static int
-csched_dom_init(const struct scheduler *ops, struct domain *dom)
+csched2_dom_init(const struct scheduler *ops, struct domain *dom)
 {
-    struct csched_dom *sdom;
+    struct csched2_dom *sdom;
 
     printk("%s: Initializing domain %d\n", __func__, dom->domain_id);
 
     if ( is_idle_domain(dom) )
         return 0;
 
-    sdom = csched_alloc_domdata(ops, dom);
+    sdom = csched2_alloc_domdata(ops, dom);
     if ( sdom == NULL )
         return -ENOMEM;
@@ -1477,40 +1477,40 @@ csched_dom_init(const struct scheduler *
 }
 
 static void
-csched_free_domdata(const struct scheduler *ops, void *data)
+csched2_free_domdata(const struct scheduler *ops, void *data)
 {
     int flags;
-    struct csched_dom *sdom = data;
+    struct csched2_dom *sdom = data;
 
-    spin_lock_irqsave(&CSCHED_PRIV(ops)->lock, flags);
+    spin_lock_irqsave(&CSCHED2_PRIV(ops)->lock, flags);
 
     list_del_init(&sdom->sdom_elem);
 
-    spin_unlock_irqrestore(&CSCHED_PRIV(ops)->lock, flags);
+    spin_unlock_irqrestore(&CSCHED2_PRIV(ops)->lock, flags);
 
     xfree(data);
 }
 
 static void
-csched_dom_destroy(const struct scheduler *ops, struct domain *dom)
+csched2_dom_destroy(const struct scheduler *ops, struct domain *dom)
 {
-    struct csched_dom *sdom = CSCHED_DOM(dom);
+    struct csched2_dom *sdom = CSCHED2_DOM(dom);
 
     BUG_ON(!list_empty(&sdom->vcpu));
 
-    csched_free_domdata(ops, CSCHED_DOM(dom));
+    csched2_free_domdata(ops, CSCHED2_DOM(dom));
 }
 
 /* How long should we let this vcpu run for? */
 static s_time_t
-csched_runtime(const struct scheduler *ops, int cpu, struct csched_vcpu *snext)
+csched2_runtime(const struct scheduler *ops, int cpu, struct csched2_vcpu *snext)
 {
-    s_time_t time = CSCHED_MAX_TIMER;
-    struct csched_runqueue_data *rqd = RQD(ops, cpu);
+    s_time_t time = CSCHED2_MAX_TIMER;
+    struct csched2_runqueue_data *rqd = RQD(ops, cpu);
     struct list_head *runq = &rqd->runq;
 
     if ( is_idle_vcpu(snext->vcpu) )
-        return CSCHED_MAX_TIMER;
+        return CSCHED2_MAX_TIMER;
 
     /* Basic time */
     time = c2t(rqd, snext->credit, snext);
@@ -1518,7 +1518,7 @@ csched_runtime(const struct scheduler *o
     /* Next guy on runqueue */
     if ( ! list_empty(runq) )
     {
-        struct csched_vcpu *svc = __runq_elem(runq->next);
+        struct csched2_vcpu *svc = __runq_elem(runq->next);
         s_time_t ntime;
 
         if ( ! is_idle_vcpu(svc->vcpu) )
@@ -1531,10 +1531,10 @@ csched_runtime(const struct scheduler *o
     }
 
     /* Check limits */
-    if ( time < CSCHED_MIN_TIMER )
-        time = CSCHED_MIN_TIMER;
-    else if ( time > CSCHED_MAX_TIMER )
-        time = CSCHED_MAX_TIMER;
+    if ( time < CSCHED2_MIN_TIMER )
+        time = CSCHED2_MIN_TIMER;
+    else if ( time > CSCHED2_MAX_TIMER )
+        time = CSCHED2_MAX_TIMER;
 
     return time;
 }
@@ -1544,28 +1544,28 @@ void __dump_execstate(void *unused);
 /*
  * Find a candidate.
 */
-static struct csched_vcpu *
-runq_candidate(struct csched_runqueue_data *rqd,
-               struct csched_vcpu *scurr,
+static struct csched2_vcpu *
+runq_candidate(struct csched2_runqueue_data *rqd,
+               struct csched2_vcpu *scurr,
                int cpu, s_time_t now)
 {
     struct list_head *iter;
-    struct csched_vcpu *snext = NULL;
+    struct csched2_vcpu *snext = NULL;
 
     /* Default to current if runnable, idle otherwise */
     if ( vcpu_runnable(scurr->vcpu) )
         snext = scurr;
     else
-        snext = CSCHED_VCPU(idle_vcpu[cpu]);
+        snext = CSCHED2_VCPU(idle_vcpu[cpu]);
 
     list_for_each( iter, &rqd->runq )
     {
-        struct csched_vcpu * svc = list_entry(iter, struct csched_vcpu, runq_elem);
+        struct csched2_vcpu * svc = list_entry(iter, struct csched2_vcpu, runq_elem);
 
         /* If this is on a different processor, don't pull it unless
-         * its credit is at least CSCHED_MIGRATE_RESIST higher. */
+         * its credit is at least CSCHED2_MIGRATE_RESIST higher. */
         if ( svc->vcpu->processor != cpu
-             && snext->credit + CSCHED_MIGRATE_RESIST > svc->credit )
+             && snext->credit + CSCHED2_MIGRATE_RESIST > svc->credit )
             continue;
 
         /* If the next one on the list has more credit than current
@@ -1586,17 +1586,17 @@ runq_candidate(struct csched_runqueue_da
  * fast for the common case.
 */
 static struct task_slice
-csched_schedule(
+csched2_schedule(
    const struct scheduler *ops, s_time_t now, bool_t tasklet_work_scheduled)
 {
    const int cpu = smp_processor_id();
-    struct csched_runqueue_data *rqd;
-    struct csched_vcpu * const scurr = CSCHED_VCPU(current);
-    struct csched_vcpu *snext = NULL;
+    struct csched2_runqueue_data *rqd;
+    struct csched2_vcpu * const scurr = CSCHED2_VCPU(current);
+    struct csched2_vcpu *snext = NULL;
    struct task_slice ret;
 
    SCHED_STAT_CRANK(schedule);
-    CSCHED_VCPU_CHECK(current);
+    CSCHED2_VCPU_CHECK(current);
 
    d2printk("sc p%d c d%dv%d now %"PRI_stime"\n",
             cpu,
@@ -1604,7 +1604,7 @@ csched_schedule(
              scurr->vcpu->vcpu_id,
              now);
 
-    BUG_ON(!cpumask_test_cpu(cpu, &CSCHED_PRIV(ops)->initialized));
+    BUG_ON(!cpumask_test_cpu(cpu, &CSCHED2_PRIV(ops)->initialized));
 
     rqd = RQD(ops, cpu);
     BUG_ON(!cpumask_test_cpu(cpu, &rqd->active));
@@ -1620,9 +1620,9 @@ csched_schedule(
         {
             int rq;
             other_rqi = -2;
-            for_each_cpu ( rq, &CSCHED_PRIV(ops)->active_queues )
+            for_each_cpu ( rq, &CSCHED2_PRIV(ops)->active_queues )
             {
-                if ( scurr->rqd == &CSCHED_PRIV(ops)->rqd[rq] )
+                if ( scurr->rqd == &CSCHED2_PRIV(ops)->rqd[rq] )
                 {
                     other_rqi = rq;
                     break;
@@ -1666,7 +1666,7 @@ csched_schedule(
     if ( tasklet_work_scheduled )
     {
         trace_var(TRC_CSCHED2_SCHED_TASKLET, 0, 0, NULL);
-        snext = CSCHED_VCPU(idle_vcpu[cpu]);
+        snext = CSCHED2_VCPU(idle_vcpu[cpu]);
     }
     else
         snext=runq_candidate(rqd, scurr, cpu, now);
@@ -1703,7 +1703,7 @@ csched_schedule(
     }
 
     /* Check for the reset condition */
-    if ( snext->credit <= CSCHED_CREDIT_RESET )
+    if ( snext->credit <= CSCHED2_CREDIT_RESET )
     {
         reset_credit(ops, cpu, now);
         balance_load(ops, cpu, now);
@@ -1718,7 +1718,7 @@ csched_schedule(
         /* Safe because lock for old processor is held */
         if ( snext->vcpu->processor != cpu )
         {
-            snext->credit += CSCHED_MIGRATE_COMPENSATION;
+            snext->credit += CSCHED2_MIGRATE_COMPENSATION;
             snext->vcpu->processor = cpu;
             ret.migrated = 1;
         }
@@ -1736,15 +1736,15 @@ csched_schedule(
     /*
      * Return task to run next...
      */
-    ret.time = csched_runtime(ops, cpu, snext);
+    ret.time = csched2_runtime(ops, cpu, snext);
     ret.task = snext->vcpu;
 
-    CSCHED_VCPU_CHECK(ret.task);
+    CSCHED2_VCPU_CHECK(ret.task);
     return ret;
 }
 
 static void
-csched_dump_vcpu(struct csched_vcpu *svc)
+csched2_dump_vcpu(struct csched2_vcpu *svc)
 {
     printk("[%i.%i] flags=%x cpu=%i",
            svc->vcpu->domain->domain_id,
@@ -1758,10 +1758,10 @@ csched_dump_vcpu(struct csched_vcpu *svc
 }
 
 static void
-csched_dump_pcpu(const struct scheduler *ops, int cpu)
+csched2_dump_pcpu(const struct scheduler *ops, int cpu)
 {
     struct list_head *runq, *iter;
-    struct csched_vcpu *svc;
+    struct csched2_vcpu *svc;
     int loop;
     char cpustr[100];
@@ -1775,11 +1775,11 @@ csched_dump_pcpu(const struct scheduler
     printk("core=%s\n", cpustr);
 
     /* current VCPU */
-    svc = CSCHED_VCPU(per_cpu(schedule_data, cpu).curr);
+    svc = CSCHED2_VCPU(per_cpu(schedule_data, cpu).curr);
     if ( svc )
     {
         printk("\trun: ");
-        csched_dump_vcpu(svc);
+        csched2_dump_vcpu(svc);
     }
 
     loop = 0;
@@ -1789,22 +1789,22 @@ csched_dump_pcpu(const struct scheduler
         if ( svc )
         {
             printk("\t%3d: ", ++loop);
-            csched_dump_vcpu(svc);
+            csched2_dump_vcpu(svc);
         }
     }
 }
 
 static void
-csched_dump(const struct scheduler *ops)
+csched2_dump(const struct scheduler *ops)
 {
     struct list_head *iter_sdom, *iter_svc;
-    struct csched_private *prv = CSCHED_PRIV(ops);
+    struct csched2_private *prv = CSCHED2_PRIV(ops);
     int i, loop;
 
     printk("Active queues: %d\n"
           "\tdefault-weight = %d\n",
           cpumask_weight(&prv->active_queues),
-           CSCHED_DEFAULT_WEIGHT);
+           CSCHED2_DEFAULT_WEIGHT);
     for_each_cpu(i, &prv->active_queues)
     {
         s_time_t fraction;
@@ -1829,8 +1829,8 @@ csched_dump(const struct scheduler *ops)
     loop = 0;
     list_for_each( iter_sdom, &prv->sdom )
     {
-        struct csched_dom *sdom;
-        sdom = list_entry(iter_sdom, struct csched_dom, sdom_elem);
+        struct csched2_dom *sdom;
+        sdom = list_entry(iter_sdom, struct csched2_dom, sdom_elem);
 
        printk("\tDomain: %d w %d v %d\n\t",
               sdom->dom->domain_id,
@@ -1839,18 +1839,18 @@ csched_dump(const struct scheduler *ops)
 
         list_for_each( iter_svc, &sdom->vcpu )
         {
-            struct csched_vcpu *svc;
-            svc = list_entry(iter_svc, struct csched_vcpu, sdom_elem);
+            struct csched2_vcpu *svc;
+            svc = list_entry(iter_svc, struct csched2_vcpu, sdom_elem);
             printk("\t%3d: ", ++loop);
-            csched_dump_vcpu(svc);
+            csched2_dump_vcpu(svc);
         }
     }
 }
 
-static void activate_runqueue(struct csched_private *prv, int rqi)
+static void activate_runqueue(struct csched2_private *prv, int rqi)
 {
-    struct csched_runqueue_data *rqd;
+    struct csched2_runqueue_data *rqd;
 
     rqd = prv->rqd + rqi;
@@ -1865,9 +1865,9 @@ static void activate_runqueue(struct csc
     cpumask_set_cpu(rqi, &prv->active_queues);
 }
 
-static void deactivate_runqueue(struct csched_private *prv, int rqi)
+static void deactivate_runqueue(struct csched2_private *prv, int rqi)
 {
-    struct csched_runqueue_data *rqd;
+    struct csched2_runqueue_data *rqd;
 
     rqd = prv->rqd + rqi;
@@ -1881,8 +1881,8 @@ static void init_pcpu(const struct sched
 static void init_pcpu(const struct scheduler *ops, int cpu)
 {
     int rqi, flags;
-    struct csched_private *prv = CSCHED_PRIV(ops);
-    struct csched_runqueue_data *rqd;
+    struct csched2_private *prv = CSCHED2_PRIV(ops);
+    struct csched2_runqueue_data *rqd;
     spinlock_t *old_lock;
 
     spin_lock_irqsave(&prv->lock, flags);
@@ -1942,7 +1942,7 @@ static void init_pcpu(const struct sched
 }
 
 static void *
-csched_alloc_pdata(const struct scheduler *ops, int cpu)
+csched2_alloc_pdata(const struct scheduler *ops, int cpu)
 {
     /* Check to see if the cpu is online yet */
     /* Note: cpu 0 doesn't get a STARTING callback */
@@ -1956,11 +1956,11 @@ csched_alloc_pdata(const struct schedule
 }
 
 static void
-csched_free_pdata(const struct scheduler *ops, void *pcpu, int cpu)
+csched2_free_pdata(const struct scheduler *ops, void *pcpu, int cpu)
 {
     unsigned long flags;
-    struct csched_private *prv = CSCHED_PRIV(ops);
-    struct csched_runqueue_data *rqd;
+    struct csched2_private *prv = CSCHED2_PRIV(ops);
+    struct csched2_runqueue_data *rqd;
     struct schedule_data *sd = &per_cpu(schedule_data, cpu);
     int rqi;
@@ -2004,14 +2004,14 @@ csched_free_pdata(const struct scheduler
 }
 
 static int
-csched_cpu_starting(int cpu)
+csched2_cpu_starting(int cpu)
 {
     struct scheduler *ops;
 
     /* Hope this is safe from cpupools switching things around. :-) */
     ops = per_cpu(scheduler, cpu);
 
-    if ( ops->alloc_pdata == csched_alloc_pdata )
+    if ( ops->alloc_pdata == csched2_alloc_pdata )
         init_pcpu(ops, cpu);
 
     return NOTIFY_DONE;
@@ -2026,7 +2026,7 @@ static int cpu_credit2_callback(
     switch ( action )
     {
     case CPU_STARTING:
-        csched_cpu_starting(cpu);
+        csched2_cpu_starting(cpu);
         break;
     default:
         break;
@@ -2040,17 +2040,17 @@ static struct notifier_block cpu_credit2
 };
 
 static int
-csched_global_init(void)
+csched2_global_init(void)
 {
     register_cpu_notifier(&cpu_credit2_nfb);
     return 0;
 }
 
 static int
-csched_init(struct scheduler *ops)
+csched2_init(struct scheduler *ops)
 {
     int i;
-    struct csched_private *prv;
+    struct csched2_private *prv;
 
     printk("Initializing Credit2 scheduler\n" \
           " WARNING: This is experimental software in development.\n" \
@@ -2071,7 +2071,7 @@ csched_init(struct scheduler *ops)
      * set up basic structures, and a callback when the CPU info is
     * available.
     */
-    prv = xzalloc(struct csched_private);
+    prv = xzalloc(struct csched2_private);
     if ( prv == NULL )
         return -ENOMEM;
     ops->sched_data = prv;
@@ -2091,49 +2091,49 @@ csched_init(struct scheduler *ops)
 }
 
 static void
-csched_deinit(const struct scheduler *ops)
+csched2_deinit(const struct scheduler *ops)
 {
-    struct csched_private *prv;
+    struct csched2_private *prv;
 
-    prv = CSCHED_PRIV(ops);
+    prv = CSCHED2_PRIV(ops);
     if ( prv != NULL )
         xfree(prv);
 }
 
-static struct csched_private _csched_priv;
+static struct csched2_private _csched2_priv;
 
 const struct scheduler sched_credit2_def = {
     .name           = "SMP Credit Scheduler rev2",
     .opt_name       = "credit2",
     .sched_id       = XEN_SCHEDULER_CREDIT2,
-    .sched_data     = &_csched_priv,
+    .sched_data     = &_csched2_priv,
 
-    .init_domain    = csched_dom_init,
-    .destroy_domain = csched_dom_destroy,
+    .init_domain    = csched2_dom_init,
+    .destroy_domain = csched2_dom_destroy,
 
-    .insert_vcpu    = csched_vcpu_insert,
-    .remove_vcpu    = csched_vcpu_remove,
+    .insert_vcpu    = csched2_vcpu_insert,
+    .remove_vcpu    = csched2_vcpu_remove,
 
-    .sleep          = csched_vcpu_sleep,
-    .wake           = csched_vcpu_wake,
+    .sleep          = csched2_vcpu_sleep,
+    .wake           = csched2_vcpu_wake,
 
-    .adjust         = csched_dom_cntl,
+    .adjust         = csched2_dom_cntl,
 
-    .pick_cpu       = csched_cpu_pick,
-    .migrate        = csched_vcpu_migrate,
-    .do_schedule    = csched_schedule,
-    .context_saved  = csched_context_saved,
+    .pick_cpu       = csched2_cpu_pick,
+    .migrate        = csched2_vcpu_migrate,
+    .do_schedule    = csched2_schedule,
+    .context_saved  = csched2_context_saved,
 
-    .dump_cpu_state = csched_dump_pcpu,
-    .dump_settings  = csched_dump,
-    .global_init    = csched_global_init,
-    .init           = csched_init,
-    .deinit         = csched_deinit,
-    .alloc_vdata    = csched_alloc_vdata,
-    .free_vdata     = csched_free_vdata,
-    .alloc_pdata    = csched_alloc_pdata,
-    .free_pdata     = csched_free_pdata,
-    .alloc_domdata  = csched_alloc_domdata,
-    .free_domdata   = csched_free_domdata,
+    .dump_cpu_state = csched2_dump_pcpu,
+    .dump_settings  = csched2_dump,
+    .global_init    = csched2_global_init,
+    .init           = csched2_init,
+    .deinit         = csched2_deinit,
+    .alloc_vdata    = csched2_alloc_vdata,
+    .free_vdata     = csched2_free_vdata,
+    .alloc_pdata    = csched2_alloc_pdata,
+    .free_pdata     = csched2_free_pdata,
+    .alloc_domdata  = csched2_alloc_domdata,
+    .free_domdata   = csched2_free_domdata,
 };
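
For readers following the arithmetic behind the renamed t2c()/c2t() helpers and the
CSCHED2_CARRYOVER_MAX clipping in reset_credit() above, here is a minimal standalone
sketch of the same calculations. The types and constants are simplified stand-ins
(plain int64_t nanoseconds instead of Xen's s_time_t and MILLISECS()/MICROSECS()
macros), not the hypervisor's actual definitions:

/* Standalone sketch of the credit2 credit arithmetic touched by this
 * rename.  Stand-in types/constants for illustration only. */
#include <stdio.h>
#include <stdint.h>

typedef int64_t s_time_t;              /* Xen's s_time_t is signed ns */

#define CREDIT2_INIT     10000000LL    /* like CSCHED2_CREDIT_INIT: 10ms */
#define CARRYOVER_MAX      500000LL    /* like CSCHED2_CARRYOVER_MAX: 500us */

/* Time-to-credit: a vcpu's runtime is scaled by max_weight/weight, so
 * lighter-weight vcpus burn credit faster than the heaviest one. */
static s_time_t t2c(s_time_t time, int max_weight, int weight)
{
    return time * max_weight / weight;
}

/* Credit-to-time: the inverse mapping, as csched2_runtime() uses it to
 * size the next timeslice. */
static s_time_t c2t(s_time_t credit, int max_weight, int weight)
{
    return credit * weight / max_weight;
}

/* Reset event: clip leftover credit to the carryover cap, then add INIT. */
static s_time_t reset_one(s_time_t credit)
{
    if ( credit > CARRYOVER_MAX )
        credit = CARRYOVER_MAX;
    return credit + CREDIT2_INIT;
}

int main(void)
{
    /* A weight-128 vcpu on a queue whose max weight is 256 is charged
     * twice as much credit per ms as the heaviest vcpu. */
    printf("1ms of runtime costs %lld credit\n",
           (long long)t2c(1000000LL, 256, 128));
    printf("credit after a reset: %lld\n", (long long)reset_one(750000LL));
    return 0;
}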
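
Similarly, the renamed __runq_insert() keeps each runqueue sorted by descending
credit so the best candidate is always at the head. A sketch of that ordering
with an ordinary singly-linked list (Xen itself uses its doubly-linked
list_head; the struct and names here are illustrative only):

/* Illustrative credit-ordered insert, mirroring what __runq_insert()
 * does on the credit2 runqueue.  Not Xen code. */
#include <stdio.h>
#include <stdlib.h>

struct svc {
    int credit;
    struct svc *next;
};

/* Insert so the list stays sorted by descending credit; ties go after
 * existing entries, as in __runq_insert(). */
static void runq_insert(struct svc **head, struct svc *svc)
{
    while ( *head && (*head)->credit >= svc->credit )
        head = &(*head)->next;
    svc->next = *head;
    *head = svc;
}

int main(void)
{
    int credits[] = { 300, 900, 600 };
    struct svc *runq = NULL;

    for ( int i = 0; i < 3; i++ )
    {
        struct svc *s = malloc(sizeof(*s));
        s->credit = credits[i];
        s->next = NULL;
        runq_insert(&runq, s);
    }
    for ( struct svc *s = runq; s != NULL; s = s->next )
        printf("%d ", s->credit);   /* prints: 900 600 300 */
    printf("\n");
    return 0;
}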