[Xen-devel] [PATCH] xen/sched_rt: Move repl_timer into struct rt_private
struct timer is only 48 bytes, and repl_timer has a 1-to-1 correspondence
with struct rt_private, so referencing it via a pointer is wasteful.
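
For illustration only (a sketch with a stand-in struct timer of the size
quoted above, not the real definition from xen/include/xen/timer.h), the
shape of the change is:

    /* Stand-in for Xen's struct timer, 48 bytes per the measurement above. */
    struct timer { unsigned char opaque[48]; };

    /* Before: rt_private carries an 8-byte pointer, and the timer itself
     * is a second, separately xzalloc()'d heap object. */
    struct rt_private_before {
        struct timer *repl_timer;
    };

    /* After: the timer is embedded, so it is allocated (and zeroed)
     * together with rt_private itself, and callers use &prv->repl_timer
     * where they previously used prv->repl_timer. */
    struct rt_private_after {
        struct timer repl_timer;
    };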
This avoids one memory allocation in rt_init(), and the resulting diffstat is:
  add/remove: 0/0 grow/shrink: 0/7 up/down: 0/-156 (-156)
  function                                     old     new   delta
  rt_switch_sched                              134     133      -1
  rt_context_saved                             278     271      -7
  rt_vcpu_remove                               253     245      -8
  rt_vcpu_sleep                                234     218     -16
  repl_timer_handler                           761     744     -17
  rt_deinit                                     44      20     -24
  rt_init                                      219     136     -83
As an extra bit of cleanup noticed while making this change, there is no need
to call cpumask_clear() on a zeroed memory allocation.
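
A minimal userspace sketch of that reasoning, with calloc() standing in
for Xen's xzalloc() and a plain bitmap standing in for cpumask_t:

    #include <stdlib.h>

    struct rt_private_sketch {
        unsigned long tickled[4];   /* stand-in for cpumask_t */
    };

    int main(void)
    {
        /* calloc(), like xzalloc(), returns fully zeroed memory... */
        struct rt_private_sketch *prv = calloc(1, sizeof(*prv));

        if ( prv == NULL )
            return 1;

        /* ...so every bit of prv->tickled is already clear, and an
         * explicit cpumask_clear(&prv->tickled) would be a no-op. */
        free(prv);
        return 0;
    }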
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Dario Faggioli <dfaggioli@xxxxxxxx>
CC: Meng Xu <mengxu@xxxxxxxxxxxxx>
Also noticed by chance while inspecting the disassembly delta for "x86/bitops:
Introduce variable/constant pairs for __{set,clear,change}_bit()"
---
xen/common/sched_rt.c | 45 +++++++++++++++++----------------------------
1 file changed, 17 insertions(+), 28 deletions(-)
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index b770287..a202802 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -186,7 +186,7 @@ struct rt_private {
     struct list_head runq;      /* ordered list of runnable vcpus */
     struct list_head depletedq; /* unordered list of depleted vcpus */
 
-    struct timer *repl_timer;   /* replenishment timer */
+    struct timer repl_timer;    /* replenishment timer */
     struct list_head replq;     /* ordered list of vcpus that need
                                  * replenishment */
     cpumask_t tickled;          /* cpus been tickled */
@@ -554,10 +554,10 @@ replq_remove(const struct scheduler *ops, struct rt_vcpu *svc)
         if ( !list_empty(replq) )
         {
             struct rt_vcpu *svc_next = replq_elem(replq->next);
-            set_timer(prv->repl_timer, svc_next->cur_deadline);
+            set_timer(&prv->repl_timer, svc_next->cur_deadline);
         }
         else
-            stop_timer(prv->repl_timer);
+            stop_timer(&prv->repl_timer);
     }
 }
@@ -597,7 +597,7 @@ replq_insert(const struct scheduler *ops, struct rt_vcpu *svc)
      * at the front of the event list.
      */
     if ( deadline_replq_insert(svc, &svc->replq_elem, replq) )
-        set_timer(prv->repl_timer, svc->cur_deadline);
+        set_timer(&prv->repl_timer, svc->cur_deadline);
 }
 
 /*
@@ -634,7 +634,7 @@ replq_reinsert(const struct scheduler *ops, struct rt_vcpu *svc)
 
     rearm = deadline_replq_insert(svc, &svc->replq_elem, replq);
     if ( rearm )
-        set_timer(rt_priv(ops)->repl_timer, rearm_svc->cur_deadline);
+        set_timer(&rt_priv(ops)->repl_timer, rearm_svc->cur_deadline);
 }
 
 /*
@@ -676,27 +676,18 @@ rt_init(struct scheduler *ops)
     if ( prv == NULL )
         goto err;
 
-    prv->repl_timer = xzalloc(struct timer);
-    if ( prv->repl_timer == NULL )
-        goto err;
-
     spin_lock_init(&prv->lock);
     INIT_LIST_HEAD(&prv->sdom);
     INIT_LIST_HEAD(&prv->runq);
     INIT_LIST_HEAD(&prv->depletedq);
     INIT_LIST_HEAD(&prv->replq);
 
-    cpumask_clear(&prv->tickled);
-
     ops->sched_data = prv;
     rc = 0;
 
  err:
-    if ( rc && prv )
-    {
-        xfree(prv->repl_timer);
+    if ( rc )
         xfree(prv);
-    }
 
     return rc;
 }
@@ -706,9 +697,8 @@ rt_deinit(struct scheduler *ops)
 {
     struct rt_private *prv = rt_priv(ops);
 
-    ASSERT(prv->repl_timer->status == TIMER_STATUS_invalid ||
-           prv->repl_timer->status == TIMER_STATUS_killed);
-    xfree(prv->repl_timer);
+    ASSERT(prv->repl_timer.status == TIMER_STATUS_invalid ||
+           prv->repl_timer.status == TIMER_STATUS_killed);
 
     ops->sched_data = NULL;
     xfree(prv);
@@ -731,9 +721,9 @@ rt_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
      * TIMER_STATUS_invalid means we are the first cpu that sees the timer
      * allocated but not initialized, and so it's up to us to initialize it.
      */
-    if ( prv->repl_timer->status == TIMER_STATUS_invalid )
+    if ( prv->repl_timer.status == TIMER_STATUS_invalid )
     {
-        init_timer(prv->repl_timer, repl_timer_handler, (void*) ops, cpu);
+        init_timer(&prv->repl_timer, repl_timer_handler, (void *)ops, cpu);
         dprintk(XENLOG_DEBUG, "RTDS: timer initialized on cpu %u\n", cpu);
     }
@@ -769,10 +759,10 @@ rt_switch_sched(struct scheduler *new_ops, unsigned int cpu,
      * removed (in which case we'll see TIMER_STATUS_killed), it's our
      * job to (re)initialize the timer.
      */
-    if ( prv->repl_timer->status == TIMER_STATUS_invalid ||
-         prv->repl_timer->status == TIMER_STATUS_killed )
+    if ( prv->repl_timer.status == TIMER_STATUS_invalid ||
+         prv->repl_timer.status == TIMER_STATUS_killed )
     {
-        init_timer(prv->repl_timer, repl_timer_handler, (void*) new_ops, cpu);
+        init_timer(&prv->repl_timer, repl_timer_handler, (void *)new_ops, cpu);
         dprintk(XENLOG_DEBUG, "RTDS: timer initialized on cpu %u\n", cpu);
     }
@@ -797,7 +787,7 @@ rt_deinit_pdata(const struct scheduler *ops, void *pcpu, int cpu)
 
     spin_lock_irqsave(&prv->lock, flags);
 
-    if ( prv->repl_timer->cpu == cpu )
+    if ( prv->repl_timer.cpu == cpu )
     {
         struct cpupool *c = per_cpu(cpupool, cpu);
         unsigned int new_cpu = cpumask_cycle(cpu, cpupool_online_cpumask(c));
@@ -809,12 +799,12 @@ rt_deinit_pdata(const struct scheduler *ops, void *pcpu, int cpu)
          */
         if ( new_cpu >= nr_cpu_ids )
         {
-            kill_timer(prv->repl_timer);
+            kill_timer(&prv->repl_timer);
             dprintk(XENLOG_DEBUG, "RTDS: timer killed on cpu %d\n", cpu);
         }
         else
         {
-            migrate_timer(prv->repl_timer, new_cpu);
+            migrate_timer(&prv->repl_timer, new_cpu);
         }
     }
@@ -1505,7 +1495,6 @@ static void repl_timer_handler(void *data){
     struct rt_private *prv = rt_priv(ops);
     struct list_head *replq = rt_replq(ops);
     struct list_head *runq = rt_runq(ops);
-    struct timer *repl_timer = prv->repl_timer;
     struct list_head *iter, *tmp;
     struct rt_vcpu *svc;
     LIST_HEAD(tmp_replq);
@@ -1571,7 +1560,7 @@ static void repl_timer_handler(void *data){
      * the one in the front.
      */
     if ( !list_empty(replq) )
-        set_timer(repl_timer, replq_elem(replq->next)->cur_deadline);
+        set_timer(&prv->repl_timer, replq_elem(replq->next)->cur_deadline);
 
     spin_unlock_irq(&prv->lock);
 }
--
2.1.4