# HG changeset patch
# User Emmanuel Ackaouy <ack@xxxxxxxxxxxxx>
# Node ID cb8eeadd8eaee16924a4a02a1955c5145f0bfcec
# Parent a174f9787014e4887d16a3f2685dd31b15c9975a
[XEN] When balancing idlers per socket/core, signal only one candidate at a time.
Signed-off-by: Emmanuel Ackaouy <ack@xxxxxxxxxxxxx>
---
xen/common/sched_credit.c | 13 ++++++++-----
1 files changed, 8 insertions(+), 5 deletions(-)
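The behavioural change is confined to csched_load_balance()'s scan of peer CPUs: where the old loop could go on and signal every suitable peer it found, the patched loop signals the first candidate and stops. Below is a minimal, self-contained sketch of the resulting control flow; peer_t, try_lock(), stealable_loner and signal_reschedule() are hypothetical stand-ins for the scheduler internals, not Xen APIs.

/*
 * Sketch of csched_load_balance()'s scan after this patch: walk the peer
 * CPUs, signal only the first suitable "loner" candidate, and drop the
 * lock of every peer that is not signalled.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_PEERS 4

typedef struct {
    bool locked;           /* stands in for the peer's schedule_lock      */
    bool stealable_loner;  /* peer runs real work but has an idler queued */
} peer_t;

static peer_t peers[NR_PEERS] = {
    { false, false }, { false, true }, { false, true }, { false, false },
};

static bool try_lock(peer_t *p)
{
    if ( p->locked )
        return false;
    p->locked = true;
    return true;
}

static void unlock(peer_t *p)
{
    p->locked = false;
}

static void signal_reschedule(int peer)
{
    printf("raise SCHEDULE_SOFTIRQ on peer %d\n", peer);
}

static void balance_one_at_a_time(void)
{
    int peer;

    for ( peer = 0; peer < NR_PEERS; peer++ )
    {
        if ( !try_lock(&peers[peer]) )
            continue;

        if ( peers[peer].stealable_loner )
        {
            /*
             * Signal the first candidate only, then stop scanning.  As in
             * the patch, the signalled peer's lock is not dropped at this
             * point; that part of the real code is not modelled here.
             */
            signal_reschedule(peer);
            break;
        }

        /* Not a candidate: release its lock and keep looking. */
        unlock(&peers[peer]);
    }
}

int main(void)
{
    balance_one_at_a_time();  /* signals peer 1 only; peer 2 is never woken */
    return 0;
}

Signalling a single peer per balancing pass presumably avoids waking several remote CPUs for the same piece of runnable work; if the first candidate does not pick it up, a later pass can still signal another.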
diff -r a174f9787014 -r cb8eeadd8eae xen/common/sched_credit.c
--- a/xen/common/sched_credit.c Tue Nov 07 10:19:20 2006 +0000
+++ b/xen/common/sched_credit.c Tue Nov 07 10:37:30 2006 +0000
@@ -119,6 +119,7 @@
_MACRO(steal_peer_best_idler) \
_MACRO(steal_loner_candidate) \
_MACRO(steal_loner_signal) \
+ _MACRO(cpu_pick) \
_MACRO(dom_init) \
_MACRO(dom_destroy) \
_MACRO(vcpu_init) \
@@ -722,6 +723,8 @@ csched_cpu_pick(struct vcpu *vc)
cpumask_t cpus;
int cpu, nxt;
+ CSCHED_STAT_CRANK(cpu_pick);
+
/*
* Pick from online CPUs in VCPU's affinity mask, giving a
* preference to its current processor if it's in there.
@@ -1186,6 +1189,7 @@ csched_load_balance(int cpu, struct csch
peer_vcpu = per_cpu(schedule_data, peer_cpu).curr;
spc = CSCHED_PCPU(peer_cpu);
+ /* Signal the first candidate only. */
if ( !is_idle_vcpu(peer_vcpu) &&
is_idle_vcpu(__runq_elem(spc->runq.next)->vcpu) &&
__csched_running_vcpu_is_stealable(cpu, peer_vcpu) )
@@ -1195,11 +1199,10 @@ csched_load_balance(int cpu, struct csch
CSCHED_STAT_CRANK(steal_loner_signal);
cpu_raise_softirq(peer_cpu, SCHEDULE_SOFTIRQ);
- }
- else
- {
- spin_unlock(&per_cpu(schedule_data, peer_cpu).schedule_lock);
- }
+ break;
+ }
+
+ spin_unlock(&per_cpu(schedule_data, peer_cpu).schedule_lock);
}
/* Failed to find more important work elsewhere... */
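Beyond the load-balancing change itself, the patch adds bookkeeping: cpu_pick joins the scheduler's statistics list in the first hunk, and csched_cpu_pick() cranks it on every invocation in the second. That list is an X-macro, i.e. a single macro list that can be expanded several times wherever the counter names are needed. The sketch below is an illustrative reconstruction of that pattern only; the real CSCHED_STAT_* plumbing in sched_credit.c is not visible in this patch, and STAT_LIST, stats_t, STAT_CRANK and STAT_PRINT are made-up names.

/*
 * X-macro counter sketch: expand one list of names to declare the counter
 * fields, to increment them, and to print them, without repeating the list.
 */
#include <stdio.h>

#define STAT_LIST(_MACRO)       \
    _MACRO(steal_loner_signal)  \
    _MACRO(cpu_pick)            \
    _MACRO(vcpu_init)

/* Expand the list once to get one counter field per entry... */
#define STAT_FIELD(_X) unsigned long _X;
typedef struct { STAT_LIST(STAT_FIELD) } stats_t;

static stats_t stats;

/* ...and again wherever the names are needed. */
#define STAT_CRANK(_X) (stats._X++)
#define STAT_PRINT(_X) printf("%-20s %lu\n", #_X, stats._X);

int main(void)
{
    STAT_CRANK(cpu_pick);     /* what csched_cpu_pick() now does on entry */
    STAT_CRANK(cpu_pick);
    STAT_LIST(STAT_PRINT)     /* dump every counter without retyping the list */
    return 0;
}

Under this kind of scheme, adding a counter needs only the one-line _MACRO(cpu_pick) entry plus a CSCHED_STAT_CRANK(cpu_pick) at the point being measured, which is exactly what the first two hunks do.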