None of the callers actually make use of the old affinity that the
function returns through its second parameter. Eliminating this
capability allows the parameter to be constified, so callers can pass
cpumask_of() directly and several of them can drop a local cpumask_t
variable altogether, significantly reducing their stack footprint when
building with large NR_CPUS.
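As a standalone illustration (not part of the patch), the sketch below
contrasts the removed pattern, which builds a full cpumask_t on the
stack, with the cpumask_of() one. The cpumask_t layout, BITS_PER_LONG,
the simplified cpumask_of(), and set_affinity() are stand-ins assumed
for the example; Xen's actual cpumask_of() derives a const pointer from
a static read-only table, which is also why vcpu_set_affinity()'s
parameter can become const.

/* Illustrative sketch (not Xen code): why cpumask_of() saves stack.
 * With NR_CPUS = 4096, a cpumask_t occupies 512 bytes, so naming a
 * single CPU via an on-stack mask is expensive; cpumask_of() hands
 * back a pointer to mask data instead. */
#include <stdio.h>
#include <string.h>

#define NR_CPUS 4096
#define BITS_PER_LONG (8 * sizeof(unsigned long))

typedef struct {
    unsigned long bits[NR_CPUS / (8 * sizeof(unsigned long))];
} cpumask_t;

/* Hypothetical stand-in for Xen's cpumask_of(): return a mask with
 * only 'cpu' set.  A static scratch mask keeps the sketch short; the
 * real implementation needs no writable storage at all. */
static const cpumask_t *cpumask_of(unsigned int cpu)
{
    static cpumask_t m;

    memset(&m, 0, sizeof(m));
    m.bits[cpu / BITS_PER_LONG] |= 1UL << (cpu % BITS_PER_LONG);
    return &m;
}

/* Post-patch shape: const parameter, no old value handed back. */
static void set_affinity(const cpumask_t *affinity)
{
    printf("mask word 0: %#lx\n", affinity->bits[0]);
}

int main(void)
{
    cpumask_t affinity;              /* pre-patch: 512 bytes of stack */

    memset(&affinity, 0, sizeof(affinity));
    affinity.bits[3 / BITS_PER_LONG] |= 1UL << (3 % BITS_PER_LONG);
    set_affinity(&affinity);

    set_affinity(cpumask_of(3));     /* post-patch: no local mask */
    return 0;
}

Callers that still need the old affinity snapshot it themselves before
the call, as inject_vmce() does with cpu_affinity_tmp below.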
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -304,7 +304,6 @@ int vmce_wrmsr(u32 msr, u64 val)
 int inject_vmce(struct domain *d)
 {
     int cpu = smp_processor_id();
-    cpumask_t affinity;
 
     /* PV guest and HVM guest have different vMCE# injection methods. */
     if ( !test_and_set_bool(d->vcpu[0]->mce_pending) )
@@ -323,11 +322,9 @@ int inject_vmce(struct domain *d)
             {
                 cpumask_copy(d->vcpu[0]->cpu_affinity_tmp,
                              d->vcpu[0]->cpu_affinity);
-                cpus_clear(affinity);
-                cpu_set(cpu, affinity);
                 mce_printk(MCE_VERBOSE, "MCE: CPU%d set affinity, old %d\n",
                            cpu, d->vcpu[0]->processor);
-                vcpu_set_affinity(d->vcpu[0], &affinity);
+                vcpu_set_affinity(d->vcpu[0], cpumask_of(cpu));
                 vcpu_kick(d->vcpu[0]);
             }
             else
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -3113,7 +3113,6 @@ static void nmi_mce_softirq(void)
 {
     int cpu = smp_processor_id();
     struct softirq_trap *st = &per_cpu(softirq_trap, cpu);
-    cpumask_t affinity;
 
     BUG_ON(st == NULL);
     BUG_ON(st->vcpu == NULL);
@@ -3129,9 +3128,7 @@ static void nmi_mce_softirq(void)
          * Make sure to wakeup the vcpu on the
          * specified processor.
          */
-        cpus_clear(affinity);
-        cpu_set(st->processor, affinity);
-        vcpu_set_affinity(st->vcpu, &affinity);
+        vcpu_set_affinity(st->vcpu, cpumask_of(st->processor));
 
         /* Affinity is restored in the iret hypercall. */
     }
@@ -3201,14 +3198,11 @@ void async_exception_cleanup(struct vcpu
                  !test_and_set_bool(curr->mce_pending) )
             {
                 int cpu = smp_processor_id();
-                cpumask_t affinity;
 
                 cpumask_copy(curr->cpu_affinity_tmp, curr->cpu_affinity);
-                cpus_clear(affinity);
-                cpu_set(cpu, affinity);
                 printk(XENLOG_DEBUG "MCE: CPU%d set affinity, old %d\n",
                        cpu, curr->processor);
-                vcpu_set_affinity(curr, &affinity);
+                vcpu_set_affinity(curr, cpumask_of(cpu));
             }
         }
     }
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -587,9 +587,9 @@ int cpu_disable_scheduler(unsigned int c
     return ret;
 }
 
-int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity)
+int vcpu_set_affinity(struct vcpu *v, const cpumask_t *affinity)
 {
-    cpumask_t online_affinity, old_affinity;
+    cpumask_t online_affinity;
     cpumask_t *online;
 
     if ( v->domain->is_pinned )
@@ -601,9 +601,7 @@ int vcpu_set_affinity(struct vcpu *v, cp
 
     vcpu_schedule_lock_irq(v);
 
-    cpumask_copy(&old_affinity, v->cpu_affinity);
     cpumask_copy(v->cpu_affinity, affinity);
-    cpumask_copy(affinity, &old_affinity);
 
     if ( !cpumask_test_cpu(v->processor, v->cpu_affinity) )
         set_bit(_VPF_migrating, &v->pause_flags);
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -617,7 +617,7 @@ void scheduler_free(struct scheduler *sc
 int schedule_cpu_switch(unsigned int cpu, struct cpupool *c);
 void vcpu_force_reschedule(struct vcpu *v);
 int cpu_disable_scheduler(unsigned int cpu);
-int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity);
+int vcpu_set_affinity(struct vcpu *v, const cpumask_t *affinity);
 
 void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);
 uint64_t get_cpu_idle_time(unsigned int cpu);