# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1201516607 0
# Node ID 3bf41a61a0347b42419a4047f2fcd40fb41df570
# Parent 36a8e6763f3af8938f3c5c60e67cedd8a7ef0120
Introduce new vcpu_lock_affinity() and vcpu_unlock_affinity() helper
functions for use by x86's continue_hypercall_on_cpu().
This has two advantages:
1. We can lock out ordinary vcpu_set_affinity() commands from dom0.
2. We avoid the (in this case bogus) check for dom0_vcpus_pin.
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
xen-unstable changeset: 16896:7327e1c2a42ced6b1b473e0e738b12e141fb0584
xen-unstable date: Sat Jan 26 11:25:48 2008 +0000
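For illustration, a minimal sketch of how the new pair is intended to be
used. run_on_cpu() below is a hypothetical wrapper, not code from the
tree; the real caller is continue_hypercall_on_cpu(), which defers func()
to the schedule tail via a migrate_info rather than invoking it inline.

/*
 * Hypothetical usage sketch (assumptions: Xen 3.2-era cpumask API,
 * synchronous call for brevity).  vcpu_lock_affinity() pins the vcpu
 * without consulting dom0_vcpus_pin, and while the lock is held any
 * ordinary vcpu_set_affinity() request from dom0 fails with -EBUSY.
 */
static long run_on_cpu(struct vcpu *v, int cpu, long (*func)(void *), void *data)
{
    cpumask_t mask = cpumask_of_cpu(cpu);
    long ret;
    int rc;

    rc = vcpu_lock_affinity(v, &mask);  /* 'mask' returns the old affinity */
    if ( rc != 0 )
        return rc;

    ret = func(data);                   /* really run from the schedule tail */

    vcpu_unlock_affinity(v, &mask);     /* restore old affinity, drop lock */

    return ret;
}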
---
 xen/arch/x86/domain.c   |    9 ++++-----
 xen/common/schedule.c   |   47 +++++++++++++++++++++++++++++++++++++++++------
 xen/include/xen/sched.h |    4 ++++
 3 files changed, 49 insertions(+), 11 deletions(-)
diff -r 36a8e6763f3a -r 3bf41a61a034 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c Mon Jan 28 10:36:11 2008 +0000
+++ b/xen/arch/x86/domain.c Mon Jan 28 10:36:47 2008 +0000
@@ -1416,16 +1416,16 @@ static void continue_hypercall_on_cpu_he
 {
     struct cpu_user_regs *regs = guest_cpu_user_regs();
     struct migrate_info *info = v->arch.continue_info;
+    cpumask_t mask = info->saved_affinity;
 
     regs->eax = info->func(info->data);
 
     v->arch.schedule_tail = info->saved_schedule_tail;
-    v->cpu_affinity = info->saved_affinity;
     v->arch.continue_info = NULL;
 
     xfree(info);
 
-    vcpu_set_affinity(v, &v->cpu_affinity);
+    vcpu_unlock_affinity(v, &mask);
 
     schedule_tail(v);
 }
@@ -1433,7 +1433,6 @@ int continue_hypercall_on_cpu(int cpu, l
 {
     struct vcpu *v = current;
     struct migrate_info *info;
-    cpumask_t mask = cpumask_of_cpu(cpu);
     int rc;
 
     if ( cpu == smp_processor_id() )
@@ -1446,12 +1445,12 @@ int continue_hypercall_on_cpu(int cpu, l
     info->func = func;
     info->data = data;
     info->saved_schedule_tail = v->arch.schedule_tail;
-    info->saved_affinity = v->cpu_affinity;
+    info->saved_affinity = cpumask_of_cpu(cpu);
 
     v->arch.schedule_tail = continue_hypercall_on_cpu_helper;
     v->arch.continue_info = info;
 
-    rc = vcpu_set_affinity(v, &mask);
+    rc = vcpu_lock_affinity(v, &info->saved_affinity);
     if ( rc )
     {
         v->arch.schedule_tail = info->saved_schedule_tail;
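A detail worth noting in the hunks above: the cpumask handed to
vcpu_lock_affinity() is in/out. continue_hypercall_on_cpu() now seeds
info->saved_affinity with cpumask_of_cpu(cpu); on success the call writes
the vcpu's previous affinity back into that same field, and
continue_hypercall_on_cpu_helper() later passes the saved mask to
vcpu_unlock_affinity() to restore it. A condensed, illustrative view of
that flow (not literal code from the tree):

static void saved_affinity_flow(struct vcpu *v, struct migrate_info *info, int cpu)
{
    cpumask_t mask;

    info->saved_affinity = cpumask_of_cpu(cpu);           /* in : target CPU   */
    if ( vcpu_lock_affinity(v, &info->saved_affinity) )   /* out: old affinity */
        return;

    /* ... the continuation runs on 'cpu' ... */

    mask = info->saved_affinity;                          /* the old affinity  */
    vcpu_unlock_affinity(v, &mask);                       /* restore + unlock  */
}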
diff -r 36a8e6763f3a -r 3bf41a61a034 xen/common/schedule.c
--- a/xen/common/schedule.c Mon Jan 28 10:36:11 2008 +0000
+++ b/xen/common/schedule.c Mon Jan 28 10:36:47 2008 +0000
@@ -262,12 +262,11 @@ void vcpu_force_reschedule(struct vcpu *
     }
 }
 
-int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity)
-{
-    cpumask_t online_affinity;
-
-    if ( (v->domain->domain_id == 0) && opt_dom0_vcpus_pin )
-        return -EINVAL;
+static int __vcpu_set_affinity(
+    struct vcpu *v, cpumask_t *affinity,
+    bool_t old_lock_status, bool_t new_lock_status)
+{
+    cpumask_t online_affinity, old_affinity;
 
     cpus_and(online_affinity, *affinity, cpu_online_map);
     if ( cpus_empty(online_affinity) )
@@ -275,7 +274,18 @@ int vcpu_set_affinity(struct vcpu *v, cp
 
     vcpu_schedule_lock_irq(v);
 
+    if ( v->affinity_locked != old_lock_status )
+    {
+        BUG_ON(!v->affinity_locked);
+        vcpu_schedule_unlock_irq(v);
+        return -EBUSY;
+    }
+
+    v->affinity_locked = new_lock_status;
+
+    old_affinity = v->cpu_affinity;
     v->cpu_affinity = *affinity;
+    *affinity = old_affinity;
 
     if ( !cpu_isset(v->processor, v->cpu_affinity) )
         set_bit(_VPF_migrating, &v->pause_flags);
@@ -288,6 +298,31 @@ int vcpu_set_affinity(struct vcpu *v, cp
     }
 
     return 0;
+}
+
+int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity)
+{
+    if ( (v->domain->domain_id == 0) && opt_dom0_vcpus_pin )
+        return -EINVAL;
+    return __vcpu_set_affinity(v, affinity, 0, 0);
+}
+
+int vcpu_lock_affinity(struct vcpu *v, cpumask_t *affinity)
+{
+    return __vcpu_set_affinity(v, affinity, 0, 1);
+}
+
+void vcpu_unlock_affinity(struct vcpu *v, cpumask_t *affinity)
+{
+    cpumask_t online_affinity;
+
+    /* Do not fail if no CPU in old affinity mask is online. */
+    cpus_and(online_affinity, *affinity, cpu_online_map);
+    if ( cpus_empty(online_affinity) )
+        *affinity = cpu_online_map;
+
+    if ( __vcpu_set_affinity(v, affinity, 1, 0) != 0 )
+        BUG();
 }
 
 /* Block the currently-executing domain until a pertinent event occurs. */
diff -r 36a8e6763f3a -r 3bf41a61a034 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h Mon Jan 28 10:36:11 2008 +0000
+++ b/xen/include/xen/sched.h Mon Jan 28 10:36:47 2008 +0000
@@ -122,6 +122,8 @@ struct vcpu
     bool_t           defer_shutdown;
     /* VCPU is paused following shutdown request (d->is_shutting_down)? */
     bool_t           paused_for_shutdown;
+    /* VCPU affinity is temporarily locked from controller changes? */
+    bool_t           affinity_locked;
 
     unsigned long    pause_flags;
     atomic_t         pause_count;
@@ -483,6 +485,8 @@ void cpu_init(void);
 
 void vcpu_force_reschedule(struct vcpu *v);
 int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity);
+int vcpu_lock_affinity(struct vcpu *v, cpumask_t *affinity);
+void vcpu_unlock_affinity(struct vcpu *v, cpumask_t *affinity);
 
 void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);
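The new affinity_locked flag turns __vcpu_set_affinity() into a small
state machine: vcpu_set_affinity() requires the flag to be clear and
leaves it clear, vcpu_lock_affinity() requires clear and sets it, and
vcpu_unlock_affinity() requires set and clears it. A mismatch returns
-EBUSY (in practice, a dom0 affinity change arriving while a continuation
holds the lock), with BUG_ON() catching the impossible case of the flag
being clear when it was expected held. The toy program below (plain
user-space C with assumed names, not Xen code) just exercises those
transitions:

#include <errno.h>
#include <stdio.h>

static int affinity_locked;                /* models v->affinity_locked */

/* Reduced model of __vcpu_set_affinity(): only the lock-status check. */
static int set_affinity(int old_lock_status, int new_lock_status)
{
    if ( affinity_locked != old_lock_status )
        return -EBUSY;                     /* e.g. dom0 set while locked */
    affinity_locked = new_lock_status;
    return 0;
}

int main(void)
{
    printf("set    -> %d\n", set_affinity(0, 0));  /* 0: allowed */
    printf("lock   -> %d\n", set_affinity(0, 1));  /* 0: lock taken */
    printf("set    -> %d\n", set_affinity(0, 0));  /* -EBUSY: locked out */
    printf("unlock -> %d\n", set_affinity(1, 0));  /* 0: lock released */
    printf("set    -> %d\n", set_affinity(0, 0));  /* 0: allowed again */
    return 0;
}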