# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 3c84ce41d184e6e28b2c36b5b5af89719f456933
# Parent 82eafda1c710918f7b1b18aa658323cc75cf3967
Change the context-switch interface. Get rid of
context_switch_finalise(). Instead provide a back-call,
context_switch_done(), for situations where the arch-specific
context_switch() function does not return to the caller, or
needs to perform parts of state restoration with interrupts
enabled.

Get rid of an ugly hack in arch/ia64.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
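For readers unfamiliar with the new interface, a minimal sketch of the
intended arch-side usage (illustrative only; save_state() and
load_state() are hypothetical helpers, not part of this patch):

    /* Sketch: an arch-specific context_switch() that cannot finish all
     * restoration with interrupts disabled, or may not return at all.
     * Only context_switch()/context_switch_done() are real interfaces
     * here; the helpers are placeholders. */
    void context_switch(struct vcpu *prev, struct vcpu *next)
    {
        save_state(prev);       /* hypothetical: commit prev to memory */
        context_switch_done();  /* back-call: drops the scheduler lock
                                   and re-enables interrupts */
        load_state(next);       /* hypothetical: safe with irqs enabled */
        /* If this path never returns (e.g. when starting a fresh vcpu),
         * the back-call above is mandatory; otherwise common code will
         * issue it on the arch's behalf. */
    }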
diff -r 82eafda1c710 -r 3c84ce41d184 xen/arch/ia64/xen/process.c
--- a/xen/arch/ia64/xen/process.c Fri Jan 6 16:45:31 2006
+++ b/xen/arch/ia64/xen/process.c Fri Jan 6 17:14:29 2006
@@ -71,12 +71,10 @@
//printk("current=%lx,shared_info=%lx\n",current,current->vcpu_info);
//printk("next=%lx,shared_info=%lx\n",next,next->vcpu_info);
- // TG: Real HACK FIXME.
- // This is currently necessary because when a new domain is started,
- // the context_switch function of xen/common/schedule.c(__enter_scheduler)
- // never returns. Therefore, the lock must be released.
- // schedule_tail is only called when a domain is started.
- spin_unlock_irq(&schedule_data[current->processor].schedule_lock);
+ // This is necessary because when a new domain is started, our
+ // implementation of context_switch() does not return (switch_to() has
+ // special and peculiar behaviour in this case).
+ context_switch_done();
/* rr7 will be postponed to last point when resuming back to guest */
if(VMX_DOMAIN(current)){
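(Aside, not part of the patch: the hunk above sits on the path taken
when a newly created domain first runs, so the back-call replaces the
manual unlock there. A sketch of that shape, with everything except
context_switch_done() illustrative:)

    /* Sketch: first code run by a newly started vcpu on a port whose
     * context_switch() does not return to the common scheduler. */
    void schedule_tail(struct vcpu *next)
    {
        context_switch_done();  /* release schedule_lock, enable irqs */
        /* ... finish restoring guest state with interrupts enabled ... */
    }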
diff -r 82eafda1c710 -r 3c84ce41d184 xen/arch/ia64/xen/xenmisc.c
--- a/xen/arch/ia64/xen/xenmisc.c Fri Jan 6 16:45:31 2006
+++ b/xen/arch/ia64/xen/xenmisc.c Fri Jan 6 17:14:29 2006
@@ -327,11 +327,6 @@
}
if (vcpu_timer_expired(current)) vcpu_pend_timer(current);
}
-}
-
-void context_switch_finalise(struct vcpu *next)
-{
- /* nothing to do */
}
void continue_running(struct vcpu *same)
diff -r 82eafda1c710 -r 3c84ce41d184 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c Fri Jan 6 16:45:31 2006
+++ b/xen/arch/x86/domain.c Fri Jan 6 17:14:29 2006
@@ -46,7 +46,6 @@
struct percpu_ctxt {
struct vcpu *curr_vcpu;
- unsigned int context_not_finalised;
unsigned int dirty_segment_mask;
} __cacheline_aligned;
static struct percpu_ctxt percpu_ctxt[NR_CPUS];
@@ -758,21 +757,9 @@
!is_idle_domain(next->domain) )
{
__context_switch();
- percpu_ctxt[cpu].context_not_finalised = 1;
- }
-}
-
-void context_switch_finalise(struct vcpu *next)
-{
- unsigned int cpu = smp_processor_id();
-
- ASSERT(local_irq_is_enabled());
-
- if ( percpu_ctxt[cpu].context_not_finalised )
- {
- percpu_ctxt[cpu].context_not_finalised = 0;
-
- BUG_ON(percpu_ctxt[cpu].curr_vcpu != next);
+
+ context_switch_done();
+ ASSERT(local_irq_is_enabled());
if ( VMX_DOMAIN(next) )
{
@@ -784,6 +771,10 @@
load_segments(next);
vmx_load_msrs(next);
}
+ }
+ else
+ {
+ context_switch_done();
}
schedule_tail(next);
diff -r 82eafda1c710 -r 3c84ce41d184 xen/common/schedule.c
--- a/xen/common/schedule.c Fri Jan 6 16:45:31 2006
+++ b/xen/common/schedule.c Fri Jan 6 17:14:29 2006
@@ -474,11 +474,18 @@
prev->domain->domain_id, prev->vcpu_id,
next->domain->domain_id, next->vcpu_id);
+ schedule_data[cpu].context_switch_in_progress = 1;
context_switch(prev, next);
-
+ if ( schedule_data[cpu].context_switch_in_progress )
+ context_switch_done();
+}
+
+void context_switch_done(void)
+{
+ unsigned int cpu = smp_processor_id();
+ ASSERT(schedule_data[cpu].context_switch_in_progress);
spin_unlock_irq(&schedule_data[cpu].schedule_lock);
-
- context_switch_finalise(next);
+ schedule_data[cpu].context_switch_in_progress = 0;
}
/* No locking needed -- pointer comparison is safe :-) */
diff -r 82eafda1c710 -r 3c84ce41d184 xen/include/xen/sched-if.h
--- a/xen/include/xen/sched-if.h Fri Jan 6 16:45:31 2006
+++ b/xen/include/xen/sched-if.h Fri Jan 6 17:14:29 2006
@@ -13,11 +13,12 @@
struct schedule_data {
spinlock_t schedule_lock; /* spinlock protecting curr */
- struct vcpu *curr; /* current task */
- struct vcpu *idle; /* idle task for this cpu */
+ struct vcpu *curr; /* current task */
+ struct vcpu *idle; /* idle task for this cpu */
void *sched_priv;
struct ac_timer s_timer; /* scheduling timer */
unsigned long tick; /* current periodic 'tick' */
+ int context_switch_in_progress;
#ifdef BUCKETS
u32 hist[BUCKETS]; /* for scheduler latency histogram */
#endif
diff -r 82eafda1c710 -r 3c84ce41d184 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h Fri Jan 6 16:45:31 2006
+++ b/xen/include/xen/sched.h Fri Jan 6 17:14:29 2006
@@ -287,13 +287,17 @@
struct vcpu *next);
/*
- * On some architectures (notably x86) it is not possible to entirely load
- * @next's context with interrupts disabled. These may implement a function to
- * finalise loading the new context after interrupts are re-enabled. This
- * function is not given @prev and is not permitted to access it.
- */
-extern void context_switch_finalise(
- struct vcpu *next);
+ * If context_switch() does not return to the caller, or you need to perform
+ * some aspects of state restoration with interrupts enabled, then you must
+ * call context_switch_done() at a suitable safe point.
+ *
+ * As when returning from context_switch(), the caller must ensure that the
+ * local CPU is no longer running in the previous VCPU's context, and that the
+ * context is saved to memory. Alternatively, if implementing lazy context
+ * switching, ensure that invoking sync_vcpu_execstate() will switch and
+ * commit the previous VCPU's state.
+ */
+extern void context_switch_done(void);
/* Called by the scheduler to continue running the current VCPU. */
extern void continue_running(
    struct vcpu *same);
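Taken together, the common-code protocol now reads roughly as follows
(condensed from the schedule.c hunk above; a sketch, not a verbatim
extract):

    /* In the scheduler, around the call into arch code: */
    schedule_data[cpu].context_switch_in_progress = 1;
    context_switch(prev, next);
    if ( schedule_data[cpu].context_switch_in_progress )
        context_switch_done();  /* arch returned without the back-call */

    /* The back-call itself, issued exactly once per switch: */
    void context_switch_done(void)
    {
        unsigned int cpu = smp_processor_id();
        ASSERT(schedule_data[cpu].context_switch_in_progress);
        spin_unlock_irq(&schedule_data[cpu].schedule_lock);
        schedule_data[cpu].context_switch_in_progress = 0;
    }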