[Xen-devel] [PATCH] x86/S3: Fix cpu pool scheduling after suspend/resume (v2)

This patch fixes another S3 scheduler problem with the system_state
variable introduced by the following changeset:
http://xenbits.xen.org/gitweb/?p=xen.git;a=commit;h=269f543ea750ed567d18f2e819e5d5ce58eda5c5

Specifically, the problem is in the cpu_callback function that takes the
CPU down during suspend, and back up during resume. We were seeing
situations where, after S3, only CPU0 was in cpupool0. Guest performance
suffered greatly, since all vcpus were confined to a single pcpu. Guests
under high CPU load showed the problem much more quickly than idle
guests.

Removing the early-exit if condition in cpu_callback forces the CPUs to
go through the expected online/offline states, so they are properly
scheduled after S3.

This also includes a necessary partial change proposed earlier by
Tomasz Wroblewski here:
http://lists.xen.org/archives/html/xen-devel/2013-01/msg02206.html

It should also resolve the issues discussed in this thread:
http://lists.xen.org/archives/html/xen-devel/2012-11/msg01801.html

v2: Address concerns from Juergen Gross about the cpus not being put
back into the pool they were in prior to suspend.

Signed-off-by: Ben Guthro <benjamin.guthro@xxxxxxxxxx>
---
 xen/common/cpupool.c       | 39 +++++++++++++++++++++++++++++++--------
 xen/include/xen/sched-if.h |  1 +
 2 files changed, 32 insertions(+), 8 deletions(-)
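To illustrate the approach (this is not part of the patch, and not Xen
code): on suspend, each pool records its cpus in the new cpu_suspended
mask; on resume, cpupool_cpu_add() returns each cpu to the pool that
recorded it, and only cpus nobody claims fall back to cpupool0. The
minimal, self-contained user-space C sketch below simulates that
record-and-restore pattern with plain bitmasks; all names in it are
illustrative. (In the patch itself, for_each_cpupool walks the pool
list through a pointer-to-pointer cursor, which is why the loop bodies
dereference (*c).)

/* Standalone sketch of the bookkeeping this patch adds (illustrative
 * names only): pools remember their cpus across a simulated suspend,
 * and only unclaimed cpus fall back to pool 0 on resume. */
#include <stdio.h>

#define NR_CPUS   8
#define NR_POOLS  2

struct pool {
    unsigned int cpu_valid;      /* bit n set: cpu n is in this pool */
    unsigned int cpu_suspended;  /* bit n set: cpu n was here pre-S3 */
};

static struct pool pools[NR_POOLS];

static void suspend_cpu(unsigned int cpu)
{
    /* Mirrors cpu_callback(): remember which pool the cpu was in,
     * then take it offline (clear it from the pool). */
    for (int p = 0; p < NR_POOLS; p++)
        if (pools[p].cpu_valid & (1u << cpu)) {
            pools[p].cpu_suspended |= 1u << cpu;
            pools[p].cpu_valid &= ~(1u << cpu);
        }
}

static void resume_cpu(unsigned int cpu)
{
    /* Mirrors cpupool_cpu_add(): prefer the recorded pool; a cpu no
     * pool claims goes to pool 0, as for a hotplugged cpu. */
    for (int p = 0; p < NR_POOLS; p++)
        if (pools[p].cpu_suspended & (1u << cpu)) {
            pools[p].cpu_valid |= 1u << cpu;
            pools[p].cpu_suspended &= ~(1u << cpu);
            return;
        }
    pools[0].cpu_valid |= 1u << cpu;
}

int main(void)
{
    pools[0].cpu_valid = 0x0f;  /* cpus 0-3 in pool 0 */
    pools[1].cpu_valid = 0xf0;  /* cpus 4-7 in pool 1 */

    for (unsigned int cpu = 1; cpu < NR_CPUS; cpu++)  /* cpu0 stays up */
        suspend_cpu(cpu);
    for (unsigned int cpu = 1; cpu < NR_CPUS; cpu++)
        resume_cpu(cpu);

    for (int p = 0; p < NR_POOLS; p++)
        printf("pool %d: cpu_valid = 0x%02x\n", p, pools[p].cpu_valid);
    /* Prints 0x0f / 0xf0: both pools get their cpus back. Without the
     * cpu_suspended mask, every resumed cpu would land in pool 0,
     * which is exactly the post-S3 symptom described above. */
    return 0;
}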
diff --git a/xen/common/cpupool.c b/xen/common/cpupool.c
index 10b10f8..4cc2bf9 100644
--- a/xen/common/cpupool.c
+++ b/xen/common/cpupool.c
@@ -41,7 +41,9 @@ static struct cpupool *alloc_cpupool_struct(void)
 {
     struct cpupool *c = xzalloc(struct cpupool);
 
-    if ( c && zalloc_cpumask_var(&c->cpu_valid) )
+    if ( c &&
+         zalloc_cpumask_var(&c->cpu_suspended) &&
+         zalloc_cpumask_var(&c->cpu_valid) )
         return c;
     xfree(c);
     return NULL;
@@ -49,8 +51,10 @@ static struct cpupool *alloc_cpupool_struct(void)
 
 static void free_cpupool_struct(struct cpupool *c)
 {
-    if ( c )
+    if ( c ) {
+        free_cpumask_var(c->cpu_suspended);
         free_cpumask_var(c->cpu_valid);
+    }
     xfree(c);
 }
 
@@ -417,14 +421,32 @@ void cpupool_rm_domain(struct domain *d)
 
 /*
  * called to add a new cpu to pool admin
- * we add a hotplugged cpu to the cpupool0 to be able to add it to dom0
+ * we add a hotplugged cpu to the cpupool0 to be able to add it to dom0,
+ * unless we are resuming from S3, in which case we put the cpu back
+ * in the cpupool it was in prior to suspend.
  */
 static void cpupool_cpu_add(unsigned int cpu)
 {
+    struct cpupool **c;
     spin_lock(&cpupool_lock);
+
     cpumask_clear_cpu(cpu, &cpupool_locked_cpus);
     cpumask_set_cpu(cpu, &cpupool_free_cpus);
-    cpupool_assign_cpu_locked(cpupool0, cpu);
+
+    if ( system_state == SYS_STATE_resume )
+    {
+        for_each_cpupool(c)
+        {
+            if ( cpumask_test_cpu(cpu, (*c)->cpu_suspended ) )
+            {
+                cpupool_assign_cpu_locked((*c), cpu);
+                cpumask_clear_cpu(cpu, (*c)->cpu_suspended);
+            }
+        }
+    }
+
+    if ( cpumask_test_cpu(cpu, &cpupool_free_cpus) )
+        cpupool_assign_cpu_locked(cpupool0, cpu);
     spin_unlock(&cpupool_lock);
 }
 
@@ -630,12 +652,14 @@ void dump_runq(unsigned char key)
 static int cpu_callback(
     struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
+    struct cpupool **c;
     unsigned int cpu = (unsigned long)hcpu;
     int rc = 0;
 
-    if ( (system_state == SYS_STATE_suspend) ||
-         (system_state == SYS_STATE_resume) )
-        goto out;
+    if ( system_state == SYS_STATE_suspend )
+        for_each_cpupool(c)
+            if ( cpumask_test_cpu(cpu, (*c)->cpu_valid ) )
+                cpumask_set_cpu(cpu, (*c)->cpu_suspended);
 
     switch ( action )
     {
@@ -650,7 +674,6 @@
         break;
     }
 
-out:
     return !rc ? NOTIFY_DONE : notifier_from_errno(rc);
 }
 
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 9ace22c..84bb13f 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -201,6 +201,7 @@ struct cpupool
 {
     int              cpupool_id;
     cpumask_var_t    cpu_valid;      /* all cpus assigned to pool */
+    cpumask_var_t    cpu_suspended;  /* cpus in S3 that should be in this pool */
     struct cpupool   *next;
     unsigned int     n_dom;
     struct scheduler *sched;
-- 
1.7.9.5