# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 5a97ee0633e8e48c3d86fc007b8e0ea80a0581c9
# Parent 92c6021f23e4bb7fe1254f21d6a24d07d62f0e54
Clean up CPU hotplug and save/restore. The next step is to
simplify SMP initial boot; then it should be easy to merge the
i386 and x86_64 smpboot.c.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
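For orientation, here is the shape of the reworked i386 hotplug path,
condensed from the smpboot.c hunks below. This is an illustrative sketch
only: the names and calls are those used in the patch, with the error
checks and the CONFIG_SMP_ALTERNATIVES hooks elided.

/* Bring a secondary vcpu online: bind its IPIs and timer, mark it
 * commenced/online, then ask Xen to run it. */
int __cpu_up(unsigned int cpu)
{
	xen_smp_intr_init(cpu);
	cpu_set(cpu, smp_commenced_mask);
	cpu_set(cpu, cpu_online_map);
	HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
	return 0;
}

/* Tear a vcpu down: wait until Xen reports it as no longer up, then
 * unbind its IPIs and timer. The old fake play_dead()/cpu_restore()
 * path is gone. */
void __cpu_die(unsigned int cpu)
{
	while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
		current->state = TASK_UNINTERRUPTIBLE;
		schedule_timeout(HZ/10);
	}
	xen_smp_intr_exit(cpu);
}

The same explicit-cpu convention carries through bind_virq_to_irq() /
bind_ipi_to_irq() and their unbind counterparts in evtchn.c, time.c,
console.c and netback.c below.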
diff -r 92c6021f23e4 -r 5a97ee0633e8 linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c Thu Oct 13 14:26:44 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c Thu Oct 13 16:08:59 2005
@@ -112,44 +112,6 @@
}
}
-#ifdef CONFIG_HOTPLUG_CPU
-#include <asm/nmi.h>
-#ifdef CONFIG_SMP
-extern void smp_suspend(void);
-extern void smp_resume(void);
-#endif
-/* We don't actually take CPU down, just spin without interrupts. */
-static inline void play_dead(void)
-{
- /* Death loop */
- while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
- HYPERVISOR_sched_op(SCHEDOP_yield, 0);
-
- __flush_tlb_all();
- /*
- * Restore IPI/IRQ mappings before marking online to prevent
- * race between pending interrupts and restoration of handler.
- */
-#ifdef CONFIG_SMP
- local_irq_enable(); /* XXX Needed for smp_resume(). Clean me up. */
- smp_resume();
-#endif
- cpu_set(smp_processor_id(), cpu_online_map);
-}
-#else
-static inline void play_dead(void)
-{
- BUG();
-}
-#endif /* CONFIG_HOTPLUG_CPU */
-
-void cpu_restore(void)
-{
- play_dead();
- local_irq_enable();
- cpu_idle();
-}
-
/*
* The idle thread. There's no useful work to be
* done, so just try to conserve power and have a
@@ -158,7 +120,9 @@
*/
void cpu_idle (void)
{
+#if defined(CONFIG_HOTPLUG_CPU)
int cpu = _smp_processor_id();
+#endif
/* endless idle loop with no priority at all */
while (1) {
@@ -168,23 +132,12 @@
__get_cpu_var(cpu_idle_state) = 0;
rmb();
+#if defined(CONFIG_HOTPLUG_CPU)
if (cpu_is_offline(cpu)) {
- local_irq_disable();
-#ifdef CONFIG_SMP
- smp_suspend();
-#endif
-#if defined(CONFIG_XEN) && defined(CONFIG_HOTPLUG_CPU)
- /* Ack it. From this point on until
- we get woken up, we're not allowed
- to take any locks. In particular,
- don't printk. */
- __get_cpu_var(cpu_state) = CPU_DEAD;
- /* Tell hypervisor to take vcpu down. */
HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
-#endif
- play_dead();
local_irq_enable();
}
+#endif
__get_cpu_var(irq_stat).idle_timestamp = jiffies;
xen_idle();
diff -r 92c6021f23e4 -r 5a97ee0633e8 linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c Thu Oct 13 14:26:44 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c Thu Oct 13 16:08:59 2005
@@ -66,6 +66,9 @@
#include <asm-xen/xen-public/vcpu.h>
#include <asm-xen/xenbus.h>
+static void xen_smp_intr_init(unsigned int cpu);
+static void xen_smp_intr_exit(unsigned int cpu);
+
/* Set if we find a B stepping CPU */
static int __initdata smp_b_stepping;
@@ -352,9 +355,9 @@
static void __init smp_callin(void)
{
int cpuid, phys_id;
+#if 0
unsigned long timeout;
-#if 0
/*
* If waken up by an INIT in an 82489DX configuration
* we may get here before an INIT-deassert IPI reaches
@@ -376,6 +379,7 @@
}
Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);
+#if 0
/*
* STARTUP IPIs are fragile beasts as they might sometimes
* trigger some glue motherboard logic. Complete APIC bus
@@ -403,7 +407,6 @@
BUG();
}
-#if 0
/*
* the boot CPU has finished the init stage and is spinning
* on callin_map until we finish. We are free to set up this
@@ -448,8 +451,6 @@
static int cpucount;
-extern void local_setup_timer(void);
-
/*
* Activate a secondary processor.
*/
@@ -464,8 +465,6 @@
smp_callin();
while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
rep_nop();
- local_setup_timer();
- smp_intr_init();
local_irq_enable();
/*
* low-memory mappings have been cleared, flush them from
@@ -1133,7 +1132,7 @@
return;
}
- smp_intr_init();
+ xen_smp_intr_init(0);
#if 0
connect_bsp_APIC();
@@ -1340,29 +1339,6 @@
subsys_initcall(setup_vcpu_hotplug_event);
-/* must be called with the cpucontrol mutex held */
-static int __devinit cpu_enable(unsigned int cpu)
-{
-#ifdef CONFIG_SMP_ALTERNATIVES
- if (num_online_cpus() == 1)
- prepare_for_smp();
-#endif
-
- /* get the target out of its holding state */
- per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
- wmb();
-
- /* wait for the processor to ack it. timeout? */
- while (!cpu_online(cpu))
- cpu_relax();
-
- fixup_irqs(cpu_online_map);
-
- /* counter the disable in fixup_irqs() */
- local_irq_enable();
- return 0;
-}
-
int __cpu_disable(void)
{
cpumask_t map = cpu_online_map;
@@ -1385,27 +1361,22 @@
/* It's now safe to remove this processor from the online map */
cpu_clear(cpu, cpu_online_map);
+ return 0;
+}
+
+void __cpu_die(unsigned int cpu)
+{
+ while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
+ current->state = TASK_UNINTERRUPTIBLE;
+ schedule_timeout(HZ/10);
+ }
+
+ xen_smp_intr_exit(cpu);
+
#ifdef CONFIG_SMP_ALTERNATIVES
if (num_online_cpus() == 1)
unprepare_for_smp();
#endif
-
- return 0;
-}
-
-void __cpu_die(unsigned int cpu)
-{
- /* We don't do anything here: idle task is faking death itself. */
- unsigned int i;
-
- for (i = 0; i < 10; i++) {
- /* They ack this in play_dead by setting CPU_DEAD */
- if (per_cpu(cpu_state, cpu) == CPU_DEAD)
- return;
- current->state = TASK_UNINTERRUPTIBLE;
- schedule_timeout(HZ/10);
- }
- printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
#else /* ... !CONFIG_HOTPLUG_CPU */
@@ -1430,23 +1401,16 @@
return -EIO;
}
-#ifdef CONFIG_HOTPLUG_CPU
-#ifdef CONFIG_XEN
- /* Tell hypervisor to bring vcpu up. */
+#ifdef CONFIG_SMP_ALTERNATIVES
+ if (num_online_cpus() == 1)
+ prepare_for_smp();
+#endif
+
+ xen_smp_intr_init(cpu);
+ cpu_set(cpu, smp_commenced_mask);
+ cpu_set(cpu, cpu_online_map);
HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
-#endif
- /* Already up, and in cpu_quiescent now? */
- if (cpu_isset(cpu, smp_commenced_mask)) {
- cpu_enable(cpu);
- return 0;
- }
-#endif
-
- local_irq_enable();
- /* Unleash the CPU! */
- cpu_set(cpu, smp_commenced_mask);
- while (!cpu_isset(cpu, cpu_online_map))
- mb();
+
return 0;
}
@@ -1468,48 +1432,38 @@
extern irqreturn_t smp_reschedule_interrupt(int, void *, struct pt_regs *);
extern irqreturn_t smp_call_function_interrupt(int, void *, struct pt_regs *);
-void smp_intr_init(void)
-{
- int cpu = smp_processor_id();
-
+extern void local_setup_timer(unsigned int cpu);
+extern void local_teardown_timer(unsigned int cpu);
+
+static void xen_smp_intr_init(unsigned int cpu)
+{
per_cpu(resched_irq, cpu) =
- bind_ipi_to_irq(RESCHEDULE_VECTOR);
+ bind_ipi_to_irq(RESCHEDULE_VECTOR, cpu);
sprintf(resched_name[cpu], "resched%d", cpu);
BUG_ON(request_irq(per_cpu(resched_irq, cpu), smp_reschedule_interrupt,
SA_INTERRUPT, resched_name[cpu], NULL));
per_cpu(callfunc_irq, cpu) =
- bind_ipi_to_irq(CALL_FUNCTION_VECTOR);
+ bind_ipi_to_irq(CALL_FUNCTION_VECTOR, cpu);
sprintf(callfunc_name[cpu], "callfunc%d", cpu);
BUG_ON(request_irq(per_cpu(callfunc_irq, cpu),
smp_call_function_interrupt,
SA_INTERRUPT, callfunc_name[cpu], NULL));
-}
-
-static void smp_intr_exit(void)
-{
- int cpu = smp_processor_id();
+
+ if (cpu != 0)
+ local_setup_timer(cpu);
+}
+
+static void xen_smp_intr_exit(unsigned int cpu)
+{
+ if (cpu != 0)
+ local_teardown_timer(cpu);
free_irq(per_cpu(resched_irq, cpu), NULL);
- unbind_ipi_from_irq(RESCHEDULE_VECTOR);
+ unbind_ipi_from_irq(RESCHEDULE_VECTOR, cpu);
free_irq(per_cpu(callfunc_irq, cpu), NULL);
- unbind_ipi_from_irq(CALL_FUNCTION_VECTOR);
-}
-
-extern void local_setup_timer_irq(void);
-extern void local_teardown_timer_irq(void);
-
-void smp_suspend(void)
-{
- local_teardown_timer_irq();
- smp_intr_exit();
-}
-
-void smp_resume(void)
-{
- smp_intr_init();
- local_setup_timer();
+ unbind_ipi_from_irq(CALL_FUNCTION_VECTOR, cpu);
}
void vcpu_prepare(int vcpu)
@@ -1517,7 +1471,6 @@
extern void hypervisor_callback(void);
extern void failsafe_callback(void);
extern void smp_trap_init(trap_info_t *);
- extern void cpu_restore(void);
vcpu_guest_context_t ctxt;
struct task_struct *idle = idle_task(vcpu);
@@ -1532,7 +1485,7 @@
ctxt.user_regs.gs = 0;
ctxt.user_regs.ss = __KERNEL_DS;
ctxt.user_regs.cs = __KERNEL_CS;
- ctxt.user_regs.eip = (unsigned long)cpu_restore;
+ ctxt.user_regs.eip = (unsigned long)cpu_idle;
ctxt.user_regs.esp = idle->thread.esp;
ctxt.user_regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_IOPL_RING1;
@@ -1556,7 +1509,6 @@
ctxt.ctrlreg[3] = virt_to_mfn(swapper_pg_dir) << PAGE_SHIFT;
(void)HYPERVISOR_vcpu_op(VCPUOP_initialise, vcpu, &ctxt);
- (void)HYPERVISOR_vcpu_op(VCPUOP_up, vcpu, NULL);
}
/*
diff -r 92c6021f23e4 -r 5a97ee0633e8 linux-2.6-xen-sparse/arch/xen/i386/kernel/time.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/time.c Thu Oct 13 14:26:44 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/time.c Thu Oct 13 16:08:59 2005
@@ -785,7 +785,7 @@
rdtscll(vxtime.last_tsc);
#endif
- per_cpu(timer_irq, 0) = bind_virq_to_irq(VIRQ_TIMER);
+ per_cpu(timer_irq, 0) = bind_virq_to_irq(VIRQ_TIMER, 0);
(void)setup_irq(per_cpu(timer_irq, 0), &irq_timer);
}
@@ -852,21 +852,12 @@
#ifdef CONFIG_SMP
static char timer_name[NR_CPUS][15];
-void local_setup_timer_irq(void)
-{
- int cpu = smp_processor_id();
-
- if (cpu == 0)
- return;
- per_cpu(timer_irq, cpu) = bind_virq_to_irq(VIRQ_TIMER);
- sprintf(timer_name[cpu], "timer%d", cpu);
- BUG_ON(request_irq(per_cpu(timer_irq, cpu), timer_interrupt,
- SA_INTERRUPT, timer_name[cpu], NULL));
-}
-
-void local_setup_timer(void)
-{
- int seq, cpu = smp_processor_id();
+
+void local_setup_timer(unsigned int cpu)
+{
+ int seq;
+
+ BUG_ON(cpu == 0);
do {
seq = read_seqbegin(&xtime_lock);
@@ -874,17 +865,17 @@
per_cpu(shadow_time, cpu).system_timestamp;
} while (read_seqretry(&xtime_lock, seq));
- local_setup_timer_irq();
-}
-
-void local_teardown_timer_irq(void)
-{
- int cpu = smp_processor_id();
-
- if (cpu == 0)
- return;
+ per_cpu(timer_irq, cpu) = bind_virq_to_irq(VIRQ_TIMER, cpu);
+ sprintf(timer_name[cpu], "timer%d", cpu);
+ BUG_ON(request_irq(per_cpu(timer_irq, cpu), timer_interrupt,
+ SA_INTERRUPT, timer_name[cpu], NULL));
+}
+
+void local_teardown_timer(unsigned int cpu)
+{
+ BUG_ON(cpu == 0);
free_irq(per_cpu(timer_irq, cpu), NULL);
- unbind_virq_from_irq(VIRQ_TIMER);
+ unbind_virq_from_irq(VIRQ_TIMER, cpu);
}
#endif
diff -r 92c6021f23e4 -r 5a97ee0633e8 linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c
--- a/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c Thu Oct 13 14:26:44 2005
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c Thu Oct 13 16:08:59 2005
@@ -178,11 +178,10 @@
return irq;
}
-int bind_virq_to_irq(int virq)
+int bind_virq_to_irq(int virq, int cpu)
{
evtchn_op_t op = { .cmd = EVTCHNOP_bind_virq };
int evtchn, irq;
- int cpu = smp_processor_id();
spin_lock(&irq_mapping_update_lock);
@@ -209,10 +208,9 @@
}
EXPORT_SYMBOL(bind_virq_to_irq);
-void unbind_virq_from_irq(int virq)
+void unbind_virq_from_irq(int virq, int cpu)
{
evtchn_op_t op = { .cmd = EVTCHNOP_close };
- int cpu = smp_processor_id();
int irq = per_cpu(virq_to_irq, cpu)[virq];
int evtchn = irq_to_evtchn[irq];
@@ -240,11 +238,10 @@
}
EXPORT_SYMBOL(unbind_virq_from_irq);
-int bind_ipi_to_irq(int ipi)
+int bind_ipi_to_irq(int ipi, int cpu)
{
evtchn_op_t op = { .cmd = EVTCHNOP_bind_ipi };
int evtchn, irq;
- int cpu = smp_processor_id();
spin_lock(&irq_mapping_update_lock);
@@ -272,10 +269,9 @@
}
EXPORT_SYMBOL(bind_ipi_to_irq);
-void unbind_ipi_from_irq(int ipi)
+void unbind_ipi_from_irq(int ipi, int cpu)
{
evtchn_op_t op = { .cmd = EVTCHNOP_close };
- int cpu = smp_processor_id();
int evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi];
int irq = evtchn_to_irq[evtchn];
diff -r 92c6021f23e4 -r 5a97ee0633e8 linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smpboot.c
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smpboot.c Thu Oct 13 14:26:44 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smpboot.c Thu Oct 13 16:08:59 2005
@@ -1224,13 +1224,13 @@
int cpu = smp_processor_id();
per_cpu(resched_irq, cpu) =
- bind_ipi_to_irq(RESCHEDULE_VECTOR);
+ bind_ipi_to_irq(RESCHEDULE_VECTOR, cpu);
sprintf(resched_name[cpu], "resched%d", cpu);
BUG_ON(request_irq(per_cpu(resched_irq, cpu), smp_reschedule_interrupt,
SA_INTERRUPT, resched_name[cpu], NULL));
per_cpu(callfunc_irq, cpu) =
- bind_ipi_to_irq(CALL_FUNCTION_VECTOR);
+ bind_ipi_to_irq(CALL_FUNCTION_VECTOR, cpu);
sprintf(callfunc_name[cpu], "callfunc%d", cpu);
BUG_ON(request_irq(per_cpu(callfunc_irq, cpu),
smp_call_function_interrupt,
@@ -1242,10 +1242,10 @@
int cpu = smp_processor_id();
free_irq(per_cpu(resched_irq, cpu), NULL);
- unbind_ipi_from_irq(RESCHEDULE_VECTOR);
+ unbind_ipi_from_irq(RESCHEDULE_VECTOR, cpu);
free_irq(per_cpu(callfunc_irq, cpu), NULL);
- unbind_ipi_from_irq(CALL_FUNCTION_VECTOR);
+ unbind_ipi_from_irq(CALL_FUNCTION_VECTOR, cpu);
}
extern void local_setup_timer_irq(void);
diff -r 92c6021f23e4 -r 5a97ee0633e8 linux-2.6-xen-sparse/drivers/xen/console/console.c
--- a/linux-2.6-xen-sparse/drivers/xen/console/console.c Thu Oct 13 14:26:44 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/console/console.c Thu Oct 13 16:08:59 2005
@@ -768,7 +768,7 @@
#endif
if (xen_start_info->flags & SIF_INITDOMAIN) {
- xencons_priv_irq = bind_virq_to_irq(VIRQ_CONSOLE);
+ xencons_priv_irq = bind_virq_to_irq(VIRQ_CONSOLE, 0);
(void)request_irq(xencons_priv_irq,
xencons_priv_interrupt, 0, "console", NULL);
} else {
diff -r 92c6021f23e4 -r 5a97ee0633e8 linux-2.6-xen-sparse/drivers/xen/netback/netback.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c Thu Oct 13 14:26:44 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c Thu Oct 13 16:08:59 2005
@@ -817,7 +817,7 @@
netif_xenbus_init();
- (void)request_irq(bind_virq_to_irq(VIRQ_DEBUG),
+ (void)request_irq(bind_virq_to_irq(VIRQ_DEBUG, 0),
netif_be_dbg, SA_SHIRQ,
"net-be-dbg", &netif_be_dbg);
diff -r 92c6021f23e4 -r 5a97ee0633e8 linux-2.6-xen-sparse/include/asm-xen/evtchn.h
--- a/linux-2.6-xen-sparse/include/asm-xen/evtchn.h Thu Oct 13 14:26:44 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/evtchn.h Thu Oct 13 16:08:59 2005
@@ -44,12 +44,12 @@
*/
/* Dynamically bind a VIRQ source to Linux IRQ space. */
-extern int bind_virq_to_irq(int virq);
-extern void unbind_virq_from_irq(int virq);
+extern int bind_virq_to_irq(int virq, int cpu);
+extern void unbind_virq_from_irq(int virq, int cpu);
/* Dynamically bind an IPI source to Linux IRQ space. */
-extern int bind_ipi_to_irq(int ipi);
-extern void unbind_ipi_from_irq(int ipi);
+extern int bind_ipi_to_irq(int ipi, int cpu);
+extern void unbind_ipi_from_irq(int ipi, int cpu);
/*
* Dynamically bind an event-channel port to an IRQ-like callback handler.