# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 85f92475b9437fcd10bf1ae105f53b0abe963050
# Parent dd87869f877ca9c68c97f36b3870908fb279edb9
Create new vcpu_op() hypercall. Replaces old boot_vcpu()
hypercall and vcpu-related schedop commands.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
diff -r dd87869f877c -r 85f92475b943
linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c Mon Oct 3
14:05:37 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c Mon Oct 3
18:14:02 2005
@@ -49,6 +49,7 @@
#include <asm/irq.h>
#include <asm/desc.h>
#include <asm-xen/xen-public/physdev.h>
+#include <asm-xen/xen-public/vcpu.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif
@@ -178,7 +179,7 @@
don't printk. */
__get_cpu_var(cpu_state) = CPU_DEAD;
/* Tell hypervisor to take vcpu down. */
- HYPERVISOR_vcpu_down(cpu);
+ HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
#endif
play_dead();
local_irq_enable();
diff -r dd87869f877c -r 85f92475b943
linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c Mon Oct 3
14:05:37 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c Mon Oct 3
18:14:02 2005
@@ -63,6 +63,7 @@
#include <smpboot_hooks.h>
#include <asm-xen/evtchn.h>
+#include <asm-xen/xen-public/vcpu.h>
/* Set if we find a B stepping CPU */
static int __initdata smp_b_stepping;
@@ -882,11 +883,13 @@
ctxt.ctrlreg[3] = virt_to_mfn(swapper_pg_dir) << PAGE_SHIFT;
- boot_error = HYPERVISOR_boot_vcpu(cpu, &ctxt);
+ boot_error = HYPERVISOR_vcpu_op(VCPUOP_create, cpu, &ctxt);
if (boot_error)
printk("boot error: %ld\n", boot_error);
if (!boot_error) {
+ HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
+
/*
* allow APs to start initializing.
*/
@@ -1499,7 +1502,7 @@
#ifdef CONFIG_HOTPLUG_CPU
#ifdef CONFIG_XEN
/* Tell hypervisor to bring vcpu up. */
- HYPERVISOR_vcpu_up(cpu);
+ HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
#endif
/* Already up, and in cpu_quiescent now? */
if (cpu_isset(cpu, smp_commenced_mask)) {
@@ -1621,5 +1624,6 @@
ctxt.ctrlreg[3] = virt_to_mfn(swapper_pg_dir) << PAGE_SHIFT;
- (void)HYPERVISOR_boot_vcpu(vcpu, &ctxt);
-}
+ (void)HYPERVISOR_vcpu_op(VCPUOP_create, vcpu, &ctxt);
+ (void)HYPERVISOR_vcpu_op(VCPUOP_up, vcpu, NULL);
+}
diff -r dd87869f877c -r 85f92475b943
linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smpboot.c
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smpboot.c Mon Oct 3
14:05:37 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smpboot.c Mon Oct 3
18:14:02 2005
@@ -62,8 +62,8 @@
#include <asm/nmi.h>
#ifdef CONFIG_XEN
#include <asm/arch_hooks.h>
-
#include <asm-xen/evtchn.h>
+#include <asm-xen/xen-public/vcpu.h>
#endif
/* Change for real CPU hotplug. Note other files need to be fixed
@@ -771,11 +771,13 @@
ctxt.ctrlreg[3] = virt_to_mfn(init_level4_pgt) << PAGE_SHIFT;
- boot_error = HYPERVISOR_boot_vcpu(cpu, &ctxt);
+ boot_error = HYPERVISOR_vcpu_op(VCPUOP_create, cpu, &ctxt);
if (boot_error)
printk("boot error: %ld\n", boot_error);
if (!boot_error) {
+ HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
+
/*
* allow APs to start initializing.
*/
diff -r dd87869f877c -r 85f92475b943
linux-2.6-xen-sparse/include/asm-xen/asm-i386/hypercall.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/hypercall.h Mon Oct 3
14:05:37 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/hypercall.h Mon Oct 3
18:14:02 2005
@@ -316,26 +316,10 @@
}
static inline int
-HYPERVISOR_boot_vcpu(
- unsigned long vcpu, vcpu_guest_context_t *ctxt)
-{
- return _hypercall2(int, boot_vcpu, vcpu, ctxt);
-}
-
-static inline int
-HYPERVISOR_vcpu_up(
- int vcpu)
-{
- return _hypercall2(int, sched_op, SCHEDOP_vcpu_up |
- (vcpu << SCHEDOP_vcpushift), 0);
-}
-
-static inline int
-HYPERVISOR_vcpu_pickle(
- int vcpu, vcpu_guest_context_t *ctxt)
-{
- return _hypercall2(int, sched_op, SCHEDOP_vcpu_pickle |
- (vcpu << SCHEDOP_vcpushift), ctxt);
+HYPERVISOR_vcpu_op(
+ int cmd, int vcpuid, void *extra_args)
+{
+ return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
}
static inline int
@@ -354,24 +338,6 @@
SCHEDOP_reasonshift)),
"2" (srec) : "memory", "ecx");
- return ret;
-}
-
-static inline int
-HYPERVISOR_vcpu_down(
- int vcpu)
-{
- int ret;
- unsigned long ign1;
- /* Yes, I really do want to clobber edx here: when we resume a
- vcpu after unpickling a multi-processor domain, it returns
- here, but clobbers all of the call clobbered registers. */
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1)
- : "0" (__HYPERVISOR_sched_op),
- "1" (SCHEDOP_vcpu_down | (vcpu << SCHEDOP_vcpushift))
- : "memory", "ecx", "edx" );
return ret;
}
diff -r dd87869f877c -r 85f92475b943
linux-2.6-xen-sparse/include/asm-xen/asm-ia64/hypercall.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-ia64/hypercall.h Mon Oct 3
14:05:37 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-ia64/hypercall.h Mon Oct 3
18:14:02 2005
@@ -601,24 +601,6 @@
return 1;
}
-static inline int
-HYPERVISOR_boot_vcpu(
- unsigned long vcpu, vcpu_guest_context_t *ctxt)
-{
-#if 0
- int ret;
- unsigned long ign1, ign2;
-
- __asm__ __volatile__ (
- TRAP_INSTR
- : "=a" (ret), "=b" (ign1), "=c" (ign2)
- : "0" (__HYPERVISOR_boot_vcpu), "1" (vcpu), "2" (ctxt)
- : "memory");
-
- return ret;
-#endif
- return 1;
-}
#endif
#endif /* __HYPERCALL_H__ */
diff -r dd87869f877c -r 85f92475b943
linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/hypercall.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/hypercall.h Mon Oct
3 14:05:37 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/hypercall.h Mon Oct
3 18:14:02 2005
@@ -302,26 +302,10 @@
}
static inline int
-HYPERVISOR_boot_vcpu(
- unsigned long vcpu, vcpu_guest_context_t *ctxt)
-{
- return _hypercall2(int, boot_vcpu, vcpu, ctxt);
-}
-
-static inline int
-HYPERVISOR_vcpu_up(
- int vcpu)
-{
- return _hypercall2(int, sched_op, SCHEDOP_vcpu_up |
- (vcpu << SCHEDOP_vcpushift), 0);
-}
-
-static inline int
-HYPERVISOR_vcpu_pickle(
- int vcpu, vcpu_guest_context_t *ctxt)
-{
- return _hypercall2(int, sched_op, SCHEDOP_vcpu_pickle |
- (vcpu << SCHEDOP_vcpushift), ctxt);
+HYPERVISOR_vcpu_op(
+ int cmd, int vcpuid, void *extra_args)
+{
+ return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
}
static inline int
diff -r dd87869f877c -r 85f92475b943 xen/arch/x86/x86_32/entry.S
--- a/xen/arch/x86/x86_32/entry.S Mon Oct 3 14:05:37 2005
+++ b/xen/arch/x86/x86_32/entry.S Mon Oct 3 18:14:02 2005
@@ -808,7 +808,7 @@
.long do_vm_assist
.long do_update_va_mapping_otherdomain
.long do_switch_vm86
- .long do_boot_vcpu
+ .long do_vcpu_op
.long do_ni_hypercall /* 25 */
.long do_mmuext_op
.long do_acm_op /* 27 */
@@ -841,7 +841,7 @@
.byte 2 /* do_vm_assist */
.byte 5 /* do_update_va_mapping_otherdomain */
.byte 0 /* do_switch_vm86 */
- .byte 2 /* do_boot_vcpu */
+ .byte 3 /* do_vcpu_op */
.byte 0 /* do_ni_hypercall */ /* 25 */
.byte 4 /* do_mmuext_op */
.byte 1 /* do_acm_op */
diff -r dd87869f877c -r 85f92475b943 xen/arch/x86/x86_64/entry.S
--- a/xen/arch/x86/x86_64/entry.S Mon Oct 3 14:05:37 2005
+++ b/xen/arch/x86/x86_64/entry.S Mon Oct 3 18:14:02 2005
@@ -629,7 +629,7 @@
.quad do_vm_assist
.quad do_update_va_mapping_otherdomain
.quad do_switch_to_user
- .quad do_boot_vcpu
+ .quad do_vcpu_op
.quad do_set_segment_base /* 25 */
.quad do_mmuext_op
.quad do_acm_op
@@ -662,7 +662,7 @@
.byte 2 /* do_vm_assist */
.byte 4 /* do_update_va_mapping_otherdomain */
.byte 0 /* do_switch_to_user */
- .byte 2 /* do_boot_vcpu */
+ .byte 3 /* do_vcpu_op */
.byte 2 /* do_set_segment_base */ /* 25 */
.byte 4 /* do_mmuext_op */
.byte 1 /* do_acm_op */
diff -r dd87869f877c -r 85f92475b943 xen/common/domain.c
--- a/xen/common/domain.c Mon Oct 3 14:05:37 2005
+++ b/xen/common/domain.c Mon Oct 3 18:14:02 2005
@@ -18,6 +18,7 @@
#include <xen/domain_page.h>
#include <asm/debugger.h>
#include <public/dom0_ops.h>
+#include <public/vcpu.h>
/* Both these structures are protected by the domlist_lock. */
rwlock_t domlist_lock = RW_LOCK_UNLOCKED;
@@ -366,37 +367,17 @@
return rc;
}
-/*
- * final_setup_guest is used for final setup and launching of domains other
- * than domain 0. ie. the domains that are being built by the userspace dom0
- * domain builder.
- */
-long do_boot_vcpu(unsigned long vcpu, struct vcpu_guest_context *ctxt)
-{
- struct domain *d = current->domain;
- struct vcpu *v;
- int rc = 0;
- struct vcpu_guest_context *c;
-
- if ( (vcpu >= MAX_VIRT_CPUS) || (d->vcpu[vcpu] != NULL) )
- return -EINVAL;
-
- if ( alloc_vcpu_struct(d, vcpu) == NULL )
+int boot_vcpu(struct domain *d, int vcpuid, struct vcpu_guest_context *ctxt)
+{
+ struct vcpu *v;
+ int rc;
+
+ ASSERT(d->vcpu[vcpuid] == NULL);
+
+ if ( alloc_vcpu_struct(d, vcpuid) == NULL )
return -ENOMEM;
- if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
- {
- rc = -ENOMEM;
- goto out;
- }
-
- if ( copy_from_user(c, ctxt, sizeof(*c)) )
- {
- rc = -EFAULT;
- goto out;
- }
-
- v = d->vcpu[vcpu];
+ v = d->vcpu[vcpuid];
atomic_set(&v->pausecnt, 0);
v->cpumap = CPUMAP_RUNANYWHERE;
@@ -405,22 +386,73 @@
arch_do_boot_vcpu(v);
- if ( (rc = arch_set_info_guest(v, c)) != 0 )
+ if ( (rc = arch_set_info_guest(v, ctxt)) != 0 )
goto out;
sched_add_domain(v);
- /* domain_unpause_by_systemcontroller */
- if ( test_and_clear_bit(_VCPUF_ctrl_pause, &v->vcpu_flags) )
- vcpu_wake(v);
-
- xfree(c);
+ set_bit(_VCPUF_down, &v->vcpu_flags);
+ clear_bit(_VCPUF_ctrl_pause, &v->vcpu_flags);
+
return 0;
out:
- xfree(c);
- arch_free_vcpu_struct(d->vcpu[vcpu]);
- d->vcpu[vcpu] = NULL;
+ arch_free_vcpu_struct(d->vcpu[vcpuid]);
+ d->vcpu[vcpuid] = NULL;
+ return rc;
+}
+
+long do_vcpu_op(int cmd, int vcpuid, void *arg)
+{
+ struct domain *d = current->domain;
+ struct vcpu *v;
+ struct vcpu_guest_context *ctxt;
+ long rc = 0;
+
+ if ( (vcpuid < 0) || (vcpuid >= MAX_VIRT_CPUS) )
+ return -EINVAL;
+
+ if ( ((v = d->vcpu[vcpuid]) == NULL) && (cmd != VCPUOP_create) )
+ return -ENOENT;
+
+ switch ( cmd )
+ {
+ case VCPUOP_create:
+ if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL )
+ {
+ rc = -ENOMEM;
+ break;
+ }
+
+ if ( copy_from_user(ctxt, arg, sizeof(*ctxt)) )
+ {
+ xfree(ctxt);
+ rc = -EFAULT;
+ break;
+ }
+
+ LOCK_BIGLOCK(d);
+ rc = (d->vcpu[vcpuid] == NULL) ? boot_vcpu(d, vcpuid, ctxt) : -EEXIST;
+ UNLOCK_BIGLOCK(d);
+
+ xfree(ctxt);
+ break;
+
+ case VCPUOP_up:
+ if ( test_and_clear_bit(_VCPUF_down, &v->vcpu_flags) )
+ vcpu_wake(v);
+ break;
+
+ case VCPUOP_down:
+ if ( !test_and_set_bit(_VCPUF_down, &v->vcpu_flags) )
+ vcpu_sleep_nosync(v);
+ break;
+
+ case VCPUOP_is_up:
+ rc = !test_bit(_VCPUF_down, &v->vcpu_flags);
+ break;
+ }
+
return rc;
}
diff -r dd87869f877c -r 85f92475b943 xen/common/schedule.c
--- a/xen/common/schedule.c Mon Oct 3 14:05:37 2005
+++ b/xen/common/schedule.c Mon Oct 3 18:14:02 2005
@@ -270,69 +270,6 @@
return 0;
}
-/* Mark target vcpu as non-runnable so it is not scheduled */
-static long do_vcpu_down(int vcpu)
-{
- struct vcpu *target;
-
- if ( vcpu > MAX_VIRT_CPUS )
- return -EINVAL;
-
- target = current->domain->vcpu[vcpu];
- if ( target == NULL )
- return -ESRCH;
- set_bit(_VCPUF_down, &target->vcpu_flags);
-
- return 0;
-}
-
-/* Mark target vcpu as runnable and wake it */
-static long do_vcpu_up(int vcpu)
-{
- struct vcpu *target;
-
- if (vcpu > MAX_VIRT_CPUS)
- return -EINVAL;
-
- target = current->domain->vcpu[vcpu];
- if ( target == NULL )
- return -ESRCH;
- clear_bit(_VCPUF_down, &target->vcpu_flags);
- /* wake vcpu */
- vcpu_wake(target);
-
- return 0;
-}
-
-static long do_vcpu_pickle(int vcpu, unsigned long arg)
-{
- struct vcpu *v;
- vcpu_guest_context_t *c;
- int ret = 0;
-
- if (vcpu >= MAX_VIRT_CPUS)
- return -EINVAL;
- v = current->domain->vcpu[vcpu];
- if (!v)
- return -ESRCH;
- /* Don't pickle vcpus which are currently running */
- if (!test_bit(_VCPUF_down, &v->vcpu_flags)) {
- return -EBUSY;
- }
- c = xmalloc(vcpu_guest_context_t);
- if (!c)
- return -ENOMEM;
- arch_getdomaininfo_ctxt(v, c);
- if (copy_to_user((vcpu_guest_context_t *)arg,
- (const vcpu_guest_context_t *)c, sizeof(*c)))
- ret = -EFAULT;
- xfree(c);
- return ret;
-}
-
-/*
- * Demultiplex scheduler-related hypercalls.
- */
long do_sched_op(unsigned long op, unsigned long arg)
{
long ret = 0;
@@ -359,21 +296,6 @@
domain_shutdown((u8)(op >> SCHEDOP_reasonshift));
break;
}
- case SCHEDOP_vcpu_down:
- {
- ret = do_vcpu_down((int)(op >> SCHEDOP_vcpushift));
- break;
- }
- case SCHEDOP_vcpu_up:
- {
- ret = do_vcpu_up((int)(op >> SCHEDOP_vcpushift));
- break;
- }
- case SCHEDOP_vcpu_pickle:
- {
- ret = do_vcpu_pickle((int)(op >> SCHEDOP_vcpushift), arg);
- break;
- }
default:
ret = -ENOSYS;
@@ -395,8 +317,8 @@
return 0;
}
-/** sched_id - fetch ID of current scheduler */
-int sched_id()
+/* sched_id - fetch ID of current scheduler */
+int sched_id(void)
{
return ops.sched_id;
}
diff -r dd87869f877c -r 85f92475b943 xen/include/public/xen.h
--- a/xen/include/public/xen.h Mon Oct 3 14:05:37 2005
+++ b/xen/include/public/xen.h Mon Oct 3 18:14:02 2005
@@ -55,7 +55,7 @@
#define __HYPERVISOR_update_va_mapping_otherdomain 22
#define __HYPERVISOR_switch_vm86 23 /* x86/32 only */
#define __HYPERVISOR_switch_to_user 23 /* x86/64 only */
-#define __HYPERVISOR_boot_vcpu 24
+#define __HYPERVISOR_vcpu_op 24
#define __HYPERVISOR_set_segment_base 25 /* x86/64 only */
#define __HYPERVISOR_mmuext_op 26
#define __HYPERVISOR_acm_op 27
@@ -201,12 +201,8 @@
#define SCHEDOP_yield 0 /* Give up the CPU voluntarily. */
#define SCHEDOP_block 1 /* Block until an event is received. */
#define SCHEDOP_shutdown 2 /* Stop executing this domain. */
-#define SCHEDOP_vcpu_down 3 /* make target VCPU not-runnable. */
-#define SCHEDOP_vcpu_up 4 /* make target VCPU runnable. */
-#define SCHEDOP_vcpu_pickle 5 /* save a vcpu's context to memory. */
#define SCHEDOP_cmdmask 255 /* 8-bit command. */
#define SCHEDOP_reasonshift 8 /* 8-bit reason code. (SCHEDOP_shutdown) */
-#define SCHEDOP_vcpushift 8 /* 8-bit VCPU target. (SCHEDOP_up|down) */
/*
* Reason codes for SCHEDOP_shutdown. These may be interpreted by control
diff -r dd87869f877c -r 85f92475b943 xen/include/public/vcpu.h
--- /dev/null Mon Oct 3 14:05:37 2005
+++ b/xen/include/public/vcpu.h Mon Oct 3 18:14:02 2005
@@ -0,0 +1,55 @@
+/******************************************************************************
+ * vcpu.h
+ *
+ * VCPU creation and hotplug.
+ *
+ * Copyright (c) 2005, Keir Fraser <keir@xxxxxxxxxxxxx>
+ */
+
+#ifndef __XEN_PUBLIC_VCPU_H__
+#define __XEN_PUBLIC_VCPU_H__
+
+/*
+ * Prototype for this hypercall is:
+ * int vcpu_op(int cmd, int vcpuid, void *extra_args)
+ * @cmd == VCPUOP_??? (VCPU operation).
+ * @vcpuid == VCPU to operate on.
+ * @extra_args == Operation-specific extra arguments (NULL if none).
+ */
+
+/*
+ * Create a new VCPU. This must be called before a VCPU can be referred to
+ * in any other hypercall (e.g., to bind event channels). The new VCPU
+ * will not run until it is brought up by VCPUOP_up.
+ *
+ * @extra_args == pointer to vcpu_guest_context structure containing initial
+ * state for the new VCPU.
+ */
+#define VCPUOP_create 0
+
+/*
+ * Bring up a newly-created or previously brought-down VCPU. This makes the
+ * VCPU runnable.
+ */
+#define VCPUOP_up 1
+
+/*
+ * Bring down a VCPU (i.e., make it non-runnable).
+ * There are a few caveats that callers should observe:
+ * 1. This operation may return, and VCPUOP_is_up may return false, before the
+ *    VCPU stops running (i.e., the command is asynchronous). It is a good
+ * idea to ensure that the VCPU has entered a non-critical loop before
+ * bringing it down. Alternatively, this operation is guaranteed
+ * synchronous if invoked by the VCPU itself.
+ * 2. After a VCPU is created, there is currently no way to drop all its
+ * references to domain memory. Even a VCPU that is down still holds
+ * memory references via its pagetable base pointer and GDT. It is good
+ * practise to move a VCPU onto an 'idle' or default page table, LDT and
+ * GDT before bringing it down.
+ */
+#define VCPUOP_down 2
+
+/* Returns 1 if the given VCPU is up. */
+#define VCPUOP_is_up 3
+
+#endif /* __XEN_PUBLIC_VCPU_H__ */
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|