# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID 3fa6635d04b99cfeb936ea9a7ff9dde9b7b5ced8
# Parent a9d2106313fa4e0bce66762ab09fef78cb93dfe2
[XEN] Various selector and callback cleanups to simplify the tools
and assumptions about callback selector values on x86/32.
Original patch from Jan Beulich <jbeulich@xxxxxxxxxx>
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
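
In summary (a sketch distilled from the hunks below, not additional code): the
tools stop forcing FLAT_KERNEL_CS defaults into trap and callback selectors,
the new do_guest_trap() in traps.c consolidates the trap-bounce setup that was
previously open-coded in each handler, and an unregistered ("null") callback
is now detected per architecture as:

    /* x86/32 (asm-x86/x86_32/regs.h): handler absent if its code selector is null (0-3). */
    #define null_trap_bounce(tb) (((tb)->cs & ~3) == 0)

    /* x86/64 (asm-x86/x86_64/regs.h): handler absent if its entry point is zero. */
    #define null_trap_bounce(tb) ((tb)->eip == 0)

do_guest_trap() and propagate_page_fault() log a warning when bouncing to such
a null handler, while the x86/32 create_bounce_frame path in entry.S crashes
the domain if the bounce code selector is null.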
---
tools/libxc/xc_linux_build.c | 19 ----
tools/libxc/xc_linux_restore.c | 33 -------
xen/arch/x86/domain.c | 33 +------
xen/arch/x86/domain_build.c | 30 +++---
xen/arch/x86/traps.c | 179 ++++++++++++++++----------------------
xen/arch/x86/x86_32/entry.S | 5 -
xen/include/asm-x86/x86_32/regs.h | 3
xen/include/asm-x86/x86_64/regs.h | 3
xen/include/xen/sched.h | 5 -
9 files changed, 114 insertions(+), 196 deletions(-)
diff -r a9d2106313fa -r 3fa6635d04b9 tools/libxc/xc_linux_build.c
--- a/tools/libxc/xc_linux_build.c Wed Nov 15 16:53:43 2006 +0000
+++ b/tools/libxc/xc_linux_build.c Wed Nov 15 18:41:06 2006 +0000
@@ -1106,7 +1106,7 @@ static int xc_linux_build_internal(int x
{
struct xen_domctl launch_domctl;
DECLARE_DOMCTL;
- int rc, i;
+ int rc;
struct vcpu_guest_context st_ctxt, *ctxt = &st_ctxt;
unsigned long vstartinfo_start, vkern_entry, vstack_start;
uint32_t features_bitmap[XENFEAT_NR_SUBMAPS] = { 0, };
@@ -1180,21 +1180,8 @@ static int xc_linux_build_internal(int x
ctxt->flags = VGCF_IN_KERNEL;
- /* Virtual IDT is empty at start-of-day. */
- for ( i = 0; i < 256; i++ )
- {
- ctxt->trap_ctxt[i].vector = i;
- ctxt->trap_ctxt[i].cs = FLAT_KERNEL_CS;
- }
-
- /* Ring 1 stack is the initial stack. */
- ctxt->kernel_ss = FLAT_KERNEL_SS;
- ctxt->kernel_sp = vstack_start + PAGE_SIZE;
-
-#if defined(__i386__)
- ctxt->event_callback_cs = FLAT_KERNEL_CS;
- ctxt->failsafe_callback_cs = FLAT_KERNEL_CS;
-#endif
+ ctxt->kernel_ss = ctxt->user_regs.ss;
+ ctxt->kernel_sp = ctxt->user_regs.esp;
#endif /* x86 */
memset(&launch_domctl, 0, sizeof(launch_domctl));
diff -r a9d2106313fa -r 3fa6635d04b9 tools/libxc/xc_linux_restore.c
--- a/tools/libxc/xc_linux_restore.c Wed Nov 15 16:53:43 2006 +0000
+++ b/tools/libxc/xc_linux_restore.c Wed Nov 15 18:41:06 2006 +0000
@@ -774,39 +774,6 @@ int xc_linux_restore(int xc_handle, int
memcpy(live_p2m, p2m, P2M_SIZE);
munmap(live_p2m, P2M_SIZE);
- /*
- * Safety checking of saved context:
- * 1. user_regs is fine, as Xen checks that on context switch.
- * 2. fpu_ctxt is fine, as it can't hurt Xen.
- * 3. trap_ctxt needs the code selectors checked.
- * 4. ldt base must be page-aligned, no more than 8192 ents, ...
- * 5. gdt already done, and further checking is done by Xen.
- * 6. check that kernel_ss is safe.
- * 7. pt_base is already done.
- * 8. debugregs are checked by Xen.
- * 9. callback code selectors need checking.
- */
- for ( i = 0; i < 256; i++ ) {
- ctxt.trap_ctxt[i].vector = i;
- if ((ctxt.trap_ctxt[i].cs & 3) == 0)
- ctxt.trap_ctxt[i].cs = FLAT_KERNEL_CS;
- }
- if ((ctxt.kernel_ss & 3) == 0)
- ctxt.kernel_ss = FLAT_KERNEL_DS;
-#if defined(__i386__)
- if ((ctxt.event_callback_cs & 3) == 0)
- ctxt.event_callback_cs = FLAT_KERNEL_CS;
- if ((ctxt.failsafe_callback_cs & 3) == 0)
- ctxt.failsafe_callback_cs = FLAT_KERNEL_CS;
-#endif
- if (((ctxt.ldt_base & (PAGE_SIZE - 1)) != 0) ||
- (ctxt.ldt_ents > 8192) ||
- (ctxt.ldt_base > hvirt_start) ||
- ((ctxt.ldt_base + ctxt.ldt_ents*8) > hvirt_start)) {
- ERROR("Bad LDT base or size");
- goto out;
- }
-
DPRINTF("Domain ready to be built.\n");
domctl.cmd = XEN_DOMCTL_setvcpucontext;
diff -r a9d2106313fa -r 3fa6635d04b9 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c Wed Nov 15 16:53:43 2006 +0000
+++ b/xen/arch/x86/domain.c Wed Nov 15 18:41:06 2006 +0000
@@ -294,6 +294,12 @@ int arch_set_info_guest(
for ( i = 0; i < 256; i++ )
fixup_guest_code_selector(c->trap_ctxt[i].cs);
+
+ /* LDT safety checks. */
+ if ( ((c->ldt_base & (PAGE_SIZE-1)) != 0) ||
+ (c->ldt_ents > 8192) ||
+ !array_access_ok(c->ldt_base, c->ldt_ents, LDT_ENTRY_SIZE) )
+ return -EINVAL;
}
clear_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
@@ -422,33 +428,6 @@ arch_do_vcpu_op(
return rc;
}
-
-void new_thread(struct vcpu *d,
- unsigned long start_pc,
- unsigned long start_stack,
- unsigned long start_info)
-{
- struct cpu_user_regs *regs = &d->arch.guest_context.user_regs;
-
- /*
- * Initial register values:
- * DS,ES,FS,GS = FLAT_KERNEL_DS
- * CS:EIP = FLAT_KERNEL_CS:start_pc
- * SS:ESP = FLAT_KERNEL_SS:start_stack
- * ESI = start_info
- * [EAX,EBX,ECX,EDX,EDI,EBP are zero]
- */
- regs->ds = regs->es = regs->fs = regs->gs = FLAT_KERNEL_DS;
- regs->ss = FLAT_KERNEL_SS;
- regs->cs = FLAT_KERNEL_CS;
- regs->eip = start_pc;
- regs->esp = start_stack;
- regs->esi = start_info;
-
- __save_flags(regs->eflags);
- regs->eflags |= X86_EFLAGS_IF;
-}
-
#ifdef __x86_64__
diff -r a9d2106313fa -r 3fa6635d04b9 xen/arch/x86/domain_build.c
--- a/xen/arch/x86/domain_build.c Wed Nov 15 16:53:43 2006 +0000
+++ b/xen/arch/x86/domain_build.c Wed Nov 15 18:41:06 2006 +0000
@@ -249,6 +249,7 @@ int construct_dom0(struct domain *d,
char *cmdline)
{
int i, rc, dom0_pae, xen_pae, order;
+ struct cpu_user_regs *regs;
unsigned long pfn, mfn;
unsigned long nr_pages;
unsigned long nr_pt_pages;
@@ -441,19 +442,7 @@ int construct_dom0(struct domain *d,
mpt_alloc = (vpt_start - dsi.v_start) +
(unsigned long)pfn_to_paddr(alloc_spfn);
- /*
- * We're basically forcing default RPLs to 1, so that our "what privilege
- * level are we returning to?" logic works.
- */
- v->arch.guest_context.kernel_ss = FLAT_KERNEL_SS;
- for ( i = 0; i < 256; i++ )
- v->arch.guest_context.trap_ctxt[i].cs = FLAT_KERNEL_CS;
-
#if defined(__i386__)
-
- v->arch.guest_context.failsafe_callback_cs = FLAT_KERNEL_CS;
- v->arch.guest_context.event_callback_cs = FLAT_KERNEL_CS;
-
/*
* Protect the lowest 1GB of memory. We use a temporary mapping there
* from which we copy the kernel and ramdisk images.
@@ -816,7 +805,22 @@ int construct_dom0(struct domain *d,
set_bit(_VCPUF_initialised, &v->vcpu_flags);
- new_thread(v, dsi.v_kernentry, vstack_end, vstartinfo_start);
+ /*
+ * Initial register values:
+ * DS,ES,FS,GS = FLAT_KERNEL_DS
+ * CS:EIP = FLAT_KERNEL_CS:start_pc
+ * SS:ESP = FLAT_KERNEL_SS:start_stack
+ * ESI = start_info
+ * [EAX,EBX,ECX,EDX,EDI,EBP are zero]
+ */
+ regs = &v->arch.guest_context.user_regs;
+ regs->ds = regs->es = regs->fs = regs->gs = FLAT_KERNEL_DS;
+ regs->ss = FLAT_KERNEL_SS;
+ regs->cs = FLAT_KERNEL_CS;
+ regs->eip = dsi.v_kernentry;
+ regs->esp = vstack_end;
+ regs->esi = vstartinfo_start;
+ regs->eflags = X86_EFLAGS_IF;
if ( opt_dom0_shadow )
if ( shadow_test_enable(d) == 0 )
diff -r a9d2106313fa -r 3fa6635d04b9 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c Wed Nov 15 16:53:43 2006 +0000
+++ b/xen/arch/x86/traps.c Wed Nov 15 18:41:06 2006 +0000
@@ -331,14 +331,9 @@ void show_execution_state(struct cpu_use
show_stack(regs);
}
-/*
- * This is called for faults at very unexpected times (e.g., when interrupts
- * are disabled). In such situations we can't do much that is safe. We try to
- * print out some tracing and then we just spin.
- */
-asmlinkage void fatal_trap(int trapnr, struct cpu_user_regs *regs)
-{
- static char *trapstr[] = {
+char *trapstr(int trapnr)
+{
+ static char *strings[] = {
"divide error", "debug", "nmi", "bkpt", "overflow", "bounds",
"invalid opcode", "device not available", "double fault",
"coprocessor segment", "invalid tss", "segment not found",
@@ -347,6 +342,19 @@ asmlinkage void fatal_trap(int trapnr, s
"machine check", "simd error"
};
+ if ( (trapnr < 0) || (trapnr >= ARRAY_SIZE(strings)) )
+ return "???";
+
+ return strings[trapnr];
+}
+
+/*
+ * This is called for faults at very unexpected times (e.g., when interrupts
+ * are disabled). In such situations we can't do much that is safe. We try to
+ * print out some tracing and then we just spin.
+ */
+asmlinkage void fatal_trap(int trapnr, struct cpu_user_regs *regs)
+{
watchdog_disable();
console_start_sync();
@@ -361,38 +369,51 @@ asmlinkage void fatal_trap(int trapnr, s
panic("FATAL TRAP: vector = %d (%s)\n"
"[error_code=%04x] %s\n",
- trapnr, trapstr[trapnr], regs->error_code,
+ trapnr, trapstr(trapnr), regs->error_code,
(regs->eflags & X86_EFLAGS_IF) ? "" : ", IN INTERRUPT CONTEXT");
}
-static inline int do_trap(int trapnr, char *str,
- struct cpu_user_regs *regs,
- int use_error_code)
+static int do_guest_trap(
+ int trapnr, const struct cpu_user_regs *regs, int use_error_code)
{
struct vcpu *v = current;
- struct trap_bounce *tb = &v->arch.trap_bounce;
- struct trap_info *ti;
- unsigned long fixup;
-
- DEBUGGER_trap_entry(trapnr, regs);
-
- if ( !guest_mode(regs) )
- goto xen_fault;
-
- ti = &current->arch.guest_context.trap_ctxt[trapnr];
+ struct trap_bounce *tb;
+ const struct trap_info *ti;
+
+ tb = &v->arch.trap_bounce;
+ ti = &v->arch.guest_context.trap_ctxt[trapnr];
+
tb->flags = TBF_EXCEPTION;
tb->cs = ti->cs;
tb->eip = ti->address;
+
if ( use_error_code )
{
tb->flags |= TBF_EXCEPTION_ERRCODE;
tb->error_code = regs->error_code;
}
+
if ( TI_GET_IF(ti) )
tb->flags |= TBF_INTERRUPT;
+
+ if ( unlikely(null_trap_bounce(tb)) )
+ gdprintk(XENLOG_WARNING, "Unhandled %s fault/trap [#%d] in "
+ "domain %d on VCPU %d [ec=%04x]\n",
+ trapstr(trapnr), trapnr, v->domain->domain_id, v->vcpu_id,
+ regs->error_code);
+
return 0;
-
- xen_fault:
+}
+
+static inline int do_trap(
+ int trapnr, struct cpu_user_regs *regs, int use_error_code)
+{
+ unsigned long fixup;
+
+ DEBUGGER_trap_entry(trapnr, regs);
+
+ if ( guest_mode(regs) )
+ return do_guest_trap(trapnr, regs, use_error_code);
if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
{
@@ -407,32 +428,32 @@ static inline int do_trap(int trapnr, ch
show_execution_state(regs);
panic("FATAL TRAP: vector = %d (%s)\n"
"[error_code=%04x]\n",
- trapnr, str, regs->error_code);
+ trapnr, trapstr(trapnr), regs->error_code);
return 0;
}
-#define DO_ERROR_NOCODE(trapnr, str, name) \
+#define DO_ERROR_NOCODE(trapnr, name) \
asmlinkage int do_##name(struct cpu_user_regs *regs) \
{ \
- return do_trap(trapnr, str, regs, 0); \
-}
-
-#define DO_ERROR(trapnr, str, name) \
+ return do_trap(trapnr, regs, 0); \
+}
+
+#define DO_ERROR(trapnr, name) \
asmlinkage int do_##name(struct cpu_user_regs *regs) \
{ \
- return do_trap(trapnr, str, regs, 1); \
-}
-
-DO_ERROR_NOCODE( 0, "divide error", divide_error)
-DO_ERROR_NOCODE( 4, "overflow", overflow)
-DO_ERROR_NOCODE( 5, "bounds", bounds)
-DO_ERROR_NOCODE( 9, "coprocessor segment overrun", coprocessor_segment_overrun)
-DO_ERROR(10, "invalid TSS", invalid_TSS)
-DO_ERROR(11, "segment not present", segment_not_present)
-DO_ERROR(12, "stack segment", stack_segment)
-DO_ERROR_NOCODE(16, "fpu error", coprocessor_error)
-DO_ERROR(17, "alignment check", alignment_check)
-DO_ERROR_NOCODE(19, "simd error", simd_coprocessor_error)
+ return do_trap(trapnr, regs, 1); \
+}
+
+DO_ERROR_NOCODE(TRAP_divide_error, divide_error)
+DO_ERROR_NOCODE(TRAP_overflow, overflow)
+DO_ERROR_NOCODE(TRAP_bounds, bounds)
+DO_ERROR_NOCODE(TRAP_copro_seg, coprocessor_segment_overrun)
+DO_ERROR( TRAP_invalid_tss, invalid_TSS)
+DO_ERROR( TRAP_no_segment, segment_not_present)
+DO_ERROR( TRAP_stack_error, stack_segment)
+DO_ERROR_NOCODE(TRAP_copro_error, coprocessor_error)
+DO_ERROR( TRAP_alignment_check, alignment_check)
+DO_ERROR_NOCODE(TRAP_simd_error, simd_coprocessor_error)
int rdmsr_hypervisor_regs(
uint32_t idx, uint32_t *eax, uint32_t *edx)
@@ -599,9 +620,6 @@ static int emulate_forced_invalid_op(str
asmlinkage int do_invalid_op(struct cpu_user_regs *regs)
{
- struct vcpu *v = current;
- struct trap_bounce *tb = &v->arch.trap_bounce;
- struct trap_info *ti;
int rc;
DEBUGGER_trap_entry(TRAP_invalid_op, regs);
@@ -625,22 +643,11 @@ asmlinkage int do_invalid_op(struct cpu_
if ( (rc = emulate_forced_invalid_op(regs)) != 0 )
return rc;
- ti = &current->arch.guest_context.trap_ctxt[TRAP_invalid_op];
- tb->flags = TBF_EXCEPTION;
- tb->cs = ti->cs;
- tb->eip = ti->address;
- if ( TI_GET_IF(ti) )
- tb->flags |= TBF_INTERRUPT;
-
- return 0;
+ return do_guest_trap(TRAP_invalid_op, regs, 0);
}
asmlinkage int do_int3(struct cpu_user_regs *regs)
{
- struct vcpu *v = current;
- struct trap_bounce *tb = &v->arch.trap_bounce;
- struct trap_info *ti;
-
DEBUGGER_trap_entry(TRAP_int3, regs);
if ( !guest_mode(regs) )
@@ -650,14 +657,7 @@ asmlinkage int do_int3(struct cpu_user_r
panic("FATAL TRAP: vector = 3 (Int3)\n");
}
- ti = &current->arch.guest_context.trap_ctxt[TRAP_int3];
- tb->flags = TBF_EXCEPTION;
- tb->cs = ti->cs;
- tb->eip = ti->address;
- if ( TI_GET_IF(ti) )
- tb->flags |= TBF_INTERRUPT;
-
- return 0;
+ return do_guest_trap(TRAP_int3, regs, 0);
}
asmlinkage int do_machine_check(struct cpu_user_regs *regs)
@@ -687,6 +687,12 @@ void propagate_page_fault(unsigned long
tb->eip = ti->address;
if ( TI_GET_IF(ti) )
tb->flags |= TBF_INTERRUPT;
+ if ( unlikely(null_trap_bounce(tb)) )
+ {
+ printk("Unhandled page fault in domain %d on VCPU %d (ec=%04X)\n",
+ v->domain->domain_id, v->vcpu_id, error_code);
+ show_page_walk(addr);
+ }
}
static int handle_gdt_ldt_mapping_fault(
@@ -1481,8 +1487,6 @@ asmlinkage int do_general_protection(str
asmlinkage int do_general_protection(struct cpu_user_regs *regs)
{
struct vcpu *v = current;
- struct trap_bounce *tb = &v->arch.trap_bounce;
- struct trap_info *ti;
unsigned long fixup;
DEBUGGER_trap_entry(TRAP_gp_fault, regs);
@@ -1516,12 +1520,13 @@ asmlinkage int do_general_protection(str
if ( (regs->error_code & 3) == 2 )
{
/* This fault must be due to <INT n> instruction. */
- ti = &current->arch.guest_context.trap_ctxt[regs->error_code>>3];
+ const struct trap_info *ti;
+ unsigned char vector = regs->error_code >> 3;
+ ti = &v->arch.guest_context.trap_ctxt[vector];
if ( permit_softint(TI_GET_DPL(ti), v, regs) )
{
- tb->flags = TBF_EXCEPTION;
regs->eip += 2;
- goto finish_propagation;
+ return do_guest_trap(vector, regs, 0);
}
}
@@ -1538,15 +1543,7 @@ asmlinkage int do_general_protection(str
#endif
/* Pass on GPF as is. */
- ti = &current->arch.guest_context.trap_ctxt[TRAP_gp_fault];
- tb->flags = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE;
- tb->error_code = regs->error_code;
- finish_propagation:
- tb->cs = ti->cs;
- tb->eip = ti->address;
- if ( TI_GET_IF(ti) )
- tb->flags |= TBF_INTERRUPT;
- return 0;
+ return do_guest_trap(TRAP_gp_fault, regs, 1);
gp_in_kernel:
@@ -1684,22 +1681,11 @@ void unset_nmi_callback(void)
asmlinkage int math_state_restore(struct cpu_user_regs *regs)
{
- struct trap_bounce *tb;
- struct trap_info *ti;
-
setup_fpu(current);
if ( current->arch.guest_context.ctrlreg[0] & X86_CR0_TS )
{
- tb = &current->arch.trap_bounce;
- ti = &current->arch.guest_context.trap_ctxt[TRAP_no_device];
-
- tb->flags = TBF_EXCEPTION;
- tb->cs = ti->cs;
- tb->eip = ti->address;
- if ( TI_GET_IF(ti) )
- tb->flags |= TBF_INTERRUPT;
-
+ do_guest_trap(TRAP_no_device, regs, 0);
current->arch.guest_context.ctrlreg[0] &= ~X86_CR0_TS;
}
@@ -1710,8 +1696,6 @@ asmlinkage int do_debug(struct cpu_user_
{
unsigned long condition;
struct vcpu *v = current;
- struct trap_bounce *tb = &v->arch.trap_bounce;
- struct trap_info *ti;
__asm__ __volatile__("mov %%db6,%0" : "=r" (condition));
@@ -1741,12 +1725,7 @@ asmlinkage int do_debug(struct cpu_user_
/* Save debug status register where guest OS can peek at it */
v->arch.guest_context.debugreg[6] = condition;
- ti = &v->arch.guest_context.trap_ctxt[TRAP_debug];
- tb->flags = TBF_EXCEPTION;
- tb->cs = ti->cs;
- tb->eip = ti->address;
- if ( TI_GET_IF(ti) )
- tb->flags |= TBF_INTERRUPT;
+ return do_guest_trap(TRAP_debug, regs, 0);
out:
return EXCRET_not_a_fault;
diff -r a9d2106313fa -r 3fa6635d04b9 xen/arch/x86/x86_32/entry.S
--- a/xen/arch/x86/x86_32/entry.S Wed Nov 15 16:53:43 2006 +0000
+++ b/xen/arch/x86/x86_32/entry.S Wed Nov 15 18:41:06 2006 +0000
@@ -373,10 +373,11 @@ nvm86_3:/* Rewrite our stack frame and r
mov %gs,UREGS_ss+4(%esp)
movl %esi,UREGS_esp+4(%esp)
movzwl TRAPBOUNCE_cs(%edx),%eax
+ /* Null selectors (0-3) are not allowed. */
+ testl $~3,%eax
+ jz domain_crash_synchronous
movl %eax,UREGS_cs+4(%esp)
movl TRAPBOUNCE_eip(%edx),%eax
- test %eax,%eax
- jz domain_crash_synchronous
movl %eax,UREGS_eip+4(%esp)
movb $0,TRAPBOUNCE_flags(%edx)
ret
diff -r a9d2106313fa -r 3fa6635d04b9 xen/include/asm-x86/x86_32/regs.h
--- a/xen/include/asm-x86/x86_32/regs.h Wed Nov 15 16:53:43 2006 +0000
+++ b/xen/include/asm-x86/x86_32/regs.h Wed Nov 15 18:41:06 2006 +0000
@@ -16,6 +16,9 @@
#define permit_softint(dpl, v, r) \
((dpl) >= (vm86_mode(r) ? 3 : ((r)->cs & 3)))
+/* Check for null trap callback handler: Is the selector null (0-3)? */
+#define null_trap_bounce(tb) (((tb)->cs & ~3) == 0)
+
/* Number of bytes of on-stack execution state to be context-switched. */
#define CTXT_SWITCH_STACK_BYTES (sizeof(struct cpu_user_regs))
diff -r a9d2106313fa -r 3fa6635d04b9 xen/include/asm-x86/x86_64/regs.h
--- a/xen/include/asm-x86/x86_64/regs.h Wed Nov 15 16:53:43 2006 +0000
+++ b/xen/include/asm-x86/x86_64/regs.h Wed Nov 15 18:41:06 2006 +0000
@@ -16,6 +16,9 @@
#define permit_softint(dpl, v, r) \
((dpl) >= (guest_kernel_mode(v, r) ? 1 : 3))
+/* Check for null trap callback handler: Is the EIP null? */
+#define null_trap_bounce(tb) ((tb)->eip == 0)
+
/* Number of bytes of on-stack execution state to be context-switched. */
/* NB. Segment registers and bases are not saved/restored on x86/64 stack. */
#define CTXT_SWITCH_STACK_BYTES (offsetof(struct cpu_user_regs, es))
diff -r a9d2106313fa -r 3fa6635d04b9 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h Wed Nov 15 16:53:43 2006 +0000
+++ b/xen/include/xen/sched.h Wed Nov 15 18:41:06 2006 +0000
@@ -281,11 +281,6 @@ void __domain_crash_synchronous(void) __
printk("domain_crash_sync called from %s:%d\n", __FILE__, __LINE__); \
__domain_crash_synchronous(); \
} while (0)
-
-void new_thread(struct vcpu *d,
- unsigned long start_pc,
- unsigned long start_stack,
- unsigned long start_info);
#define set_current_state(_s) do { current->state = (_s); } while (0)
void scheduler_init(void);