ChangeSet 1.1459, 2005/05/09 15:34:59+01:00, mafetter@xxxxxxxxxxxxxxxx
Hand merge
arch/x86/domain.c | 332 ++++++++++++++++++++++-------------------------
arch/x86/domain_build.c | 13 -
arch/x86/mm.c | 24 +--
arch/x86/shadow.c | 8 -
arch/x86/traps.c | 92 ++++++-------
arch/x86/vmx.c | 50 +++----
include/asm-x86/shadow.h | 2
7 files changed, 257 insertions(+), 264 deletions(-)
diff -Nru a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c 2005-05-09 14:08:14 -04:00
+++ b/xen/arch/x86/domain.c 2005-05-09 14:08:14 -04:00
@@ -37,6 +37,7 @@
#include <asm/vmx.h>
#include <asm/vmx_vmcs.h>
#include <asm/msr.h>
+#include <asm/physdev.h>
#include <xen/kernel.h>
#include <public/io/ioreq.h>
#include <xen/multicall.h>
@@ -50,6 +51,16 @@
} __cacheline_aligned;
static struct percpu_ctxt percpu_ctxt[NR_CPUS];
+static void continue_idle_task(struct exec_domain *ed)
+{
+ reset_stack_and_jump(idle_loop);
+}
+
+static void continue_nonidle_task(struct exec_domain *ed)
+{
+ reset_stack_and_jump(ret_from_intr);
+}
+
static void default_idle(void)
{
local_irq_disable();
@@ -59,7 +70,7 @@
local_irq_enable();
}
-static __attribute_used__ void idle_loop(void)
+void idle_loop(void)
{
int cpu = smp_processor_id();
for ( ; ; )
@@ -74,24 +85,32 @@
}
}
+static void __startup_cpu_idle_loop(struct exec_domain *ed)
+{
+ /* Signal to boot CPU that we are done. */
+ init_idle();
+
+ /* Start normal idle loop. */
+ ed->arch.schedule_tail = continue_idle_task;
+ continue_idle_task(ed);
+}
+
void startup_cpu_idle_loop(void)
{
+ struct exec_domain *ed = current;
+
/* Just some sanity to ensure that the scheduler is set up okay. */
- ASSERT(current->domain->id == IDLE_DOMAIN_ID);
- percpu_ctxt[smp_processor_id()].curr_ed = current;
- set_bit(smp_processor_id(), &current->domain->cpuset);
- domain_unpause_by_systemcontroller(current->domain);
+ ASSERT(ed->domain->id == IDLE_DOMAIN_ID);
+ percpu_ctxt[smp_processor_id()].curr_ed = ed;
+ set_bit(smp_processor_id(), &ed->domain->cpuset);
+ domain_unpause_by_systemcontroller(ed->domain);
+
+ ed->arch.schedule_tail = __startup_cpu_idle_loop;
raise_softirq(SCHEDULE_SOFTIRQ);
do_softirq();
- /*
- * Declares CPU setup done to the boot processor.
- * Therefore memory barrier to ensure state is visible.
- */
- smp_mb();
- init_idle();
-
- idle_loop();
+ /* End up in __startup_cpu_idle_loop, not here. */
+ BUG();
}
static long no_idt[2];
@@ -219,16 +238,6 @@
#endif
}
-static void continue_idle_task(struct exec_domain *ed)
-{
- reset_stack_and_jump(idle_loop);
-}
-
-static void continue_nonidle_task(struct exec_domain *ed)
-{
- reset_stack_and_jump(ret_from_intr);
-}
-
void arch_do_createdomain(struct exec_domain *ed)
{
struct domain *d = ed->domain;
@@ -237,17 +246,13 @@
ed->arch.flags = TF_kernel_mode;
- if ( d->id == IDLE_DOMAIN_ID )
- {
- ed->arch.schedule_tail = continue_idle_task;
- }
- else
+ if ( d->id != IDLE_DOMAIN_ID )
{
ed->arch.schedule_tail = continue_nonidle_task;
d->shared_info = (void *)alloc_xenheap_page();
memset(d->shared_info, 0, PAGE_SIZE);
- ed->vcpu_info = &d->shared_info->vcpu_data[ed->eid];
+ ed->vcpu_info = &d->shared_info->vcpu_data[ed->id];
SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
machine_to_phys_mapping[virt_to_phys(d->shared_info) >>
PAGE_SHIFT] = INVALID_M2P_ENTRY;
@@ -289,7 +294,7 @@
struct domain *d = ed->domain;
ed->arch.schedule_tail = d->exec_domain[0]->arch.schedule_tail;
ed->arch.perdomain_ptes =
- d->arch.mm_perdomain_pt + (ed->eid << PDPT_VCPU_SHIFT);
+ d->arch.mm_perdomain_pt + (ed->id << PDPT_VCPU_SHIFT);
ed->arch.flags = TF_kernel_mode;
}
@@ -312,14 +317,14 @@
reset_stack_and_jump(vmx_asm_do_launch);
}
-static int vmx_final_setup_guest(struct exec_domain *ed,
- full_execution_context_t *full_context)
+static int vmx_final_setup_guest(
+ struct exec_domain *ed, struct vcpu_guest_context *ctxt)
{
int error;
- execution_context_t *context;
+ struct cpu_user_regs *regs;
struct vmcs_struct *vmcs;
- context = &full_context->cpu_ctxt;
+ regs = &ctxt->user_regs;
/*
* Create a new VMCS
@@ -333,7 +338,7 @@
ed->arch.arch_vmx.vmcs = vmcs;
error = construct_vmcs(
- &ed->arch.arch_vmx, context, full_context, VMCS_USE_HOST_ENV);
+ &ed->arch.arch_vmx, regs, ctxt, VMCS_USE_HOST_ENV);
if ( error < 0 )
{
printk("Failed to construct a new VMCS\n");
@@ -345,7 +350,7 @@
#if defined (__i386)
ed->arch.arch_vmx.vmx_platform.real_mode_data =
- (unsigned long *) context->esi;
+ (unsigned long *) regs->esi;
#endif
if (ed == ed->domain->exec_domain[0]) {
@@ -375,7 +380,7 @@
/* This is called by arch_final_setup_guest and do_boot_vcpu */
int arch_set_info_guest(
- struct exec_domain *ed, full_execution_context_t *c)
+ struct exec_domain *ed, struct vcpu_guest_context *c)
{
struct domain *d = ed->domain;
unsigned long phys_basetab;
@@ -386,66 +391,43 @@
* #GP. If DS, ES, FS, GS are DPL 0 then they'll be cleared automatically.
* If SS RPL or DPL differs from CS RPL then we'll #GP.
*/
- if (!(c->flags & ECF_VMX_GUEST))
- if ( ((c->cpu_ctxt.cs & 3) == 0) ||
- ((c->cpu_ctxt.ss & 3) == 0) )
+ if ( !(c->flags & VGCF_VMX_GUEST) )
+ {
+ if ( ((c->user_regs.cs & 3) == 0) ||
+ ((c->user_regs.ss & 3) == 0) )
return -EINVAL;
+ }
- clear_bit(EDF_DONEFPUINIT, &ed->ed_flags);
- if ( c->flags & ECF_I387_VALID )
- set_bit(EDF_DONEFPUINIT, &ed->ed_flags);
+ clear_bit(EDF_DONEFPUINIT, &ed->flags);
+ if ( c->flags & VGCF_I387_VALID )
+ set_bit(EDF_DONEFPUINIT, &ed->flags);
ed->arch.flags &= ~TF_kernel_mode;
- if ( c->flags & ECF_IN_KERNEL )
+ if ( c->flags & VGCF_IN_KERNEL )
ed->arch.flags |= TF_kernel_mode;
- memcpy(&ed->arch.user_ctxt,
- &c->cpu_ctxt,
- sizeof(ed->arch.user_ctxt));
-
- memcpy(&ed->arch.i387,
- &c->fpu_ctxt,
- sizeof(ed->arch.i387));
+ memcpy(&ed->arch.guest_context, c, sizeof(*c));
/* IOPL privileges are virtualised. */
- ed->arch.iopl = (ed->arch.user_ctxt.eflags >> 12) & 3;
- ed->arch.user_ctxt.eflags &= ~EF_IOPL;
+ ed->arch.iopl = (ed->arch.guest_context.user_regs.eflags >> 12) & 3;
+ ed->arch.guest_context.user_regs.eflags &= ~EF_IOPL;
/* Clear IOPL for unprivileged domains. */
- if (!IS_PRIV(d))
- ed->arch.user_ctxt.eflags &= 0xffffcfff;
+ if ( !IS_PRIV(d) )
+ ed->arch.guest_context.user_regs.eflags &= 0xffffcfff;
- if (test_bit(EDF_DONEINIT, &ed->ed_flags))
+ if ( test_bit(EDF_DONEINIT, &ed->flags) )
return 0;
- memcpy(ed->arch.traps,
- &c->trap_ctxt,
- sizeof(ed->arch.traps));
-
if ( (rc = (int)set_fast_trap(ed, c->fast_trap_idx)) != 0 )
return rc;
- ed->arch.ldt_base = c->ldt_base;
- ed->arch.ldt_ents = c->ldt_ents;
-
- ed->arch.kernel_ss = c->kernel_ss;
- ed->arch.kernel_sp = c->kernel_esp;
-
+ memset(ed->arch.guest_context.debugreg, 0,
+ sizeof(ed->arch.guest_context.debugreg));
for ( i = 0; i < 8; i++ )
(void)set_debugreg(ed, i, c->debugreg[i]);
-#if defined(__i386__)
- ed->arch.event_selector = c->event_callback_cs;
- ed->arch.event_address = c->event_callback_eip;
- ed->arch.failsafe_selector = c->failsafe_callback_cs;
- ed->arch.failsafe_address = c->failsafe_callback_eip;
-#elif defined(__x86_64__)
- ed->arch.event_address = c->event_callback_eip;
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|