# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID e398a9797c4c6dfccb3f34942e461b02cc6f6e43
# Parent 6f8ce90246f8d420d0ab8edf6a0bdb3b4783e888
some fixes to VMX:
1) enable ACPI in VMXAssist by default.
2) do nothing when emulating the wbinvd instruction in VMXAssist.
3) use macros in the vcpu number setting/getting code.
4) remove the unused flags parameter from xc_vmx_build.
5) remove usage of dsi.v_start in xc_vmx_build outside of the vmxloader
ELF handling.
6) unmap shared_page_va when destroying a VMX domain.
7) rename virtual_platform_def to vmx_platform.
8) remove useless code in vmx_setup_platform.
9) change the parameter of vmx_setup_platform from a vcpu to a domain.
10) in the Xen hypervisor, have VMX domains get their vcpu count from the
reserved E820 area (sketched below, after the sign-off).
11) in domain.c, don't define some functions when building for i386 or
without CONFIG_VMX.
Also removed some ugly trailing whitespace.
Signed-off-by: Xin Li <xin.b.li@xxxxxxxxx>
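For reference, the vcpu count travels from the domain builder to VMXAssist and to Xen through a small block in the reserved E820 page at 0x9F000, offset 0x800, tagged with the "vcpu" magic. The following minimal standalone sketch of that handshake is not part of the changeset: a plain in-memory buffer stands in for the mapped guest page (the real code maps it with xc_map_foreign_range() on the builder side and map_domain_page() in Xen), and write_vcpu_nr()/read_vcpu_nr() are illustrative names rather than functions from the patch.

#include <stdio.h>
#include <stdint.h>

#define VCPU_NR_PAGE   0x0009F000   /* reserved E820 page */
#define VCPU_NR_OFFSET 0x00000800   /* block lives at 0x9F800 */
#define VCPU_MAGIC     0x76637075   /* "vcpu" */

/* Builder side: what set_vcpu_nr() writes into the mapped page. */
static void write_vcpu_nr(uint8_t *page, unsigned int vcpus)
{
    uint32_t *block = (uint32_t *)(page + VCPU_NR_OFFSET);
    block[0] = VCPU_MAGIC;
    block[1] = vcpus;
}

/* Consumer side: what get_vcpu_nr()/vmx_set_vcpu_nr() read back,
 * falling back to one vcpu if the magic does not match. */
static unsigned int read_vcpu_nr(const uint8_t *page)
{
    const uint32_t *block = (const uint32_t *)(page + VCPU_NR_OFFSET);
    if (block[0] != VCPU_MAGIC) {
        puts("Bad vcpus magic, set vcpu number to 1 by default.");
        return 1;
    }
    return block[1];
}

int main(void)
{
    static uint8_t page[4096];   /* stand-in for the page at 0x9F000 */
    write_vcpu_nr(page, 4);
    printf("vcpus = %u\n", read_vcpu_nr(page));
    return 0;
}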
diff -r 6f8ce90246f8 -r e398a9797c4c tools/firmware/vmxassist/Makefile
--- a/tools/firmware/vmxassist/Makefile Sat Oct 22 10:04:45 2005
+++ b/tools/firmware/vmxassist/Makefile Sun Oct 23 10:51:15 2005
@@ -24,7 +24,7 @@
# The emulator code lives in ROM space
TEXTADDR=0x000D0000
-DEFINES=-DDEBUG -DTEXTADDR=$(TEXTADDR)
+DEFINES=-DDEBUG -D_ACPI_ -DTEXTADDR=$(TEXTADDR)
XENINC=-I$(XEN_ROOT)/tools/libxc
LD = ld
diff -r 6f8ce90246f8 -r e398a9797c4c tools/firmware/vmxassist/acpi_madt.c
--- a/tools/firmware/vmxassist/acpi_madt.c Sat Oct 22 10:04:45 2005
+++ b/tools/firmware/vmxassist/acpi_madt.c Sun Oct 23 10:51:15 2005
@@ -24,17 +24,19 @@
extern int puts(const char *s);
-#define VCPU_MAGIC 0x76637075 /* "vcpu" */
+#define VCPU_NR_PAGE 0x0009F000
+#define VCPU_NR_OFFSET 0x00000800
+#define VCPU_MAGIC 0x76637075 /* "vcpu" */
/* xc_vmx_builder wrote vcpu block at 0x9F800. Return it. */
-static int
-get_vcpus(void)
+static int
+get_vcpu_nr(void)
{
- unsigned long *vcpus;
+ unsigned int *vcpus;
- vcpus = (unsigned long *)0x9F800;
+ vcpus = (unsigned int *)(VCPU_NR_PAGE + VCPU_NR_OFFSET);
if (vcpus[0] != VCPU_MAGIC) {
- puts("Bad vcpus magic, set vcpu number=1\n");
+ puts("Bad vcpus magic, set vcpu number to 1 by default.\n");
return 1;
}
@@ -123,7 +125,7 @@
if (!madt)
return -1;
- rc = acpi_madt_set_local_apics(get_vcpus(), madt);
+ rc = acpi_madt_set_local_apics(get_vcpu_nr(), madt);
if (rc != 0)
return rc;
diff -r 6f8ce90246f8 -r e398a9797c4c tools/firmware/vmxassist/vm86.c
--- a/tools/firmware/vmxassist/vm86.c Sat Oct 22 10:04:45 2005
+++ b/tools/firmware/vmxassist/vm86.c Sun Oct 23 10:51:15 2005
@@ -784,7 +784,6 @@
}
break;
case 0x09: /* wbinvd */
- asm volatile ( "wbinvd" );
return OPC_EMULATED;
case 0x20: /* mov Rd, Cd (1h) */
case 0x22:
diff -r 6f8ce90246f8 -r e398a9797c4c tools/libxc/xc_ia64_stubs.c
--- a/tools/libxc/xc_ia64_stubs.c Sat Oct 22 10:04:45 2005
+++ b/tools/libxc/xc_ia64_stubs.c Sun Oct 23 10:51:15 2005
@@ -24,7 +24,6 @@
const char *ramdisk_name,
const char *cmdline,
unsigned int control_evtchn,
- unsigned long flags,
unsigned int vcpus,
unsigned int store_evtchn,
unsigned long *store_mfn)
diff -r 6f8ce90246f8 -r e398a9797c4c tools/libxc/xc_vmx_build.c
--- a/tools/libxc/xc_vmx_build.c Sat Oct 22 10:04:45 2005
+++ b/tools/libxc/xc_vmx_build.c Sun Oct 23 10:51:15 2005
@@ -29,9 +29,12 @@
#define E820_SHARED_PAGE 17
#define E820_XENSTORE 18
-#define E820_MAP_PAGE 0x00090000
-#define E820_MAP_NR_OFFSET 0x000001E8
-#define E820_MAP_OFFSET 0x000002D0
+#define E820_MAP_PAGE 0x00090000
+#define E820_MAP_NR_OFFSET 0x000001E8
+#define E820_MAP_OFFSET 0x000002D0
+
+#define VCPU_NR_PAGE 0x0009F000
+#define VCPU_NR_OFFSET 0x00000800
struct e820entry {
uint64_t addr;
@@ -120,23 +123,22 @@
* Use E820 reserved memory 0x9F800 to pass number of vcpus to vmxloader
* vmxloader will use it to config ACPI MADT table
*/
-#define VCPU_MAGIC 0x76637075 /* "vcpu" */
-static int
-set_nr_vcpus(int xc_handle, uint32_t dom, unsigned long *pfn_list,
- struct domain_setup_info *dsi, unsigned long vcpus)
-{
- char *va_map;
- unsigned long *va_vcpus;
-
- va_map = xc_map_foreign_range(
- xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
- pfn_list[(0x9F000 - dsi->v_start) >> PAGE_SHIFT]);
+#define VCPU_MAGIC 0x76637075 /* "vcpu" */
+static int set_vcpu_nr(int xc_handle, uint32_t dom,
+ unsigned long *pfn_list, unsigned int vcpus)
+{
+ char *va_map;
+ unsigned int *va_vcpus;
+
+ va_map = xc_map_foreign_range(xc_handle, dom,
+ PAGE_SIZE, PROT_READ|PROT_WRITE,
+ pfn_list[VCPU_NR_PAGE >> PAGE_SHIFT]);
if ( va_map == NULL )
return -1;
- va_vcpus = (unsigned long *)(va_map + 0x800);
- *va_vcpus++ = VCPU_MAGIC;
- *va_vcpus++ = vcpus;
+ va_vcpus = (unsigned int *)(va_map + VCPU_NR_OFFSET);
+ va_vcpus[0] = VCPU_MAGIC;
+ va_vcpus[1] = vcpus;
munmap(va_map, PAGE_SIZE);
@@ -277,7 +279,6 @@
vcpu_guest_context_t *ctxt,
unsigned long shared_info_frame,
unsigned int control_evtchn,
- unsigned long flags,
unsigned int vcpus,
unsigned int store_evtchn,
unsigned long *store_mfn)
@@ -366,7 +367,7 @@
goto error_out;
/* First allocate page for page dir or pdpt */
- ppt_alloc = (vpt_start - dsi.v_start) >> PAGE_SHIFT;
+ ppt_alloc = vpt_start >> PAGE_SHIFT;
if ( page_array[ppt_alloc] > 0xfffff )
{
unsigned long nmfn;
@@ -388,8 +389,8 @@
l2tab >> PAGE_SHIFT)) == NULL )
goto error_out;
memset(vl2tab, 0, PAGE_SIZE);
- vl2e = &vl2tab[l2_table_offset(dsi.v_start)];
- for ( count = 0; count < ((v_end-dsi.v_start)>>PAGE_SHIFT); count++ )
+ vl2e = &vl2tab[l2_table_offset(0)];
+ for ( count = 0; count < (v_end >> PAGE_SHIFT); count++ )
{
if ( ((unsigned long)vl1e & (PAGE_SIZE-1)) == 0 )
{
@@ -404,7 +405,7 @@
goto error_out;
}
memset(vl1tab, 0, PAGE_SIZE);
- vl1e = &vl1tab[l1_table_offset(dsi.v_start + (count<<PAGE_SHIFT))];
+ vl1e = &vl1tab[l1_table_offset(count << PAGE_SHIFT)];
*vl2e++ = l1tab | L2_PROT;
}
@@ -436,9 +437,8 @@
vl3tab[i] = l2tab | L3_PROT;
}
- vl3e = &vl3tab[l3_table_offset(dsi.v_start)];
-
- for ( count = 0; count < ((v_end-dsi.v_start)>>PAGE_SHIFT); count++ )
+ vl3e = &vl3tab[l3_table_offset(0)];
+ for ( count = 0; count < (v_end >> PAGE_SHIFT); count++ )
{
if (!(count & (1 << (L3_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT)))){
l2tab = vl3tab[count >> (L3_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT)]
@@ -452,7 +452,7 @@
l2tab >> PAGE_SHIFT)) == NULL )
goto error_out;
- vl2e = &vl2tab[l2_table_offset(dsi.v_start + (count << PAGE_SHIFT))];
+ vl2e = &vl2tab[l2_table_offset(count << PAGE_SHIFT)];
}
if ( ((unsigned long)vl1e & (PAGE_SIZE-1)) == 0 )
{
@@ -467,7 +467,7 @@
goto error_out;
}
memset(vl1tab, 0, PAGE_SIZE);
- vl1e = &vl1tab[l1_table_offset(dsi.v_start + (count<<PAGE_SHIFT))];
+ vl1e = &vl1tab[l1_table_offset(count << PAGE_SHIFT)];
*vl2e++ = l1tab | L2_PROT;
}
@@ -488,7 +488,10 @@
goto error_out;
}
- set_nr_vcpus(xc_handle, dom, page_array, &dsi, vcpus);
+ if (set_vcpu_nr(xc_handle, dom, page_array, vcpus)) {
+ fprintf(stderr, "Couldn't set vcpu number for VMX guest.\n");
+ goto error_out;
+ }
*store_mfn = page_array[(v_end-2) >> PAGE_SHIFT];
shared_page_frame = (v_end - PAGE_SIZE) >> PAGE_SHIFT;
@@ -566,28 +569,26 @@
return -1;
}
-
#define VMX_FEATURE_FLAG 0x20
static int vmx_identify(void)
{
int eax, ecx;
-#ifdef __i386__
- __asm__ __volatile__ ("pushl %%ebx; cpuid; popl %%ebx"
+ __asm__ __volatile__ (
+#if defined(__i386__)
+ "push %%ebx; cpuid; pop %%ebx"
+#elif defined(__x86_64__)
+ "push %%rbx; cpuid; pop %%rbx"
+#endif
: "=a" (eax), "=c" (ecx)
: "0" (1)
: "dx");
-#elif defined __x86_64__
- __asm__ __volatile__ ("pushq %%rbx; cpuid; popq %%rbx"
- : "=a" (eax), "=c" (ecx)
- : "0" (1)
- : "dx");
-#endif
if (!(ecx & VMX_FEATURE_FLAG)) {
return -1;
}
+
return 0;
}
@@ -596,7 +597,6 @@
int memsize,
const char *image_name,
unsigned int control_evtchn,
- unsigned long flags,
unsigned int vcpus,
unsigned int store_evtchn,
unsigned long *store_mfn)
@@ -651,9 +651,9 @@
goto error_out;
}
- if ( setup_guest(xc_handle, domid, memsize, image, image_size, nr_pages,
- ctxt, op.u.getdomaininfo.shared_info_frame, control_evtchn,
- flags, vcpus, store_evtchn, store_mfn) < 0)
+ if ( setup_guest(xc_handle, domid, memsize, image, image_size,
+ nr_pages, ctxt, op.u.getdomaininfo.shared_info_frame,
+ control_evtchn, vcpus, store_evtchn, store_mfn) < 0)
{
ERROR("Error constructing guest OS");
goto error_out;
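The consolidated vmx_identify() above issues CPUID leaf 1 and tests bit 5 of ECX (VMX_FEATURE_FLAG, 0x20), pushing and popping %ebx/%rbx by hand because the compiler may be using that register (e.g., as the PIC register on i386). Purely as an illustration, and not the patch's code, the same check can be written with GCC's <cpuid.h> helper, which handles that register bookkeeping itself:

/* Sketch only; not from the changeset. __get_cpuid() is GCC's helper
 * from <cpuid.h> and takes care of preserving %ebx/%rbx itself. */
#include <stdio.h>
#include <cpuid.h>

#define VMX_FEATURE_FLAG 0x20   /* CPUID.1:ECX bit 5 */

static int vmx_supported(void)
{
    unsigned int eax, ebx, ecx, edx;

    if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
        return 0;   /* CPUID leaf 1 not available */
    return (ecx & VMX_FEATURE_FLAG) != 0;
}

int main(void)
{
    printf("VMX %s\n", vmx_supported() ? "supported" : "not supported");
    return 0;
}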
diff -r 6f8ce90246f8 -r e398a9797c4c tools/libxc/xenguest.h
--- a/tools/libxc/xenguest.h Sat Oct 22 10:04:45 2005
+++ b/tools/libxc/xenguest.h Sun Oct 23 10:51:15 2005
@@ -51,13 +51,11 @@
unsigned int console_evtchn,
unsigned long *console_mfn);
-struct mem_map;
int xc_vmx_build(int xc_handle,
uint32_t domid,
int memsize,
const char *image_name,
unsigned int control_evtchn,
- unsigned long flags,
unsigned int vcpus,
unsigned int store_evtchn,
unsigned long *store_mfn);
diff -r 6f8ce90246f8 -r e398a9797c4c tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Sat Oct 22 10:04:45 2005
+++ b/tools/python/xen/lowlevel/xc/xc.c Sun Oct 23 10:51:15 2005
@@ -429,15 +429,15 @@
}
static PyObject *pyxc_vmx_build(PyObject *self,
- PyObject *args,
- PyObject *kwds)
-{
- XcObject *xc = (XcObject *)self;
-
- uint32_t dom;
+ PyObject *args,
+ PyObject *kwds)
+{
+ XcObject *xc = (XcObject *)self;
+
+ uint32_t dom;
char *image;
- int control_evtchn, store_evtchn;
- int flags = 0, vcpus = 1;
+ int control_evtchn, store_evtchn;
+ int vcpus = 1;
int memsize;
unsigned long store_mfn = 0;
@@ -450,7 +450,7 @@
return NULL;
if ( xc_vmx_build(xc->xc_handle, dom, memsize, image, control_evtchn,
- flags, vcpus, store_evtchn, &store_mfn) != 0 )
+ vcpus, store_evtchn, &store_mfn) != 0 )
return PyErr_SetFromErrno(xc_error);
return Py_BuildValue("{s:i}", "store_mfn", store_mfn);
diff -r 6f8ce90246f8 -r e398a9797c4c xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c Sat Oct 22 10:04:45 2005
+++ b/xen/arch/x86/domain.c Sun Oct 23 10:51:15 2005
@@ -1,6 +1,6 @@
/******************************************************************************
* arch/x86/domain.c
- *
+ *
* x86-specific domain handling (e.g., register setup and context switching).
*/
@@ -144,9 +144,7 @@
smp_send_stop();
disable_IO_APIC();
-#ifdef CONFIG_VMX
stop_vmx();
-#endif
/* Rebooting needs to touch the page at absolute address 0. */
*((unsigned short *)__va(0x472)) = reboot_mode;
@@ -204,7 +202,6 @@
page->u.inuse.type_info);
}
-
page = virt_to_page(d->shared_info);
printk("Shared_info@%p: caf=%08x, taf=%" PRtype_info "\n",
_p(page_to_phys(page)), page->count_info,
@@ -260,7 +257,7 @@
return;
v->arch.schedule_tail = continue_nonidle_task;
-
+
d->shared_info = alloc_xenheap_page();
memset(d->shared_info, 0, PAGE_SIZE);
v->vcpu_info = &d->shared_info->vcpu_data[v->vcpu_id];
@@ -268,7 +265,7 @@
SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
set_pfn_from_mfn(virt_to_phys(d->shared_info) >> PAGE_SHIFT,
INVALID_M2P_ENTRY);
-
+
d->arch.mm_perdomain_pt = alloc_xenheap_page();
memset(d->arch.mm_perdomain_pt, 0, PAGE_SIZE);
set_pfn_from_mfn(virt_to_phys(d->arch.mm_perdomain_pt) >> PAGE_SHIFT,
@@ -293,22 +290,22 @@
#ifdef __x86_64__
v->arch.guest_vl3table = __linear_l3_table;
v->arch.guest_vl4table = __linear_l4_table;
-
+
d->arch.mm_perdomain_l2 = alloc_xenheap_page();
memset(d->arch.mm_perdomain_l2, 0, PAGE_SIZE);
- d->arch.mm_perdomain_l2[l2_table_offset(PERDOMAIN_VIRT_START)] =
+ d->arch.mm_perdomain_l2[l2_table_offset(PERDOMAIN_VIRT_START)] =
l2e_from_page(virt_to_page(d->arch.mm_perdomain_pt),
__PAGE_HYPERVISOR);
d->arch.mm_perdomain_l3 = alloc_xenheap_page();
memset(d->arch.mm_perdomain_l3, 0, PAGE_SIZE);
- d->arch.mm_perdomain_l3[l3_table_offset(PERDOMAIN_VIRT_START)] =
+ d->arch.mm_perdomain_l3[l3_table_offset(PERDOMAIN_VIRT_START)] =
l3e_from_page(virt_to_page(d->arch.mm_perdomain_l2),
__PAGE_HYPERVISOR);
#endif
-
+
(void)ptwr_init(d);
-
- shadow_lock_init(d);
+
+ shadow_lock_init(d);
INIT_LIST_HEAD(&d->arch.free_shadow_frames);
}
@@ -326,34 +323,6 @@
v->arch.schedule_tail = arch_vmx_do_relaunch;
}
}
-
-#ifdef CONFIG_VMX
-static int vmx_switch_on;
-
-static void vmx_final_setup_guest(struct vcpu *v)
-{
- v->arch.schedule_tail = arch_vmx_do_launch;
-
- if (v == v->domain->vcpu[0]) {
- /*
- * Required to do this once per domain
- * XXX todo: add a seperate function to do these.
- */
- memset(&v->domain->shared_info->evtchn_mask[0], 0xff,
- sizeof(v->domain->shared_info->evtchn_mask));
-
- /* Put the domain in shadow mode even though we're going to be using
- * the shared 1:1 page table initially. It shouldn't hurt */
- shadow_mode_enable(v->domain,
- SHM_enable|SHM_refcounts|
- SHM_translate|SHM_external);
- }
-
- if (!vmx_switch_on)
- vmx_switch_on = 1;
-}
-#endif
-
/* This is called by arch_final_setup_guest and do_boot_vcpu */
int arch_set_info_guest(
@@ -422,7 +391,7 @@
}
else if ( !(c->flags & VGCF_VMX_GUEST) )
{
- if ( !get_page_and_type(&frame_table[phys_basetab>>PAGE_SHIFT], d,
+ if ( !get_page_and_type(&frame_table[phys_basetab>>PAGE_SHIFT], d,
PGT_base_page_table) )
return -EINVAL;
}
@@ -507,12 +476,6 @@
: "=r" (__r) : "r" (value), "0" (__r) );\
__r; })
-#if CONFIG_VMX
-#define load_msrs(n) if (vmx_switch_on) vmx_load_msrs(n)
-#else
-#define load_msrs(n) ((void)0)
-#endif
-
/*
* save_segments() writes a mask of segments which are dirty (non-zero),
* allowing load_segments() to avoid some expensive segment loads and
@@ -590,7 +553,7 @@
struct cpu_user_regs *regs = guest_cpu_user_regs();
unsigned long *rsp =
(n->arch.flags & TF_kernel_mode) ?
- (unsigned long *)regs->rsp :
+ (unsigned long *)regs->rsp :
(unsigned long *)nctxt->kernel_sp;
if ( !(n->arch.flags & TF_kernel_mode) )
@@ -690,9 +653,9 @@
regs->r11 = stu.r11;
regs->rcx = stu.rcx;
}
-
+
/* Saved %rax gets written back to regs->rax in entry.S. */
- return stu.rax;
+ return stu.rax;
}
#define switch_kernel_stack(_n,_c) ((void)0)
@@ -700,7 +663,6 @@
#elif defined(__i386__)
#define load_segments(n) ((void)0)
-#define load_msrs(n) ((void)0)
#define save_segments(p) ((void)0)
static inline void switch_kernel_stack(struct vcpu *n, unsigned int cpu)
@@ -725,7 +687,7 @@
if ( !is_idle_task(p->domain) )
{
memcpy(&p->arch.guest_context.user_regs,
- stack_regs,
+ stack_regs,
CTXT_SWITCH_STACK_BYTES);
unlazy_fpu(p);
save_segments(p);
@@ -811,7 +773,7 @@
{
load_LDT(next);
load_segments(next);
- load_msrs(next);
+ vmx_load_msrs(next);
}
}
@@ -883,7 +845,7 @@
#if defined(__i386__)
regs->eax = op;
regs->eip -= 2; /* re-execute 'int 0x82' */
-
+
for ( i = 0; i < nr_args; i++ )
{
switch ( i )
@@ -899,7 +861,7 @@
#elif defined(__x86_64__)
regs->rax = op;
regs->rip -= 2; /* re-execute 'syscall' */
-
+
for ( i = 0; i < nr_args; i++ )
{
switch ( i )
@@ -920,20 +882,6 @@
return op;
}
-#ifdef CONFIG_VMX
-static void vmx_relinquish_resources(struct vcpu *v)
-{
- if ( !VMX_DOMAIN(v) )
- return;
-
- destroy_vmcs(&v->arch.arch_vmx);
- free_monitor_pagetable(v);
- rem_ac_timer(&v->domain->arch.vmx_platform.vmx_pit.pit_timer);
-}
-#else
-#define vmx_relinquish_resources(_v) ((void)0)
-#endif
-
static void relinquish_memory(struct domain *d, struct list_head *list)
{
struct list_head *ent;
@@ -972,7 +920,7 @@
for ( ; ; )
{
x = y;
- if ( likely((x & (PGT_type_mask|PGT_validated)) !=
+ if ( likely((x & (PGT_type_mask|PGT_validated)) !=
(PGT_base_page_table|PGT_validated)) )
break;
@@ -1033,7 +981,7 @@
shadow_mode_disable(d);
/*
- * Relinquish GDT mappings. No need for explicit unmapping of the LDT as
+ * Relinquish GDT mappings. No need for explicit unmapping of the LDT as
* it automatically gets squashed when the guest's mappings go away.
*/
for_each_vcpu(d, v)
diff -r 6f8ce90246f8 -r e398a9797c4c xen/arch/x86/vmx.c
--- a/xen/arch/x86/vmx.c Sat Oct 22 10:04:45 2005
+++ b/xen/arch/x86/vmx.c Sun Oct 23 10:51:15 2005
@@ -57,6 +57,47 @@
#define TRACE_VMEXIT(index,value) ((void)0)
#endif
+static int vmx_switch_on;
+
+void vmx_final_setup_guest(struct vcpu *v)
+{
+ v->arch.schedule_tail = arch_vmx_do_launch;
+
+ if ( v == v->domain->vcpu[0] )
+ {
+ /*
+ * Required to do this once per domain
+ * XXX todo: add a seperate function to do these.
+ */
+ memset(&v->domain->shared_info->evtchn_mask[0], 0xff,
+ sizeof(v->domain->shared_info->evtchn_mask));
+
+ /* Put the domain in shadow mode even though we're going to be using
+ * the shared 1:1 page table initially. It shouldn't hurt */
+ shadow_mode_enable(v->domain,
+ SHM_enable|SHM_refcounts|
+ SHM_translate|SHM_external);
+ }
+
+ vmx_switch_on = 1;
+}
+
+void vmx_relinquish_resources(struct vcpu *v)
+{
+ if ( !VMX_DOMAIN(v) )
+ return;
+
+ if (v->vcpu_id == 0) {
+ /* unmap IO shared page */
+ struct domain *d = v->domain;
+ unmap_domain_page((void *)d->arch.vmx_platform.shared_page_va);
+ }
+
+ destroy_vmcs(&v->arch.arch_vmx);
+ free_monitor_pagetable(v);
+ rem_ac_timer(&v->domain->arch.vmx_platform.vmx_pit.pit_timer);
+}
+
#ifdef __x86_64__
static struct msr_state percpu_msr[NR_CPUS];
@@ -76,6 +117,9 @@
{
struct msr_state *host_state;
host_state = &percpu_msr[smp_processor_id()];
+
+ if ( !vmx_switch_on )
+ return;
while (host_state->flags){
int i;
diff -r 6f8ce90246f8 -r e398a9797c4c xen/arch/x86/vmx_io.c
--- a/xen/arch/x86/vmx_io.c Sat Oct 22 10:04:45 2005
+++ b/xen/arch/x86/vmx_io.c Sun Oct 23 10:51:15 2005
@@ -883,7 +883,7 @@
int highest_vector;
unsigned long intr_fields, eflags, interruptibility, cpu_exec_control;
struct vcpu *v = current;
- struct virtual_platform_def *plat=&v->domain->arch.vmx_platform;
+ struct vmx_platform *plat=&v->domain->arch.vmx_platform;
struct vmx_virpit *vpit = &plat->vmx_pit;
struct vmx_virpic *pic= &plat->vmx_pic;
diff -r 6f8ce90246f8 -r e398a9797c4c xen/arch/x86/vmx_vmcs.c
--- a/xen/arch/x86/vmx_vmcs.c Sat Oct 22 10:04:45 2005
+++ b/xen/arch/x86/vmx_vmcs.c Sun Oct 23 10:51:15 2005
@@ -142,7 +142,7 @@
#endif
};
-static void get_io_shared_page(struct vcpu *v)
+static void vmx_map_io_shared_page(struct domain *d)
{
int i;
unsigned char e820_map_nr;
@@ -150,9 +150,6 @@
unsigned char *p;
unsigned long mpfn;
unsigned long gpfn = 0;
-
- if (!(VMX_DOMAIN(v) && (v->vcpu_id == 0)))
- return;
local_flush_tlb_pge();
@@ -200,24 +197,61 @@
printk("Can not map io request shared page for VMX domain.\n");
domain_crash();
}
- v->domain->arch.vmx_platform.shared_page_va = (unsigned long)p;
-
- VMX_DBG_LOG(DBG_LEVEL_1, "eport: %x\n", iopacket_port(v->domain));
-
- clear_bit(iopacket_port(v->domain),
- &v->domain->shared_info->evtchn_mask[0]);
-}
-
-static void vmx_setup_platform(struct vcpu *v)
-{
- struct virtual_platform_def *platform;
- if (v->vcpu_id == 0) {
- get_io_shared_page(v);
- platform = &v->domain->arch.vmx_platform;
- pic_init(&platform->vmx_pic, pic_irq_request,
- &platform->interrupt_request);
- register_pic_io_hook();
- }
+ d->arch.vmx_platform.shared_page_va = (unsigned long)p;
+
+ VMX_DBG_LOG(DBG_LEVEL_1, "eport: %x\n", iopacket_port(d));
+
+ clear_bit(iopacket_port(d),
+ &d->shared_info->evtchn_mask[0]);
+}
+
+#define VCPU_NR_PAGE 0x0009F000
+#define VCPU_NR_OFFSET 0x00000800
+#define VCPU_MAGIC 0x76637075 /* "vcpu" */
+
+static void vmx_set_vcpu_nr(struct domain *d)
+{
+ unsigned char *p;
+ unsigned long mpfn;
+ unsigned int *vcpus;
+
+ mpfn = get_mfn_from_pfn(VCPU_NR_PAGE >> PAGE_SHIFT);
+ if (mpfn == INVALID_MFN) {
+ printk("Can not get vcpu number page mfn for VMX domain.\n");
+ domain_crash_synchronous();
+ }
+
+ p = map_domain_page(mpfn);
+ if (p == NULL) {
+ printk("Can not map vcpu number page for VMX domain.\n");
+ domain_crash_synchronous();
+ }
+
+ vcpus = (unsigned int *)(p + VCPU_NR_OFFSET);
+ if (vcpus[0] != VCPU_MAGIC) {
+ printk("Bad vcpus magic, set vcpu number to 1 by default.\n");
+ d->arch.vmx_platform.nr_vcpu = 1;
+ }
+
+ d->arch.vmx_platform.nr_vcpu = vcpus[1];
+
+ unmap_domain_page(p);
+}
+
+static void vmx_setup_platform(struct domain* d)
+{
+ struct vmx_platform *platform;
+
+ if (!(VMX_DOMAIN(current) && (current->vcpu_id == 0)))
+ return;
+
+ vmx_map_io_shared_page(d);
+ vmx_set_vcpu_nr(d);
+
+ platform = &d->arch.vmx_platform;
+ pic_init(&platform->vmx_pic, pic_irq_request,
+ &platform->interrupt_request);
+ register_pic_io_hook();
}
static void vmx_set_host_env(struct vcpu *v)
@@ -249,9 +283,10 @@
{
/* Update CR3, GDT, LDT, TR */
unsigned int error = 0;
- unsigned long pfn = 0;
unsigned long cr0, cr4;
- struct pfn_info *page;
+
+ if (v->vcpu_id == 0)
+ vmx_setup_platform(v->domain);
__asm__ __volatile__ ("mov %%cr0,%0" : "=r" (cr0) : );
@@ -277,12 +312,6 @@
error |= __vmwrite(CR4_READ_SHADOW, cr4);
vmx_stts();
-
- page = (struct pfn_info *) alloc_domheap_page(NULL);
- pfn = (unsigned long) (page - frame_table);
-
- if ( v == v->domain->vcpu[0] )
- vmx_setup_platform(v);
vmx_set_host_env(v);
diff -r 6f8ce90246f8 -r e398a9797c4c xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h Sat Oct 22 10:04:45 2005
+++ b/xen/include/asm-x86/domain.h Sun Oct 23 10:51:15 2005
@@ -61,8 +61,8 @@
struct list_head free_shadow_frames;
- pagetable_t phys_table; /* guest 1:1 pagetable */
- struct virtual_platform_def vmx_platform;
+ pagetable_t phys_table; /* guest 1:1 pagetable */
+ struct vmx_platform vmx_platform;
} __cacheline_aligned;
struct arch_vcpu
diff -r 6f8ce90246f8 -r e398a9797c4c xen/include/asm-x86/vmx.h
--- a/xen/include/asm-x86/vmx.h Sat Oct 22 10:04:45 2005
+++ b/xen/include/asm-x86/vmx.h Sun Oct 23 10:51:15 2005
@@ -503,6 +503,11 @@
return 0;
}
+static inline unsigned int vmx_get_vcpu_nr(struct domain *d)
+{
+ return d->arch.vmx_platform.nr_vcpu;
+}
+
static inline shared_iopage_t *get_sp(struct domain *d)
{
return (shared_iopage_t *) d->arch.vmx_platform.shared_page_va;
diff -r 6f8ce90246f8 -r e398a9797c4c xen/include/asm-x86/vmx_platform.h
--- a/xen/include/asm-x86/vmx_platform.h Sat Oct 22 10:04:45 2005
+++ b/xen/include/asm-x86/vmx_platform.h Sun Oct 23 10:51:15 2005
@@ -77,8 +77,10 @@
#define MAX_INST_LEN 32
-struct virtual_platform_def {
+struct vmx_platform {
unsigned long shared_page_va;
+ unsigned int nr_vcpu;
+
struct vmx_virpit vmx_pit;
struct vmx_io_handler vmx_io_handler;
struct vmx_virpic vmx_pic;
diff -r 6f8ce90246f8 -r e398a9797c4c xen/include/asm-x86/vmx_vmcs.h
--- a/xen/include/asm-x86/vmx_vmcs.h Sat Oct 22 10:04:45 2005
+++ b/xen/include/asm-x86/vmx_vmcs.h Sun Oct 23 10:51:15 2005
@@ -34,6 +34,9 @@
#define vmx_load_msrs(_n) ((void)0)
#define vmx_restore_msrs(_v) ((void)0)
#endif
+
+void vmx_final_setup_guest(struct vcpu *v);
+void vmx_relinquish_resources(struct vcpu *v);
void vmx_enter_scheduler(void);