# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID 45e34f00a78f68842dc149ff29eaada1e296ef6f
# Parent 722cc2390021eadb162a3ffaf7f537125ee89618
[HVM] Clean up VCPU initialisation in Xen. Xen no longer
parses HVM e820 tables; a few extra HVM parameters provide a
cleaner alternative. Lots of code removal.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
tools/firmware/vmxassist/setup.c | 3
tools/libxc/xc_hvm_build.c | 103 +++++++-------------
xen/arch/x86/domain.c | 48 ++++-----
xen/arch/x86/hvm/hvm.c | 189 ++++++++-----------------------------
xen/arch/x86/hvm/svm/svm.c | 37 -------
xen/arch/x86/hvm/vmx/vmcs.c | 21 ----
xen/arch/x86/hvm/vmx/vmx.c | 80 ++-------------
xen/arch/x86/setup.c | 2
xen/include/asm-x86/hvm/hvm.h | 22 +---
xen/include/asm-x86/hvm/vmx/vmcs.h | 5
xen/include/public/hvm/e820.h | 6 -
xen/include/public/hvm/params.h | 4
12 files changed, 137 insertions(+), 383 deletions(-)
diff -r 722cc2390021 -r 45e34f00a78f tools/firmware/vmxassist/setup.c
--- a/tools/firmware/vmxassist/setup.c Thu Nov 02 14:27:16 2006 +0000
+++ b/tools/firmware/vmxassist/setup.c Thu Nov 02 15:55:51 2006 +0000
@@ -53,13 +53,10 @@ struct e820entry e820map[] = {
struct e820entry e820map[] = {
{ 0x0000000000000000ULL, 0x000000000009F800ULL, E820_RAM },
{ 0x000000000009F800ULL, 0x0000000000000800ULL, E820_RESERVED },
- { 0x00000000000A0000ULL, 0x0000000000020000ULL, E820_IO },
{ 0x00000000000C0000ULL, 0x0000000000040000ULL, E820_RESERVED },
{ 0x0000000000100000ULL, 0x0000000000000000ULL, E820_RAM },
- { 0x0000000000000000ULL, 0x0000000000001000ULL, E820_SHARED_PAGE },
{ 0x0000000000000000ULL, 0x0000000000003000ULL, E820_NVS },
{ 0x0000000000003000ULL, 0x000000000000A000ULL, E820_ACPI },
- { 0x00000000FEC00000ULL, 0x0000000001400000ULL, E820_IO },
};
#endif /* TEST */
diff -r 722cc2390021 -r 45e34f00a78f tools/libxc/xc_hvm_build.c
--- a/tools/libxc/xc_hvm_build.c Thu Nov 02 14:27:16 2006 +0000
+++ b/tools/libxc/xc_hvm_build.c Thu Nov 02 15:55:51 2006 +0000
@@ -56,11 +56,12 @@ static void build_e820map(void *e820_pag
unsigned char nr_map = 0;
/*
- * physical address space from HVM_BELOW_4G_RAM_END to 4G is reserved
+ * Physical address space from HVM_BELOW_4G_RAM_END to 4G is reserved
* for PCI devices MMIO. So if HVM has more than HVM_BELOW_4G_RAM_END
* RAM, memory beyond HVM_BELOW_4G_RAM_END will go to 4G above.
*/
- if ( mem_size > HVM_BELOW_4G_RAM_END ) {
+ if ( mem_size > HVM_BELOW_4G_RAM_END )
+ {
extra_mem_size = mem_size - HVM_BELOW_4G_RAM_END;
mem_size = HVM_BELOW_4G_RAM_END;
}
@@ -75,11 +76,6 @@ static void build_e820map(void *e820_pag
e820entry[nr_map].type = E820_RESERVED;
nr_map++;
- e820entry[nr_map].addr = 0xA0000;
- e820entry[nr_map].size = 0x20000;
- e820entry[nr_map].type = E820_IO;
- nr_map++;
-
e820entry[nr_map].addr = 0xEA000;
e820entry[nr_map].size = 0x01000;
e820entry[nr_map].type = E820_ACPI;
@@ -90,54 +86,14 @@ static void build_e820map(void *e820_pag
e820entry[nr_map].type = E820_RESERVED;
nr_map++;
-/* buffered io page. */
-#define BUFFERED_IO_PAGES 1
-/* xenstore page. */
-#define XENSTORE_PAGES 1
-/* shared io page. */
-#define SHARED_IO_PAGES 1
-/* totally 16 static pages are reserved in E820 table */
-
- /* Most of the ram goes here */
+ /* Low RAM goes here. Remove 3 pages for ioreq, bufioreq, and xenstore. */
e820entry[nr_map].addr = 0x100000;
- e820entry[nr_map].size = mem_size - 0x100000 - PAGE_SIZE *
- (BUFFERED_IO_PAGES +
- XENSTORE_PAGES +
- SHARED_IO_PAGES);
+ e820entry[nr_map].size = mem_size - 0x100000 - PAGE_SIZE * 3;
e820entry[nr_map].type = E820_RAM;
nr_map++;
- /* Statically allocated special pages */
-
- /* For buffered IO requests */
- e820entry[nr_map].addr = mem_size - PAGE_SIZE *
- (BUFFERED_IO_PAGES +
- XENSTORE_PAGES +
- SHARED_IO_PAGES);
- e820entry[nr_map].size = PAGE_SIZE * BUFFERED_IO_PAGES;
- e820entry[nr_map].type = E820_BUFFERED_IO;
- nr_map++;
-
- /* For xenstore */
- e820entry[nr_map].addr = mem_size - PAGE_SIZE *
- (XENSTORE_PAGES +
- SHARED_IO_PAGES);
- e820entry[nr_map].size = PAGE_SIZE * XENSTORE_PAGES;
- e820entry[nr_map].type = E820_XENSTORE;
- nr_map++;
-
- /* Shared ioreq_t page */
- e820entry[nr_map].addr = mem_size - PAGE_SIZE * SHARED_IO_PAGES;
- e820entry[nr_map].size = PAGE_SIZE * SHARED_IO_PAGES;
- e820entry[nr_map].type = E820_SHARED_PAGE;
- nr_map++;
-
- e820entry[nr_map].addr = 0xFEC00000;
- e820entry[nr_map].size = 0x1400000;
- e820entry[nr_map].type = E820_IO;
- nr_map++;
-
- if ( extra_mem_size ) {
+ if ( extra_mem_size )
+ {
e820entry[nr_map].addr = (1ULL << 32);
e820entry[nr_map].size = extra_mem_size;
e820entry[nr_map].type = E820_RAM;
@@ -212,6 +168,7 @@ static int setup_guest(int xc_handle,
void *e820_page;
struct domain_setup_info dsi;
uint64_t v_end;
+ int rc;
memset(&dsi, 0, sizeof(struct domain_setup_info));
@@ -253,10 +210,25 @@ static int setup_guest(int xc_handle,
for ( i = HVM_BELOW_4G_RAM_END >> PAGE_SHIFT; i < nr_pages; i++ )
page_array[i] += HVM_BELOW_4G_MMIO_LENGTH >> PAGE_SHIFT;
- if ( xc_domain_memory_populate_physmap(xc_handle, dom, nr_pages,
- 0, 0, page_array) )
+ /* Allocate memory for HVM guest, skipping VGA hole 0xA0000-0xC0000. */
+ rc = xc_domain_memory_populate_physmap(
+ xc_handle, dom, (nr_pages > 0xa0) ? 0xa0 : nr_pages,
+ 0, 0, &page_array[0x00]);
+ if ( (rc == 0) && (nr_pages > 0xc0) )
+ rc = xc_domain_memory_populate_physmap(
+ xc_handle, dom, nr_pages - 0xc0, 0, 0, &page_array[0xc0]);
+ if ( rc != 0 )
{
PERROR("Could not allocate memory for HVM guest.\n");
+ goto error_out;
+ }
+
+ if ( (nr_pages > 0xa0) &&
+ xc_domain_memory_decrease_reservation(
+ xc_handle, dom, (nr_pages < 0xc0) ? (nr_pages - 0xa0) : 0x20,
+ 0, &page_array[0xa0]) )
+ {
+ PERROR("Could not free VGA hole.\n");
goto error_out;
}
@@ -295,6 +267,8 @@ static int setup_guest(int xc_handle,
/* Mask all upcalls... */
for ( i = 0; i < MAX_VIRT_CPUS; i++ )
shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
+ memset(&shared_info->evtchn_mask[0], 0xff,
+ sizeof(shared_info->evtchn_mask));
munmap(shared_info, PAGE_SIZE);
if ( v_end > HVM_BELOW_4G_RAM_END )
@@ -302,22 +276,17 @@ static int setup_guest(int xc_handle,
else
shared_page_nr = (v_end >> PAGE_SHIFT) - 1;
+ /* Paranoia: clean pages. */
+ if ( xc_clear_domain_page(xc_handle, dom, page_array[shared_page_nr]) ||
+ xc_clear_domain_page(xc_handle, dom, page_array[shared_page_nr-1]) ||
+ xc_clear_domain_page(xc_handle, dom, page_array[shared_page_nr-2]) )
+ goto error_out;
+
*store_mfn = page_array[shared_page_nr - 1];
-
- xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_PFN, shared_page_nr - 1);
+ xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_PFN, shared_page_nr-1);
xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_EVTCHN, store_evtchn);
-
- /* Paranoia */
- /* clean the shared IO requests page */
- if ( xc_clear_domain_page(xc_handle, dom, page_array[shared_page_nr]) )
- goto error_out;
-
- /* clean the buffered IO requests page */
- if ( xc_clear_domain_page(xc_handle, dom, page_array[shared_page_nr - 2]) )
- goto error_out;
-
- if ( xc_clear_domain_page(xc_handle, dom, *store_mfn) )
- goto error_out;
+ xc_set_hvm_param(xc_handle, dom, HVM_PARAM_BUFIOREQ_PFN, shared_page_nr-2);
+ xc_set_hvm_param(xc_handle, dom, HVM_PARAM_IOREQ_PFN, shared_page_nr);
free(page_array);
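
The net effect of the xc_hvm_build.c changes: the ioreq, buffered-ioreq, and
xenstore pages no longer carry magic E820 types. They are simply the top three
pages of low RAM, and their locations are published to Xen through HVM
parameters. A minimal sketch of that contract, assuming a connected libxc
handle and a shared_page_nr computed as in setup_guest() above (hypothetical
helper, not part of the patch):

#include <stdint.h>
#include <xenctrl.h>
#include <xen/hvm/params.h>

/* Hypothetical helper mirroring the tail of setup_guest(): publish the
 * three special pages via HVM params rather than via E820 entries. */
static void advertise_special_pages(int xc_handle, uint32_t dom,
                                    unsigned long shared_page_nr)
{
    /* Top page of low RAM: the synchronous ioreq page. */
    xc_set_hvm_param(xc_handle, dom, HVM_PARAM_IOREQ_PFN, shared_page_nr);
    /* One page below: the xenstore ring. */
    xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_PFN, shared_page_nr - 1);
    /* Two pages below: the buffered ioreq page. */
    xc_set_hvm_param(xc_handle, dom, HVM_PARAM_BUFIOREQ_PFN, shared_page_nr - 2);
}
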
diff -r 722cc2390021 -r 45e34f00a78f xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c Thu Nov 02 14:27:16 2006 +0000
+++ b/xen/arch/x86/domain.c Thu Nov 02 15:55:51 2006 +0000
@@ -123,20 +123,31 @@ struct vcpu *alloc_vcpu_struct(struct do
memset(v, 0, sizeof(*v));
+ v->vcpu_id = vcpu_id;
+ v->domain = d;
+
v->arch.flags = TF_kernel_mode;
- if ( is_idle_domain(d) )
- {
- v->arch.schedule_tail = continue_idle_domain;
- v->arch.cr3 = __pa(idle_pg_table);
+ if ( is_hvm_domain(d) )
+ {
+ if ( hvm_vcpu_initialise(v) != 0 )
+ {
+ xfree(v);
+ return NULL;
+ }
}
else
{
v->arch.schedule_tail = continue_nonidle_domain;
- }
-
- v->arch.ctxt_switch_from = paravirt_ctxt_switch_from;
- v->arch.ctxt_switch_to = paravirt_ctxt_switch_to;
+ v->arch.ctxt_switch_from = paravirt_ctxt_switch_from;
+ v->arch.ctxt_switch_to = paravirt_ctxt_switch_to;
+
+ if ( is_idle_domain(d) )
+ {
+ v->arch.schedule_tail = continue_idle_domain;
+ v->arch.cr3 = __pa(idle_pg_table);
+ }
+ }
v->arch.perdomain_ptes =
d->arch.mm_perdomain_pt + (vcpu_id << GDT_LDT_VCPU_SHIFT);
@@ -335,22 +346,11 @@ int arch_set_info_guest(
if ( !is_hvm_vcpu(v) )
{
+ if ( (rc = (int)set_gdt(v, c->gdt_frames, c->gdt_ents)) != 0 )
+ return rc;
+
cr3_pfn = gmfn_to_mfn(d, xen_cr3_to_pfn(c->ctrlreg[3]));
- v->arch.guest_table = pagetable_from_pfn(cr3_pfn);
- }
-
- if ( (rc = (int)set_gdt(v, c->gdt_frames, c->gdt_ents)) != 0 )
- return rc;
-
- if ( is_hvm_vcpu(v) )
- {
- v->arch.guest_table = pagetable_null();
-
- if ( !hvm_initialize_guest_resources(v) )
- return -EINVAL;
- }
- else
- {
+
if ( shadow_mode_refcounts(d)
? !get_page(mfn_to_page(cr3_pfn), d)
: !get_page_and_type(mfn_to_page(cr3_pfn), d,
@@ -359,6 +359,8 @@ int arch_set_info_guest(
destroy_gdt(v);
return -EINVAL;
}
+
+ v->arch.guest_table = pagetable_from_pfn(cr3_pfn);
}
/* Shadow: make sure the domain has enough shadow memory to
diff -r 722cc2390021 -r 45e34f00a78f xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Thu Nov 02 14:27:16 2006 +0000
+++ b/xen/arch/x86/hvm/hvm.c Thu Nov 02 15:55:51 2006 +0000
@@ -57,149 +57,13 @@ integer_param("hvm_debug", opt_hvm_debug
struct hvm_function_table hvm_funcs;
-static void hvm_zap_mmio_range(
- struct domain *d, unsigned long pfn, unsigned long nr_pfn)
-{
- unsigned long i;
-
- ASSERT(d == current->domain);
-
- for ( i = 0; i < nr_pfn; i++ )
- {
- if ( pfn + i >= 0xfffff )
- break;
-
- if ( VALID_MFN(gmfn_to_mfn(d, pfn + i)) )
- guest_remove_page(d, pfn + i);
- }
-}
-
-static void e820_zap_iommu_callback(struct domain *d,
- struct e820entry *e,
- void *ign)
-{
- if ( e->type == E820_IO )
- hvm_zap_mmio_range(d, e->addr >> PAGE_SHIFT, e->size >> PAGE_SHIFT);
-}
-
-static void e820_foreach(struct domain *d,
- void (*cb)(struct domain *d,
- struct e820entry *e,
- void *data),
- void *data)
-{
- int i;
- unsigned char e820_map_nr;
- struct e820entry *e820entry;
- unsigned char *p;
- unsigned long mfn;
-
- mfn = gmfn_to_mfn(d, E820_MAP_PAGE >> PAGE_SHIFT);
- if ( mfn == INVALID_MFN )
- {
- printk("Can not find E820 memory map page for HVM domain.\n");
- domain_crash_synchronous();
- }
-
- p = map_domain_page(mfn);
- if ( p == NULL )
- {
- printk("Can not map E820 memory map page for HVM domain.\n");
- domain_crash_synchronous();
- }
-
- e820_map_nr = *(p + E820_MAP_NR_OFFSET);
- e820entry = (struct e820entry *)(p + E820_MAP_OFFSET);
-
- for ( i = 0; i < e820_map_nr; i++ )
- cb(d, e820entry + i, data);
-
- unmap_domain_page(p);
-}
-
-static void hvm_zap_iommu_pages(struct domain *d)
-{
- e820_foreach(d, e820_zap_iommu_callback, NULL);
-}
-
-static void e820_map_io_shared_callback(struct domain *d,
- struct e820entry *e,
- void *data)
-{
- unsigned long *mfn = data;
- if ( e->type == E820_SHARED_PAGE )
- {
- ASSERT(*mfn == INVALID_MFN);
- *mfn = gmfn_to_mfn(d, e->addr >> PAGE_SHIFT);
- }
-}
-
-static void e820_map_buffered_io_callback(struct domain *d,
- struct e820entry *e,
- void *data)
-{
- unsigned long *mfn = data;
- if ( e->type == E820_BUFFERED_IO ) {
- ASSERT(*mfn == INVALID_MFN);
- *mfn = gmfn_to_mfn(d, e->addr >> PAGE_SHIFT);
- }
-}
-
-void hvm_map_io_shared_pages(struct vcpu *v)
-{
- unsigned long mfn;
- void *p;
- struct domain *d = v->domain;
-
- if ( d->arch.hvm_domain.shared_page_va ||
- d->arch.hvm_domain.buffered_io_va )
- return;
-
- mfn = INVALID_MFN;
- e820_foreach(d, e820_map_io_shared_callback, &mfn);
-
- if ( mfn == INVALID_MFN )
- {
- printk("Can not find io request shared page for HVM domain.\n");
- domain_crash_synchronous();
- }
-
- p = map_domain_page_global(mfn);
- if ( p == NULL )
- {
- printk("Can not map io request shared page for HVM domain.\n");
- domain_crash_synchronous();
- }
-
- d->arch.hvm_domain.shared_page_va = (unsigned long)p;
-
- mfn = INVALID_MFN;
- e820_foreach(d, e820_map_buffered_io_callback, &mfn);
- if ( mfn != INVALID_MFN ) {
- p = map_domain_page_global(mfn);
- if ( p )
- d->arch.hvm_domain.buffered_io_va = (unsigned long)p;
- }
-}
-
-void hvm_create_event_channels(struct vcpu *v)
-{
- vcpu_iodata_t *p;
- struct vcpu *o;
-
- if ( v->vcpu_id == 0 ) {
- /* Ugly: create event channels for every vcpu when vcpu 0
- starts, so that they're available for ioemu to bind to. */
- for_each_vcpu(v->domain, o) {
- p = get_vio(v->domain, o->vcpu_id);
- o->arch.hvm_vcpu.xen_port = p->vp_eport =
- alloc_unbound_xen_event_channel(o, 0);
- dprintk(XENLOG_INFO, "Allocated port %d for hvm.\n",
- o->arch.hvm_vcpu.xen_port);
- }
- }
-}
-
+void hvm_create_event_channel(struct vcpu *v)
+{
+ v->arch.hvm_vcpu.xen_port = alloc_unbound_xen_event_channel(v, 0);
+ if ( get_sp(v->domain) && get_vio(v->domain, v->vcpu_id) )
+ get_vio(v->domain, v->vcpu_id)->vp_eport =
+ v->arch.hvm_vcpu.xen_port;
+}
void hvm_stts(struct vcpu *v)
{
@@ -267,8 +131,6 @@ void hvm_setup_platform(struct domain *d
if ( !is_hvm_domain(d) || (v->vcpu_id != 0) )
return;
-
- hvm_zap_iommu_pages(d);
platform = &d->arch.hvm_domain;
pic_init(&platform->vpic, pic_irq_request, &platform->interrupt_request);
@@ -689,6 +551,9 @@ long do_hvm_op(unsigned long op, XEN_GUE
{
struct xen_hvm_param a;
struct domain *d;
+ struct vcpu *v;
+ unsigned long mfn;
+ void *p;
if ( copy_from_guest(&a, arg, 1) )
return -EFAULT;
@@ -712,8 +577,41 @@ long do_hvm_op(unsigned long op, XEN_GUE
return -EPERM;
}
+ rc = -EINVAL;
+ if ( !is_hvm_domain(d) )
+ goto param_fail;
+
if ( op == HVMOP_set_param )
{
+ switch ( a.index )
+ {
+ case HVM_PARAM_IOREQ_PFN:
+ if ( d->arch.hvm_domain.shared_page_va )
+ goto param_fail;
+ mfn = gmfn_to_mfn(d, a.value);
+ if ( mfn == INVALID_MFN )
+ goto param_fail;
+ p = map_domain_page_global(mfn);
+ if ( p == NULL )
+ goto param_fail;
+ d->arch.hvm_domain.shared_page_va = (unsigned long)p;
+ /* Initialise evtchn port info if VCPUs already created. */
+ for_each_vcpu ( d, v )
+ get_vio(d, v->vcpu_id)->vp_eport =
+ v->arch.hvm_vcpu.xen_port;
+ break;
+ case HVM_PARAM_BUFIOREQ_PFN:
+ if ( d->arch.hvm_domain.buffered_io_va )
+ goto param_fail;
+ mfn = gmfn_to_mfn(d, a.value);
+ if ( mfn == INVALID_MFN )
+ goto param_fail;
+ p = map_domain_page_global(mfn);
+ if ( p == NULL )
+ goto param_fail;
+ d->arch.hvm_domain.buffered_io_va = (unsigned long)p;
+ break;
+ }
d->arch.hvm_domain.params[a.index] = a.value;
rc = 0;
}
@@ -723,6 +621,7 @@ long do_hvm_op(unsigned long op, XEN_GUE
rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
}
+ param_fail:
put_domain(d);
break;
}
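
With the hvm.c hunk above, mapping the special pages moves out of
e820_foreach() and into the HVMOP_set_param handler: the first write to
HVM_PARAM_IOREQ_PFN or HVM_PARAM_BUFIOREQ_PFN maps the named guest page
globally inside Xen, and both parameters are set-once (a second write takes
the param_fail path). On the device-model side the lookup becomes a parameter
read; a sketch, assuming the existing xc_get_hvm_param() and
xc_map_foreign_range() accessors:

#include <err.h>
#include <sys/mman.h>
#include <xenctrl.h>
#include <xen/hvm/params.h>

/* Hypothetical device-model-side lookup (cf. qemu-dm): find and map the
 * ioreq page via the new parameter instead of scanning an E820 table. */
static void *map_ioreq_page(int xc_handle, uint32_t dom)
{
    unsigned long ioreq_pfn;

    if ( xc_get_hvm_param(xc_handle, dom, HVM_PARAM_IOREQ_PFN, &ioreq_pfn) )
        err(1, "xc_get_hvm_param");

    /* 4096 == x86 guest page size. */
    return xc_map_foreign_range(xc_handle, dom, 4096,
                                PROT_READ | PROT_WRITE, ioreq_pfn);
}
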
diff -r 722cc2390021 -r 45e34f00a78f xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Thu Nov 02 14:27:16 2006 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c Thu Nov 02 15:55:51 2006 +0000
@@ -789,40 +789,13 @@ static void svm_ctxt_switch_to(struct vc
svm_restore_dr(v);
}
-
-static void svm_final_setup_guest(struct vcpu *v)
-{
- struct domain *d = v->domain;
-
+static int svm_vcpu_initialise(struct vcpu *v)
+{
v->arch.schedule_tail = arch_svm_do_launch;
v->arch.ctxt_switch_from = svm_ctxt_switch_from;
v->arch.ctxt_switch_to = svm_ctxt_switch_to;
-
- if ( v != d->vcpu[0] )
- return;
-
- if ( !shadow_mode_external(d) )
- {
- gdprintk(XENLOG_ERR, "Can't init HVM for dom %u vcpu %u: "
- "not in shadow external mode\n", d->domain_id, v->vcpu_id);
- domain_crash(d);
- }
-
- /*
- * Required to do this once per domain
- * TODO: add a seperate function to do these.
- */
- memset(&d->shared_info->evtchn_mask[0], 0xff,
- sizeof(d->shared_info->evtchn_mask));
-}
-
-
-static int svm_initialize_guest_resources(struct vcpu *v)
-{
- svm_final_setup_guest(v);
- return 1;
-}
-
+ return 0;
+}
int start_svm(void)
{
@@ -871,7 +844,7 @@ int start_svm(void)
/* Setup HVM interfaces */
hvm_funcs.disable = stop_svm;
- hvm_funcs.initialize_guest_resources = svm_initialize_guest_resources;
+ hvm_funcs.vcpu_initialise = svm_vcpu_initialise;
hvm_funcs.relinquish_guest_resources = svm_relinquish_guest_resources;
hvm_funcs.store_cpu_guest_regs = svm_store_cpu_guest_regs;
diff -r 722cc2390021 -r 45e34f00a78f xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c Thu Nov 02 14:27:16 2006 +0000
+++ b/xen/arch/x86/hvm/vmx/vmcs.c Thu Nov 02 15:55:51 2006 +0000
@@ -56,7 +56,7 @@
CPU_BASED_INVDPG_EXITING | \
CPU_BASED_MWAIT_EXITING | \
CPU_BASED_MOV_DR_EXITING | \
- CPU_BASED_ACTIVATE_IO_BITMAP | \
+ CPU_BASED_UNCOND_IO_EXITING | \
CPU_BASED_USE_TSC_OFFSETING )
/* Basic flags for VM-Exit controls. */
@@ -240,21 +240,8 @@ static inline int construct_vmcs_control
int error = 0;
error |= __vmwrite(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_control);
-
error |= __vmwrite(VM_EXIT_CONTROLS, vmx_vmexit_control);
-
error |= __vmwrite(VM_ENTRY_CONTROLS, vmx_vmentry_control);
-
- error |= __vmwrite(IO_BITMAP_A, virt_to_maddr(arch_vmx->io_bitmap_a));
- error |= __vmwrite(IO_BITMAP_B, virt_to_maddr(arch_vmx->io_bitmap_b));
-
-#ifdef CONFIG_X86_PAE
- /* On PAE bitmaps may in future be above 4GB. Write high words. */
- error |= __vmwrite(IO_BITMAP_A_HIGH,
- (paddr_t)virt_to_maddr(arch_vmx->io_bitmap_a) >> 32);
- error |= __vmwrite(IO_BITMAP_B_HIGH,
- (paddr_t)virt_to_maddr(arch_vmx->io_bitmap_b) >> 32);
-#endif
return error;
}
@@ -589,12 +576,6 @@ void vmx_destroy_vmcs(struct vcpu *v)
vmx_clear_vmcs(v);
- free_xenheap_pages(arch_vmx->io_bitmap_a, IO_BITMAP_ORDER);
- free_xenheap_pages(arch_vmx->io_bitmap_b, IO_BITMAP_ORDER);
-
- arch_vmx->io_bitmap_a = NULL;
- arch_vmx->io_bitmap_b = NULL;
-
vmx_free_vmcs(arch_vmx->vmcs);
arch_vmx->vmcs = NULL;
}
diff -r 722cc2390021 -r 45e34f00a78f xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Thu Nov 02 14:27:16 2006 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c Thu Nov 02 15:55:51 2006 +0000
@@ -53,77 +53,25 @@ static void vmx_ctxt_switch_from(struct
static void vmx_ctxt_switch_from(struct vcpu *v);
static void vmx_ctxt_switch_to(struct vcpu *v);
-static int vmx_initialize_guest_resources(struct vcpu *v)
-{
- struct domain *d = v->domain;
- struct vcpu *vc;
- void *io_bitmap_a, *io_bitmap_b;
+static int vmx_vcpu_initialise(struct vcpu *v)
+{
int rc;
v->arch.schedule_tail = arch_vmx_do_launch;
v->arch.ctxt_switch_from = vmx_ctxt_switch_from;
v->arch.ctxt_switch_to = vmx_ctxt_switch_to;
- if ( v->vcpu_id != 0 )
- return 1;
-
- if ( !shadow_mode_external(d) )
- {
- dprintk(XENLOG_ERR, "Can't init HVM for dom %u vcpu %u: "
- "not in shadow external mode\n",
- d->domain_id, v->vcpu_id);
- domain_crash(d);
- }
-
- for_each_vcpu ( d, vc )
- {
- memset(&vc->arch.hvm_vmx, 0, sizeof(struct arch_vmx_struct));
-
- if ( (rc = vmx_create_vmcs(vc)) != 0 )
- {
- dprintk(XENLOG_WARNING,
- "Failed to create VMCS for vcpu %d: err=%d.\n",
- vc->vcpu_id, rc);
- return 0;
- }
-
- spin_lock_init(&vc->arch.hvm_vmx.vmcs_lock);
-
- if ( (io_bitmap_a = alloc_xenheap_pages(IO_BITMAP_ORDER)) == NULL )
- {
- dprintk(XENLOG_WARNING,
- "Failed to allocate io bitmap b for vcpu %d.\n",
- vc->vcpu_id);
- return 0;
- }
-
- if ( (io_bitmap_b = alloc_xenheap_pages(IO_BITMAP_ORDER)) == NULL )
- {
- dprintk(XENLOG_WARNING,
- "Failed to allocate io bitmap b for vcpu %d.\n",
- vc->vcpu_id);
- return 0;
- }
-
- memset(io_bitmap_a, 0xff, 0x1000);
- memset(io_bitmap_b, 0xff, 0x1000);
-
- /* don't bother debug port access */
- clear_bit(PC_DEBUG_PORT, io_bitmap_a);
-
- vc->arch.hvm_vmx.io_bitmap_a = io_bitmap_a;
- vc->arch.hvm_vmx.io_bitmap_b = io_bitmap_b;
-
- }
-
- /*
- * Required to do this once per domain XXX todo: add a seperate function
- * to do these.
- */
- memset(&d->shared_info->evtchn_mask[0], 0xff,
- sizeof(d->shared_info->evtchn_mask));
-
- return 1;
+ if ( (rc = vmx_create_vmcs(v)) != 0 )
+ {
+ dprintk(XENLOG_WARNING,
+ "Failed to create VMCS for vcpu %d: err=%d.\n",
+ v->vcpu_id, rc);
+ return rc;
+ }
+
+ spin_lock_init(&v->arch.hvm_vmx.vmcs_lock);
+
+ return 0;
}
static void vmx_relinquish_guest_resources(struct domain *d)
@@ -747,7 +695,7 @@ static void vmx_setup_hvm_funcs(void)
hvm_funcs.disable = stop_vmx;
- hvm_funcs.initialize_guest_resources = vmx_initialize_guest_resources;
+ hvm_funcs.vcpu_initialise = vmx_vcpu_initialise;
hvm_funcs.relinquish_guest_resources = vmx_relinquish_guest_resources;
hvm_funcs.store_cpu_guest_regs = vmx_store_cpu_guest_regs;
diff -r 722cc2390021 -r 45e34f00a78f xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c Thu Nov 02 14:27:16 2006 +0000
+++ b/xen/arch/x86/setup.c Thu Nov 02 15:55:51 2006 +0000
@@ -363,7 +363,7 @@ void __init __start_xen(multiboot_info_t
e820_raw[e820_raw_nr].size =
((u64)map->length_high << 32) | (u64)map->length_low;
e820_raw[e820_raw_nr].type =
- (map->type > E820_SHARED_PAGE) ? E820_RESERVED : map->type;
+ (map->type > E820_NVS) ? E820_RESERVED : map->type;
e820_raw_nr++;
bytes += map->size + 4;
diff -r 722cc2390021 -r 45e34f00a78f xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h Thu Nov 02 14:27:16 2006 +0000
+++ b/xen/include/asm-x86/hvm/hvm.h Thu Nov 02 15:55:51 2006 +0000
@@ -35,7 +35,7 @@ struct hvm_function_table {
/*
* Initialize/relinquish HVM guest resources
*/
- int (*initialize_guest_resources)(struct vcpu *v);
+ int (*vcpu_initialise)(struct vcpu *v);
void (*relinquish_guest_resources)(struct domain *d);
/*
@@ -91,27 +91,21 @@ hvm_disable(void)
hvm_funcs.disable();
}
-void hvm_create_event_channels(struct vcpu *v);
-void hvm_map_io_shared_pages(struct vcpu *v);
+void hvm_create_event_channel(struct vcpu *v);
static inline int
-hvm_initialize_guest_resources(struct vcpu *v)
+hvm_vcpu_initialise(struct vcpu *v)
{
- int ret = 1;
- if ( hvm_funcs.initialize_guest_resources )
- ret = hvm_funcs.initialize_guest_resources(v);
- if ( ret == 1 ) {
- hvm_map_io_shared_pages(v);
- hvm_create_event_channels(v);
- }
- return ret;
+ int rc;
+ if ( (rc = hvm_funcs.vcpu_initialise(v)) == 0 )
+ hvm_create_event_channel(v);
+ return rc;
}
static inline void
hvm_relinquish_guest_resources(struct domain *d)
{
- if (hvm_funcs.relinquish_guest_resources)
- hvm_funcs.relinquish_guest_resources(d);
+ hvm_funcs.relinquish_guest_resources(d);
}
static inline void
diff -r 722cc2390021 -r 45e34f00a78f xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h Thu Nov 02 14:27:16 2006 +0000
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h Thu Nov 02 15:55:51 2006 +0000
@@ -49,10 +49,6 @@ struct vmx_msr_state {
unsigned long msr_items[VMX_MSR_COUNT];
unsigned long shadow_gs;
};
-
-/* io bitmap is 4KBytes in size */
-#define IO_BITMAP_SIZE 0x1000
-#define IO_BITMAP_ORDER (get_order_from_bytes(IO_BITMAP_SIZE))
struct arch_vmx_struct {
/* Virtual address of VMCS. */
@@ -82,7 +78,6 @@ struct arch_vmx_struct {
unsigned long cpu_cr3;
unsigned long cpu_based_exec_control;
struct vmx_msr_state msr_content;
- void *io_bitmap_a, *io_bitmap_b;
unsigned long vmxassist_enabled:1;
};
diff -r 722cc2390021 -r 45e34f00a78f xen/include/public/hvm/e820.h
--- a/xen/include/public/hvm/e820.h Thu Nov 02 14:27:16 2006 +0000
+++ b/xen/include/public/hvm/e820.h Thu Nov 02 15:55:51 2006 +0000
@@ -6,12 +6,6 @@
#define E820_RESERVED 2
#define E820_ACPI 3
#define E820_NVS 4
-
-/* Xen HVM extended E820 types. */
-#define E820_IO 16
-#define E820_SHARED_PAGE 17
-#define E820_XENSTORE 18
-#define E820_BUFFERED_IO 19
/* E820 location in HVM virtual address space. */
#define E820_MAP_PAGE 0x00090000
diff -r 722cc2390021 -r 45e34f00a78f xen/include/public/hvm/params.h
--- a/xen/include/public/hvm/params.h Thu Nov 02 14:27:16 2006 +0000
+++ b/xen/include/public/hvm/params.h Thu Nov 02 15:55:51 2006 +0000
@@ -7,7 +7,9 @@
#define HVM_PARAM_STORE_EVTCHN 2
#define HVM_PARAM_APIC_ENABLED 3
#define HVM_PARAM_PAE_ENABLED 4
-#define HVM_NR_PARAMS 5
+#define HVM_PARAM_IOREQ_PFN 5
+#define HVM_PARAM_BUFIOREQ_PFN 6
+#define HVM_NR_PARAMS 7
/* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */
#define HVMOP_set_param 0
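
The two new indices are what the builder and hypervisor hunks above plumb
together. A hypothetical illustration of the resulting write-once behaviour
(domain id and PFN values invented; the second set fails because
shared_page_va is already non-NULL):

#include <stdio.h>
#include <xenctrl.h>
#include <xen/hvm/params.h>

int main(void)
{
    int xc = xc_interface_open();
    uint32_t dom = 1;               /* invented domain id */

    /* First set succeeds: Xen maps the page and records it. */
    printf("%d\n", xc_set_hvm_param(xc, dom, HVM_PARAM_IOREQ_PFN, 0x9ffff));

    /* Second set fails: the ioreq page mapping is established once only. */
    printf("%d\n", xc_set_hvm_param(xc, dom, HVM_PARAM_IOREQ_PFN, 0x9fffe));

    return 0;
}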