# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 796ac2386a24b359a88e4924a064bac09c0625e2
# Parent 58d1ef21570698669176adfe90ebb6bc4cd3c8d1
Clean up the HVM relinquish_guest_resources interface and implementation:
the hook now takes a struct domain rather than a struct vcpu, returns void,
and is invoked once per domain instead of once per vcpu.
Ensure we unmap the shared I/O page only after killing all timers
whose handlers may reference that page.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
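
The ordering constraint above is the crux of the fix: the timers killed in
this patch run handlers that may reference the shared I/O page, so every one
of them must be killed before that page is unmapped. A minimal sketch of the
required order, using field and function names from the patch below (the VMX
side is shown; the wrapper name hvm_teardown_sketch is hypothetical and not
part of the patch):

    /* Sketch only: kill timers first, unmap the shared I/O page last. */
    static void hvm_teardown_sketch(struct domain *d)
    {
        struct vcpu *v;

        /* 1. Kill every timer whose handler may reference the shared page. */
        for_each_vcpu ( d, v )
            kill_timer(&v->arch.hvm_vmx.hlt_timer);
        kill_timer(&d->arch.hvm_domain.vpit.pit_timer);

        /* 2. Only now is it safe to unmap the shared I/O page. */
        if ( d->arch.hvm_domain.shared_page_va )
            unmap_domain_page_global(
                (void *)d->arch.hvm_domain.shared_page_va);
    }
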
diff -r 58d1ef215706 -r 796ac2386a24 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c Thu Mar 16 17:57:54 2006
+++ b/xen/arch/x86/domain.c Thu Mar 16 18:36:28 2006
@@ -1030,10 +1030,10 @@
v->arch.guest_table_user = mk_pagetable(0);
}
-
- if ( hvm_guest(v) )
- hvm_relinquish_guest_resources(v);
- }
+ }
+
+ if ( hvm_guest(d->vcpu[0]) )
+ hvm_relinquish_guest_resources(d);
shadow_mode_disable(d);
diff -r 58d1ef215706 -r 796ac2386a24 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Thu Mar 16 17:57:54 2006
+++ b/xen/arch/x86/hvm/svm/svm.c Thu Mar 16 18:36:28 2006
@@ -77,6 +77,8 @@
int vector, int has_code);
void svm_dump_regs(const char *from, struct cpu_user_regs *regs);
+static void svm_relinquish_guest_resources(struct domain *d);
+
static struct asid_pool ASIDpool[NR_CPUS];
/*
@@ -195,12 +197,6 @@
int svm_initialize_guest_resources(struct vcpu *v)
{
svm_final_setup_guest(v);
- return 1;
-}
-
-int svm_relinquish_guest_resources(struct vcpu *v)
-{
- svm_relinquish_resources(v);
return 1;
}
@@ -722,43 +718,35 @@
}
-void svm_relinquish_resources(struct vcpu *v)
-{
- struct hvm_virpit *vpit;
+static void svm_relinquish_guest_resources(struct domain *d)
+{
extern void destroy_vmcb(struct arch_svm_struct *); /* XXX */
-
+ struct vcpu *v;
+
+ for_each_vcpu ( d, v )
+ {
#if 0
- /*
- * This is not stored at the moment. We need to keep it somewhere and free
- * it Or maybe not, as it's a per-cpu-core item, and I guess we don't
- * normally remove CPU's other than for hot-plug capable systems, where I
- * guess we have to allocate and free host-save area in this case. Let's
- * not worry about it at the moment, as loosing one page per CPU hot-plug
- * event doesn't seem that excessive. But I may be wrong.
- */
- free_host_save_area(v->arch.hvm_svm.host_save_area);
-#endif
-
- if ( v->vcpu_id == 0 )
- {
- /* unmap IO shared page */
- struct domain *d = v->domain;
- if ( d->arch.hvm_domain.shared_page_va )
- unmap_domain_page_global(
- (void *)d->arch.hvm_domain.shared_page_va);
- shadow_direct_map_clean(d);
- }
-
- destroy_vmcb(&v->arch.hvm_svm);
- free_monitor_pagetable(v);
- vpit = &v->domain->arch.hvm_domain.vpit;
- kill_timer(&vpit->pit_timer);
- kill_timer(&v->arch.hvm_svm.hlt_timer);
- if ( hvm_apic_support(v->domain) && (VLAPIC(v) != NULL) )
- {
- kill_timer( &(VLAPIC(v)->vlapic_timer) );
- xfree( VLAPIC(v) );
- }
+ /* Memory leak by not freeing this. XXXKAF: *Why* is it not per-core?? */
+ free_host_save_area(v->arch.hvm_svm.host_save_area);
+#endif
+
+ destroy_vmcb(&v->arch.hvm_svm);
+ free_monitor_pagetable(v);
+ kill_timer(&v->arch.hvm_svm.hlt_timer);
+ if ( hvm_apic_support(v->domain) && (VLAPIC(v) != NULL) )
+ {
+ kill_timer( &(VLAPIC(v)->vlapic_timer) );
+ xfree(VLAPIC(v));
+ }
+ }
+
+ kill_timer(&d->arch.hvm_domain.vpit.pit_timer);
+
+ if ( d->arch.hvm_domain.shared_page_va )
+ unmap_domain_page_global(
+ (void *)d->arch.hvm_domain.shared_page_va);
+
+ shadow_direct_map_clean(d);
}
diff -r 58d1ef215706 -r 796ac2386a24 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Thu Mar 16 17:57:54 2006
+++ b/xen/arch/x86/hvm/vmx/vmx.c Thu Mar 16 18:36:28 2006
@@ -78,30 +78,30 @@
}
}
-void vmx_relinquish_resources(struct vcpu *v)
-{
- struct hvm_virpit *vpit;
-
- if (v->vcpu_id == 0) {
- /* unmap IO shared page */
- struct domain *d = v->domain;
- if ( d->arch.hvm_domain.shared_page_va )
- unmap_domain_page_global(
+static void vmx_relinquish_guest_resources(struct domain *d)
+{
+ struct vcpu *v;
+
+ for_each_vcpu ( d, v )
+ {
+ vmx_request_clear_vmcs(v);
+ destroy_vmcs(&v->arch.hvm_vmx);
+ free_monitor_pagetable(v);
+ kill_timer(&v->arch.hvm_vmx.hlt_timer);
+ if ( hvm_apic_support(v->domain) && (VLAPIC(v) != NULL) )
+ {
+ kill_timer(&VLAPIC(v)->vlapic_timer);
+ xfree(VLAPIC(v));
+ }
+ }
+
+ kill_timer(&d->arch.hvm_domain.vpit.pit_timer);
+
+ if ( d->arch.hvm_domain.shared_page_va )
+ unmap_domain_page_global(
(void *)d->arch.hvm_domain.shared_page_va);
- shadow_direct_map_clean(d);
- }
-
- vmx_request_clear_vmcs(v);
- destroy_vmcs(&v->arch.hvm_vmx);
- free_monitor_pagetable(v);
- vpit = &v->domain->arch.hvm_domain.vpit;
- kill_timer(&vpit->pit_timer);
- kill_timer(&v->arch.hvm_vmx.hlt_timer);
- if ( hvm_apic_support(v->domain) && (VLAPIC(v) != NULL) )
- {
- kill_timer(&VLAPIC(v)->vlapic_timer);
- xfree(VLAPIC(v));
- }
+
+ shadow_direct_map_clean(d);
}
#ifdef __x86_64__
@@ -323,12 +323,6 @@
int vmx_initialize_guest_resources(struct vcpu *v)
{
vmx_final_setup_guest(v);
- return 1;
-}
-
-int vmx_relinquish_guest_resources(struct vcpu *v)
-{
- vmx_relinquish_resources(v);
return 1;
}
diff -r 58d1ef215706 -r 796ac2386a24 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h Thu Mar 16 17:57:54 2006
+++ b/xen/include/asm-x86/hvm/hvm.h Thu Mar 16 18:36:28 2006
@@ -35,8 +35,8 @@
/*
* Initialize/relinquish HVM guest resources
*/
- int (*initialize_guest_resources)(struct vcpu *v);
- int (*relinquish_guest_resources)(struct vcpu *v);
+ int (*initialize_guest_resources)(struct vcpu *v);
+ void (*relinquish_guest_resources)(struct domain *d);
/*
* Store and load guest state:
@@ -80,24 +80,23 @@
static inline void
hvm_disable(void)
{
- if (hvm_funcs.disable)
- hvm_funcs.disable();
+ if ( hvm_funcs.disable )
+ hvm_funcs.disable();
}
static inline int
hvm_initialize_guest_resources(struct vcpu *v)
{
- if (hvm_funcs.initialize_guest_resources)
- return hvm_funcs.initialize_guest_resources(v);
+ if ( hvm_funcs.initialize_guest_resources )
+ return hvm_funcs.initialize_guest_resources(v);
return 0;
}
-static inline int
-hvm_relinquish_guest_resources(struct vcpu *v)
+static inline void
+hvm_relinquish_guest_resources(struct domain *d)
{
if (hvm_funcs.relinquish_guest_resources)
- return hvm_funcs.relinquish_guest_resources(v);
- return 0;
+ hvm_funcs.relinquish_guest_resources(d);
}
static inline void
@@ -134,9 +133,9 @@
hvm_funcs.restore_msrs(v);
}
#else
-#define hvm_save_segments(v) ((void)0)
-#define hvm_load_msrs(v) ((void)0)
-#define hvm_restore_msrs(v) ((void)0)
+#define hvm_save_segments(v) ((void)0)
+#define hvm_load_msrs(v) ((void)0)
+#define hvm_restore_msrs(v) ((void)0)
#endif /* __x86_64__ */
static inline void
diff -r 58d1ef215706 -r 796ac2386a24 xen/include/asm-x86/hvm/svm/svm.h
--- a/xen/include/asm-x86/hvm/svm/svm.h Thu Mar 16 17:57:54 2006
+++ b/xen/include/asm-x86/hvm/svm/svm.h Thu Mar 16 18:36:28 2006
@@ -44,7 +44,6 @@
extern void svm_vmwrite(struct vcpu *v, int index, unsigned long value);
extern void svm_final_setup_guest(struct vcpu *v);
extern int svm_paging_enabled(struct vcpu *v);
-extern void svm_relinquish_resources(struct vcpu *v);
extern void svm_dump_vmcb(const char *from, struct vmcb_struct *vmcb);
extern void svm_stts(struct vcpu *v);
extern void svm_do_launch(struct vcpu *v);
diff -r 58d1ef215706 -r 796ac2386a24 xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h Thu Mar 16 17:57:54 2006
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h Thu Mar 16 18:36:28 2006
@@ -28,7 +28,6 @@
extern void stop_vmx(void);
void vmx_final_setup_guest(struct vcpu *v);
-void vmx_relinquish_resources(struct vcpu *v);
void vmx_enter_scheduler(void);