ChangeSet 1.1366, 2005/04/22 10:17:26+01:00, kaf24@xxxxxxxxxxxxxxxxxxxx
Clean up shadow destruction and fix domain destroy when shadow mode
is disabled.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
 arch/x86/domain.c        |   26 +++++++-------------------
 arch/x86/shadow.c        |   43 ++++++++++++++++++-------------------------
 arch/x86/vmx_vmcs.c      |   20 ++------------------
 common/page_alloc.c      |    2 ++
 include/asm-x86/domain.h |    6 +++---
 include/asm-x86/shadow.h |   10 +++++++---
 include/xen/shadow.h     |    1 +
 7 files changed, 40 insertions(+), 68 deletions(-)
diff -Nru a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c 2005-04-22 06:03:21 -04:00
+++ b/xen/arch/x86/domain.c 2005-04-22 06:03:21 -04:00
@@ -991,36 +991,24 @@
{
if ( pagetable_val(ed->arch.guest_table) != 0 )
{
- struct pfn_info *page =
- &frame_table[pagetable_val(ed->arch.guest_table)>>PAGE_SHIFT];
-
- if ( shadow_mode_enabled(d) )
- put_page(page);
- else
- put_page_and_type(page);
-
+ (shadow_mode_enabled(d) ? put_page : put_page_and_type)
+ (&frame_table[pagetable_val(
+ ed->arch.guest_table) >> PAGE_SHIFT]);
ed->arch.guest_table = mk_pagetable(0);
}
if ( pagetable_val(ed->arch.guest_table_user) != 0 )
{
- struct pfn_info *page =
- &frame_table[pagetable_val(ed->arch.guest_table_user)
- >> PAGE_SHIFT];
-
- if ( shadow_mode_enabled(d) )
- put_page(page);
- else
- put_page_and_type(page);
-
+ (shadow_mode_enabled(d) ? put_page : put_page_and_type)
+ (&frame_table[pagetable_val(
+ ed->arch.guest_table_user) >> PAGE_SHIFT]);
ed->arch.guest_table_user = mk_pagetable(0);
}
vmx_relinquish_resources(ed);
}
- /* Exit shadow mode before deconstructing final guest page table. */
- shadow_mode_destroy(d);
+ shadow_mode_disable(d);
/*
* Relinquish GDT mappings. No need for explicit unmapping of the LDT as
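The rewritten put path above leans on a C idiom: a conditional expression
over two functions of identical type yields a function pointer, which can
be called in place. A minimal standalone demo of the idiom, with toy
stand-ins for put_page()/put_page_and_type() rather than the real Xen
helpers:

    #include <stdio.h>

    /* Toy stand-ins: drop one ref, or a ref plus a type ref. */
    static void put_page(int *refcnt)          { *refcnt -= 1; }
    static void put_page_and_type(int *refcnt) { *refcnt -= 2; }

    int main(void)
    {
        int refcnt = 3;
        int shadow_mode_enabled = 1;

        /* Select the release function and call it in one expression,
         * just as the new domain.c code does. */
        (shadow_mode_enabled ? put_page : put_page_and_type)(&refcnt);

        printf("refcnt = %d\n", refcnt);   /* prints 2 */
        return 0;
    }
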
diff -Nru a/xen/arch/x86/shadow.c b/xen/arch/x86/shadow.c
--- a/xen/arch/x86/shadow.c 2005-04-22 06:03:21 -04:00
+++ b/xen/arch/x86/shadow.c 2005-04-22 06:03:21 -04:00
@@ -1111,9 +1111,17 @@
d->arch.out_of_sync_extras_count);
}
-void shadow_mode_destroy(struct domain *d)
+void __shadow_mode_disable(struct domain *d)
{
- shadow_lock(d);
+ if ( unlikely(!shadow_mode_enabled(d)) )
+ return;
+
+ /*
+ * Currently this does not fix up page ref counts, so it is valid to call
+ * only when a domain is being destroyed.
+ */
+ BUG_ON(!test_bit(DF_DYING, &d->d_flags));
+ d->arch.shadow_tainted_refcnts = 1;
free_shadow_pages(d);
free_writable_pte_predictions(d);
@@ -1135,26 +1143,6 @@
free_shadow_ht_entries(d);
free_out_of_sync_entries(d);
-
- shadow_unlock(d);
-}
-
-void __shadow_mode_disable(struct domain *d)
-{
- // This needs rethinking for the full shadow mode stuff.
- //
- // Among other things, ref counts need to be restored to a sensible
- // state for a non-shadow-mode guest...
- // This is probably easiest to do by stealing code from audit_domain().
- //
- BUG();
-
- free_shadow_pages(d);
-
- d->arch.shadow_mode = 0;
-
- free_shadow_ht_entries(d);
- free_out_of_sync_entries(d);
}
static int shadow_mode_table_op(
@@ -1293,7 +1281,7 @@
switch ( op )
{
case DOM0_SHADOW_CONTROL_OP_OFF:
- shadow_mode_disable(d);
+ __shadow_mode_disable(d);
break;
case DOM0_SHADOW_CONTROL_OP_ENABLE_TEST:
@@ -1303,12 +1291,14 @@
case DOM0_SHADOW_CONTROL_OP_ENABLE_LOGDIRTY:
free_shadow_pages(d);
-        rc = __shadow_mode_enable(d, d->arch.shadow_mode|SHM_enable|SHM_log_dirty);
+ rc = __shadow_mode_enable(
+ d, d->arch.shadow_mode|SHM_enable|SHM_log_dirty);
break;
case DOM0_SHADOW_CONTROL_OP_ENABLE_TRANSLATE:
free_shadow_pages(d);
-        rc = __shadow_mode_enable(d, d->arch.shadow_mode|SHM_enable|SHM_translate);
+ rc = __shadow_mode_enable(
+ d, d->arch.shadow_mode|SHM_enable|SHM_translate);
break;
default:
@@ -2165,6 +2155,9 @@
int i;
struct shadow_status *a;
u32 count = 0;
+
+ if ( unlikely(!shadow_mode_enabled(d)) )
+ return 0;
ASSERT(spin_is_locked(&d->arch.shadow_lock));
perfc_incrc(remove_all_access);
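Both new guards in shadow.c use unlikely(), which in Xen (as in Linux)
wraps GCC's __builtin_expect so the compiler treats the early return as
the cold path. A self-contained sketch of the macro and the guard
pattern (the macro matches the conventional definition; the function
body here is illustrative only):

    #include <stdio.h>

    /* Conventional definition from Xen/Linux compiler headers. */
    #define unlikely(x) __builtin_expect(!!(x), 0)

    static int shadow_mode_enabled(unsigned int mode) { return mode != 0; }

    /* Guard pattern from the patch: callers no longer need to check
     * shadow mode themselves before calling in. */
    static unsigned int remove_all_access(unsigned int shadow_mode)
    {
        if (unlikely(!shadow_mode_enabled(shadow_mode)))
            return 0;              /* shadow mode off: nothing to strip */
        /* ... walk the shadow hash table, revoking write access ... */
        return 1;
    }

    int main(void)
    {
        printf("%u %u\n", remove_all_access(0), remove_all_access(1));
        return 0;
    }
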
diff -Nru a/xen/arch/x86/vmx_vmcs.c b/xen/arch/x86/vmx_vmcs.c
--- a/xen/arch/x86/vmx_vmcs.c 2005-04-22 06:03:21 -04:00
+++ b/xen/arch/x86/vmx_vmcs.c 2005-04-22 06:03:21 -04:00
@@ -160,27 +160,11 @@
unsigned int tr, cpu, error = 0;
struct host_execution_env host_env;
struct Xgt_desc_struct desc;
- struct list_head *list_ent;
- unsigned long i, pfn = 0;
+ unsigned long pfn = 0;
struct pfn_info *page;
execution_context_t *ec = get_execution_context();
- struct domain *d = ed->domain;
- cpu = smp_processor_id();
- d->arch.min_pfn = d->arch.max_pfn = 0;
-
- spin_lock(&d->page_alloc_lock);
- list_ent = d->page_list.next;
-
- for ( i = 0; list_ent != &d->page_list; i++ )
- {
- pfn = list_entry(list_ent, struct pfn_info, list) - frame_table;
- d->arch.min_pfn = min(d->arch.min_pfn, pfn);
- d->arch.max_pfn = max(d->arch.max_pfn, pfn);
- list_ent = frame_table[pfn].list.next;
- }
-
- spin_unlock(&d->page_alloc_lock);
+ cpu = smp_processor_id();
page = (struct pfn_info *) alloc_domheap_page(NULL);
pfn = (unsigned long) (page - frame_table);
diff -Nru a/xen/common/page_alloc.c b/xen/common/page_alloc.c
--- a/xen/common/page_alloc.c 2005-04-22 06:03:21 -04:00
+++ b/xen/common/page_alloc.c 2005-04-22 06:03:21 -04:00
@@ -562,6 +562,8 @@
for ( i = 0; i < (1 << order); i++ )
{
shadow_drop_references(d, &pg[i]);
+ ASSERT(((pg[i].u.inuse.type_info & PGT_count_mask) == 0) ||
+ shadow_tainted_refcnts(d));
pg[i].tlbflush_timestamp = tlbflush_current_time();
pg[i].u.free.cpu_mask = d->cpuset;
list_del(&pg[i].list);
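The new ASSERT states the invariant the rest of the changeset preserves:
a page handed back to the allocator must have a zero type-use count,
unless the domain's shadow teardown has explicitly declared its counts
tainted. A standalone sketch of the check (PGT_count_mask's value and
the structures below are stand-ins for the demo, not Xen's definitions):

    #include <assert.h>
    #include <stdio.h>

    #define PGT_count_mask 0xff   /* demo value; selects type-count bits */

    struct domain   { int shadow_tainted_refcnts; };
    struct pfn_info { unsigned int type_info; };

    static void free_domheap_page(struct domain *d, struct pfn_info *pg)
    {
        /* Freeing a page with live type references is a bug, unless
         * shadow destruction already gave up on accurate counts. */
        assert(((pg->type_info & PGT_count_mask) == 0) ||
               d->shadow_tainted_refcnts);
        printf("page freed\n");
    }

    int main(void)
    {
        struct domain d = { .shadow_tainted_refcnts = 1 };
        struct pfn_info pg = { .type_info = 3 };  /* stale type count */
        free_domheap_page(&d, &pg);   /* tolerated: counts are tainted */
        return 0;
    }
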
diff -Nru a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h 2005-04-22 06:03:21 -04:00
+++ b/xen/include/asm-x86/domain.h 2005-04-22 06:03:21 -04:00
@@ -26,11 +26,11 @@
/* I/O-port access bitmap mask. */
u8 *iobmp_mask; /* Address of IO bitmap mask, or NULL. */
- /* shadow mode status and controls */
+ /* Shadow mode status and controls. */
unsigned int shadow_mode; /* flags to control shadow table operation */
spinlock_t shadow_lock;
- unsigned long min_pfn; /* min host physical */
- unsigned long max_pfn; /* max host physical */
+ /* Shadow mode has tainted page reference counts? */
+ unsigned int shadow_tainted_refcnts;
/* shadow hashtable */
struct shadow_status *shadow_ht;
diff -Nru a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h 2005-04-22 06:03:21 -04:00
+++ b/xen/include/asm-x86/shadow.h 2005-04-22 06:03:21 -04:00
@@ -42,6 +42,8 @@
#define shadow_mode_translate(_d) ((_d)->arch.shadow_mode & SHM_translate)
#define shadow_mode_external(_d) ((_d)->arch.shadow_mode & SHM_external)
+#define shadow_tainted_refcnts(_d) ((_d)->arch.shadow_tainted_refcnts)
+
#define shadow_linear_pg_table ((l1_pgentry_t *)SH_LINEAR_PT_VIRT_START)
#define __shadow_linear_l2_table ((l2_pgentry_t *)(SH_LINEAR_PT_VIRT_START + \
(SH_LINEAR_PT_VIRT_START >> (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT))))
@@ -173,11 +175,13 @@
extern void __shadow_mode_disable(struct domain *d);
static inline void shadow_mode_disable(struct domain *d)
{
- if ( shadow_mode_enabled(d) )
+ if ( unlikely(shadow_mode_enabled(d)) )
+ {
+ shadow_lock(d);
__shadow_mode_disable(d);
+ shadow_unlock(d);
+ }
}
-
-extern void shadow_mode_destroy(struct domain *d);
/************************************************************************/
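The inline wrapper above gives ordinary callers a locked entry point,
while the DOM0_SHADOW_CONTROL_OP_OFF path, which takes the shadow lock
around the whole operation, calls the __shadow_mode_disable() worker
directly. A runnable illustration of that wrapper/worker split, using a
pthread mutex as a stand-in for shadow_lock (all types and names are
simplified, not Xen's):

    #include <pthread.h>
    #include <stdio.h>

    struct domain {
        int shadow_mode;              /* nonzero => shadow mode on */
        pthread_mutex_t shadow_lock;
    };

    /* Worker: assumes the caller already holds shadow_lock. */
    static void __shadow_mode_disable(struct domain *d)
    {
        if (!d->shadow_mode)
            return;                   /* idempotent, as in the patch */
        d->shadow_mode = 0;
        printf("shadow state torn down\n");
    }

    /* Wrapper: takes the lock, so the common call site in domain
     * destruction stays a one-liner. */
    static inline void shadow_mode_disable(struct domain *d)
    {
        if (d->shadow_mode) {
            pthread_mutex_lock(&d->shadow_lock);
            __shadow_mode_disable(d);
            pthread_mutex_unlock(&d->shadow_lock);
        }
    }

    int main(void)
    {
        struct domain d = { 1, PTHREAD_MUTEX_INITIALIZER };
        shadow_mode_disable(&d);
        shadow_mode_disable(&d);      /* second call: harmless no-op */
        return 0;
    }
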
diff -Nru a/xen/include/xen/shadow.h b/xen/include/xen/shadow.h
--- a/xen/include/xen/shadow.h 2005-04-22 06:03:21 -04:00
+++ b/xen/include/xen/shadow.h 2005-04-22 06:03:21 -04:00
@@ -12,6 +12,7 @@
#define shadow_drop_references(_d, _p) ((void)0)
#define shadow_sync_and_drop_references(_d, _p) ((void)0)
+#define shadow_tainted_refcnts(_d) (0)
#endif
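
The xen/shadow.h stub keeps the page allocator's new ASSERT compiling
when shadow support is absent: shadow_tainted_refcnts() folds to a
constant 0 and the condition reduces to the strict zero-count check. A
tiny illustration of the stub-macro technique (the HAVE_SHADOW switch
is invented for this demo):

    #include <stdio.h>

    #define HAVE_SHADOW 0   /* flip to 1 to mimic a shadow-enabled build */

    struct domain { int shadow_tainted_refcnts; };

    #if HAVE_SHADOW
    #define shadow_tainted_refcnts(d) ((d)->shadow_tainted_refcnts)
    #else
    /* Stub: a constant 0, so "(count == 0) || stub" folds to the
     * strict check and the compiler can elide the second operand. */
    #define shadow_tainted_refcnts(d) (0)
    #endif

    int main(void)
    {
        struct domain d = { 1 };
        unsigned int type_count = 0;
        (void)d;   /* referenced only in the shadow-enabled build */
        printf("ok=%d\n", (type_count == 0) || shadow_tainted_refcnts(&d));
        return 0;
    }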