ChangeSet 1.1444, 2005/04/04 17:09:46+01:00, kaf24@xxxxxxxxxxxxxxxxxxxx
Writable pagetable info is now per-domain rather than per-cpu, so the
state follows the guest rather than the physical CPU it last ran on.
Based on a patch from Steven Smith.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
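
At its core the patch deletes the global per-CPU array ptwr_info[NR_CPUS]
(see the arch/x86/mm.c hunks below) and embeds the equivalent state in each
domain's architecture-specific struct, so every lookup of the form
ptwr_info[cpu].ptinfo[which] becomes d->arch.ptwr[which]. A minimal
compilable model of the move (simplified stand-in types, not Xen's real
definitions; NR_CPUS hard-coded purely for illustration):

#define PTWR_PT_ACTIVE   0
#define PTWR_PT_INACTIVE 1
#define PTWR_NR          2
#define NR_CPUS          32                 /* illustrative value only */

struct ptwr_slot {
    unsigned long l1va;                     /* VA of the L1 page being updated */
    unsigned int  l2_idx;                   /* L2 slot where that L1 is mapped */
};

/* Old layout: global state, indexed by the physical CPU the guest ran on. */
struct ptwr_percpu { struct ptwr_slot ptinfo[PTWR_NR]; };
static struct ptwr_percpu ptwr_info[NR_CPUS];

/* New layout: the same slots hang off the domain itself. */
struct arch_domain { struct ptwr_slot ptwr[PTWR_NR]; };
struct domain      { struct arch_domain arch; };

int main(void)
{
    static struct domain d;                 /* zero-initialised */
    int cpu = 0, which = PTWR_PT_ACTIVE;

    ptwr_info[cpu].ptinfo[which].l1va = 0;  /* old lookup */
    d.arch.ptwr[which].l1va = 0;            /* new lookup */
    return 0;
}

A side effect visible in the diff: the prev_exec_domain field goes away,
since state keyed by domain no longer needs to remember which vcpu last
owned a given CPU's slot.
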
 arch/ia64/domain.c        |    2
 arch/ia64/xenmisc.c       |    2
 arch/x86/domain.c         |   55 +++++++++++++------------
 arch/x86/mm.c             |   99 ++++++++++++++++++++++------------------
 arch/x86/traps.c          |   11 ++---
 common/domain.c           |    4 -
 common/page_alloc.c       |    4 -
 common/schedule.c         |    7 ---
 include/asm-ia64/domain.h |    2
 include/asm-x86/domain.h  |    5 ++
 include/asm-x86/mm.h      |   49 ++++++++--------------
 include/xen/domain.h      |    2
 12 files changed, 112 insertions(+), 130 deletions(-)
diff -Nru a/xen/arch/ia64/domain.c b/xen/arch/ia64/domain.c
--- a/xen/arch/ia64/domain.c 2005-04-04 13:04:55 -04:00
+++ b/xen/arch/ia64/domain.c 2005-04-04 13:04:55 -04:00
@@ -221,7 +221,7 @@
return 1;
}
-void domain_relinquish_memory(struct domain *d)
+void domain_relinquish_resources(struct domain *d)
{
dummy();
}
diff -Nru a/xen/arch/ia64/xenmisc.c b/xen/arch/ia64/xenmisc.c
--- a/xen/arch/ia64/xenmisc.c 2005-04-04 13:04:55 -04:00
+++ b/xen/arch/ia64/xenmisc.c 2005-04-04 13:04:55 -04:00
@@ -68,8 +68,6 @@
struct pt_regs *get_execution_context(void) { return ia64_task_regs(current); }
-void cleanup_writable_pagetable(struct domain *d, int what) { return; }
-
void raise_actimer_softirq(void)
{
raise_softirq(AC_TIMER_SOFTIRQ);
diff -Nru a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c 2005-04-04 13:04:55 -04:00
+++ b/xen/arch/x86/domain.c 2005-04-04 13:04:55 -04:00
@@ -275,6 +275,8 @@
mk_l3_pgentry(__pa(d->arch.mm_perdomain_l2) | __PAGE_HYPERVISOR);
#endif
+ (void)ptwr_init(d);
+
shadow_lock_init(d);
}
}
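
The new (void)ptwr_init(d) gives each domain private writable-pagetable
state at creation time; its counterpart, ptwr_destroy(d), appears in
domain_relinquish_resources() further down. Neither body is part of this
hunk, so the following init/destroy pair is only a rough hypothetical
sketch of the lifecycle (names suffixed _sketch; the allocation strategy
and buffer size are assumed, not taken from the patch):

#include <stdlib.h>

#define PTWR_NR    2
#define L1_ENTRIES 1024             /* PTEs per L1 page on x86-32 */

struct ptwr_slot {
    unsigned long  l1va;            /* 0 => slot currently unused           */
    unsigned long *page;            /* snapshot of the page's original PTEs */
};

struct domain_ptwr { struct ptwr_slot slot[PTWR_NR]; };

/* Hypothetical: allocate the per-domain snapshot buffers up front. */
static int ptwr_init_sketch(struct domain_ptwr *p)
{
    for (int i = 0; i < PTWR_NR; i++) {
        p->slot[i].l1va = 0;
        p->slot[i].page = calloc(L1_ENTRIES, sizeof(unsigned long));
        if (p->slot[i].page == NULL)
            return -1;              /* the caller above discards this, per the cast */
    }
    return 0;
}

/* Hypothetical matching teardown, run once when the domain dies. */
static void ptwr_destroy_sketch(struct domain_ptwr *p)
{
    for (int i = 0; i < PTWR_NR; i++)
        free(p->slot[i].page);
}

int main(void)
{
    struct domain_ptwr p;
    if (ptwr_init_sketch(&p) == 0)
        ptwr_destroy_sketch(&p);
    return 0;
}
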
@@ -940,7 +942,24 @@
return op;
}
-static void relinquish_list(struct domain *d, struct list_head *list)
+#ifdef CONFIG_VMX
+static void vmx_relinquish_resources(struct exec_domain *ed)
+{
+ if ( !VMX_DOMAIN(ed) )
+ return;
+
+ BUG_ON(ed->arch.arch_vmx.vmcs == NULL);
+ free_vmcs(ed->arch.arch_vmx.vmcs);
+ ed->arch.arch_vmx.vmcs = 0;
+
+ free_monitor_pagetable(ed);
+ rem_ac_timer(&ed->arch.arch_vmx.vmx_platform.vmx_pit.pit_timer);
+}
+#else
+#define vmx_relinquish_resources(_ed) ((void)0)
+#endif
+
+static void relinquish_memory(struct domain *d, struct list_head *list)
{
struct list_head *ent;
struct pfn_info *page;
@@ -998,30 +1017,16 @@
spin_unlock_recursive(&d->page_alloc_lock);
}
-#ifdef CONFIG_VMX
-static void vmx_domain_relinquish_memory(struct exec_domain *ed)
-{
- struct vmx_virpit_t *vpit = &(ed->arch.arch_vmx.vmx_platform.vmx_pit);
- /*
- * Free VMCS
- */
- ASSERT(ed->arch.arch_vmx.vmcs);
- free_vmcs(ed->arch.arch_vmx.vmcs);
- ed->arch.arch_vmx.vmcs = 0;
-
- free_monitor_pagetable(ed);
- rem_ac_timer(&(vpit->pit_timer));
-}
-#endif
-
-void domain_relinquish_memory(struct domain *d)
+void domain_relinquish_resources(struct domain *d)
{
struct exec_domain *ed;
BUG_ON(d->cpuset != 0);
+ ptwr_destroy(d);
+
/* Release device mappings of other domains */
- gnttab_release_dev_mappings( d->grant_table );
+ gnttab_release_dev_mappings(d->grant_table);
/* Exit shadow mode before deconstructing final guest page table. */
shadow_mode_disable(d);
@@ -1042,13 +1047,9 @@
pagetable_val(ed->arch.guest_table_user) >> PAGE_SHIFT]);
ed->arch.guest_table_user = mk_pagetable(0);
}
- }
-#ifdef CONFIG_VMX
- if ( VMX_DOMAIN(d->exec_domain[0]) )
- for_each_exec_domain ( d, ed )
- vmx_domain_relinquish_memory(ed);
-#endif
+ vmx_relinquish_resources(ed);
+ }
/*
* Relinquish GDT mappings. No need for explicit unmapping of the LDT as
@@ -1058,8 +1059,8 @@
destroy_gdt(ed);
/* Relinquish every page of memory. */
- relinquish_list(d, &d->xenpage_list);
- relinquish_list(d, &d->page_list);
+ relinquish_memory(d, &d->xenpage_list);
+ relinquish_memory(d, &d->page_list);
}
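
The VMX reshuffle in this file is also a neat example of the stub-macro
idiom: vmx_relinquish_resources() is now invoked unconditionally for every
exec domain inside the main teardown loop, with a cheap VMX_DOMAIN()
runtime check inside the helper and a no-op macro standing in when
CONFIG_VMX is off, so the loop itself stays free of #ifdefs. A generic,
self-contained illustration of the pattern (CONFIG_ACCEL and the widget
names are invented for this example):

#include <stdio.h>

struct widget { int has_accel; };

#ifdef CONFIG_ACCEL
static void accel_release(struct widget *w)
{
    if (!w->has_accel)
        return;                     /* runtime guard, like VMX_DOMAIN(ed) */
    printf("releasing accelerator state\n");
}
#else
/* Compiles away entirely; callers never need their own #ifdef. */
#define accel_release(_w) ((void)(_w))
#endif

int main(void)
{
    struct widget w = { .has_accel = 1 };
    accel_release(&w);              /* legal whether or not CONFIG_ACCEL is set */
    return 0;
}

Note the helper is also hardened slightly in passing: the ASSERT() on the
VMCS pointer becomes a BUG_ON(... == NULL), which (unlike ASSERT) is
typically active in non-debug builds as well.
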
diff -Nru a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c 2005-04-04 13:04:56 -04:00
+++ b/xen/arch/x86/mm.c 2005-04-04 13:04:56 -04:00
@@ -2288,8 +2288,6 @@
* Writable Pagetables
*/
-ptwr_info_t ptwr_info[NR_CPUS];
-
#ifdef VERBOSE
int ptwr_debug = 0x0;
#define PTWR_PRINTK(_f, _a...) \
@@ -2300,17 +2298,15 @@
#endif
/* Flush the given writable p.t. page and write-protect it again. */
-void ptwr_flush(const int which)
+void ptwr_flush(struct domain *d, const int which)
{
unsigned long sstat, spte, pte, *ptep, l1va;
l1_pgentry_t *sl1e = NULL, *pl1e, ol1e, nl1e;
l2_pgentry_t *pl2e;
- int i, cpu = smp_processor_id();
- struct exec_domain *ed = current;
- struct domain *d = ed->domain;
+ int i;
unsigned int modified = 0;
- l1va = ptwr_info[cpu].ptinfo[which].l1va;
+ l1va = d->arch.ptwr[which].l1va;
ptep = (unsigned long *)&linear_pg_table[l1_linear_offset(l1va)];
/*
@@ -2364,10 +2360,10 @@
* STEP 2. Validate any modified PTEs.
*/
- pl1e = ptwr_info[cpu].ptinfo[which].pl1e;
+ pl1e = d->arch.ptwr[which].pl1e;
for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
{
- ol1e = ptwr_info[cpu].ptinfo[which].page[i];
+ ol1e = d->arch.ptwr[which].page[i];
nl1e = pl1e[i];
if ( likely(l1_pgentry_val(ol1e) == l1_pgentry_val(nl1e)) )
@@ -2400,7 +2396,7 @@
* Make the remaining p.t's consistent before crashing, so the
* reference counts are correct.
*/
- memcpy(&pl1e[i], &ptwr_info[cpu].ptinfo[which].page[i],
+ memcpy(&pl1e[i], &d->arch.ptwr[which].page[i],
(L1_PAGETABLE_ENTRIES - i) * sizeof(l1_pgentry_t));
domain_crash();
break;
@@ -2415,8 +2411,7 @@
unmap_domain_mem(pl1e);
perfc_incr_histo(wpt_updates, modified, PT_UPDATES);
- ptwr_info[cpu].ptinfo[which].prev_exec_domain = ed;
- ptwr_info[cpu].ptinfo[which].prev_nr_updates = modified;
+ d->arch.ptwr[which].prev_nr_updates = modified;
/*
* STEP 3. Reattach the L1 p.t. page into the current address space.
@@ -2424,7 +2419,7 @@
if ( (which == PTWR_PT_ACTIVE) && likely(!shadow_mode_enabled(d)) )
{
- pl2e = &linear_l2_table[ptwr_info[cpu].ptinfo[which].l2_idx];
+ pl2e = &linear_l2_table[d->arch.ptwr[which].l2_idx];
*pl2e = mk_l2_pgentry(l2_pgentry_val(*pl2e) | _PAGE_PRESENT);
}
@@ -2432,7 +2427,7 @@
* STEP 4. Final tidy-up.
*/
- ptwr_info[cpu].ptinfo[which].l1va = 0;
+ d->arch.ptwr[which].l1va = 0;
if ( unlikely(sl1e != NULL) )
{
@@ -2570,17 +2565,16 @@
};
/* Write page fault handler: check if guest is trying to modify a PTE. */
-int ptwr_do_page_fault(unsigned long addr)
+int ptwr_do_page_fault(struct domain *d, unsigned long addr)
{
- unsigned long pte, pfn, l2e;
- struct pfn_info *page;
- l2_pgentry_t *pl2e;
- int which, cpu = smp_processor_id();
- u32 l2_idx;
- struct exec_domain *ed = current;
+ unsigned long pte, pfn, l2e;
+ struct pfn_info *page;
+ l2_pgentry_t *pl2e;
+ int which;
+ u32 l2_idx;
/* Can't use linear_l2_table with external tables. */
- BUG_ON(shadow_mode_external(ed->domain));
+ BUG_ON(shadow_mode_external(d));
/*
* Attempt to read the PTE that maps the VA being accessed. By checking for
@@ -2600,7 +2594,7 @@
/* We are looking only for read-only mappings of p.t. pages. */
if ( ((pte & (_PAGE_RW | _PAGE_PRESENT)) != _PAGE_PRESENT) ||
((page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table) ||
- (page_get_owner(page) != ed->domain) )
+ (page_get_owner(page) != d) )
{
return 0;
}
@@ -2611,7 +2605,7 @@
#endif
/* Writable pagetables are not yet SMP safe. Use emulator for now. */
- if ( (ed->eid != 0) || (ed->ed_next_list != NULL) )
+ if ( d->exec_domain[0]->ed_next_list != NULL )
goto emulate;
/* Get the L2 index at which this L1 p.t. is always mapped. */
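
This last hunk carries the one behavioural subtlety of the patch. The old
guard refused the fast path when the faulting vcpu was not vcpu 0
(ed->eid != 0) or had siblings on the list; since the handler now receives
the domain explicitly instead of deriving it from the global current
pointer, the new guard simply asks whether the domain has more than one
vcpu at all. Writable pagetables remain non-SMP-safe, so multi-vcpu guests
always fall through to the emulator. A compilable model of the check (the
one-element exec_domain array and the list layout are simplified):

#include <stdbool.h>
#include <stdio.h>

/* Simplified: a domain's exec domains (vcpus) form a singly linked list. */
struct exec_domain {
    struct exec_domain *ed_next_list;
};

struct domain {
    struct exec_domain *exec_domain[1];   /* [0] is always vcpu 0 */
};

/* The writable-p.t. fast path is only safe for single-vcpu domains. */
static bool must_emulate(const struct domain *d)
{
    return d->exec_domain[0]->ed_next_list != NULL;
}

int main(void)
{
    struct exec_domain v0 = { .ed_next_list = NULL };
    struct domain d = { .exec_domain = { &v0 } };

    printf("single vcpu -> %s\n", must_emulate(&d) ? "emulate" : "fast path");
    return 0;
}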