ChangeSet 1.1269, 2005/04/05 08:53:54+01:00, mafetter@xxxxxxxxxxxxxxxx
Hand merge
Signed-off-by: michael.fetterman@xxxxxxxxxxxx
tools/libxc/xc.h | 5 ++
xen/arch/x86/domain.c | 97 ++++++++++------------------------------
xen/arch/x86/mm.c | 103 ++++++++++++++++++++++---------------------
xen/arch/x86/traps.c | 11 ++--
xen/common/grant_table.c | 2
xen/common/page_alloc.c | 4 -
xen/include/asm-x86/domain.h | 35 +++++++++++---
xen/include/asm-x86/mm.h | 52 ++++++++-------------
xen/include/xen/domain.h | 2
9 files changed, 138 insertions(+), 173 deletions(-)
diff -Nru a/tools/libxc/xc.h b/tools/libxc/xc.h
--- a/tools/libxc/xc.h 2005-04-05 12:22:05 -04:00
+++ b/tools/libxc/xc.h 2005-04-05 12:22:05 -04:00
@@ -367,6 +367,11 @@
u32 op,
xc_perfc_desc_t *desc);
+/* read/write msr */
+long long xc_msr_read(int xc_handle, int cpu_mask, int msr);
+int xc_msr_write(int xc_handle, int cpu_mask, int msr, unsigned int low,
+ unsigned int high);
+
/**
* Memory maps a range within one domain to a local address range. Mappings
* should be unmapped with munmap and should follow the same rules as mmap
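
The two declarations added above expose a privileged MSR read/write interface through libxc. A minimal, hypothetical usage sketch follows; the bit-per-CPU cpu_mask semantics and the choice of MSR 0x10 (IA32 time-stamp counter) are assumptions for illustration, not part of this changeset:

    #include <stdio.h>
    #include "xc.h"    /* libxc header carrying the declarations above */

    int main(void)
    {
        int xc_handle = xc_interface_open();   /* standard libxc session */
        if ( xc_handle < 0 )
            return 1;

        /* Assumed semantics: bit n of cpu_mask selects physical CPU n. */
        long long tsc = xc_msr_read(xc_handle, 1 /* CPU 0 */, 0x10);
        printf("TSC on CPU0: %llx\n", (unsigned long long)tsc);

        /* Writes split the 64-bit value into low/high 32-bit halves. */
        if ( xc_msr_write(xc_handle, 1, 0x10, 0x0, 0x0) != 0 )
            fprintf(stderr, "MSR write failed\n");

        xc_interface_close(xc_handle);
        return 0;
    }
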
diff -Nru a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c 2005-04-05 12:22:04 -04:00
+++ b/xen/arch/x86/domain.c 2005-04-05 12:22:05 -04:00
@@ -249,12 +249,14 @@
machine_to_phys_mapping[virt_to_phys(d->arch.mm_perdomain_pt) >>
PAGE_SHIFT] = INVALID_M2P_ENTRY;
ed->arch.perdomain_ptes = d->arch.mm_perdomain_pt;
-#if 0 /* don't need this yet, but maybe soon! */
- ed->arch.guest_vtable = linear_l2_table;
- ed->arch.shadow_vtable = shadow_linear_l2_table;
-#endif
+
+ ed->arch.guest_vtable = __linear_l2_table;
+ ed->arch.shadow_vtable = __shadow_linear_l2_table;
#ifdef __x86_64__
+ ed->arch.guest_vl3table = __linear_l3_table;
+ ed->arch.guest_vl4table = __linear_l4_table;
+
d->arch.mm_perdomain_l2 = (l2_pgentry_t *)alloc_xenheap_page();
memset(d->arch.mm_perdomain_l2, 0, PAGE_SIZE);
d->arch.mm_perdomain_l2[l2_table_offset(PERDOMAIN_VIRT_START)] =
@@ -268,6 +270,7 @@
(void)ptwr_init(d);
shadow_lock_init(d);
+ INIT_LIST_HEAD(&d->arch.free_shadow_frames);
}
}
@@ -299,70 +302,6 @@
reset_stack_and_jump(vmx_asm_do_launch);
}
-unsigned long alloc_monitor_pagetable(struct exec_domain *ed)
-{
- unsigned long mmfn;
- l2_pgentry_t *mpl2e;
- struct pfn_info *mmfn_info;
- struct domain *d = ed->domain;
-
- ASSERT(pagetable_val(ed->arch.monitor_table) == 0);
-
- mmfn_info = alloc_domheap_page(NULL);
- ASSERT(mmfn_info != NULL);
-
- mmfn = (unsigned long) (mmfn_info - frame_table);
- mpl2e = (l2_pgentry_t *) map_domain_mem(mmfn << PAGE_SHIFT);
- memset(mpl2e, 0, PAGE_SIZE);
-
- memcpy(&mpl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
- &idle_pg_table[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
- HYPERVISOR_ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));
-
- mpl2e[l2_table_offset(PERDOMAIN_VIRT_START)] =
- mk_l2_pgentry((__pa(d->arch.mm_perdomain_pt) & PAGE_MASK)
- | __PAGE_HYPERVISOR);
-
- ed->arch.monitor_vtable = mpl2e;
-
- /* Map the p2m map into the Read-Only MPT space for this domain. */
- mpl2e[l2_table_offset(RO_MPT_VIRT_START)] =
- mk_l2_pgentry(pagetable_val(ed->arch.phys_table) | __PAGE_HYPERVISOR);
-
- return mmfn;
-}
-
-/*
- * Free the pages for monitor_table and hl2_table
- */
-static void free_monitor_pagetable(struct exec_domain *ed)
-{
- l2_pgentry_t *mpl2e;
- unsigned long mfn;
-
- ASSERT( pagetable_val(ed->arch.monitor_table) );
-
- mpl2e = ed->arch.monitor_vtable;
-
- /*
- * First get the mfn for hl2_table by looking at monitor_table
- */
- mfn = l2_pgentry_val(mpl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT])
- >> PAGE_SHIFT;
-
- free_domheap_page(&frame_table[mfn]);
- unmap_domain_mem(mpl2e);
-
- /*
- * Then free monitor_table.
- */
- mfn = (pagetable_val(ed->arch.monitor_table)) >> PAGE_SHIFT;
- free_domheap_page(&frame_table[mfn]);
-
- ed->arch.monitor_table = mk_pagetable(0);
- ed->arch.monitor_vtable = 0;
-}
-
static int vmx_final_setup_guest(struct exec_domain *ed,
full_execution_context_t *full_context)
{
@@ -413,8 +352,6 @@
shadow_mode_enable(ed->domain, SHM_enable|SHM_translate|SHM_external);
}
- update_pagetables(ed);
-
return 0;
out:
@@ -501,7 +438,7 @@
d->vm_assist = c->vm_assist;
phys_basetab = c->pt_base;
- ed->arch.guest_table = ed->arch.phys_table = mk_pagetable(phys_basetab);
+ ed->arch.guest_table = mk_pagetable(phys_basetab);
if ( !get_page_and_type(&frame_table[phys_basetab>>PAGE_SHIFT], d,
PGT_base_page_table) )
@@ -520,8 +457,22 @@
}
#ifdef CONFIG_VMX
- if (c->flags & ECF_VMX_GUEST)
- return vmx_final_setup_guest(ed, c);
+ if ( c->flags & ECF_VMX_GUEST )
+ {
+ int error;
+
+ // VMX uses the initially provided page tables as the P2M map.
+ //
+ // XXX: This creates a security issue -- Xen can't necessarily
+ // trust the VMX domain builder. Xen should validate this
+ // page table, and/or build the table itself, or ???
+ //
+ if ( !pagetable_val(d->arch.phys_table) )
+ d->arch.phys_table = ed->arch.guest_table;
+
+ if ( (error = vmx_final_setup_guest(ed, c)) )
+ return error;
+ }
#endif
update_pagetables(ed);
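
For reference, the __linear_l2_table (and, on x86_64, __linear_l3_table/__linear_l4_table) symbols wired into guest_vtable in the first hunk above rely on Xen's recursive "linear" page-table mapping: the top-level page table is installed as one of its own entries, which makes every page-table page addressable through a fixed virtual window. A conceptual sketch of installing such a self-mapping on 32-bit non-PAE x86 (illustrative only, not code from this changeset):

    /* Install a self-referential entry so the page tables map themselves. */
    void install_linear_mapping(l2_pgentry_t *pgd)
    {
        pgd[l2_table_offset(LINEAR_PT_VIRT_START)] =
            mk_l2_pgentry(__pa(pgd) | __PAGE_HYPERVISOR);
        /* Thereafter the L1 entry mapping virtual address va is reachable
         * as linear_pg_table[l1_linear_offset(va)], exactly the idiom used
         * by ptwr_flush() in the mm.c changes below. */
    }
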
diff -Nru a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c 2005-04-05 12:22:05 -04:00
+++ b/xen/arch/x86/mm.c 2005-04-05 12:22:05 -04:00
@@ -2085,7 +2085,13 @@
l1_pgentry_t ol1e = mk_l1_pgentry(_ol1e);
if ( update_l1e(pl1e, ol1e, mk_l1_pgentry(_nl1e)) )
+ {
put_page_from_l1e(ol1e, d);
+ if ( _ol1e & _PAGE_PRESENT )
+ rc = 0; /* Caller needs to invalidate TLB entry */
+ else
+ rc = 1; /* Caller need not invalidate TLB entry */
+ }
else
rc = -EINVAL;
}
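
The new return convention lets callers skip the TLB invalidation when the old PTE was not present (and hence could not have been cached). A hypothetical caller sketch, assuming __flush_tlb_one() as the single-entry flush primitive and update_entry() as a stand-in for the function containing this hunk:

    int rc = update_entry(pl1e, _nl1e, d);
    if ( rc < 0 )
        return rc;              /* -EINVAL: the update was rejected */
    if ( rc == 0 )
        __flush_tlb_one(l1va);  /* old PTE was present: flush its entry */
    /* rc == 1: nothing was mapped before, so no stale TLB entry exists */
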
@@ -2415,8 +2421,6 @@
* Writable Pagetables
*/
-ptwr_info_t ptwr_info[NR_CPUS];
-
#ifdef VERBOSE
int ptwr_debug = 0x0;
#define PTWR_PRINTK(_f, _a...) \
@@ -2427,20 +2431,18 @@
#endif
/* Flush the given writable p.t. page and write-protect it again. */
-void ptwr_flush(const int which)
+void ptwr_flush(struct domain *d, const int which)
{
unsigned long pte, *ptep, l1va;
l1_pgentry_t *pl1e, ol1e, nl1e;
l2_pgentry_t *pl2e;
- int i, cpu = smp_processor_id();
- struct exec_domain *ed = current;
- struct domain *d = ed->domain;
+ int i;
unsigned int modified = 0;
// not supported in combination with various shadow modes!
ASSERT( !shadow_mode_enabled(d) );
- l1va = ptwr_info[cpu].ptinfo[which].l1va;
+ l1va = d->arch.ptwr[which].l1va;
ptep = (unsigned long *)&linear_pg_table[l1_linear_offset(l1va)];
/*
@@ -2481,10 +2483,10 @@
* STEP 2. Validate any modified PTEs.
*/
- pl1e = ptwr_info[cpu].ptinfo[which].pl1e;
+ pl1e = d->arch.ptwr[which].pl1e;
for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
{
- ol1e = ptwr_info[cpu].ptinfo[which].page[i];
+ ol1e = d->arch.ptwr[which].page[i];
nl1e = pl1e[i];
if ( likely(l1_pgentry_val(ol1e) == l1_pgentry_val(nl1e)) )
@@ -2511,7 +2513,7 @@
* Make the remaining p.t's consistent before crashing, so the
* reference counts are correct.
*/
- memcpy(&pl1e[i], &ptwr_info[cpu].ptinfo[which].page[i],
+ memcpy(&pl1e[i], &d->arch.ptwr[which].page[i],
(L1_PAGETABLE_ENTRIES - i) * sizeof(l1_pgentry_t));
domain_crash();
break;
@@ -2522,8 +2524,7 @@
unmap_domain_mem(pl1e);
perfc_incr_histo(wpt_updates, modified, PT_UPDATES);
- ptwr_info[cpu].ptinfo[which].prev_exec_domain = ed;
- ptwr_info[cpu].ptinfo[which].prev_nr_updates = modified;
+ d->arch.ptwr[which].prev_nr_updates = modified;
/*
* STEP 3. Reattach the L1 p.t. page into the current address space.
@@ -2531,7 +2532,7 @@
if ( which == PTWR_PT_ACTIVE )
{
- pl2e = &__linear_l2_table[ptwr_info[cpu].ptinfo[which].l2_idx];
+ pl2e = &__linear_l2_table[d->arch.ptwr[which].l2_idx];
*pl2e = mk_l2_pgentry(l2_pgentry_val(*pl2e) | _PAGE_PRESENT);
}
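
The net effect of these ptwr hunks is that writable-pagetable state moves from the per-CPU ptwr_info[] array into struct domain (d->arch.ptwr[]), so ptwr_flush() now takes the owning domain explicitly rather than deriving it from current. A sketch of the implied call-site change (hypothetical; the real call sites are updated elsewhere in this changeset):

    /* Before: per-CPU state, domain implicit via 'current'.
     *     if ( ptwr_info[cpu].ptinfo[PTWR_PT_ACTIVE].l1va )
     *         ptwr_flush(PTWR_PT_ACTIVE);
     */

    /* After: state lives in the domain and is passed explicitly. */
    struct domain *d = current->domain;
    if ( d->arch.ptwr[PTWR_PT_ACTIVE].l1va )
        ptwr_flush(d, PTWR_PT_ACTIVE);
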