# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 1507021dccdfb2fb0af8c129298348050be4654b
# Parent 0991ed8e4ae577d72b841d85d1cf414a224af172
[HVM][VMX][PAE] Enable PAE VMX guest on PAE host.
The PAE VMX guest supports the NX bit and can complete a kernel build successfully.
Signed-off-by: Jun Nakajima <jun.nakajima@xxxxxxxxx>
Signed-off-by: Xiaohui Xin <xiaohui.xin@xxxxxxxxx>
---
xen/arch/x86/Makefile | 2
xen/arch/x86/audit.c | 4 -
xen/arch/x86/hvm/vmx/vmx.c | 13 ----
xen/arch/x86/shadow.c | 125 +++++++++++++++++++++++++--------------
xen/arch/x86/shadow32.c | 12 +--
xen/arch/x86/shadow_guest32pae.c | 2
xen/arch/x86/shadow_public.c | 40 ++++++++----
xen/include/asm-x86/mm.h | 2
xen/include/asm-x86/shadow.h | 44 +++++++++++--
xen/include/asm-x86/shadow_64.h | 36 +++--------
xen/include/asm-x86/shadow_ops.h | 8 ++
11 files changed, 178 insertions(+), 110 deletions(-)
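The central mechanism of this change: the top-level shadow of a 32-bit PAE guest is now keyed not only by guest pfn and shadow type, but also by the PDPT index recovered from the guest's CR3 (the bits above its 32-byte alignment), folded into the upper 32 bits of a widened 64-bit shadow key. The sketch below is illustrative only and assumes a hypothetical helper make_shadow_key() that is not part of the patch; it mirrors the shadow_key_t, index_to_key() and get_cr3_idxval() definitions added to xen/include/asm-x86/shadow.h further down.

    #include <stdint.h>

    /* Constants as introduced in xen/include/asm-x86/shadow_ops.h below. */
    #define PAE_CR3_ALIGN     5      /* PAE PDPT is 32-byte aligned       */
    #define PAE_CR3_IDX_MASK  0x7f   /* up to 128 PDPTs fit in one page   */

    typedef uint64_t shadow_key_t;   /* widened from unsigned long        */

    /* Hypothetical helper, for illustration: compose the hash key the way
     * __shadow_status()/set_shadow_status() now do, carrying the PAE PDPT
     * index in the upper 32 bits (index_to_key(x) == (x) << 32). */
    static inline shadow_key_t make_shadow_key(unsigned long gpfn,
                                               unsigned long stype,
                                               uint64_t guest_cr3)
    {
        uint64_t idx = (guest_cr3 >> PAE_CR3_ALIGN) & PAE_CR3_IDX_MASK;
        return (shadow_key_t)(gpfn | stype) | (idx << 32);
    }

This is why set_shadow_status()/delete_shadow_status() take an extra index argument and why struct shadow_status.gpfn_and_flags becomes a shadow_key_t in the hunks below.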
diff -r 0991ed8e4ae5 -r 1507021dccdf xen/arch/x86/Makefile
--- a/xen/arch/x86/Makefile Mon Jun 19 16:39:27 2006 +0100
+++ b/xen/arch/x86/Makefile Mon Jun 19 16:47:21 2006 +0100
@@ -41,7 +41,7 @@ obj-y += x86_emulate.o
obj-y += x86_emulate.o
ifneq ($(pae),n)
-obj-$(x86_32) += shadow.o shadow_public.o shadow_guest32.o
+obj-$(x86_32) += shadow.o shadow_public.o shadow_guest32.o shadow_guest32pae.o
else
obj-$(x86_32) += shadow32.o
endif
diff -r 0991ed8e4ae5 -r 1507021dccdf xen/arch/x86/audit.c
--- a/xen/arch/x86/audit.c Mon Jun 19 16:39:27 2006 +0100
+++ b/xen/arch/x86/audit.c Mon Jun 19 16:47:21 2006 +0100
@@ -923,8 +923,8 @@ void _audit_domain(struct domain *d, int
d->domain_id, page_to_mfn(page),
page->u.inuse.type_info,
page->count_info);
- printk("a->gpfn_and_flags=%p\n",
- (void *)a->gpfn_and_flags);
+ printk("a->gpfn_and_flags=%"PRIx64"\n",
+ (u64)a->gpfn_and_flags);
errors++;
}
break;
diff -r 0991ed8e4ae5 -r 1507021dccdf xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Mon Jun 19 16:39:27 2006 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c Mon Jun 19 16:47:21 2006 +0100
@@ -1623,7 +1623,7 @@ static int mov_to_cr(int gp, int cr, str
if ( vmx_pgbit_test(v) )
{
/* The guest is a 32-bit PAE guest. */
-#if CONFIG_PAGING_LEVELS >= 4
+#if CONFIG_PAGING_LEVELS >= 3
unsigned long mfn, old_base_mfn;
if( !shadow_set_guest_paging_levels(v->domain, PAGING_L3) )
@@ -1667,7 +1667,7 @@ static int mov_to_cr(int gp, int cr, str
else
{
/* The guest is a 64 bit or 32-bit PAE guest. */
-#if CONFIG_PAGING_LEVELS >= 4
+#if CONFIG_PAGING_LEVELS >= 3
if ( (v->domain->arch.ops != NULL) &&
v->domain->arch.ops->guest_paging_levels == PAGING_L2)
{
@@ -1680,15 +1680,6 @@ static int mov_to_cr(int gp, int cr, str
{
printk("Unsupported guest paging levels\n");
/* need to take a clean path */
- domain_crash_synchronous();
- }
- }
- else
- {
- if ( !shadow_set_guest_paging_levels(v->domain,
- PAGING_L4) )
- {
- printk("Unsupported guest paging levels\n");
domain_crash_synchronous();
}
}
diff -r 0991ed8e4ae5 -r 1507021dccdf xen/arch/x86/shadow.c
--- a/xen/arch/x86/shadow.c Mon Jun 19 16:39:27 2006 +0100
+++ b/xen/arch/x86/shadow.c Mon Jun 19 16:47:21 2006 +0100
@@ -222,6 +222,7 @@ alloc_shadow_page(struct domain *d,
unsigned long smfn, real_gpfn;
int pin = 0;
void *l1, *lp;
+ u64 index = 0;
// Currently, we only keep pre-zero'ed pages around for use as L1's...
// This will change. Soon.
@@ -354,9 +355,19 @@ alloc_shadow_page(struct domain *d,
if ( d->arch.ops->guest_paging_levels == PAGING_L2 )
pin = 1;
#endif
+
+#if CONFIG_PAGING_LEVELS == 3 && defined ( GUEST_32PAE )
+ /*
+ * We use PGT_l4_shadow for 2-level paging guests on PAE
+ */
+ if ( d->arch.ops->guest_paging_levels == PAGING_L3 )
+ pin = 1;
+#endif
+ if ( d->arch.ops->guest_paging_levels == PAGING_L3 )
+ index = get_cr3_idxval(current);
break;
-#if CONFIG_PAGING_LEVELS >= 4
+#if CONFIG_PAGING_LEVELS >= 3
case PGT_fl1_shadow:
perfc_incr(shadow_l1_pages);
d->arch.shadow_page_count++;
@@ -393,7 +404,7 @@ alloc_shadow_page(struct domain *d,
//
ASSERT( (psh_type == PGT_snapshot) || !mfn_out_of_sync(gmfn) );
- set_shadow_status(d, gpfn, gmfn, smfn, psh_type);
+ set_shadow_status(d, gpfn, gmfn, smfn, psh_type, index);
if ( pin )
shadow_pin(smfn);
@@ -1324,7 +1335,7 @@ increase_writable_pte_prediction(struct
prediction = (prediction & PGT_mfn_mask) | score;
//printk("increase gpfn=%lx pred=%lx create=%d\n", gpfn, prediction,
create);
- set_shadow_status(d, GPFN_TO_GPTEPAGE(gpfn), 0, prediction,
PGT_writable_pred);
+ set_shadow_status(d, GPFN_TO_GPTEPAGE(gpfn), 0, prediction,
PGT_writable_pred, 0);
if ( create )
perfc_incr(writable_pte_predictions);
@@ -1345,10 +1356,10 @@ decrease_writable_pte_prediction(struct
//printk("decrease gpfn=%lx pred=%lx score=%lx\n", gpfn, prediction,
score);
if ( score )
- set_shadow_status(d, GPFN_TO_GPTEPAGE(gpfn), 0, prediction,
PGT_writable_pred);
+ set_shadow_status(d, GPFN_TO_GPTEPAGE(gpfn), 0, prediction,
PGT_writable_pred, 0);
else
{
- delete_shadow_status(d, GPFN_TO_GPTEPAGE(gpfn), 0, PGT_writable_pred);
+ delete_shadow_status(d, GPFN_TO_GPTEPAGE(gpfn), 0, PGT_writable_pred, 0);
perfc_decr(writable_pte_predictions);
}
}
@@ -1385,7 +1396,7 @@ static u32 remove_all_write_access_in_pt
int is_l1_shadow =
((mfn_to_page(pt_mfn)->u.inuse.type_info & PGT_type_mask) ==
PGT_l1_shadow);
-#if CONFIG_PAGING_LEVELS == 4
+#if CONFIG_PAGING_LEVELS >= 3
is_l1_shadow |=
((mfn_to_page(pt_mfn)->u.inuse.type_info & PGT_type_mask) ==
PGT_fl1_shadow);
@@ -1494,7 +1505,7 @@ static int remove_all_write_access(
while ( a && a->gpfn_and_flags )
{
if ( (a->gpfn_and_flags & PGT_type_mask) == PGT_l1_shadow
-#if CONFIG_PAGING_LEVELS >= 4
+#if CONFIG_PAGING_LEVELS >= 3
|| (a->gpfn_and_flags & PGT_type_mask) == PGT_fl1_shadow
#endif
)
@@ -1538,8 +1549,8 @@ static void resync_pae_guest_l3(struct d
continue;
idx = get_cr3_idxval(v);
- smfn = __shadow_status(
- d, ((unsigned long)(idx << PGT_pae_idx_shift) | entry->gpfn), PGT_l4_shadow);
+
+ smfn = __shadow_status(d, entry->gpfn, PGT_l4_shadow);
if ( !smfn )
continue;
@@ -1706,7 +1717,7 @@ static int resync_all(struct domain *d,
{
int error;
-#if CONFIG_PAGING_LEVELS == 4
+#if CONFIG_PAGING_LEVELS >= 3
unsigned long gpfn;
gpfn = guest_l1e_get_paddr(guest1[i]) >> PAGE_SHIFT;
@@ -2420,17 +2431,6 @@ static void shadow_update_pagetables(str
v->arch.guest_vtable = map_domain_page_global(gmfn);
}
-#if CONFIG_PAGING_LEVELS >= 3
- /*
- * Handle 32-bit PAE enabled guest
- */
- if ( SH_GUEST_32PAE && d->arch.ops->guest_paging_levels == PAGING_L3 )
- {
- u32 index = get_cr3_idxval(v);
- gpfn = ((unsigned long)index << PGT_pae_idx_shift) | gpfn;
- }
-#endif
-
/*
* arch.shadow_table
*/
@@ -2443,6 +2443,23 @@ static void shadow_update_pagetables(str
if ( unlikely(!(smfn = __shadow_status(d, gpfn, PGT_l4_shadow))) )
smfn = shadow_l3_table(v, gpfn, gmfn);
}
+ else
+#endif
+
+#if CONFIG_PAGING_LEVELS == 3 && defined ( GUEST_32PAE )
+ /*
+ * We use PGT_l4_shadow for 2-level paging guests on PAE
+ */
+ if ( d->arch.ops->guest_paging_levels == PAGING_L3 )
+ {
+ if ( unlikely(!(smfn = __shadow_status(d, gpfn, PGT_l4_shadow))) )
+ smfn = shadow_l3_table(v, gpfn, gmfn);
+ else
+ {
+ update_top_level_shadow(v, smfn);
+ need_sync = 1;
+ }
+ }
else
#endif
if ( unlikely(!(smfn = __shadow_status(d, gpfn, PGT_base_page_table))) )
@@ -3093,6 +3110,36 @@ static inline unsigned long init_bl2(
return smfn;
}
+
+static inline unsigned long init_l3(
+ struct vcpu *v, unsigned long gpfn, unsigned long gmfn)
+{
+ unsigned long smfn;
+ l4_pgentry_t *spl4e;
+ unsigned long index;
+
+ if ( unlikely(!(smfn = alloc_shadow_page(v->domain, gpfn, gmfn, PGT_l4_shadow))) )
+ {
+ printk("Couldn't alloc an L4 shadow for pfn= %lx mfn= %lx\n", gpfn,
gmfn);
+ BUG(); /* XXX Deal gracefully with failure. */
+ }
+
+ /* Map the self entry, L4&L3 share the same page */
+ spl4e = (l4_pgentry_t *)map_domain_page(smfn);
+
+ /*
+ * Shadow L4's pfn_info->tlbflush_timestamp
+ * should also save its own index.
+ */
+
+ index = get_cr3_idxval(v);
+ frame_table[smfn].tlbflush_timestamp = index;
+
+ memset(spl4e, 0, L4_PAGETABLE_ENTRIES*sizeof(l4_pgentry_t));
+ spl4e[PAE_SHADOW_SELF_ENTRY] = l4e_from_pfn(smfn, __PAGE_HYPERVISOR);
+ unmap_domain_page(spl4e);
+ return smfn;
+}
#endif
#if CONFIG_PAGING_LEVELS == 3
@@ -3111,6 +3158,12 @@ static unsigned long shadow_l3_table(
d->arch.ops->guest_paging_levels == PAGING_L2 )
{
return init_bl2(d, gpfn, gmfn);
+ }
+
+ if ( SH_GUEST_32PAE &&
+ d->arch.ops->guest_paging_levels == PAGING_L3 )
+ {
+ return init_l3(v, gpfn, gmfn);
}
if ( unlikely(!(smfn = alloc_shadow_page(d, gpfn, gmfn, PGT_l3_shadow))) )
@@ -3223,6 +3276,11 @@ static unsigned long shadow_l4_table(
return init_bl2(d, gpfn, gmfn);
}
+ if ( SH_GUEST_32PAE && d->arch.ops->guest_paging_levels == PAGING_L3 )
+ {
+ return init_l3(v, gpfn, gmfn);
+ }
+
if ( unlikely(!(smfn = alloc_shadow_page(d, gpfn, gmfn, PGT_l4_shadow))) )
{
printk("Couldn't alloc an L4 shadow for pfn=%lx mfn=%lx\n", gpfn,
gmfn);
@@ -3230,24 +3288,6 @@ static unsigned long shadow_l4_table(
}
spl4e = (l4_pgentry_t *)map_domain_page(smfn);
-
- /* For 32-bit PAE guest on 64-bit host */
- if ( SH_GUEST_32PAE && d->arch.ops->guest_paging_levels == PAGING_L3 )
- {
- unsigned long index;
- /*
- * Shadow L4's pfn_info->tlbflush_timestamp
- * should also save it's own index.
- */
- index = get_cr3_idxval(v);
- frame_table[smfn].tlbflush_timestamp = index;
-
- memset(spl4e, 0, L4_PAGETABLE_ENTRIES*sizeof(l4_pgentry_t));
- /* Map the self entry */
- spl4e[PAE_SHADOW_SELF_ENTRY] = l4e_from_pfn(smfn, __PAGE_HYPERVISOR);
- unmap_domain_page(spl4e);
- return smfn;
- }
/* Install hypervisor and 4x linear p.t. mapings. */
if ( (PGT_base_page_table == PGT_l4_page_table) &&
@@ -3378,7 +3418,7 @@ validate_bl2e_change(
* This shadow_mark_va_out_of_sync() is for 2M page shadow
*/
static void shadow_mark_va_out_of_sync_2mp(
- struct vcpu *v, unsigned long gpfn, unsigned long mfn, unsigned long writable_pl1e)
+ struct vcpu *v, unsigned long gpfn, unsigned long mfn, paddr_t writable_pl1e)
{
struct out_of_sync_entry *entry =
shadow_mark_mfn_out_of_sync(v, gpfn, mfn);
@@ -3647,6 +3687,7 @@ static inline int l2e_rw_fault(
}
unmap_domain_page(l1_p);
+ *gl2e_p = gl2e;
return 1;
}
@@ -3720,7 +3761,7 @@ static inline int guest_page_fault(
ASSERT( d->arch.ops->guest_paging_levels >= PAGING_L3 );
-#if CONFIG_PAGING_LEVELS >= 4
+#if CONFIG_PAGING_LEVELS >= 3
if ( (error_code & (ERROR_I | ERROR_P)) == (ERROR_I | ERROR_P) )
return 1;
#endif
@@ -4056,7 +4097,7 @@ struct shadow_ops MODE_32_2_HANDLER = {
};
#endif
-#if ( CONFIG_PAGING_LEVELS == 3 && !defined (GUEST_PGENTRY_32) ) || \
+#if ( CONFIG_PAGING_LEVELS == 3 && !defined (GUEST_PGENTRY_32) && !defined (GUEST_32PAE) ) || \
( CONFIG_PAGING_LEVELS == 4 && defined (GUEST_PGENTRY_32) )
diff -r 0991ed8e4ae5 -r 1507021dccdf xen/arch/x86/shadow32.c
--- a/xen/arch/x86/shadow32.c Mon Jun 19 16:39:27 2006 +0100
+++ b/xen/arch/x86/shadow32.c Mon Jun 19 16:47:21 2006 +0100
@@ -306,7 +306,7 @@ alloc_shadow_page(struct domain *d,
//
ASSERT( (psh_type == PGT_snapshot) || !mfn_out_of_sync(gmfn) );
- set_shadow_status(d, gpfn, gmfn, smfn, psh_type);
+ set_shadow_status(d, gpfn, gmfn, smfn, psh_type, 0);
if ( pin )
shadow_pin(smfn);
@@ -395,7 +395,7 @@ void free_shadow_page(unsigned long smfn
ASSERT( ! IS_INVALID_M2P_ENTRY(gpfn) );
- delete_shadow_status(d, gpfn, gmfn, type);
+ delete_shadow_status(d, gpfn, gmfn, type, 0);
switch ( type )
{
@@ -2319,7 +2319,7 @@ increase_writable_pte_prediction(struct
prediction = (prediction & PGT_mfn_mask) | score;
//printk("increase gpfn=%lx pred=%lx create=%d\n", gpfn, prediction,
create);
- set_shadow_status(d, GPFN_TO_GPTEPAGE(gpfn), 0, prediction,
PGT_writable_pred);
+ set_shadow_status(d, GPFN_TO_GPTEPAGE(gpfn), 0, prediction,
PGT_writable_pred, 0);
if ( create )
perfc_incr(writable_pte_predictions);
@@ -2340,10 +2340,10 @@ decrease_writable_pte_prediction(struct
//printk("decrease gpfn=%lx pred=%lx score=%lx\n", gpfn, prediction,
score);
if ( score )
- set_shadow_status(d, GPFN_TO_GPTEPAGE(gpfn), 0, prediction, PGT_writable_pred);
+ set_shadow_status(d, GPFN_TO_GPTEPAGE(gpfn), 0, prediction, PGT_writable_pred, 0);
else
{
- delete_shadow_status(d, GPFN_TO_GPTEPAGE(gpfn), 0, PGT_writable_pred);
+ delete_shadow_status(d, GPFN_TO_GPTEPAGE(gpfn), 0, PGT_writable_pred, 0);
perfc_decr(writable_pte_predictions);
}
}
@@ -2381,7 +2381,7 @@ free_writable_pte_predictions(struct dom
* keep an accurate count of writable_pte_predictions to keep it
* happy.
*/
- delete_shadow_status(d, gpfn_list[count], 0, PGT_writable_pred);
+ delete_shadow_status(d, gpfn_list[count], 0, PGT_writable_pred, 0);
perfc_decr(writable_pte_predictions);
}
diff -r 0991ed8e4ae5 -r 1507021dccdf xen/arch/x86/shadow_guest32pae.c
--- a/xen/arch/x86/shadow_guest32pae.c Mon Jun 19 16:39:27 2006 +0100
+++ b/xen/arch/x86/shadow_guest32pae.c Mon Jun 19 16:47:21 2006 +0100
@@ -1,5 +1,4 @@
#define GUEST_32PAE
-#if defined (__x86_64__)
#include "shadow.c"
struct shadow_ops MODE_64_PAE_HANDLER = {
@@ -15,4 +14,3 @@ struct shadow_ops MODE_64_PAE_HANDLER =
.gva_to_gpa = gva_to_gpa_64,
};
-#endif
diff -r 0991ed8e4ae5 -r 1507021dccdf xen/arch/x86/shadow_public.c
--- a/xen/arch/x86/shadow_public.c Mon Jun 19 16:39:27 2006 +0100
+++ b/xen/arch/x86/shadow_public.c Mon Jun 19 16:47:21 2006 +0100
@@ -123,8 +123,19 @@ int shadow_set_guest_paging_levels(struc
#endif
#if CONFIG_PAGING_LEVELS == 3
case 3:
- if ( d->arch.ops != &MODE_64_3_HANDLER )
- d->arch.ops = &MODE_64_3_HANDLER;
+ if ( d->arch.ops == NULL ||
+ shadow_mode_log_dirty(d) )
+ {
+ if ( d->arch.ops != &MODE_64_3_HANDLER )
+ d->arch.ops = &MODE_64_3_HANDLER;
+ }
+ else
+ {
+ if ( d->arch.ops == &MODE_64_2_HANDLER )
+ free_shadow_pages(d);
+ if ( d->arch.ops != &MODE_64_PAE_HANDLER )
+ d->arch.ops = &MODE_64_PAE_HANDLER;
+ }
shadow_unlock(d);
return 1;
#endif
@@ -268,10 +279,8 @@ free_shadow_tables(struct domain *d, uns
put_shadow_ref(entry_get_pfn(ple[i]));
if (d->arch.ops->guest_paging_levels == PAGING_L3)
{
-#if CONFIG_PAGING_LEVELS == 4
+#if CONFIG_PAGING_LEVELS >= 3
if ( i == PAE_L3_PAGETABLE_ENTRIES && level == PAGING_L4 )
-#elif CONFIG_PAGING_LEVELS == 3
- if ( i == PAE_L3_PAGETABLE_ENTRIES && level == PAGING_L3 )
#endif
break;
}
@@ -710,6 +719,7 @@ void free_shadow_page(unsigned long smfn
struct domain *d = page_get_owner(mfn_to_page(gmfn));
unsigned long gpfn = mfn_to_gmfn(d, gmfn);
unsigned long type = page->u.inuse.type_info & PGT_type_mask;
+ u64 index = 0;
SH_VVLOG("%s: free'ing smfn=%lx", __func__, smfn);
@@ -722,12 +732,16 @@ void free_shadow_page(unsigned long smfn
if ( !mfn )
gpfn |= (1UL << 63);
}
+#endif
+#if CONFIG_PAGING_LEVELS >= 3
if ( d->arch.ops->guest_paging_levels == PAGING_L3 )
- if ( type == PGT_l4_shadow )
- gpfn = ((unsigned long)page->tlbflush_timestamp << PGT_pae_idx_shift) | gpfn;
-#endif
-
- delete_shadow_status(d, gpfn, gmfn, type);
+ {
+ if ( type == PGT_l4_shadow )
+ index = page->tlbflush_timestamp;
+ }
+#endif
+
+ delete_shadow_status(d, gpfn, gmfn, type, index);
switch ( type )
{
@@ -835,7 +849,7 @@ free_writable_pte_predictions(struct dom
while ( count )
{
count--;
- delete_shadow_status(d, gpfn_list[count], 0, PGT_writable_pred);
+ delete_shadow_status(d, gpfn_list[count], 0, PGT_writable_pred, 0);
}
xfree(gpfn_list);
@@ -1050,8 +1064,8 @@ void __shadow_mode_disable(struct domain
{
if ( d->arch.shadow_ht[i].gpfn_and_flags != 0 )
{
- printk("%s: d->arch.shadow_ht[%x].gpfn_and_flags=%lx\n",
- __FILE__, i, d->arch.shadow_ht[i].gpfn_and_flags);
+ printk("%s: d->arch.shadow_ht[%x].gpfn_and_flags=%"PRIx64"\n",
+ __FILE__, i, (u64)d->arch.shadow_ht[i].gpfn_and_flags);
BUG();
}
}
diff -r 0991ed8e4ae5 -r 1507021dccdf xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h Mon Jun 19 16:39:27 2006 +0100
+++ b/xen/include/asm-x86/mm.h Mon Jun 19 16:47:21 2006 +0100
@@ -103,13 +103,11 @@ struct page_info
#define PGT_high_mfn_mask (0xfffUL << PGT_high_mfn_shift)
#define PGT_mfn_mask (((1U<<23)-1) | PGT_high_mfn_mask)
#define PGT_high_mfn_nx (0x800UL << PGT_high_mfn_shift)
-#define PGT_pae_idx_shift PGT_high_mfn_shift
#else
/* 23-bit mfn mask for shadow types: good for up to 32GB RAM. */
#define PGT_mfn_mask ((1U<<23)-1)
/* NX for PAE xen is not supported yet */
#define PGT_high_mfn_nx (1ULL << 63)
-#define PGT_pae_idx_shift 23
#endif
#define PGT_score_shift 23
diff -r 0991ed8e4ae5 -r 1507021dccdf xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h Mon Jun 19 16:39:27 2006 +0100
+++ b/xen/include/asm-x86/shadow.h Mon Jun 19 16:47:21 2006 +0100
@@ -112,6 +112,30 @@ do {
} while (0)
#endif
+#if CONFIG_PAGING_LEVELS >= 3
+static inline u64 get_cr3_idxval(struct vcpu *v)
+{
+ u64 pae_cr3;
+
+ if ( v->domain->arch.ops->guest_paging_levels == PAGING_L3 &&
+ !shadow_mode_log_dirty(v->domain) )
+ {
+ pae_cr3 = hvm_get_guest_ctrl_reg(v, 3); /* get CR3 */
+ return (pae_cr3 >> PAE_CR3_ALIGN) & PAE_CR3_IDX_MASK;
+ }
+ else
+ return 0;
+}
+
+#define shadow_key_t u64
+#define index_to_key(x) ((x) << 32)
+#else
+#define get_cr3_idxval(v) (0)
+#define shadow_key_t unsigned long
+#define index_to_key(x) (0)
+#endif
+
+
#define SHADOW_ENCODE_MIN_MAX(_min, _max) ((((GUEST_L1_PAGETABLE_ENTRIES - 1) - (_max)) << 16) | (_min))
#define SHADOW_MIN(_encoded) ((_encoded) & ((1u<<16) - 1))
#define SHADOW_MAX(_encoded) ((GUEST_L1_PAGETABLE_ENTRIES - 1) - ((_encoded) >> 16))
@@ -309,7 +333,7 @@ extern unsigned long get_mfn_from_gpfn_f
struct shadow_status {
struct shadow_status *next; /* Pull-to-front list per hash bucket. */
- unsigned long gpfn_and_flags; /* Guest pfn plus flags. */
+ shadow_key_t gpfn_and_flags; /* Guest pfn plus flags. */
unsigned long smfn; /* Shadow mfn. */
};
@@ -1180,7 +1204,13 @@ static inline unsigned long __shadow_sta
struct domain *d, unsigned long gpfn, unsigned long stype)
{
struct shadow_status *p, *x, *head;
- unsigned long key = gpfn | stype;
+ shadow_key_t key;
+#if CONFIG_PAGING_LEVELS >= 3
+ if ( d->arch.ops->guest_paging_levels == PAGING_L3 && stype == PGT_l4_shadow )
+ key = gpfn | stype | index_to_key(get_cr3_idxval(current));
+ else
+#endif
+ key = gpfn | stype;
ASSERT(shadow_lock_is_acquired(d));
ASSERT(gpfn == (gpfn & PGT_mfn_mask));
@@ -1295,10 +1325,11 @@ shadow_max_pgtable_type(struct domain *d
}
static inline void delete_shadow_status(
- struct domain *d, unsigned long gpfn, unsigned long gmfn, unsigned int stype)
+ struct domain *d, unsigned long gpfn, unsigned long gmfn, unsigned int stype, u64 index)
{
struct shadow_status *p, *x, *n, *head;
- unsigned long key = gpfn | stype;
+
+ shadow_key_t key = gpfn | stype | index_to_key(index);
ASSERT(shadow_lock_is_acquired(d));
ASSERT(!(gpfn & ~PGT_mfn_mask));
@@ -1374,11 +1405,12 @@ static inline void delete_shadow_status(
static inline void set_shadow_status(
struct domain *d, unsigned long gpfn, unsigned long gmfn,
- unsigned long smfn, unsigned long stype)
+ unsigned long smfn, unsigned long stype, u64 index)
{
struct shadow_status *x, *head, *extra;
int i;
- unsigned long key = gpfn | stype;
+
+ shadow_key_t key = gpfn | stype | index_to_key(index);
SH_VVLOG("set gpfn=%lx gmfn=%lx smfn=%lx t=%lx", gpfn, gmfn, smfn, stype);
diff -r 0991ed8e4ae5 -r 1507021dccdf xen/include/asm-x86/shadow_64.h
--- a/xen/include/asm-x86/shadow_64.h Mon Jun 19 16:39:27 2006 +0100
+++ b/xen/include/asm-x86/shadow_64.h Mon Jun 19 16:47:21 2006 +0100
@@ -36,9 +36,9 @@
*/
extern struct shadow_ops MODE_64_2_HANDLER;
extern struct shadow_ops MODE_64_3_HANDLER;
+extern struct shadow_ops MODE_64_PAE_HANDLER;
#if CONFIG_PAGING_LEVELS == 4
extern struct shadow_ops MODE_64_4_HANDLER;
-extern struct shadow_ops MODE_64_PAE_HANDLER;
#endif
#if CONFIG_PAGING_LEVELS == 3
@@ -65,10 +65,6 @@ typedef struct { intpte_t l4; } l4_pgent
#define ESH_LOG(_f, _a...) ((void)0)
#endif
-#define PAGING_L4 4UL
-#define PAGING_L3 3UL
-#define PAGING_L2 2UL
-#define PAGING_L1 1UL
#define L_MASK 0xff
#define PAE_PAGING_LEVELS 3
@@ -108,18 +104,14 @@ typedef struct { intpte_t lo; } pgentry_
#define entry_has_changed(x,y,flags) \
( !!(((x).lo ^ (y).lo) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
+/******************************************************************************/
+/*
+ * The macro and inlines are for 32-bit PAE guest
+ */
+#define PAE_PDPT_RESERVED 0x1e6 /* [8:5], [2,1] */
+
#define PAE_SHADOW_SELF_ENTRY 259
#define PAE_L3_PAGETABLE_ENTRIES 4
-
-/******************************************************************************/
-/*
- * The macro and inlines are for 32-bit PAE guest on 64-bit host
- */
-#define PAE_CR3_ALIGN 5
-#define PAE_CR3_IDX_MASK 0x7f
-#define PAE_CR3_IDX_NO 128
-
-#define PAE_PDPT_RESERVED 0x1e6 /* [8:5], [2,1] */
/******************************************************************************/
static inline int table_offset_64(unsigned long va, int level)
@@ -186,19 +178,10 @@ static inline int guest_table_offset_64(
}
}
-static inline unsigned long get_cr3_idxval(struct vcpu *v)
-{
- unsigned long pae_cr3 = hvm_get_guest_ctrl_reg(v, 3); /* get CR3 */
-
- return (pae_cr3 >> PAE_CR3_ALIGN) & PAE_CR3_IDX_MASK;
-}
-
-
#define SH_GUEST_32PAE 1
#else
#define guest_table_offset_64(va, level, index) \
table_offset_64((va),(level))
-#define get_cr3_idxval(v) 0
#define SH_GUEST_32PAE 0
#endif
@@ -514,7 +497,10 @@ static inline void entry_general(
l1_p =(pgentry_64_t *)map_domain_page(smfn);
for (i = 0; i < L1_PAGETABLE_ENTRIES; i++)
- entry_remove_flags(l1_p[i], _PAGE_RW);
+ {
+ if ( mfn_is_page_table(entry_get_pfn(l1_p[i])) )
+ entry_remove_flags(l1_p[i], _PAGE_RW);
+ }
unmap_domain_page(l1_p);
}
diff -r 0991ed8e4ae5 -r 1507021dccdf xen/include/asm-x86/shadow_ops.h
--- a/xen/include/asm-x86/shadow_ops.h Mon Jun 19 16:39:27 2006 +0100
+++ b/xen/include/asm-x86/shadow_ops.h Mon Jun 19 16:47:21 2006 +0100
@@ -21,6 +21,14 @@
#ifndef _XEN_SHADOW_OPS_H
#define _XEN_SHADOW_OPS_H
+
+#define PAGING_L4 4UL
+#define PAGING_L3 3UL
+#define PAGING_L2 2UL
+#define PAGING_L1 1UL
+
+#define PAE_CR3_ALIGN 5
+#define PAE_CR3_IDX_MASK 0x7f
#if defined( GUEST_PGENTRY_32 )