# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
# Node ID 6f0d8434d23f819e935e85bcee3885ce7bc1abe0
# Parent a8d2b1393b769048c7b62822e45bef27eef80fb6
[XEN] Use a separate struct shadow_page_info for shadow pages
and move the definitions of shadow types etc. out of public headers.
Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
---
xen/arch/x86/mm/shadow/common.c | 443 ++++++++++++++++-----------------------
xen/arch/x86/mm/shadow/multi.c | 258 ++++++++++------------
xen/arch/x86/mm/shadow/private.h | 155 ++++++++++---
xen/arch/x86/mm/shadow/types.h | 24 +-
xen/include/asm-x86/mm.h | 42 ---
xen/include/asm-x86/shadow.h | 26 --
6 files changed, 440 insertions(+), 508 deletions(-)
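
[Reviewer's note, not part of the changeset: a short sketch of how the new
struct is meant to be used. The patch calls mfn_to_shadow_page() and
shadow_page_to_mfn() throughout, but the hunk that defines them is not shown
in full here, so the two helpers below are an assumption about how private.h
implements them, and the exact headers in scope are likewise assumed.]

    /* Sketch only: struct shadow_page_info overlays struct page_info in
     * the frametable, so converting between an mfn and its shadow metadata
     * is (assumed to be) just a cast around the existing frametable macros.
     * Assumes mfn_t, struct page_info, mfn_to_page() and page_to_mfn()
     * are in scope as in the rest of the shadow code. */
    static inline struct shadow_page_info *
    mfn_to_shadow_page(mfn_t mfn)
    {
        return (struct shadow_page_info *)mfn_to_page(mfn);
    }

    static inline mfn_t
    shadow_page_to_mfn(struct shadow_page_info *sp)
    {
        return page_to_mfn((struct page_info *)sp);
    }

Because the two structs are the same size and the new mbz field lines up with
page_info's owner field (enforced by the BUILD_BUG_ONs added to private.h
below), shadow code can read sp->type, sp->count and sp->backpointer directly
instead of packing them into count_info and type_info with the old PGC_SH_*
masks and shifts.
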
diff -r a8d2b1393b76 -r 6f0d8434d23f xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c Tue Nov 21 18:09:23 2006 -0800
+++ b/xen/arch/x86/mm/shadow/common.c Thu Nov 23 17:40:28 2006 +0000
@@ -190,7 +190,7 @@ struct x86_emulate_ops shadow_emulator_o
* involves making sure there are no writable mappings available to the guest
* for this page.
*/
-void shadow_promote(struct vcpu *v, mfn_t gmfn, u32 type)
+void shadow_promote(struct vcpu *v, mfn_t gmfn, unsigned int type)
{
struct page_info *page = mfn_to_page(gmfn);
@@ -203,8 +203,8 @@ void shadow_promote(struct vcpu *v, mfn_
if ( !test_and_set_bit(_PGC_page_table, &page->count_info) )
page->shadow_flags = 0;
- ASSERT(!test_bit(type >> PGC_SH_type_shift, &page->shadow_flags));
- set_bit(type >> PGC_SH_type_shift, &page->shadow_flags);
+ ASSERT(!test_bit(type, &page->shadow_flags));
+ set_bit(type, &page->shadow_flags);
}
void shadow_demote(struct vcpu *v, mfn_t gmfn, u32 type)
@@ -212,9 +212,9 @@ void shadow_demote(struct vcpu *v, mfn_t
struct page_info *page = mfn_to_page(gmfn);
ASSERT(test_bit(_PGC_page_table, &page->count_info));
- ASSERT(test_bit(type >> PGC_SH_type_shift, &page->shadow_flags));
-
- clear_bit(type >> PGC_SH_type_shift, &page->shadow_flags);
+ ASSERT(test_bit(type, &page->shadow_flags));
+
+ clear_bit(type, &page->shadow_flags);
if ( (page->shadow_flags & SHF_page_type_mask) == 0 )
{
@@ -351,75 +351,6 @@ shadow_validate_guest_pt_write(struct vc
/**************************************************************************/
/* Memory management for shadow pages. */
-
-/* Meaning of the count_info field in shadow pages
- * ----------------------------------------------
- *
- * A count of all references to this page from other shadow pages and
- * guest CR3s (a.k.a. v->arch.shadow.table).
- *
- * The top bits hold the shadow type and the pinned bit. Top-level
- * shadows are pinned so that they don't disappear when not in a CR3
- * somewhere.
- *
- * We don't need to use get|put_page for this as the updates are all
- * protected by the shadow lock. We can't use get|put_page for this
- * as the size of the count on shadow pages is different from that on
- * normal guest pages.
- */
-
-/* Meaning of the type_info field in shadow pages
- * ----------------------------------------------
- *
- * type_info use depends on the shadow type (from count_info)
- *
- * PGC_SH_none : This page is in the shadow free pool. type_info holds
- * the chunk order for our freelist allocator.
- *
- * PGC_SH_l*_shadow : This page is in use as a shadow. type_info
- * holds the mfn of the guest page being shadowed,
- *
- * PGC_SH_fl1_*_shadow : This page is being used to shatter a superpage.
- * type_info holds the gfn being shattered.
- *
- * PGC_SH_monitor_table : This page is part of a monitor table.
- * type_info is not used.
- */
-
-/* Meaning of the _domain field in shadow pages
- * --------------------------------------------
- *
- * In shadow pages, this field will always have its least significant bit
- * set. This ensures that all attempts to get_page() will fail (as all
- * valid pickled domain pointers have a zero for their least significant bit).
- * Instead, the remaining upper bits are used to record the shadow generation
- * counter when the shadow was created.
- */
-
-/* Meaning of the shadow_flags field
- * ----------------------------------
- *
- * In guest pages that are shadowed, one bit for each kind of shadow they have.
- *
- * In shadow pages, will be used for holding a representation of the populated
- * entries in this shadow (either a min/max, or a bitmap, or ...)
- *
- * In monitor-table pages, holds the level of the particular page (to save
- * spilling the shadow types into an extra bit by having three types of monitor
- * page).
- */
-
-/* Meaning of the list_head struct in shadow pages
- * -----------------------------------------------
- *
- * In free shadow pages, this is used to hold the free-lists of chunks.
- *
- * In top-level shadow tables, this holds a linked-list of all top-level
- * shadows (used for recovering memory and destroying shadows).
- *
- * In lower-level shadows, this holds the physical address of a higher-level
- * shadow entry that holds a reference to this shadow (or zero).
- */
/* Allocating shadow pages
* -----------------------
@@ -475,38 +406,32 @@ unsigned int shadow_min_acceptable_pages
vcpu_count++;
return (vcpu_count * 128);
-}
-
-/* Using the type_info field to store freelist order */
-#define SH_PFN_ORDER(_p) ((_p)->u.inuse.type_info)
-#define SH_SET_PFN_ORDER(_p, _o) \
- do { (_p)->u.inuse.type_info = (_o); } while (0)
-
+}
/* Figure out the order of allocation needed for a given shadow type */
static inline u32
-shadow_order(u32 shadow_type)
+shadow_order(unsigned int shadow_type)
{
#if CONFIG_PAGING_LEVELS > 2
static const u32 type_to_order[16] = {
- 0, /* PGC_SH_none */
- 1, /* PGC_SH_l1_32_shadow */
- 1, /* PGC_SH_fl1_32_shadow */
- 2, /* PGC_SH_l2_32_shadow */
- 0, /* PGC_SH_l1_pae_shadow */
- 0, /* PGC_SH_fl1_pae_shadow */
- 0, /* PGC_SH_l2_pae_shadow */
- 0, /* PGC_SH_l2h_pae_shadow */
- 0, /* PGC_SH_l1_64_shadow */
- 0, /* PGC_SH_fl1_64_shadow */
- 0, /* PGC_SH_l2_64_shadow */
- 0, /* PGC_SH_l3_64_shadow */
- 0, /* PGC_SH_l4_64_shadow */
- 2, /* PGC_SH_p2m_table */
- 0 /* PGC_SH_monitor_table */
+ 0, /* SH_type_none */
+ 1, /* SH_type_l1_32_shadow */
+ 1, /* SH_type_fl1_32_shadow */
+ 2, /* SH_type_l2_32_shadow */
+ 0, /* SH_type_l1_pae_shadow */
+ 0, /* SH_type_fl1_pae_shadow */
+ 0, /* SH_type_l2_pae_shadow */
+ 0, /* SH_type_l2h_pae_shadow */
+ 0, /* SH_type_l1_64_shadow */
+ 0, /* SH_type_fl1_64_shadow */
+ 0, /* SH_type_l2_64_shadow */
+ 0, /* SH_type_l3_64_shadow */
+ 0, /* SH_type_l4_64_shadow */
+ 2, /* SH_type_p2m_table */
+ 0 /* SH_type_monitor_table */
};
- u32 type = (shadow_type & PGC_SH_type_mask) >> PGC_SH_type_shift;
- return type_to_order[type];
+ ASSERT(shadow_type < 16);
+ return type_to_order[shadow_type];
#else /* 32-bit Xen only ever shadows 32-bit guests on 32-bit shadows. */
return 0;
#endif
@@ -528,10 +453,10 @@ static inline int chunk_is_available(str
* non-Xen mappings in this top-level shadow mfn */
void shadow_unhook_mappings(struct vcpu *v, mfn_t smfn)
{
- struct page_info *pg = mfn_to_page(smfn);
- switch ( (pg->count_info & PGC_SH_type_mask) >> PGC_SH_type_shift )
- {
- case PGC_SH_l2_32_shadow >> PGC_SH_type_shift:
+ struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
+ switch ( sp->type )
+ {
+ case SH_type_l2_32_shadow:
#if CONFIG_PAGING_LEVELS == 2
SHADOW_INTERNAL_NAME(sh_unhook_32b_mappings,2,2)(v,smfn);
#else
@@ -539,20 +464,18 @@ void shadow_unhook_mappings(struct vcpu
#endif
break;
#if CONFIG_PAGING_LEVELS >= 3
- case PGC_SH_l2_pae_shadow >> PGC_SH_type_shift:
- case PGC_SH_l2h_pae_shadow >> PGC_SH_type_shift:
+ case SH_type_l2_pae_shadow:
+ case SH_type_l2h_pae_shadow:
SHADOW_INTERNAL_NAME(sh_unhook_pae_mappings,3,3)(v,smfn);
break;
#endif
#if CONFIG_PAGING_LEVELS >= 4
- case PGC_SH_l4_64_shadow >> PGC_SH_type_shift:
+ case SH_type_l4_64_shadow:
SHADOW_INTERNAL_NAME(sh_unhook_64b_mappings,4,4)(v,smfn);
break;
#endif
default:
- SHADOW_PRINTK("top-level shadow has bad type %08lx\n",
- (unsigned long)((pg->count_info & PGC_SH_type_mask)
- >> PGC_SH_type_shift));
+ SHADOW_PRINTK("top-level shadow has bad type %08x\n", sp->type);
BUG();
}
}
@@ -569,7 +492,7 @@ void shadow_prealloc(struct domain *d, u
* per-vcpu shadows, any will do */
struct vcpu *v, *v2;
struct list_head *l, *t;
- struct page_info *pg;
+ struct shadow_page_info *sp;
cpumask_t flushmask = CPU_MASK_NONE;
mfn_t smfn;
@@ -584,8 +507,8 @@ void shadow_prealloc(struct domain *d, u
perfc_incrc(shadow_prealloc_1);
list_for_each_backwards_safe(l, t, &d->arch.shadow.toplevel_shadows)
{
- pg = list_entry(l, struct page_info, list);
- smfn = page_to_mfn(pg);
+ sp = list_entry(l, struct shadow_page_info, list);
+ smfn = shadow_page_to_mfn(sp);
/* Unpin this top-level shadow */
sh_unpin(v, smfn);
@@ -600,8 +523,8 @@ void shadow_prealloc(struct domain *d, u
perfc_incrc(shadow_prealloc_2);
list_for_each_backwards_safe(l, t, &d->arch.shadow.toplevel_shadows)
{
- pg = list_entry(l, struct page_info, list);
- smfn = page_to_mfn(pg);
+ sp = list_entry(l, struct shadow_page_info, list);
+ smfn = shadow_page_to_mfn(sp);
shadow_unhook_mappings(v, smfn);
/* Remember to flush TLBs: we have removed shadow entries that
@@ -642,7 +565,7 @@ static void shadow_blow_tables(unsigned
static void shadow_blow_tables(unsigned char c)
{
struct list_head *l, *t;
- struct page_info *pg;
+ struct shadow_page_info *sp;
struct domain *d;
struct vcpu *v;
mfn_t smfn;
@@ -657,16 +580,16 @@ static void shadow_blow_tables(unsigned
/* Pass one: unpin all top-level pages */
list_for_each_backwards_safe(l,t, &d->arch.shadow.toplevel_shadows)
{
- pg = list_entry(l, struct page_info, list);
- smfn = page_to_mfn(pg);
+ sp = list_entry(l, struct shadow_page_info, list);
+ smfn = shadow_page_to_mfn(sp);
sh_unpin(v, smfn);
}
/* Second pass: unhook entries of in-use shadows */
list_for_each_backwards_safe(l,t, &d->arch.shadow.toplevel_shadows)
{
- pg = list_entry(l, struct page_info, list);
- smfn = page_to_mfn(pg);
+ sp = list_entry(l, struct shadow_page_info, list);
+ smfn = shadow_page_to_mfn(sp);
shadow_unhook_mappings(v, smfn);
}
@@ -693,7 +616,7 @@ mfn_t shadow_alloc(struct domain *d,
u32 shadow_type,
unsigned long backpointer)
{
- struct page_info *pg = NULL;
+ struct shadow_page_info *sp = NULL;
unsigned int order = shadow_order(shadow_type);
cpumask_t mask;
void *p;
@@ -701,51 +624,54 @@ mfn_t shadow_alloc(struct domain *d,
ASSERT(shadow_lock_is_acquired(d));
ASSERT(order <= SHADOW_MAX_ORDER);
- ASSERT(shadow_type != PGC_SH_none);
+ ASSERT(shadow_type != SH_type_none);
perfc_incrc(shadow_alloc);
/* Find smallest order which can satisfy the request. */
for ( i = order; i <= SHADOW_MAX_ORDER; i++ )
if ( !list_empty(&d->arch.shadow.freelists[i]) )
{
- pg = list_entry(d->arch.shadow.freelists[i].next,
- struct page_info, list);
- list_del(&pg->list);
+ sp = list_entry(d->arch.shadow.freelists[i].next,
+ struct shadow_page_info, list);
+ list_del(&sp->list);
/* We may have to halve the chunk a number of times. */
while ( i != order )
{
i--;
- SH_SET_PFN_ORDER(pg, i);
- list_add_tail(&pg->list, &d->arch.shadow.freelists[i]);
- pg += 1 << i;
+ sp->order = i;
+ list_add_tail(&sp->list, &d->arch.shadow.freelists[i]);
+ sp += 1 << i;
}
d->arch.shadow.free_pages -= 1 << order;
/* Init page info fields and clear the pages */
for ( i = 0; i < 1<<order ; i++ )
{
- pg[i].u.inuse.type_info = backpointer;
- pg[i].count_info = shadow_type;
- pg[i].shadow_flags = 0;
- INIT_LIST_HEAD(&pg[i].list);
/* Before we overwrite the old contents of this page,
* we need to be sure that no TLB holds a pointer to it. */
mask = d->domain_dirty_cpumask;
- tlbflush_filter(mask, pg[i].tlbflush_timestamp);
+ tlbflush_filter(mask, sp[i].tlbflush_timestamp);
if ( unlikely(!cpus_empty(mask)) )
{
perfc_incrc(shadow_alloc_tlbflush);
flush_tlb_mask(mask);
}
/* Now safe to clear the page for reuse */
- p = sh_map_domain_page(page_to_mfn(pg+i));
+ p = sh_map_domain_page(shadow_page_to_mfn(sp+i));
ASSERT(p != NULL);
clear_page(p);
sh_unmap_domain_page(p);
+ INIT_LIST_HEAD(&sp[i].list);
+ sp[i].type = shadow_type;
+ sp[i].pinned = 0;
+ sp[i].logdirty = 0;
+ sp[i].count = 0;
+ sp[i].backpointer = backpointer;
+ sp[i].next_shadow = NULL;
perfc_incr(shadow_alloc_count);
}
- return page_to_mfn(pg);
+ return shadow_page_to_mfn(sp);
}
/* If we get here, we failed to allocate. This should never happen.
@@ -760,7 +686,7 @@ mfn_t shadow_alloc(struct domain *d,
/* Return some shadow pages to the pool. */
void shadow_free(struct domain *d, mfn_t smfn)
{
- struct page_info *pg = mfn_to_page(smfn);
+ struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
u32 shadow_type;
unsigned long order;
unsigned long mask;
@@ -769,9 +695,9 @@ void shadow_free(struct domain *d, mfn_t
ASSERT(shadow_lock_is_acquired(d));
perfc_incrc(shadow_free);
- shadow_type = pg->count_info & PGC_SH_type_mask;
- ASSERT(shadow_type != PGC_SH_none);
- ASSERT(shadow_type != PGC_SH_p2m_table);
+ shadow_type = sp->type;
+ ASSERT(shadow_type != SH_type_none);
+ ASSERT(shadow_type != SH_type_p2m_table);
order = shadow_order(shadow_type);
d->arch.shadow.free_pages += 1 << order;
@@ -788,12 +714,12 @@ void shadow_free(struct domain *d, mfn_t
}
#endif
/* Strip out the type: this is now a free shadow page */
- pg[i].count_info = 0;
+ sp[i].type = 0;
/* Remember the TLB timestamp so we will know whether to flush
* TLBs when we reuse the page. Because the destructors leave the
* contents of the pages in place, we can delay TLB flushes until
* just before the allocator hands the page out again. */
- pg[i].tlbflush_timestamp = tlbflush_current_time();
+ sp[i].tlbflush_timestamp = tlbflush_current_time();
perfc_decr(shadow_alloc_count);
}
@@ -801,25 +727,23 @@ void shadow_free(struct domain *d, mfn_t
while ( order < SHADOW_MAX_ORDER )
{
mask = 1 << order;
- if ( (mfn_x(page_to_mfn(pg)) & mask) ) {
+ if ( (mfn_x(shadow_page_to_mfn(sp)) & mask) ) {
/* Merge with predecessor block? */
- if ( (((pg-mask)->count_info & PGC_SH_type_mask) != PGT_none)
- || (SH_PFN_ORDER(pg-mask) != order) )
+ if ( ((sp-mask)->type != PGT_none) || ((sp-mask)->order != order) )
break;
- list_del(&(pg-mask)->list);
- pg -= mask;
+ list_del(&(sp-mask)->list);
+ sp -= mask;
} else {
/* Merge with successor block? */
- if ( (((pg+mask)->count_info & PGC_SH_type_mask) != PGT_none)
- || (SH_PFN_ORDER(pg+mask) != order) )
+ if ( ((sp+mask)->type != PGT_none) || ((sp+mask)->order != order) )
break;
- list_del(&(pg+mask)->list);
+ list_del(&(sp+mask)->list);
}
order++;
}
- SH_SET_PFN_ORDER(pg, order);
- list_add_tail(&pg->list, &d->arch.shadow.freelists[order]);
+ sp->order = order;
+ list_add_tail(&sp->list, &d->arch.shadow.freelists[order]);
}
/* Divert some memory from the pool to be used by the p2m mapping.
@@ -843,7 +767,7 @@ shadow_alloc_p2m_pages(struct domain *d)
< (shadow_min_acceptable_pages(d) + (1<<SHADOW_MAX_ORDER)) )
return 0; /* Not enough shadow memory: need to increase it first */
- pg = mfn_to_page(shadow_alloc(d, PGC_SH_p2m_table, 0));
+ pg = mfn_to_page(shadow_alloc(d, SH_type_p2m_table, 0));
d->arch.shadow.p2m_pages += (1<<SHADOW_MAX_ORDER);
d->arch.shadow.total_pages -= (1<<SHADOW_MAX_ORDER);
for (i = 0; i < (1<<SHADOW_MAX_ORDER); i++)
@@ -1221,7 +1145,7 @@ static void shadow_p2m_teardown(struct d
pg = list_entry(entry, struct page_info, list);
list_del(entry);
/* Should have just the one ref we gave it in alloc_p2m_page() */
- if ( (pg->count_info & PGC_SH_count_mask) != 1 )
+ if ( (pg->count_info & PGC_count_mask) != 1 )
{
SHADOW_PRINTK("Odd p2m page count c=%#x t=%"PRtype_info"\n",
pg->count_info, pg->u.inuse.type_info);
@@ -1256,7 +1180,7 @@ static unsigned int set_sh_allocation(st
unsigned int pages,
int *preempted)
{
- struct page_info *pg;
+ struct shadow_page_info *sp;
unsigned int lower_bound;
int j;
@@ -1278,8 +1202,9 @@ static unsigned int set_sh_allocation(st
if ( d->arch.shadow.total_pages < pages )
{
/* Need to allocate more memory from domheap */
- pg = alloc_domheap_pages(NULL, SHADOW_MAX_ORDER, 0);
- if ( pg == NULL )
+ sp = (struct shadow_page_info *)
+ alloc_domheap_pages(NULL, SHADOW_MAX_ORDER, 0);
+ if ( sp == NULL )
{
SHADOW_PRINTK("failed to allocate shadow pages.\n");
return -ENOMEM;
@@ -1288,11 +1213,15 @@ static unsigned int set_sh_allocation(st
d->arch.shadow.total_pages += 1<<SHADOW_MAX_ORDER;
for ( j = 0; j < 1<<SHADOW_MAX_ORDER; j++ )
{
- pg[j].u.inuse.type_info = 0; /* Free page */
- pg[j].tlbflush_timestamp = 0; /* Not in any TLB */
+ sp[j].type = 0;
+ sp[j].pinned = 0;
+ sp[j].logdirty = 0;
+ sp[j].count = 0;
+ sp[j].mbz = 0;
+ sp[j].tlbflush_timestamp = 0; /* Not in any TLB */
}
- SH_SET_PFN_ORDER(pg, SHADOW_MAX_ORDER);
- list_add_tail(&pg->list,
+ sp->order = SHADOW_MAX_ORDER;
+ list_add_tail(&sp->list,
&d->arch.shadow.freelists[SHADOW_MAX_ORDER]);
}
else if ( d->arch.shadow.total_pages > pages )
@@ -1300,12 +1229,12 @@ static unsigned int set_sh_allocation(st
/* Need to return memory to domheap */
shadow_prealloc(d, SHADOW_MAX_ORDER);
ASSERT(!list_empty(&d->arch.shadow.freelists[SHADOW_MAX_ORDER]));
- pg = list_entry(d->arch.shadow.freelists[SHADOW_MAX_ORDER].next,
- struct page_info, list);
- list_del(&pg->list);
+ sp = list_entry(d->arch.shadow.freelists[SHADOW_MAX_ORDER].next,
+ struct shadow_page_info, list);
+ list_del(&sp->list);
d->arch.shadow.free_pages -= 1<<SHADOW_MAX_ORDER;
d->arch.shadow.total_pages -= 1<<SHADOW_MAX_ORDER;
- free_domheap_pages(pg, SHADOW_MAX_ORDER);
+ free_domheap_pages((struct page_info *)sp, SHADOW_MAX_ORDER);
}
/* Check to see if we need to yield and try again */
@@ -1357,7 +1286,7 @@ static void sh_hash_audit_bucket(struct
/* Audit one bucket of the hash table */
{
struct shadow_hash_entry *e, *x;
- struct page_info *pg;
+ struct shadow_page_info *sp;
if ( !(SHADOW_AUDIT_ENABLE) )
return;
@@ -1369,7 +1298,7 @@ static void sh_hash_audit_bucket(struct
/* Empty link? */
BUG_ON( e->t == 0 );
/* Bogus type? */
- BUG_ON( e->t > (PGC_SH_max_shadow >> PGC_SH_type_shift) );
+ BUG_ON( e->t > SH_type_max_shadow );
/* Wrong bucket? */
BUG_ON( sh_hash(e->n, e->t) % SHADOW_HASH_BUCKETS != bucket );
/* Duplicate entry? */
@@ -1377,17 +1306,16 @@ static void sh_hash_audit_bucket(struct
BUG_ON( x->n == e->n && x->t == e->t );
/* Bogus MFN? */
BUG_ON( !valid_mfn(e->smfn) );
- pg = mfn_to_page(e->smfn);
+ sp = mfn_to_shadow_page(e->smfn);
/* Not a shadow? */
- BUG_ON( page_get_owner(pg) != 0 );
+ BUG_ON( sp->mbz != 0 );
/* Wrong kind of shadow? */
- BUG_ON( (pg->count_info & PGC_SH_type_mask) >> PGC_SH_type_shift
- != e->t );
+ BUG_ON( sp->type != e->t );
/* Bad backlink? */
- BUG_ON( pg->u.inuse.type_info != e->n );
- if ( e->t != (PGC_SH_fl1_32_shadow >> PGC_SH_type_shift)
- && e->t != (PGC_SH_fl1_pae_shadow >> PGC_SH_type_shift)
- && e->t != (PGC_SH_fl1_64_shadow >> PGC_SH_type_shift) )
+ BUG_ON( sp->backpointer != e->n );
+ if ( e->t != SH_type_fl1_32_shadow
+ && e->t != SH_type_fl1_pae_shadow
+ && e->t != SH_type_fl1_64_shadow )
{
struct page_info *gpg = mfn_to_page(_mfn(e->n));
/* Bad shadow flags on guest page? */
@@ -1752,66 +1680,66 @@ static void hash_foreach(struct vcpu *v,
void sh_destroy_shadow(struct vcpu *v, mfn_t smfn)
{
- struct page_info *pg = mfn_to_page(smfn);
- u32 t = pg->count_info & PGC_SH_type_mask;
+ struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
+ unsigned int t = sp->type;
SHADOW_PRINTK("smfn=%#lx\n", mfn_x(smfn));
/* Double-check, if we can, that the shadowed page belongs to this
* domain, (by following the back-pointer). */
- ASSERT(t == PGC_SH_fl1_32_shadow ||
- t == PGC_SH_fl1_pae_shadow ||
- t == PGC_SH_fl1_64_shadow ||
- t == PGC_SH_monitor_table ||
- (page_get_owner(mfn_to_page(_mfn(pg->u.inuse.type_info)))
+ ASSERT(t == SH_type_fl1_32_shadow ||
+ t == SH_type_fl1_pae_shadow ||
+ t == SH_type_fl1_64_shadow ||
+ t == SH_type_monitor_table ||
+ (page_get_owner(mfn_to_page(_mfn(sp->backpointer)))
== v->domain));
/* The down-shifts here are so that the switch statement is on nice
* small numbers that the compiler will enjoy */
- switch ( t >> PGC_SH_type_shift )
+ switch ( t )
{
#if CONFIG_PAGING_LEVELS == 2
- case PGC_SH_l1_32_shadow >> PGC_SH_type_shift:
- case PGC_SH_fl1_32_shadow >> PGC_SH_type_shift:
+ case SH_type_l1_32_shadow:
+ case SH_type_fl1_32_shadow:
SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 2, 2)(v, smfn);
break;
- case PGC_SH_l2_32_shadow >> PGC_SH_type_shift:
+ case SH_type_l2_32_shadow:
SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 2, 2)(v, smfn);
break;
#else /* PAE or 64bit */
- case PGC_SH_l1_32_shadow >> PGC_SH_type_shift:
- case PGC_SH_fl1_32_shadow >> PGC_SH_type_shift:
+ case SH_type_l1_32_shadow:
+ case SH_type_fl1_32_shadow:
SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 3, 2)(v, smfn);
break;
- case PGC_SH_l2_32_shadow >> PGC_SH_type_shift:
+ case SH_type_l2_32_shadow:
SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 3, 2)(v, smfn);
break;
#endif
#if CONFIG_PAGING_LEVELS >= 3
- case PGC_SH_l1_pae_shadow >> PGC_SH_type_shift:
- case PGC_SH_fl1_pae_shadow >> PGC_SH_type_shift:
+ case SH_type_l1_pae_shadow:
+ case SH_type_fl1_pae_shadow:
SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 3, 3)(v, smfn);
break;
- case PGC_SH_l2_pae_shadow >> PGC_SH_type_shift:
- case PGC_SH_l2h_pae_shadow >> PGC_SH_type_shift:
+ case SH_type_l2_pae_shadow:
+ case SH_type_l2h_pae_shadow:
SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 3, 3)(v, smfn);
break;
#endif
#if CONFIG_PAGING_LEVELS >= 4
- case PGC_SH_l1_64_shadow >> PGC_SH_type_shift:
- case PGC_SH_fl1_64_shadow >> PGC_SH_type_shift:
+ case SH_type_l1_64_shadow:
+ case SH_type_fl1_64_shadow:
SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 4, 4)(v, smfn);
break;
- case PGC_SH_l2_64_shadow >> PGC_SH_type_shift:
+ case SH_type_l2_64_shadow:
SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 4, 4)(v, smfn);
break;
- case PGC_SH_l3_64_shadow >> PGC_SH_type_shift:
+ case SH_type_l3_64_shadow:
SHADOW_INTERNAL_NAME(sh_destroy_l3_shadow, 4, 4)(v, smfn);
break;
- case PGC_SH_l4_64_shadow >> PGC_SH_type_shift:
+ case SH_type_l4_64_shadow:
SHADOW_INTERNAL_NAME(sh_destroy_l4_shadow, 4, 4)(v, smfn);
break;
#endif
@@ -1867,12 +1795,12 @@ int shadow_remove_write_access(struct vc
};
static unsigned int callback_mask =
- 1 << (PGC_SH_l1_32_shadow >> PGC_SH_type_shift)
- | 1 << (PGC_SH_fl1_32_shadow >> PGC_SH_type_shift)
- | 1 << (PGC_SH_l1_pae_shadow >> PGC_SH_type_shift)
- | 1 << (PGC_SH_fl1_pae_shadow >> PGC_SH_type_shift)
- | 1 << (PGC_SH_l1_64_shadow >> PGC_SH_type_shift)
- | 1 << (PGC_SH_fl1_64_shadow >> PGC_SH_type_shift)
+ 1 << SH_type_l1_32_shadow
+ | 1 << SH_type_fl1_32_shadow
+ | 1 << SH_type_l1_pae_shadow
+ | 1 << SH_type_fl1_pae_shadow
+ | 1 << SH_type_l1_64_shadow
+ | 1 << SH_type_fl1_64_shadow
;
struct page_info *pg = mfn_to_page(gmfn);
@@ -1979,8 +1907,7 @@ int shadow_remove_write_access(struct vc
{
unsigned long old_count = (pg->u.inuse.type_info & PGT_count_mask);
mfn_t last_smfn = _mfn(v->arch.shadow.last_writeable_pte_smfn);
- int shtype = (mfn_to_page(last_smfn)->count_info & PGC_SH_type_mask)
- >> PGC_SH_type_shift;
+ int shtype = mfn_to_shadow_page(last_smfn)->type;
if ( callbacks[shtype] )
callbacks[shtype](v, last_smfn, gmfn);
@@ -2057,12 +1984,12 @@ int shadow_remove_all_mappings(struct vc
};
static unsigned int callback_mask =
- 1 << (PGC_SH_l1_32_shadow >> PGC_SH_type_shift)
- | 1 << (PGC_SH_fl1_32_shadow >> PGC_SH_type_shift)
- | 1 << (PGC_SH_l1_pae_shadow >> PGC_SH_type_shift)
- | 1 << (PGC_SH_fl1_pae_shadow >> PGC_SH_type_shift)
- | 1 << (PGC_SH_l1_64_shadow >> PGC_SH_type_shift)
- | 1 << (PGC_SH_fl1_64_shadow >> PGC_SH_type_shift)
+ 1 << SH_type_l1_32_shadow
+ | 1 << SH_type_fl1_32_shadow
+ | 1 << SH_type_l1_pae_shadow
+ | 1 << SH_type_fl1_pae_shadow
+ | 1 << SH_type_l1_64_shadow
+ | 1 << SH_type_fl1_64_shadow
;
perfc_incrc(shadow_mappings);
@@ -2106,34 +2033,34 @@ static int sh_remove_shadow_via_pointer(
/* Follow this shadow's up-pointer, if it has one, and remove the reference
* found there. Returns 1 if that was the only reference to this shadow */
{
- struct page_info *pg = mfn_to_page(smfn);
+ struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
mfn_t pmfn;
void *vaddr;
int rc;
- ASSERT((pg->count_info & PGC_SH_type_mask) > 0);
- ASSERT((pg->count_info & PGC_SH_type_mask) < PGC_SH_max_shadow);
- ASSERT((pg->count_info & PGC_SH_type_mask) != PGC_SH_l2_32_shadow);
- ASSERT((pg->count_info & PGC_SH_type_mask) != PGC_SH_l2_pae_shadow);
- ASSERT((pg->count_info & PGC_SH_type_mask) != PGC_SH_l2h_pae_shadow);
- ASSERT((pg->count_info & PGC_SH_type_mask) != PGC_SH_l4_64_shadow);
+ ASSERT(sp->type > 0);
+ ASSERT(sp->type < SH_type_max_shadow);
+ ASSERT(sp->type != SH_type_l2_32_shadow);
+ ASSERT(sp->type != SH_type_l2_pae_shadow);
+ ASSERT(sp->type != SH_type_l2h_pae_shadow);
+ ASSERT(sp->type != SH_type_l4_64_shadow);
- if (pg->up == 0) return 0;
- pmfn = _mfn(pg->up >> PAGE_SHIFT);
+ if (sp->up == 0) return 0;
+ pmfn = _mfn(sp->up >> PAGE_SHIFT);
ASSERT(valid_mfn(pmfn));
vaddr = sh_map_domain_page(pmfn);
ASSERT(vaddr);
- vaddr += pg->up & (PAGE_SIZE-1);
+ vaddr += sp->up & (PAGE_SIZE-1);
ASSERT(l1e_get_pfn(*(l1_pgentry_t *)vaddr) == mfn_x(smfn));
/* Is this the only reference to this shadow? */
- rc = ((pg->count_info & PGC_SH_count_mask) == 1) ? 1 : 0;
+ rc = (sp->count == 1) ? 1 : 0;
/* Blank the offending entry */
- switch ((pg->count_info & PGC_SH_type_mask))
- {
- case PGC_SH_l1_32_shadow:
- case PGC_SH_l2_32_shadow:
+ switch (sp->type)
+ {
+ case SH_type_l1_32_shadow:
+ case SH_type_l2_32_shadow:
#if CONFIG_PAGING_LEVELS == 2
SHADOW_INTERNAL_NAME(sh_clear_shadow_entry,2,2)(v, vaddr, pmfn);
#else
@@ -2141,16 +2068,16 @@ static int sh_remove_shadow_via_pointer(
#endif
break;
#if CONFIG_PAGING_LEVELS >=3
- case PGC_SH_l1_pae_shadow:
- case PGC_SH_l2_pae_shadow:
- case PGC_SH_l2h_pae_shadow:
+ case SH_type_l1_pae_shadow:
+ case SH_type_l2_pae_shadow:
+ case SH_type_l2h_pae_shadow:
SHADOW_INTERNAL_NAME(sh_clear_shadow_entry,3,3)(v, vaddr, pmfn);
break;
#if CONFIG_PAGING_LEVELS >= 4
- case PGC_SH_l1_64_shadow:
- case PGC_SH_l2_64_shadow:
- case PGC_SH_l3_64_shadow:
- case PGC_SH_l4_64_shadow:
+ case SH_type_l1_64_shadow:
+ case SH_type_l2_64_shadow:
+ case SH_type_l3_64_shadow:
+ case SH_type_l4_64_shadow:
SHADOW_INTERNAL_NAME(sh_clear_shadow_entry,4,4)(v, vaddr, pmfn);
break;
#endif
@@ -2219,18 +2146,18 @@ void sh_remove_shadows(struct vcpu *v, m
/* Another lookup table, for choosing which mask to use */
static unsigned int masks[16] = {
0, /* none */
- 1 << (PGC_SH_l2_32_shadow >> PGC_SH_type_shift), /* l1_32 */
+ 1 << SH_type_l2_32_shadow, /* l1_32 */
0, /* fl1_32 */
0, /* l2_32 */
- ((1 << (PGC_SH_l2h_pae_shadow >> PGC_SH_type_shift))
- | (1 << (PGC_SH_l2_pae_shadow >> PGC_SH_type_shift))), /* l1_pae */
+ ((1 << SH_type_l2h_pae_shadow)
+ | (1 << SH_type_l2_pae_shadow)), /* l1_pae */
0, /* fl1_pae */
0, /* l2_pae */
0, /* l2h_pae */
- 1 << (PGC_SH_l2_64_shadow >> PGC_SH_type_shift), /* l1_64 */
+ 1 << SH_type_l2_64_shadow, /* l1_64 */
0, /* fl1_64 */
- 1 << (PGC_SH_l3_64_shadow >> PGC_SH_type_shift), /* l2_64 */
- 1 << (PGC_SH_l4_64_shadow >> PGC_SH_type_shift), /* l3_64 */
+ 1 << SH_type_l3_64_shadow, /* l2_64 */
+ 1 << SH_type_l4_64_shadow, /* l3_64 */
0, /* l4_64 */
0, /* p2m */
0 /* unused */
@@ -2257,31 +2184,31 @@ void sh_remove_shadows(struct vcpu *v, m
* call will remove at most one shadow, and terminate immediately when
* it does remove it, so we never walk the hash after doing a deletion. */
#define DO_UNSHADOW(_type) do { \
- t = (_type) >> PGC_SH_type_shift; \
+ t = (_type); \
smfn = shadow_hash_lookup(v, mfn_x(gmfn), t); \
if ( !sh_remove_shadow_via_pointer(v, smfn) && !fast ) \
hash_foreach(v, masks[t], callbacks, smfn); \
} while (0)
/* Top-level shadows need to be unpinned */
-#define DO_UNPIN(_type) do { \
- t = (_type) >> PGC_SH_type_shift; \
- smfn = shadow_hash_lookup(v, mfn_x(gmfn), t); \
- if ( mfn_to_page(smfn)->count_info & PGC_SH_pinned ) \
- sh_unpin(v, smfn); \
+#define DO_UNPIN(_type) do { \
+ t = (_type); \
+ smfn = shadow_hash_lookup(v, mfn_x(gmfn), t); \
+ if ( mfn_to_shadow_page(smfn)->pinned ) \
+ sh_unpin(v, smfn); \
} while (0)
- if ( sh_flags & SHF_L1_32 ) DO_UNSHADOW(PGC_SH_l1_32_shadow);
- if ( sh_flags & SHF_L2_32 ) DO_UNPIN(PGC_SH_l2_32_shadow);
+ if ( sh_flags & SHF_L1_32 ) DO_UNSHADOW(SH_type_l1_32_shadow);
+ if ( sh_flags & SHF_L2_32 ) DO_UNPIN(SH_type_l2_32_shadow);
#if CONFIG_PAGING_LEVELS >= 3
- if ( sh_flags & SHF_L1_PAE ) DO_UNSHADOW(PGC_SH_l1_pae_shadow);
- if ( sh_flags & SHF_L2_PAE ) DO_UNPIN(PGC_SH_l2_pae_shadow);
- if ( sh_flags & SHF_L2H_PAE ) DO_UNPIN(PGC_SH_l2h_pae_shadow);
+ if ( sh_flags & SHF_L1_PAE ) DO_UNSHADOW(SH_type_l1_pae_shadow);
+ if ( sh_flags & SHF_L2_PAE ) DO_UNPIN(SH_type_l2_pae_shadow);
+ if ( sh_flags & SHF_L2H_PAE ) DO_UNPIN(SH_type_l2h_pae_shadow);
#if CONFIG_PAGING_LEVELS >= 4
- if ( sh_flags & SHF_L1_64 ) DO_UNSHADOW(PGC_SH_l1_64_shadow);
- if ( sh_flags & SHF_L2_64 ) DO_UNSHADOW(PGC_SH_l2_64_shadow);
- if ( sh_flags & SHF_L3_64 ) DO_UNSHADOW(PGC_SH_l3_64_shadow);
- if ( sh_flags & SHF_L4_64 ) DO_UNPIN(PGC_SH_l4_64_shadow);
+ if ( sh_flags & SHF_L1_64 ) DO_UNSHADOW(SH_type_l1_64_shadow);
+ if ( sh_flags & SHF_L2_64 ) DO_UNSHADOW(SH_type_l2_64_shadow);
+ if ( sh_flags & SHF_L3_64 ) DO_UNSHADOW(SH_type_l3_64_shadow);
+ if ( sh_flags & SHF_L4_64 ) DO_UNPIN(SH_type_l4_64_shadow);
#endif
#endif
@@ -2292,7 +2219,7 @@ void sh_remove_shadows(struct vcpu *v, m
if ( !fast && (pg->count_info & PGC_page_table) )
{
SHADOW_ERROR("can't find all shadows of mfn %05lx "
- "(shadow_flags=%08x)\n",
+ "(shadow_flags=%08lx)\n",
mfn_x(gmfn), pg->shadow_flags);
if ( all )
domain_crash(v->domain);
@@ -3021,16 +2948,16 @@ static int shadow_log_dirty_op(
if ( clean )
{
struct list_head *l, *t;
- struct page_info *pg;
+ struct shadow_page_info *sp;
/* Need to revoke write access to the domain's pages again.
* In future, we'll have a less heavy-handed approach to this,
* but for now, we just unshadow everything except Xen. */
list_for_each_safe(l, t, &d->arch.shadow.toplevel_shadows)
{
- pg = list_entry(l, struct page_info, list);
+ sp = list_entry(l, struct shadow_page_info, list);
if ( d->vcpu[0] != NULL )
- shadow_unhook_mappings(d->vcpu[0], page_to_mfn(pg));
+ shadow_unhook_mappings(d->vcpu[0], shadow_page_to_mfn(sp));
}
d->arch.shadow.fault_count = 0;
diff -r a8d2b1393b76 -r 6f0d8434d23f xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c Tue Nov 21 18:09:23 2006 -0800
+++ b/xen/arch/x86/mm/shadow/multi.c Thu Nov 23 17:40:28 2006 +0000
@@ -100,13 +100,12 @@ get_fl1_shadow_status(struct vcpu *v, gf
get_fl1_shadow_status(struct vcpu *v, gfn_t gfn)
/* Look for FL1 shadows in the hash table */
{
- mfn_t smfn = shadow_hash_lookup(v, gfn_x(gfn),
- PGC_SH_fl1_shadow >> PGC_SH_type_shift);
+ mfn_t smfn = shadow_hash_lookup(v, gfn_x(gfn), SH_type_fl1_shadow);
if ( unlikely(shadow_mode_log_dirty(v->domain) && valid_mfn(smfn)) )
{
- struct page_info *page = mfn_to_page(smfn);
- if ( !(page->count_info & PGC_SH_log_dirty) )
+ struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
+ if ( !(sp->logdirty) )
shadow_convert_to_log_dirty(v, smfn);
}
@@ -117,14 +116,13 @@ get_shadow_status(struct vcpu *v, mfn_t
get_shadow_status(struct vcpu *v, mfn_t gmfn, u32 shadow_type)
/* Look for shadows in the hash table */
{
- mfn_t smfn = shadow_hash_lookup(v, mfn_x(gmfn),
- shadow_type >> PGC_SH_type_shift);
+ mfn_t smfn = shadow_hash_lookup(v, mfn_x(gmfn), shadow_type);
perfc_incrc(shadow_get_shadow_status);
if ( unlikely(shadow_mode_log_dirty(v->domain) && valid_mfn(smfn)) )
{
- struct page_info *page = mfn_to_page(smfn);
- if ( !(page->count_info & PGC_SH_log_dirty) )
+ struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
+ if ( !(sp->logdirty) )
shadow_convert_to_log_dirty(v, smfn);
}
@@ -136,16 +134,15 @@ set_fl1_shadow_status(struct vcpu *v, gf
/* Put an FL1 shadow into the hash table */
{
SHADOW_PRINTK("gfn=%"SH_PRI_gfn", type=%08x, smfn=%05lx\n",
- gfn_x(gfn), PGC_SH_fl1_shadow, mfn_x(smfn));
+ gfn_x(gfn), SH_type_fl1_shadow, mfn_x(smfn));
if ( unlikely(shadow_mode_log_dirty(v->domain)) )
// mark this shadow as a log dirty shadow...
- set_bit(_PGC_SH_log_dirty, &mfn_to_page(smfn)->count_info);
+ mfn_to_shadow_page(smfn)->logdirty = 1;
else
- clear_bit(_PGC_SH_log_dirty, &mfn_to_page(smfn)->count_info);
-
- shadow_hash_insert(v, gfn_x(gfn),
- PGC_SH_fl1_shadow >> PGC_SH_type_shift, smfn);
+ mfn_to_shadow_page(smfn)->logdirty = 0;
+
+ shadow_hash_insert(v, gfn_x(gfn), SH_type_fl1_shadow, smfn);
}
static inline void
@@ -161,15 +158,14 @@ set_shadow_status(struct vcpu *v, mfn_t
if ( unlikely(shadow_mode_log_dirty(d)) )
// mark this shadow as a log dirty shadow...
- set_bit(_PGC_SH_log_dirty, &mfn_to_page(smfn)->count_info);
+ mfn_to_shadow_page(smfn)->logdirty = 1;
else
- clear_bit(_PGC_SH_log_dirty, &mfn_to_page(smfn)->count_info);
+ mfn_to_shadow_page(smfn)->logdirty = 0;
res = get_page(mfn_to_page(gmfn), d);
ASSERT(res == 1);
- shadow_hash_insert(v, mfn_x(gmfn), shadow_type >> PGC_SH_type_shift,
- smfn);
+ shadow_hash_insert(v, mfn_x(gmfn), shadow_type, smfn);
}
static inline void
@@ -177,9 +173,8 @@ delete_fl1_shadow_status(struct vcpu *v,
/* Remove a shadow from the hash table */
{
SHADOW_PRINTK("gfn=%"SH_PRI_gfn", type=%08x, smfn=%05lx\n",
- gfn_x(gfn), PGC_SH_fl1_shadow, mfn_x(smfn));
- shadow_hash_delete(v, gfn_x(gfn),
- PGC_SH_fl1_shadow >> PGC_SH_type_shift, smfn);
+ gfn_x(gfn), SH_type_fl1_shadow, mfn_x(smfn));
+ shadow_hash_delete(v, gfn_x(gfn), SH_type_fl1_shadow, smfn);
}
static inline void
@@ -189,8 +184,7 @@ delete_shadow_status(struct vcpu *v, mfn
SHADOW_PRINTK("d=%d, v=%d, gmfn=%05lx, type=%08x, smfn=%05lx\n",
v->domain->domain_id, v->vcpu_id,
mfn_x(gmfn), shadow_type, mfn_x(smfn));
- shadow_hash_delete(v, mfn_x(gmfn),
- shadow_type >> PGC_SH_type_shift, smfn);
+ shadow_hash_delete(v, mfn_x(gmfn), shadow_type, smfn);
put_page(mfn_to_page(gmfn));
}
@@ -394,27 +388,27 @@ static void sh_audit_gw(struct vcpu *v,
#if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
if ( valid_mfn(gw->l4mfn)
&& valid_mfn((smfn = get_shadow_status(v, gw->l4mfn,
- PGC_SH_l4_shadow))) )
+ SH_type_l4_shadow))) )
(void) sh_audit_l4_table(v, smfn, _mfn(INVALID_MFN));
if ( valid_mfn(gw->l3mfn)
&& valid_mfn((smfn = get_shadow_status(v, gw->l3mfn,
- PGC_SH_l3_shadow))) )
+ SH_type_l3_shadow))) )
(void) sh_audit_l3_table(v, smfn, _mfn(INVALID_MFN));
#endif /* PAE or 64... */
if ( valid_mfn(gw->l2mfn) )
{
if ( valid_mfn((smfn = get_shadow_status(v, gw->l2mfn,
- PGC_SH_l2_shadow))) )
+ SH_type_l2_shadow))) )
(void) sh_audit_l2_table(v, smfn, _mfn(INVALID_MFN));
#if GUEST_PAGING_LEVELS == 3
if ( valid_mfn((smfn = get_shadow_status(v, gw->l2mfn,
- PGC_SH_l2h_shadow))) )
+ SH_type_l2h_shadow))) )
(void) sh_audit_l2_table(v, smfn, _mfn(INVALID_MFN));
#endif
}
if ( valid_mfn(gw->l1mfn)
&& valid_mfn((smfn = get_shadow_status(v, gw->l1mfn,
- PGC_SH_l1_shadow))) )
+ SH_type_l1_shadow))) )
(void) sh_audit_l1_table(v, smfn, _mfn(INVALID_MFN));
else if ( gw->l2e
&& (guest_l2e_get_flags(*gw->l2e) & _PAGE_PSE)
@@ -1193,14 +1187,12 @@ static inline void increment_ptr_to_gues
}
/* All kinds of l1: touch all entries */
-#define _SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code) \
+#define _SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code) \
do { \
int _i; \
shadow_l1e_t *_sp = map_shadow_page((_sl1mfn)); \
- ASSERT((mfn_to_page(_sl1mfn)->count_info & PGC_SH_type_mask) \
- == PGC_SH_l1_shadow \
- || (mfn_to_page(_sl1mfn)->count_info & PGC_SH_type_mask) \
- == PGC_SH_fl1_shadow); \
+ ASSERT(mfn_to_shadow_page(_sl1mfn)->type == SH_type_l1_shadow \
+ || mfn_to_shadow_page(_sl1mfn)->type == SH_type_fl1_shadow); \
for ( _i = 0; _i < SHADOW_L1_PAGETABLE_ENTRIES; _i++ ) \
{ \
(_sl1e) = _sp + _i; \
@@ -1214,18 +1206,18 @@ do {
/* 32-bit l1, on PAE or 64-bit shadows: need to walk both pages of shadow */
#if GUEST_PAGING_LEVELS == 2 && SHADOW_PAGING_LEVELS > 2
-#define SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code) \
+#define SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code) \
do { \
int __done = 0; \
- _SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, \
+ _SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, \
({ (__done = _done); }), _code); \
_sl1mfn = _mfn(mfn_x(_sl1mfn) + 1); \
if ( !__done ) \
- _SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, \
+ _SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, \
({ (__done = _done); }), _code); \
} while (0)
#else /* Everything else; l1 shadows are only one page */
-#define SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code) \
+#define SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code) \
_SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code)
#endif
@@ -1233,11 +1225,10 @@ do {
#if GUEST_PAGING_LEVELS == 2 && SHADOW_PAGING_LEVELS > 2
/* 32-bit l2 on PAE/64: four pages, touch every second entry, and avoid Xen */
-#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _xen, _code) \
+#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _xen, _code) \
do { \
int _i, _j, __done = 0; \
- ASSERT((mfn_to_page(_sl2mfn)->count_info & PGC_SH_type_mask) \
- == PGC_SH_l2_32_shadow); \
+ ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_32_shadow); \
for ( _j = 0; _j < 4 && !__done; _j++ ) \
{ \
shadow_l2e_t *_sp = map_shadow_page(_sl2mfn); \
@@ -1260,12 +1251,11 @@ do {
#elif GUEST_PAGING_LEVELS == 2
/* 32-bit on 32-bit: avoid Xen entries */
-#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _xen, _code) \
+#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _xen, _code) \
do { \
int _i; \
shadow_l2e_t *_sp = map_shadow_page((_sl2mfn)); \
- ASSERT((mfn_to_page(_sl2mfn)->count_info & PGC_SH_type_mask) \
- == PGC_SH_l2_32_shadow); \
+ ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_32_shadow); \
for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ ) \
if ( (!(_xen)) \
|| \
@@ -1283,18 +1273,15 @@ do {
#elif GUEST_PAGING_LEVELS == 3
/* PAE: if it's an l2h, don't touch Xen mappings */
-#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _xen, _code) \
+#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _xen, _code) \
do { \
int _i; \
shadow_l2e_t *_sp = map_shadow_page((_sl2mfn)); \
- ASSERT((mfn_to_page(_sl2mfn)->count_info & PGC_SH_type_mask) \
- == PGC_SH_l2_pae_shadow \
- || (mfn_to_page(_sl2mfn)->count_info & PGC_SH_type_mask) \
- == PGC_SH_l2h_pae_shadow); \
+ ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_pae_shadow \
+ || mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2h_pae_shadow);\
for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ ) \
if ( (!(_xen)) \
- || ((mfn_to_page(_sl2mfn)->count_info & PGC_SH_type_mask) \
- != PGC_SH_l2h_pae_shadow) \
+ || mfn_to_shadow_page(_sl2mfn)->type != SH_type_l2h_pae_shadow\
|| ((_i + (3 * SHADOW_L2_PAGETABLE_ENTRIES)) \
< (HYPERVISOR_VIRT_START >> SHADOW_L2_PAGETABLE_SHIFT)) ) \
{ \
@@ -1310,12 +1297,11 @@ do {
#else
/* 64-bit l2: touch all entries */
-#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _xen, _code) \
+#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _xen, _code) \
do { \
int _i; \
shadow_l2e_t *_sp = map_shadow_page((_sl2mfn)); \
- ASSERT((mfn_to_page(_sl2mfn)->count_info & PGC_SH_type_mask) \
- == PGC_SH_l2_64_shadow); \
+ ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_64_shadow); \
for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ ) \
{ \
(_sl2e) = _sp + _i; \
@@ -1332,12 +1318,11 @@ do {
#if GUEST_PAGING_LEVELS == 4
/* 64-bit l3: touch all entries */
-#define SHADOW_FOREACH_L3E(_sl3mfn, _sl3e, _gl3p, _done, _code) \
+#define SHADOW_FOREACH_L3E(_sl3mfn, _sl3e, _gl3p, _done, _code) \
do { \
int _i; \
shadow_l3e_t *_sp = map_shadow_page((_sl3mfn)); \
- ASSERT((mfn_to_page(_sl3mfn)->count_info & PGC_SH_type_mask) \
- == PGC_SH_l3_64_shadow); \
+ ASSERT(mfn_to_shadow_page(_sl3mfn)->type == SH_type_l3_64_shadow); \
for ( _i = 0; _i < SHADOW_L3_PAGETABLE_ENTRIES; _i++ ) \
{ \
(_sl3e) = _sp + _i; \
@@ -1350,12 +1335,11 @@ do {
} while (0)
/* 64-bit l4: avoid Xen mappings */
-#define SHADOW_FOREACH_L4E(_sl4mfn, _sl4e, _gl4p, _done, _xen, _code) \
+#define SHADOW_FOREACH_L4E(_sl4mfn, _sl4e, _gl4p, _done, _xen, _code) \
do { \
int _i; \
shadow_l4e_t *_sp = map_shadow_page((_sl4mfn)); \
- ASSERT((mfn_to_page(_sl4mfn)->count_info & PGC_SH_type_mask) \
- == PGC_SH_l4_64_shadow); \
+ ASSERT(mfn_to_shadow_page(_sl4mfn)->type == SH_type_l4_64_shadow); \
for ( _i = 0; _i < SHADOW_L4_PAGETABLE_ENTRIES; _i++ ) \
{ \
if ( (!(_xen)) || is_guest_l4_slot(_i) ) \
@@ -1556,12 +1540,12 @@ sh_make_shadow(struct vcpu *v, mfn_t gmf
SHADOW_DEBUG(MAKE_SHADOW, "(%05lx, %u)=>%05lx\n",
mfn_x(gmfn), shadow_type, mfn_x(smfn));
- if ( shadow_type != PGC_SH_l2_32_shadow
- && shadow_type != PGC_SH_l2_pae_shadow
- && shadow_type != PGC_SH_l2h_pae_shadow
- && shadow_type != PGC_SH_l4_64_shadow )
+ if ( shadow_type != SH_type_l2_32_shadow
+ && shadow_type != SH_type_l2_pae_shadow
+ && shadow_type != SH_type_l2h_pae_shadow
+ && shadow_type != SH_type_l4_64_shadow )
/* Lower-level shadow, not yet linked form a higher level */
- mfn_to_page(smfn)->up = 0;
+ mfn_to_shadow_page(smfn)->up = 0;
// Create the Xen mappings...
if ( !shadow_mode_external(v->domain) )
@@ -1569,15 +1553,15 @@ sh_make_shadow(struct vcpu *v, mfn_t gmf
switch (shadow_type)
{
#if CONFIG_PAGING_LEVELS == 4 && GUEST_PAGING_LEVELS == 4
- case PGC_SH_l4_shadow:
+ case SH_type_l4_shadow:
sh_install_xen_entries_in_l4(v, gmfn, smfn); break;
#endif
#if CONFIG_PAGING_LEVELS == 3 && GUEST_PAGING_LEVELS == 3
- case PGC_SH_l2h_shadow:
+ case SH_type_l2h_shadow:
sh_install_xen_entries_in_l2h(v, smfn); break;
#endif
#if CONFIG_PAGING_LEVELS == 2 && GUEST_PAGING_LEVELS == 2
- case PGC_SH_l2_shadow:
+ case SH_type_l2_shadow:
sh_install_xen_entries_in_l2(v, gmfn, smfn); break;
#endif
default: /* Do nothing */ break;
@@ -1594,7 +1578,7 @@ static mfn_t
static mfn_t
make_fl1_shadow(struct vcpu *v, gfn_t gfn)
{
- mfn_t smfn = shadow_alloc(v->domain, PGC_SH_fl1_shadow,
+ mfn_t smfn = shadow_alloc(v->domain, SH_type_fl1_shadow,
(unsigned long) gfn_x(gfn));
SHADOW_DEBUG(MAKE_SHADOW, "(%" SH_PRI_gfn ")=>%" SH_PRI_mfn "\n",
@@ -1616,7 +1600,7 @@ sh_make_monitor_table(struct vcpu *v)
{
struct domain *d = v->domain;
mfn_t m4mfn;
- m4mfn = shadow_alloc(d, PGC_SH_monitor_table, 0);
+ m4mfn = shadow_alloc(d, SH_type_monitor_table, 0);
sh_install_xen_entries_in_l4(v, m4mfn, m4mfn);
/* Remember the level of this table */
mfn_to_page(m4mfn)->shadow_flags = 4;
@@ -1626,7 +1610,7 @@ sh_make_monitor_table(struct vcpu *v)
{
mfn_t m3mfn;
l4_pgentry_t *l4e;
- m3mfn = shadow_alloc(d, PGC_SH_monitor_table, 0);
+ m3mfn = shadow_alloc(d, SH_type_monitor_table, 0);
mfn_to_page(m3mfn)->shadow_flags = 3;
l4e = sh_map_domain_page(m4mfn);
l4e[0] = l4e_from_pfn(mfn_x(m3mfn), __PAGE_HYPERVISOR);
@@ -1645,13 +1629,13 @@ sh_make_monitor_table(struct vcpu *v)
l2_pgentry_t *l2e;
int i;
- m3mfn = shadow_alloc(d, PGC_SH_monitor_table, 0);
+ m3mfn = shadow_alloc(d, SH_type_monitor_table, 0);
/* Remember the level of this table */
mfn_to_page(m3mfn)->shadow_flags = 3;
// Install a monitor l2 table in slot 3 of the l3 table.
// This is used for all Xen entries, including linear maps
- m2mfn = shadow_alloc(d, PGC_SH_monitor_table, 0);
+ m2mfn = shadow_alloc(d, SH_type_monitor_table, 0);
mfn_to_page(m2mfn)->shadow_flags = 2;
l3e = sh_map_domain_page(m3mfn);
l3e[3] = l3e_from_pfn(mfn_x(m2mfn), _PAGE_PRESENT);
@@ -1675,7 +1659,7 @@ sh_make_monitor_table(struct vcpu *v)
{
struct domain *d = v->domain;
mfn_t m2mfn;
- m2mfn = shadow_alloc(d, PGC_SH_monitor_table, 0);
+ m2mfn = shadow_alloc(d, SH_type_monitor_table, 0);
sh_install_xen_entries_in_l2(v, m2mfn, m2mfn);
/* Remember the level of this table */
mfn_to_page(m2mfn)->shadow_flags = 2;
@@ -1732,11 +1716,11 @@ static shadow_l3e_t * shadow_get_and_cre
int r;
shadow_l4e_t new_sl4e;
/* No l3 shadow installed: find and install it. */
- *sl3mfn = get_shadow_status(v, gw->l3mfn, PGC_SH_l3_shadow);
+ *sl3mfn = get_shadow_status(v, gw->l3mfn, SH_type_l3_shadow);
if ( !valid_mfn(*sl3mfn) )
{
/* No l3 shadow of this page exists at all: make one. */
- *sl3mfn = sh_make_shadow(v, gw->l3mfn, PGC_SH_l3_shadow);
+ *sl3mfn = sh_make_shadow(v, gw->l3mfn, SH_type_l3_shadow);
}
/* Install the new sl3 table in the sl4e */
l4e_propagate_from_guest(v, gw->l4e, gw->l4mfn,
@@ -1772,11 +1756,11 @@ static shadow_l2e_t * shadow_get_and_cre
int r;
shadow_l3e_t new_sl3e;
/* No l2 shadow installed: find and install it. */
- *sl2mfn = get_shadow_status(v, gw->l2mfn, PGC_SH_l2_shadow);
+ *sl2mfn = get_shadow_status(v, gw->l2mfn, SH_type_l2_shadow);
if ( !valid_mfn(*sl2mfn) )
{
/* No l2 shadow of this page exists at all: make one. */
- *sl2mfn = sh_make_shadow(v, gw->l2mfn, PGC_SH_l2_shadow);
+ *sl2mfn = sh_make_shadow(v, gw->l2mfn, SH_type_l2_shadow);
}
/* Install the new sl2 table in the sl3e */
l3e_propagate_from_guest(v, gw->l3e, gw->l3mfn,
@@ -1852,11 +1836,11 @@ static shadow_l1e_t * shadow_get_and_cre
{
/* Shadowing an actual guest l1 table */
if ( !valid_mfn(gw->l2mfn) ) return NULL; /* No guest page. */
- *sl1mfn = get_shadow_status(v, gw->l1mfn, PGC_SH_l1_shadow);
+ *sl1mfn = get_shadow_status(v, gw->l1mfn, SH_type_l1_shadow);
if ( !valid_mfn(*sl1mfn) )
{
/* No l1 shadow of this page exists at all: make one. */
- *sl1mfn = sh_make_shadow(v, gw->l1mfn, PGC_SH_l1_shadow);
+ *sl1mfn = sh_make_shadow(v, gw->l1mfn, SH_type_l1_shadow);
}
}
/* Install the new sl1 table in the sl2e */
@@ -1891,20 +1875,20 @@ void sh_destroy_l4_shadow(struct vcpu *v
void sh_destroy_l4_shadow(struct vcpu *v, mfn_t smfn)
{
shadow_l4e_t *sl4e;
- u32 t = mfn_to_page(smfn)->count_info & PGC_SH_type_mask;
+ u32 t = mfn_to_shadow_page(smfn)->type;
mfn_t gmfn, sl4mfn;
int xen_mappings;
SHADOW_DEBUG(DESTROY_SHADOW,
"%s(%05lx)\n", __func__, mfn_x(smfn));
- ASSERT(t == PGC_SH_l4_shadow);
+ ASSERT(t == SH_type_l4_shadow);
/* Record that the guest page isn't shadowed any more (in this type) */
- gmfn = _mfn(mfn_to_page(smfn)->u.inuse.type_info);
+ gmfn = _mfn(mfn_to_shadow_page(smfn)->backpointer);
delete_shadow_status(v, gmfn, t, smfn);
shadow_demote(v, gmfn, t);
/* Take this shadow off the list of root shadows */
- list_del_init(&mfn_to_page(smfn)->list);
+ list_del_init(&mfn_to_shadow_page(smfn)->list);
/* Decrement refcounts of all the old entries */
xen_mappings = (!shadow_mode_external(v->domain));
@@ -1925,15 +1909,15 @@ void sh_destroy_l3_shadow(struct vcpu *v
void sh_destroy_l3_shadow(struct vcpu *v, mfn_t smfn)
{
shadow_l3e_t *sl3e;
- u32 t = mfn_to_page(smfn)->count_info & PGC_SH_type_mask;
+ u32 t = mfn_to_shadow_page(smfn)->type;
mfn_t gmfn, sl3mfn;
SHADOW_DEBUG(DESTROY_SHADOW,
"%s(%05lx)\n", __func__, mfn_x(smfn));
- ASSERT(t == PGC_SH_l3_shadow);
+ ASSERT(t == SH_type_l3_shadow);
/* Record that the guest page isn't shadowed any more (in this type) */
- gmfn = _mfn(mfn_to_page(smfn)->u.inuse.type_info);
+ gmfn = _mfn(mfn_to_shadow_page(smfn)->backpointer);
delete_shadow_status(v, gmfn, t, smfn);
shadow_demote(v, gmfn, t);
@@ -1955,22 +1939,22 @@ void sh_destroy_l2_shadow(struct vcpu *v
void sh_destroy_l2_shadow(struct vcpu *v, mfn_t smfn)
{
shadow_l2e_t *sl2e;
- u32 t = mfn_to_page(smfn)->count_info & PGC_SH_type_mask;
+ u32 t = mfn_to_shadow_page(smfn)->type;
mfn_t gmfn, sl2mfn;
int xen_mappings;
SHADOW_DEBUG(DESTROY_SHADOW,
"%s(%05lx)\n", __func__, mfn_x(smfn));
- ASSERT(t == PGC_SH_l2_shadow
- || t == PGC_SH_l2h_pae_shadow);
+ ASSERT(t == SH_type_l2_shadow
+ || t == SH_type_l2h_pae_shadow);
/* Record that the guest page isn't shadowed any more (in this type) */
- gmfn = _mfn(mfn_to_page(smfn)->u.inuse.type_info);
+ gmfn = _mfn(mfn_to_shadow_page(smfn)->backpointer);
delete_shadow_status(v, gmfn, t, smfn);
shadow_demote(v, gmfn, t);
#if (GUEST_PAGING_LEVELS == 2) || (GUEST_PAGING_LEVELS == 3)
/* Take this shadow off the list of root shadows */
- list_del_init(&mfn_to_page(smfn)->list);
+ list_del_init(&mfn_to_shadow_page(smfn)->list);
#endif
/* Decrement refcounts of all the old entries */
@@ -1978,7 +1962,7 @@ void sh_destroy_l2_shadow(struct vcpu *v
xen_mappings = (!shadow_mode_external(v->domain) &&
((GUEST_PAGING_LEVELS == 2) ||
((GUEST_PAGING_LEVELS == 3) &&
- (t == PGC_SH_l2h_pae_shadow))));
+ (t == SH_type_l2h_pae_shadow))));
SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, xen_mappings, {
if ( shadow_l2e_get_flags(*sl2e) & _PAGE_PRESENT )
sh_put_ref(v, shadow_l2e_get_mfn(*sl2e),
@@ -1994,21 +1978,21 @@ void sh_destroy_l1_shadow(struct vcpu *v
{
struct domain *d = v->domain;
shadow_l1e_t *sl1e;
- u32 t = mfn_to_page(smfn)->count_info & PGC_SH_type_mask;
+ u32 t = mfn_to_shadow_page(smfn)->type;
SHADOW_DEBUG(DESTROY_SHADOW,
"%s(%05lx)\n", __func__, mfn_x(smfn));
- ASSERT(t == PGC_SH_l1_shadow || t == PGC_SH_fl1_shadow);
+ ASSERT(t == SH_type_l1_shadow || t == SH_type_fl1_shadow);
/* Record that the guest page isn't shadowed any more (in this type) */
- if ( t == PGC_SH_fl1_shadow )
- {
- gfn_t gfn = _gfn(mfn_to_page(smfn)->u.inuse.type_info);
+ if ( t == SH_type_fl1_shadow )
+ {
+ gfn_t gfn = _gfn(mfn_to_shadow_page(smfn)->backpointer);
delete_fl1_shadow_status(v, gfn, smfn);
}
else
{
- mfn_t gmfn = _mfn(mfn_to_page(smfn)->u.inuse.type_info);
+ mfn_t gmfn = _mfn(mfn_to_shadow_page(smfn)->backpointer);
delete_shadow_status(v, gmfn, t, smfn);
shadow_demote(v, gmfn, t);
}
@@ -2032,8 +2016,7 @@ void sh_destroy_monitor_table(struct vcp
void sh_destroy_monitor_table(struct vcpu *v, mfn_t mmfn)
{
struct domain *d = v->domain;
- ASSERT((mfn_to_page(mmfn)->count_info & PGC_SH_type_mask)
- == PGC_SH_monitor_table);
+ ASSERT(mfn_to_shadow_page(mmfn)->type == SH_type_monitor_table);
#if (CONFIG_PAGING_LEVELS == 4) && (SHADOW_PAGING_LEVELS != 4)
/* Need to destroy the l3 monitor page in slot 0 too */
@@ -2129,7 +2112,7 @@ static int validate_gl4e(struct vcpu *v,
gfn_t gl3gfn = guest_l4e_get_gfn(*new_gl4e);
mfn_t gl3mfn = vcpu_gfn_to_mfn(v, gl3gfn);
if ( valid_mfn(gl3mfn) )
- sl3mfn = get_shadow_status(v, gl3mfn, PGC_SH_l3_shadow);
+ sl3mfn = get_shadow_status(v, gl3mfn, SH_type_l3_shadow);
else
result |= SHADOW_SET_ERROR;
}
@@ -2181,7 +2164,7 @@ static int validate_gl3e(struct vcpu *v,
gfn_t gl2gfn = guest_l3e_get_gfn(*new_gl3e);
mfn_t gl2mfn = vcpu_gfn_to_mfn(v, gl2gfn);
if ( valid_mfn(gl2mfn) )
- sl2mfn = get_shadow_status(v, gl2mfn, PGC_SH_l2_shadow);
+ sl2mfn = get_shadow_status(v, gl2mfn, SH_type_l2_shadow);
else
result |= SHADOW_SET_ERROR;
}
@@ -2225,7 +2208,7 @@ static int validate_gl2e(struct vcpu *v,
{
mfn_t gl1mfn = vcpu_gfn_to_mfn(v, gl1gfn);
if ( valid_mfn(gl1mfn) )
- sl1mfn = get_shadow_status(v, gl1mfn, PGC_SH_l1_shadow);
+ sl1mfn = get_shadow_status(v, gl1mfn, SH_type_l1_shadow);
else
result |= SHADOW_SET_ERROR;
}
@@ -2246,8 +2229,7 @@ static int validate_gl2e(struct vcpu *v,
#if SHADOW_PAGING_LEVELS == 3
reserved_xen_slot =
- (((mfn_to_page(sl2mfn)->count_info & PGC_SH_type_mask)
- == PGC_SH_l2h_pae_shadow) &&
+ ((mfn_to_shadow_page(sl2mfn)->type == SH_type_l2h_pae_shadow) &&
(shadow_index
>= (L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1))));
#else /* SHADOW_PAGING_LEVELS == 2 */
@@ -2365,7 +2347,7 @@ sh_map_and_validate_gl4e(struct vcpu *v,
{
#if GUEST_PAGING_LEVELS >= 4
return sh_map_and_validate(v, gl4mfn, new_gl4p, size,
- PGC_SH_l4_shadow,
+ SH_type_l4_shadow,
shadow_l4_index,
validate_gl4e);
#else // ! GUEST_PAGING_LEVELS >= 4
@@ -2381,7 +2363,7 @@ sh_map_and_validate_gl3e(struct vcpu *v,
{
#if GUEST_PAGING_LEVELS >= 4
return sh_map_and_validate(v, gl3mfn, new_gl3p, size,
- PGC_SH_l3_shadow,
+ SH_type_l3_shadow,
shadow_l3_index,
validate_gl3e);
#else // ! GUEST_PAGING_LEVELS >= 4
@@ -2396,7 +2378,7 @@ sh_map_and_validate_gl2e(struct vcpu *v,
void *new_gl2p, u32 size)
{
return sh_map_and_validate(v, gl2mfn, new_gl2p, size,
- PGC_SH_l2_shadow,
+ SH_type_l2_shadow,
shadow_l2_index,
validate_gl2e);
}
@@ -2407,7 +2389,7 @@ sh_map_and_validate_gl2he(struct vcpu *v
{
#if GUEST_PAGING_LEVELS == 3
return sh_map_and_validate(v, gl2mfn, new_gl2p, size,
- PGC_SH_l2h_shadow,
+ SH_type_l2h_shadow,
shadow_l2_index,
validate_gl2e);
#else /* Non-PAE guests don't have different kinds of l2 table */
@@ -2422,7 +2404,7 @@ sh_map_and_validate_gl1e(struct vcpu *v,
void *new_gl1p, u32 size)
{
return sh_map_and_validate(v, gl1mfn, new_gl1p, size,
- PGC_SH_l1_shadow,
+ SH_type_l1_shadow,
shadow_l1_index,
validate_gl1e);
}
@@ -2923,8 +2905,8 @@ sh_invlpg(struct vcpu *v, unsigned long
// If so, then we'll need to flush the entire TLB (because that's
// easier than invalidating all of the individual 4K pages).
//
- if ( (mfn_to_page(shadow_l2e_get_mfn(sl2e))->count_info &
- PGC_SH_type_mask) == PGC_SH_fl1_shadow )
+ if ( mfn_to_shadow_page(shadow_l2e_get_mfn(sl2e))->type
+ == SH_type_fl1_shadow )
{
local_flush_tlb();
return 0;
@@ -3284,8 +3266,9 @@ sh_set_toplevel_shadow(struct vcpu *v,
if ( valid_mfn(smfn) )
{
/* Pull this root shadow to the front of the list of roots. */
- list_del(&mfn_to_page(smfn)->list);
- list_add(&mfn_to_page(smfn)->list, &d->arch.shadow.toplevel_shadows);
+ struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
+ list_del(&sp->list);
+ list_add(&sp->list, &d->arch.shadow.toplevel_shadows);
}
else
{
@@ -3293,7 +3276,8 @@ sh_set_toplevel_shadow(struct vcpu *v,
shadow_prealloc(d, SHADOW_MAX_ORDER);
/* Shadow the page. */
smfn = sh_make_shadow(v, gmfn, root_type);
- list_add(&mfn_to_page(smfn)->list, &d->arch.shadow.toplevel_shadows);
+ list_add(&mfn_to_shadow_page(smfn)->list,
+ &d->arch.shadow.toplevel_shadows);
}
ASSERT(valid_mfn(smfn));
@@ -3444,7 +3428,7 @@ sh_update_cr3(struct vcpu *v)
#if GUEST_PAGING_LEVELS == 2
if ( shadow_remove_write_access(v, gmfn, 2, 0) != 0 )
flush_tlb_mask(v->domain->domain_dirty_cpumask);
- sh_set_toplevel_shadow(v, 0, gmfn, PGC_SH_l2_shadow);
+ sh_set_toplevel_shadow(v, 0, gmfn, SH_type_l2_shadow);
#elif GUEST_PAGING_LEVELS == 3
/* PAE guests have four shadow_table entries, based on the
* current values of the guest's four l3es. */
@@ -3473,15 +3457,15 @@ sh_update_cr3(struct vcpu *v)
gl2gfn = guest_l3e_get_gfn(gl3e[i]);
gl2mfn = vcpu_gfn_to_mfn(v, gl2gfn);
sh_set_toplevel_shadow(v, i, gl2mfn, (i == 3)
- ? PGC_SH_l2h_shadow
- : PGC_SH_l2_shadow);
+ ? SH_type_l2h_shadow
+ : SH_type_l2_shadow);
}
}
}
#elif GUEST_PAGING_LEVELS == 4
if ( shadow_remove_write_access(v, gmfn, 4, 0) != 0 )
flush_tlb_mask(v->domain->domain_dirty_cpumask);
- sh_set_toplevel_shadow(v, 0, gmfn, PGC_SH_l4_shadow);
+ sh_set_toplevel_shadow(v, 0, gmfn, SH_type_l4_shadow);
#else
#error This should never happen
#endif
@@ -3667,19 +3651,19 @@ void sh_clear_shadow_entry(struct vcpu *
void sh_clear_shadow_entry(struct vcpu *v, void *ep, mfn_t smfn)
/* Blank out a single shadow entry */
{
- switch (mfn_to_page(smfn)->count_info & PGC_SH_type_mask)
- {
- case PGC_SH_l1_shadow:
+ switch ( mfn_to_shadow_page(smfn)->type )
+ {
+ case SH_type_l1_shadow:
shadow_set_l1e(v, ep, shadow_l1e_empty(), smfn); break;
- case PGC_SH_l2_shadow:
+ case SH_type_l2_shadow:
#if GUEST_PAGING_LEVELS == 3
- case PGC_SH_l2h_shadow:
+ case SH_type_l2h_shadow:
#endif
shadow_set_l2e(v, ep, shadow_l2e_empty(), smfn); break;
#if GUEST_PAGING_LEVELS >= 4
- case PGC_SH_l3_shadow:
+ case SH_type_l3_shadow:
shadow_set_l3e(v, ep, shadow_l3e_empty(), smfn); break;
- case PGC_SH_l4_shadow:
+ case SH_type_l4_shadow:
shadow_set_l4e(v, ep, shadow_l4e_empty(), smfn); break;
#endif
default: BUG(); /* Called with the wrong kind of shadow. */
@@ -3703,7 +3687,7 @@ int sh_remove_l1_shadow(struct vcpu *v,
&& (mfn_x(shadow_l2e_get_mfn(*sl2e)) == mfn_x(sl1mfn)) )
{
shadow_set_l2e(v, sl2e, shadow_l2e_empty(), sl2mfn);
- if ( (mfn_to_page(sl1mfn)->count_info & PGC_SH_type_mask) == 0 )
+ if ( mfn_to_shadow_page(sl1mfn)->type == 0 )
/* This breaks us cleanly out of the FOREACH macro */
done = 1;
}
@@ -3726,7 +3710,7 @@ int sh_remove_l2_shadow(struct vcpu *v,
&& (mfn_x(shadow_l3e_get_mfn(*sl3e)) == mfn_x(sl2mfn)) )
{
shadow_set_l3e(v, sl3e, shadow_l3e_empty(), sl3mfn);
- if ( (mfn_to_page(sl2mfn)->count_info & PGC_SH_type_mask) == 0 )
+ if ( mfn_to_shadow_page(sl2mfn)->type == 0 )
/* This breaks us cleanly out of the FOREACH macro */
done = 1;
}
@@ -3748,7 +3732,7 @@ int sh_remove_l3_shadow(struct vcpu *v,
&& (mfn_x(shadow_l4e_get_mfn(*sl4e)) == mfn_x(sl3mfn)) )
{
shadow_set_l4e(v, sl4e, shadow_l4e_empty(), sl4mfn);
- if ( (mfn_to_page(sl3mfn)->count_info & PGC_SH_type_mask) == 0 )
+ if ( mfn_to_shadow_page(sl3mfn)->type == 0 )
/* This breaks us cleanly out of the FOREACH macro */
done = 1;
}
@@ -3986,7 +3970,7 @@ int sh_audit_l1_table(struct vcpu *v, mf
int done = 0;
/* Follow the backpointer */
- gl1mfn = _mfn(mfn_to_page(sl1mfn)->u.inuse.type_info);
+ gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->backpointer);
gl1e = gp = sh_map_domain_page(gl1mfn);
SHADOW_FOREACH_L1E(sl1mfn, sl1e, &gl1e, done, {
@@ -4068,7 +4052,7 @@ int sh_audit_l2_table(struct vcpu *v, mf
#endif
/* Follow the backpointer */
- gl2mfn = _mfn(mfn_to_page(sl2mfn)->u.inuse.type_info);
+ gl2mfn = _mfn(mfn_to_shadow_page(sl2mfn)->backpointer);
gl2e = gp = sh_map_domain_page(gl2mfn);
SHADOW_FOREACH_L2E(sl2mfn, sl2e, &gl2e, done, xen_mappings, {
@@ -4083,7 +4067,7 @@ int sh_audit_l2_table(struct vcpu *v, mf
gmfn = (guest_l2e_get_flags(*gl2e) & _PAGE_PSE)
? get_fl1_shadow_status(v, gfn)
: get_shadow_status(v, audit_gfn_to_mfn(v, gfn, gl2mfn),
- PGC_SH_l1_shadow);
+ SH_type_l1_shadow);
if ( mfn_x(gmfn) != mfn_x(mfn) )
AUDIT_FAIL(2, "bad translation: gfn %" SH_PRI_gfn
" (--> %" SH_PRI_mfn ")"
@@ -4109,7 +4093,7 @@ int sh_audit_l3_table(struct vcpu *v, mf
int done = 0;
/* Follow the backpointer */
- gl3mfn = _mfn(mfn_to_page(sl3mfn)->u.inuse.type_info);
+ gl3mfn = _mfn(mfn_to_shadow_page(sl3mfn)->backpointer);
gl3e = gp = sh_map_domain_page(gl3mfn);
SHADOW_FOREACH_L3E(sl3mfn, sl3e, &gl3e, done, {
@@ -4125,8 +4109,8 @@ int sh_audit_l3_table(struct vcpu *v, mf
(GUEST_PAGING_LEVELS == 3
&& !shadow_mode_external(v->domain)
&& (guest_index(gl3e) % 4) == 3)
- ? PGC_SH_l2h_pae_shadow
- : PGC_SH_l2_shadow);
+ ? SH_type_l2h_pae_shadow
+ : SH_type_l2_shadow);
if ( mfn_x(gmfn) != mfn_x(mfn) )
AUDIT_FAIL(3, "bad translation: gfn %" SH_PRI_gfn
" --> %" SH_PRI_mfn " != mfn %" SH_PRI_mfn,
@@ -4148,7 +4132,7 @@ int sh_audit_l4_table(struct vcpu *v, mf
int xen_mappings = !shadow_mode_external(v->domain);
/* Follow the backpointer */
- gl4mfn = _mfn(mfn_to_page(sl4mfn)->u.inuse.type_info);
+ gl4mfn = _mfn(mfn_to_shadow_page(sl4mfn)->backpointer);
gl4e = gp = sh_map_domain_page(gl4mfn);
SHADOW_FOREACH_L4E(sl4mfn, sl4e, &gl4e, done, xen_mappings,
{
@@ -4161,7 +4145,7 @@ int sh_audit_l4_table(struct vcpu *v, mf
gfn = guest_l4e_get_gfn(*gl4e);
mfn = shadow_l4e_get_mfn(*sl4e);
gmfn = get_shadow_status(v, audit_gfn_to_mfn(v, gfn, gl4mfn),
- PGC_SH_l3_shadow);
+ SH_type_l3_shadow);
if ( mfn_x(gmfn) != mfn_x(mfn) )
AUDIT_FAIL(4, "bad translation: gfn %" SH_PRI_gfn
" --> %" SH_PRI_mfn " != mfn %" SH_PRI_mfn,
diff -r a8d2b1393b76 -r 6f0d8434d23f xen/arch/x86/mm/shadow/private.h
--- a/xen/arch/x86/mm/shadow/private.h Tue Nov 21 18:09:23 2006 -0800
+++ b/xen/arch/x86/mm/shadow/private.h Thu Nov 23 17:40:28 2006 +0000
@@ -129,6 +129,97 @@ extern void shadow_audit_p2m(struct doma
#undef SHADOW_LEVELS
#endif /* CONFIG_PAGING_LEVELS == 4 */
+/******************************************************************************
+ * Page metadata for shadow pages.
+ */
+
+struct shadow_page_info
+{
+ union {
+ /* When in use, guest page we're a shadow of */
+ unsigned long backpointer;
+ /* When free, order of the freelist we're on */
+ unsigned int order;
+ };
+ union {
+ /* When in use, next shadow in this hash chain */
+ struct shadow_page_info *next_shadow;
+ /* When free, TLB flush time when freed */
+ u32 tlbflush_timestamp;
+ };
+ struct {
+ unsigned int type:4; /* What kind of shadow is this? */
+ unsigned int pinned:1; /* Is the shadow pinned? */
+ unsigned int logdirty:1; /* Was it made in log-dirty mode? */
+ unsigned int count:26; /* Reference count */
+ u32 mbz; /* Must be zero: this is where the owner
+ * field lives in a non-shadow page */
+ } __attribute__((packed));
+ union {
+ /* For unused shadow pages, a list of pages of this order;
+ * for top-level shadows, a list of other top-level shadows */
+ struct list_head list;
+ /* For lower-level shadows, a higher entry that points at us */
+ paddr_t up;
+ };
+};
+
+/* The structure above *must* be the same size as a struct page_info
+ * from mm.h, since we'll be using the same space in the frametable.
+ * Also, the mbz field must line up with the owner field of normal
+ * pages, so they look properly like anonymous/xen pages. */
+static inline void shadow_check_page_struct_offsets(void) {
+ BUILD_BUG_ON(sizeof (struct shadow_page_info)
+ != sizeof (struct page_info));
+ BUILD_BUG_ON(offsetof(struct shadow_page_info, mbz)
+ != offsetof(struct page_info, u.inuse._domain));
+}
+
+/* Shadow type codes */
+#define SH_type_none (0U) /* on the shadow free list */
+#define SH_type_min_shadow (1U)
+#define SH_type_l1_32_shadow (1U) /* shadowing a 32-bit L1 guest page */
+#define SH_type_fl1_32_shadow (2U) /* L1 shadow for a 32b 4M superpage */
+#define SH_type_l2_32_shadow (3U) /* shadowing a 32-bit L2 guest page */
+#define SH_type_l1_pae_shadow (4U) /* shadowing a pae L1 page */
+#define SH_type_fl1_pae_shadow (5U) /* L1 shadow for pae 2M superpg */
+#define SH_type_l2_pae_shadow (6U) /* shadowing a pae L2-low page */
+#define SH_type_l2h_pae_shadow (7U) /* shadowing a pae L2-high page */
+#define SH_type_l1_64_shadow (8U) /* shadowing a 64-bit L1 page */
+#define SH_type_fl1_64_shadow (9U) /* L1 shadow for 64-bit 2M superpg */
+#define SH_type_l2_64_shadow (10U) /* shadowing a 64-bit L2 page */
+#define SH_type_l3_64_shadow (11U) /* shadowing a 64-bit L3 page */
+#define SH_type_l4_64_shadow (12U) /* shadowing a 64-bit L4 page */
+#define SH_type_max_shadow (12U)
+#define SH_type_p2m_table (13U) /* in use as the p2m table */
+#define SH_type_monitor_table (14U) /* in use as a monitor table */
+#define SH_type_unused (15U)
+
+/*
+ * Definitions for the shadow_flags field in page_info.
+ * These flags are stored on *guest* pages...
+ * Bits 1-12 are encodings for the shadow types.
+ */
+#define SHF_page_type_mask \
+ (((1u << (SH_type_max_shadow + 1u)) - 1u) - \
+ ((1u << SH_type_min_shadow) - 1u))
+
+#define SHF_L1_32 (1u << SH_type_l1_32_shadow)
+#define SHF_FL1_32 (1u << SH_type_fl1_32_shadow)
+#define SHF_L2_32 (1u << SH_type_l2_32_shadow)
+#define SHF_L1_PAE (1u << SH_type_l1_pae_shadow)
+#define SHF_FL1_PAE (1u << SH_type_fl1_pae_shadow)
+#define SHF_L2_PAE (1u << SH_type_l2_pae_shadow)
+#define SHF_L2H_PAE (1u << SH_type_l2h_pae_shadow)
+#define SHF_L1_64 (1u << SH_type_l1_64_shadow)
+#define SHF_FL1_64 (1u << SH_type_fl1_64_shadow)
+#define SHF_L2_64 (1u << SH_type_l2_64_shadow)
+#define SHF_L3_64 (1u << SH_type_l3_64_shadow)
+#define SHF_L4_64 (1u << SH_type_l4_64_shadow)
+
+/* Used for hysteresis when automatically unhooking mappings on fork/exit */
+#define SHF_unhooked_mappings (1u<<31)
+
/******************************************************************************
* Various function declarations
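
With the type codes now small integers, SHF_page_type_mask in the hunk above evaluates to a contiguous run of shadow_flags bits 1 through 12. A standalone check of that arithmetic, not part of the patch and compiled outside Xen:

    #include <assert.h>
    #define SH_type_min_shadow 1u
    #define SH_type_max_shadow 12u
    #define SHF_page_type_mask \
        (((1u << (SH_type_max_shadow + 1u)) - 1u) - \
         ((1u << SH_type_min_shadow) - 1u))
    int main(void)
    {
        assert(SHF_page_type_mask == 0x1ffeu);  /* bits 1..12 set, bit 0 clear */
        return 0;
    }
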
@@ -173,12 +264,14 @@ void sh_install_xen_entries_in_l2(struct
// Override mfn_to_page from asm/page.h, which was #include'd above,
// in order to make it work with our mfn type.
#undef mfn_to_page
-#define mfn_to_page(_mfn) (frame_table + mfn_x(_mfn))
+#define mfn_to_page(_m) (frame_table + mfn_x(_m))
+#define mfn_to_shadow_page(_m) ((struct shadow_page_info *)mfn_to_page(_m))
// Override page_to_mfn from asm/page.h, which was #include'd above,
// in order to make it work with our mfn type.
#undef page_to_mfn
#define page_to_mfn(_pg) (_mfn((_pg) - frame_table))
+#define shadow_page_to_mfn(_spg) (page_to_mfn((struct page_info *)_spg))
// Override mfn_valid from asm/page.h, which was #include'd above,
// in order to make it work with our mfn type.
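
Since struct shadow_page_info is laid out over the same frametable slot as struct page_info, the two new macros are plain casts around the existing pointer arithmetic and invert each other. A minimal standalone model of that round trip; frame_table, the struct layouts and the raw-integer mfn are simplified stand-ins, not the real Xen types:

    #include <assert.h>
    struct page_info        { unsigned long pad[4]; };
    struct shadow_page_info { unsigned long pad[4]; };   /* same size by design */
    static struct page_info frame_table[16];
    #define mfn_to_page(m)          (frame_table + (m))
    #define mfn_to_shadow_page(m)   ((struct shadow_page_info *)mfn_to_page(m))
    #define page_to_mfn(pg)         ((unsigned long)((pg) - frame_table))
    #define shadow_page_to_mfn(spg) (page_to_mfn((struct page_info *)(spg)))
    int main(void)
    {
        unsigned long mfn = 7;
        assert(shadow_page_to_mfn(mfn_to_shadow_page(mfn)) == mfn);
        return 0;
    }
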
@@ -189,28 +282,24 @@ static inline void *
static inline void *
sh_map_domain_page(mfn_t mfn)
{
- /* XXX Using the monitor-table as a map will happen here */
return map_domain_page(mfn_x(mfn));
}
static inline void
sh_unmap_domain_page(void *p)
{
- /* XXX Using the monitor-table as a map will happen here */
unmap_domain_page(p);
}
static inline void *
sh_map_domain_page_global(mfn_t mfn)
{
- /* XXX Using the monitor-table as a map will happen here */
return map_domain_page_global(mfn_x(mfn));
}
static inline void
sh_unmap_domain_page_global(void *p)
{
- /* XXX Using the monitor-table as a map will happen here */
unmap_domain_page_global(p);
}
@@ -253,8 +342,7 @@ sh_mfn_is_a_page_table(mfn_t gmfn)
/**************************************************************************/
-/* Shadow-page refcounting. See comment in shadow-common.c about the
- * use of struct page_info fields for shadow pages */
+/* Shadow-page refcounting. */
void sh_destroy_shadow(struct vcpu *v, mfn_t smfn);
@@ -264,27 +352,26 @@ static inline void sh_get_ref(mfn_t smfn
static inline void sh_get_ref(mfn_t smfn, paddr_t entry_pa)
{
u32 x, nx;
- struct page_info *page = mfn_to_page(smfn);
+ struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
ASSERT(mfn_valid(smfn));
- x = page->count_info & PGC_SH_count_mask;
+ x = sp->count;
nx = x + 1;
- if ( unlikely(nx & ~PGC_SH_count_mask) )
+ if ( unlikely(nx >= 1U<<26) )
{
SHADOW_PRINTK("shadow ref overflow, gmfn=%" PRtype_info " smfn=%lx\n",
- page->u.inuse.type_info, mfn_x(smfn));
+ sp->backpointer, mfn_x(smfn));
domain_crash_synchronous();
}
/* Guarded by the shadow lock, so no need for atomic update */
- page->count_info &= ~PGC_SH_count_mask;
- page->count_info |= nx;
+ sp->count = nx;
/* We remember the first shadow entry that points to each shadow. */
- if ( entry_pa != 0 && page->up == 0 )
- page->up = entry_pa;
+ if ( entry_pa != 0 && sp->up == 0 )
+ sp->up = entry_pa;
}
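
The overflow check above works because the increment is done in a full-width local before the result is stored back into the 26-bit count bitfield, so nx can be compared against 1<<26 without wrapping. A self-contained model of the guard, illustrative only:

    #include <stdio.h>
    struct demo { unsigned int count:26; };
    int main(void)
    {
        struct demo d = { .count = (1u << 26) - 1 };  /* maximum storable value */
        unsigned int nx = d.count + 1;                /* full-width arithmetic */
        if ( nx >= (1u << 26) )
            printf("refcount would overflow: refuse and crash the domain\n");
        else
            d.count = nx;                             /* only stored when safe */
        return 0;
    }
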
@@ -293,31 +380,27 @@ static inline void sh_put_ref(struct vcp
static inline void sh_put_ref(struct vcpu *v, mfn_t smfn, paddr_t entry_pa)
{
u32 x, nx;
- struct page_info *page = mfn_to_page(smfn);
+ struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
ASSERT(mfn_valid(smfn));
- ASSERT(page_get_owner(page) == NULL);
+ ASSERT(sp->mbz == 0);
/* If this is the entry in the up-pointer, remove it */
- if ( entry_pa != 0 && page->up == entry_pa )
- page->up = 0;
-
- x = page->count_info & PGC_SH_count_mask;
+ if ( entry_pa != 0 && sp->up == entry_pa )
+ sp->up = 0;
+
+ x = sp->count;
nx = x - 1;
if ( unlikely(x == 0) )
{
- SHADOW_PRINTK("shadow ref underflow, smfn=%lx oc=%08x t=%"
- PRtype_info "\n",
- mfn_x(smfn),
- page->count_info & PGC_SH_count_mask,
- page->u.inuse.type_info);
+ SHADOW_PRINTK("shadow ref underflow, smfn=%lx oc=%08x t=%#x\n",
+ mfn_x(smfn), sp->count, sp->type);
domain_crash_synchronous();
}
/* Guarded by the shadow lock, so no need for atomic update */
- page->count_info &= ~PGC_SH_count_mask;
- page->count_info |= nx;
+ sp->count = nx;
if ( unlikely(nx == 0) )
sh_destroy_shadow(v, smfn);
@@ -327,27 +410,27 @@ static inline void sh_put_ref(struct vcp
/* Pin a shadow page: take an extra refcount and set the pin bit. */
static inline void sh_pin(mfn_t smfn)
{
- struct page_info *page;
+ struct shadow_page_info *sp;
ASSERT(mfn_valid(smfn));
- page = mfn_to_page(smfn);
- if ( !(page->count_info & PGC_SH_pinned) )
+ sp = mfn_to_shadow_page(smfn);
+ if ( !(sp->pinned) )
{
sh_get_ref(smfn, 0);
- page->count_info |= PGC_SH_pinned;
+ sp->pinned = 1;
}
}
/* Unpin a shadow page: unset the pin bit and release the extra ref. */
static inline void sh_unpin(struct vcpu *v, mfn_t smfn)
{
- struct page_info *page;
+ struct shadow_page_info *sp;
ASSERT(mfn_valid(smfn));
- page = mfn_to_page(smfn);
- if ( page->count_info & PGC_SH_pinned )
+ sp = mfn_to_shadow_page(smfn);
+ if ( sp->pinned )
{
- page->count_info &= ~PGC_SH_pinned;
+ sp->pinned = 0;
sh_put_ref(v, smfn, 0);
}
}
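
sh_pin() and sh_unpin() remain idempotent with the new bitfield: the extra reference is taken only on the transition into the pinned state and dropped only on the transition out, so repeated pins are harmless and a single unpin balances them. A toy model of that behaviour outside Xen; the names and the bare counter are assumptions for illustration:

    #include <assert.h>
    struct toy_shadow { unsigned int pinned:1; unsigned int count:26; };
    static void toy_pin(struct toy_shadow *sp)
    {
        if ( !sp->pinned ) { sp->count++; sp->pinned = 1; }
    }
    static void toy_unpin(struct toy_shadow *sp)
    {
        if ( sp->pinned ) { sp->pinned = 0; sp->count--; }
    }
    int main(void)
    {
        struct toy_shadow s = { .pinned = 0, .count = 1 };
        toy_pin(&s);
        toy_pin(&s);                    /* second pin is a no-op */
        assert(s.count == 2);
        toy_unpin(&s);                  /* one unpin balances both pins */
        assert(s.count == 1 && !s.pinned);
        return 0;
    }
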
diff -r a8d2b1393b76 -r 6f0d8434d23f xen/arch/x86/mm/shadow/types.h
--- a/xen/arch/x86/mm/shadow/types.h Tue Nov 21 18:09:23 2006 -0800
+++ b/xen/arch/x86/mm/shadow/types.h Thu Nov 23 17:40:28 2006 +0000
@@ -281,9 +281,9 @@ static inline guest_l2e_t guest_l2e_from
#define guest_l2_table_offset(a) l2_table_offset_32(a)
/* The shadow types needed for the various levels. */
-#define PGC_SH_l1_shadow PGC_SH_l1_32_shadow
-#define PGC_SH_l2_shadow PGC_SH_l2_32_shadow
-#define PGC_SH_fl1_shadow PGC_SH_fl1_32_shadow
+#define SH_type_l1_shadow SH_type_l1_32_shadow
+#define SH_type_l2_shadow SH_type_l2_32_shadow
+#define SH_type_fl1_shadow SH_type_fl1_32_shadow
#else /* GUEST_PAGING_LEVELS != 2 */
@@ -381,16 +381,16 @@ static inline guest_l4e_t guest_l4e_from
/* The shadow types needed for the various levels. */
#if GUEST_PAGING_LEVELS == 3
-#define PGC_SH_l1_shadow PGC_SH_l1_pae_shadow
-#define PGC_SH_fl1_shadow PGC_SH_fl1_pae_shadow
-#define PGC_SH_l2_shadow PGC_SH_l2_pae_shadow
-#define PGC_SH_l2h_shadow PGC_SH_l2h_pae_shadow
+#define SH_type_l1_shadow SH_type_l1_pae_shadow
+#define SH_type_fl1_shadow SH_type_fl1_pae_shadow
+#define SH_type_l2_shadow SH_type_l2_pae_shadow
+#define SH_type_l2h_shadow SH_type_l2h_pae_shadow
#else
-#define PGC_SH_l1_shadow PGC_SH_l1_64_shadow
-#define PGC_SH_fl1_shadow PGC_SH_fl1_64_shadow
-#define PGC_SH_l2_shadow PGC_SH_l2_64_shadow
-#define PGC_SH_l3_shadow PGC_SH_l3_64_shadow
-#define PGC_SH_l4_shadow PGC_SH_l4_64_shadow
+#define SH_type_l1_shadow SH_type_l1_64_shadow
+#define SH_type_fl1_shadow SH_type_fl1_64_shadow
+#define SH_type_l2_shadow SH_type_l2_64_shadow
+#define SH_type_l3_shadow SH_type_l3_64_shadow
+#define SH_type_l4_shadow SH_type_l4_64_shadow
#endif
#endif /* GUEST_PAGING_LEVELS != 2 */
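
Each build of multi.c resolves the generic SH_type_l*_shadow names to exactly one concrete code for its GUEST_PAGING_LEVELS. A reduced illustration of that selection, with the numeric codes copied from private.h above and 3-level paging chosen purely as an example:

    #include <stdio.h>
    #define GUEST_PAGING_LEVELS 3
    #define SH_type_l2_pae_shadow  6u   /* from private.h */
    #define SH_type_l2_64_shadow  10u   /* from private.h */
    #if GUEST_PAGING_LEVELS == 3
    #define SH_type_l2_shadow SH_type_l2_pae_shadow
    #else
    #define SH_type_l2_shadow SH_type_l2_64_shadow
    #endif
    int main(void)
    {
        printf("SH_type_l2_shadow == %u\n", SH_type_l2_shadow);  /* prints 6 */
        return 0;
    }
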
diff -r a8d2b1393b76 -r 6f0d8434d23f xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h Tue Nov 21 18:09:23 2006 -0800
+++ b/xen/include/asm-x86/mm.h Thu Nov 23 17:40:28 2006 +0000
@@ -20,11 +20,7 @@ struct page_info
struct page_info
{
/* Each frame can be threaded onto a doubly-linked list. */
- union {
- struct list_head list;
- /* Shadow uses this field as an up-pointer in lower-level shadows */
- paddr_t up;
- };
+ struct list_head list;
/* Reference count and various PGC_xxx flags and fields. */
u32 count_info;
@@ -59,11 +55,11 @@ struct page_info
u32 tlbflush_timestamp;
/*
- * Guest pages with a shadow. This does not conflict with
+ * Guest pages with a shadow. This does not conflict with
* tlbflush_timestamp since page table pages are explicitly not
* tracked for TLB-flush avoidance when a guest runs in shadow mode.
*/
- u32 shadow_flags;
+ unsigned long shadow_flags;
};
};
@@ -102,38 +98,6 @@ struct page_info
#define PGC_page_table (1U<<_PGC_page_table)
/* 29-bit count of references to this frame. */
#define PGC_count_mask ((1U<<29)-1)
-
-/* shadow uses the count_info on shadow pages somewhat differently */
-/* NB: please coordinate any changes here with the SHF's in shadow.h */
-#define PGC_SH_none (0U<<28) /* on the shadow free list */
-#define PGC_SH_min_shadow (1U<<28)
-#define PGC_SH_l1_32_shadow (1U<<28) /* shadowing a 32-bit L1 guest page */
-#define PGC_SH_fl1_32_shadow (2U<<28) /* L1 shadow for a 32b 4M superpage */
-#define PGC_SH_l2_32_shadow (3U<<28) /* shadowing a 32-bit L2 guest page */
-#define PGC_SH_l1_pae_shadow (4U<<28) /* shadowing a pae L1 page */
-#define PGC_SH_fl1_pae_shadow (5U<<28) /* L1 shadow for pae 2M superpg */
-#define PGC_SH_l2_pae_shadow (6U<<28) /* shadowing a pae L2-low page */
-#define PGC_SH_l2h_pae_shadow (7U<<28) /* shadowing a pae L2-high page */
-#define PGC_SH_l1_64_shadow (8U<<28) /* shadowing a 64-bit L1 page */
-#define PGC_SH_fl1_64_shadow (9U<<28) /* L1 shadow for 64-bit 2M superpg */
-#define PGC_SH_l2_64_shadow (10U<<28) /* shadowing a 64-bit L2 page */
-#define PGC_SH_l3_64_shadow (11U<<28) /* shadowing a 64-bit L3 page */
-#define PGC_SH_l4_64_shadow (12U<<28) /* shadowing a 64-bit L4 page */
-#define PGC_SH_max_shadow (12U<<28)
-#define PGC_SH_p2m_table (13U<<28) /* in use as the p2m table */
-#define PGC_SH_monitor_table (14U<<28) /* in use as a monitor table */
-#define PGC_SH_unused (15U<<28)
-
-#define PGC_SH_type_mask (15U<<28)
-#define PGC_SH_type_shift 28
-
-#define PGC_SH_pinned (1U<<27)
-
-#define _PGC_SH_log_dirty 26
-#define PGC_SH_log_dirty (1U<<26)
-
-/* 26 bit ref count for shadow pages */
-#define PGC_SH_count_mask ((1U<<26) - 1)
/* We trust the slab allocator in slab.c, and our use of it. */
#define PageSlab(page) (1)
diff -r a8d2b1393b76 -r 6f0d8434d23f xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h Tue Nov 21 18:09:23 2006 -0800
+++ b/xen/include/asm-x86/shadow.h Thu Nov 23 17:40:28 2006 +0000
@@ -577,32 +577,6 @@ shadow_guest_physmap_remove_page(struct
shadow_guest_physmap_remove_page(struct domain *d, unsigned long gfn,
unsigned long mfn);
-/*
- * Definitions for the shadow_flags field in page_info.
- * These flags are stored on *guest* pages...
- * Bits 1-13 are encodings for the shadow types.
- */
-#define PGC_SH_type_to_index(_type) ((_type) >> PGC_SH_type_shift)
-#define SHF_page_type_mask \
- (((1u << (PGC_SH_type_to_index(PGC_SH_max_shadow) + 1u)) - 1u) - \
- ((1u << PGC_SH_type_to_index(PGC_SH_min_shadow)) - 1u))
-
-#define SHF_L1_32 (1u << PGC_SH_type_to_index(PGC_SH_l1_32_shadow))
-#define SHF_FL1_32 (1u << PGC_SH_type_to_index(PGC_SH_fl1_32_shadow))
-#define SHF_L2_32 (1u << PGC_SH_type_to_index(PGC_SH_l2_32_shadow))
-#define SHF_L1_PAE (1u << PGC_SH_type_to_index(PGC_SH_l1_pae_shadow))
-#define SHF_FL1_PAE (1u << PGC_SH_type_to_index(PGC_SH_fl1_pae_shadow))
-#define SHF_L2_PAE (1u << PGC_SH_type_to_index(PGC_SH_l2_pae_shadow))
-#define SHF_L2H_PAE (1u << PGC_SH_type_to_index(PGC_SH_l2h_pae_shadow))
-#define SHF_L1_64 (1u << PGC_SH_type_to_index(PGC_SH_l1_64_shadow))
-#define SHF_FL1_64 (1u << PGC_SH_type_to_index(PGC_SH_fl1_64_shadow))
-#define SHF_L2_64 (1u << PGC_SH_type_to_index(PGC_SH_l2_64_shadow))
-#define SHF_L3_64 (1u << PGC_SH_type_to_index(PGC_SH_l3_64_shadow))
-#define SHF_L4_64 (1u << PGC_SH_type_to_index(PGC_SH_l4_64_shadow))
-
-/* Used for hysteresis when automatically unhooking mappings on fork/exit */
-#define SHF_unhooked_mappings (1u<<31)
-
/*
* Allocation of shadow pages
*/