ChangeSet 1.1258, 2005/03/23 16:55:23+00:00, mafetter@xxxxxxxxxxxxxxxx
Remember min and max indices for valid entries in shadow L1 tables.
Use these to minimize the revalidation effort.
Signed-off-by: michael.fetterman@xxxxxxxxxxxx
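The idea: shadow pages no longer need tlbflush_timestamp for its usual purpose (it previously held a pickled domain pointer, which free_shadow_page can now recover from the guest mfn), so the field is repurposed to cache the lowest and highest indices of valid entries in a shadow L1 table. Later resyncs then scan only that window instead of all L1_PAGETABLE_ENTRIES slots. Below is a minimal standalone sketch of the encoding, assuming the same 16-bit split as the patch's SHADOW_ENCODE_MIN_MAX/SHADOW_MIN/SHADOW_MAX macros; NUM_ENTRIES and the helper names are illustrative, not the patch's identifiers.

    #include <assert.h>

    #define NUM_ENTRIES 1024   /* stand-in for L1_PAGETABLE_ENTRIES */

    /* Pack min into the low 16 bits; store max as its distance from the
     * table end in the high 16 bits. A zeroed field thus decodes to the
     * conservative full range [0, NUM_ENTRIES]. */
    static unsigned long encode_min_max(int min, int max)
    {
        return (((unsigned long)(NUM_ENTRIES - max) << 16) | min);
    }

    static int decode_min(unsigned long enc) { return enc & 0xffff; }
    static int decode_max(unsigned long enc) { return NUM_ENTRIES - (int)(enc >> 16); }

    int main(void)
    {
        /* Empty-window sentinel, as in the patch's "int min = 1, max = 0":
         * min > max, so a "for ( i = min; i <= max; i++ )" loop over the
         * window runs zero times. */
        unsigned long empty = encode_min_max(1, 0);
        assert(decode_min(empty) == 1 && decode_max(empty) == 0);

        unsigned long enc = encode_min_max(5, 200);
        assert(decode_min(enc) == 5 && decode_max(enc) == 200);
        return 0;
    }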
arch/x86/shadow.c | 39 +++++++++++++++++++++++++++------------
include/asm-x86/shadow.h | 35 ++++++++++++++++++++++++++++++-----
2 files changed, 57 insertions(+), 17 deletions(-)
diff -Nru a/xen/arch/x86/shadow.c b/xen/arch/x86/shadow.c
--- a/xen/arch/x86/shadow.c 2005-04-05 12:17:39 -04:00
+++ b/xen/arch/x86/shadow.c 2005-04-05 12:17:39 -04:00
@@ -174,7 +174,8 @@
* don't want to let those disappear just because no CR3 is currently pointing
* at it.
*
- * tlbflush_timestamp holds a pickled pointer to the domain.
+ * tlbflush_timestamp holds the min & max indices of valid page table
+ * entries within the shadow page.
*/
static inline unsigned long
@@ -204,7 +205,7 @@
ASSERT( (gmfn & ~PGT_mfn_mask) == 0 );
page->u.inuse.type_info = psh_type | gmfn;
page->count_info = 0;
- page->tlbflush_timestamp = pickle_domptr(d);
+ page->tlbflush_timestamp = 0;
switch ( psh_type )
{
@@ -325,8 +326,8 @@
void free_shadow_page(unsigned long smfn)
{
struct pfn_info *page = &frame_table[smfn];
- struct domain *d = unpickle_domptr(page->tlbflush_timestamp);
unsigned long gmfn = page->u.inuse.type_info & PGT_mfn_mask;
+ struct domain *d = page_get_owner(pfn_to_page(gmfn));
unsigned long gpfn = __mfn_to_gpfn(d, gmfn);
unsigned long type = page->u.inuse.type_info & PGT_type_mask;
@@ -1431,25 +1432,34 @@
unsigned long sl1e;
int index = l1_table_offset(va);
-
- l1pte_propagate_from_guest(d, gpl1e[index], &sl1e);
- if ( (sl1e & _PAGE_PRESENT) &&
- !shadow_get_page_from_l1e(mk_l1_pgentry(sl1e), d) )
- sl1e = 0;
- spl1e[index] = sl1e;
+ int min = 1, max = 0;
for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
{
- if ( i == index )
- continue;
l1pte_propagate_from_guest(d, gpl1e[i], &sl1e);
if ( (sl1e & _PAGE_PRESENT) &&
!shadow_get_page_from_l1e(mk_l1_pgentry(sl1e), d) )
sl1e = 0;
if ( sl1e == 0 )
+ {
+ // First copy entries from 0 until first invalid.
+ // Then copy entries from index until first invalid.
+ //
+ if ( i < index ) {
+ i = index - 1;
+ continue;
+ }
break;
+ }
spl1e[i] = sl1e;
+ if ( unlikely(i < min) )
+ min = i;
+ if ( likely(i > max) )
+ max = i;
}
+
+ frame_table[sl1mfn].tlbflush_timestamp =
+ SHADOW_ENCODE_MIN_MAX(min, max);
}
}
@@ -1996,6 +2006,8 @@
unsigned long *guest, *shadow, *snapshot;
int need_flush = 0, external = shadow_mode_external(d);
int unshadow;
+ unsigned long min_max;
+ int min, max;
ASSERT(spin_is_locked(&d->arch.shadow_lock));
@@ -2020,7 +2032,10 @@
switch ( stype ) {
case PGT_l1_shadow:
- for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
+ min_max = pfn_to_page(smfn)->tlbflush_timestamp;
+ min = SHADOW_MIN(min_max);
+ max = SHADOW_MAX(min_max);
+ for ( i = min; i <= max; i++ )
{
unsigned new_pte = guest[i];
if ( new_pte != snapshot[i] )
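The payoff is in the resync loop just above: it now walks only [min, max] rather than all L1_PAGETABLE_ENTRIES entries, and an empty window (min > max) skips the loop entirely. A hedged sketch of the same pattern in isolation; resync_l1 and its parameters are illustrative names, not the patch's:

    /* Revalidate only the window of entries known to contain valid
     * shadow PTEs. Assumes min/max were maintained as entries were
     * installed; an empty table is encoded as min > max, so the loop
     * body never runs in that case. */
    static int resync_l1(const unsigned long *guest, unsigned long *snapshot,
                         int min, int max)
    {
        int i, changed = 0;

        for ( i = min; i <= max; i++ )
        {
            if ( guest[i] != snapshot[i] )
            {
                snapshot[i] = guest[i];   /* stand-in for real PTE propagation */
                changed++;
            }
        }
        return changed;   /* caller would flush TLBs if nonzero */
    }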
diff -Nru a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h 2005-04-05 12:17:39 -04:00
+++ b/xen/include/asm-x86/shadow.h 2005-04-05 12:17:39 -04:00
@@ -52,6 +52,10 @@
#define shadow_lock(_d) do { \
    ASSERT(!spin_is_locked(&(_d)->arch.shadow_lock)); \
    spin_lock(&(_d)->arch.shadow_lock); } while (0)
#define shadow_unlock(_d) spin_unlock(&(_d)->arch.shadow_lock)
+#define SHADOW_ENCODE_MIN_MAX(_min, _max) (((L1_PAGETABLE_ENTRIES - (_max)) << 16) | (_min))
+#define SHADOW_MIN(_encoded) ((_encoded) & ((1u<<16) - 1))
+#define SHADOW_MAX(_encoded) (L1_PAGETABLE_ENTRIES - ((_encoded) >> 16))
+
extern void shadow_mode_init(void);
extern int shadow_mode_control(struct domain *p, dom0_shadow_control_t *sc);
extern int shadow_fault(unsigned long va, struct xen_regs *regs);
@@ -187,13 +191,9 @@
/************************************************************************/
struct shadow_status {
- unsigned long gpfn_and_flags; /* Guest pfn plus flags. */
struct shadow_status *next; /* Pull-to-front list per hash bucket. */
+ unsigned long gpfn_and_flags; /* Guest pfn plus flags. */
unsigned long smfn; /* Shadow mfn. */
-
- // Pull-to-front list of L1s/L2s from which we check when removing
- // write access to a page.
- //struct list_head next_to_check;
};
#define shadow_ht_extra_size 128
@@ -1290,6 +1290,29 @@
/************************************************************************/
+void static inline
+shadow_update_min_max(unsigned long smfn, int index)
+{
+ struct pfn_info *sl1page = pfn_to_page(smfn);
+ unsigned long min_max = sl1page->tlbflush_timestamp;
+ int min = SHADOW_MIN(min_max);
+ int max = SHADOW_MAX(min_max);
+ int update = 0;
+
+ if ( index < min )
+ {
+ min = index;
+ update = 1;
+ }
+ if ( index > max )
+ {
+ max = index;
+ update = 1;
+ }
+ if ( update )
+ sl1page->tlbflush_timestamp = SHADOW_ENCODE_MIN_MAX(min, max);
+}
+
extern void shadow_map_l1_into_current_l2(unsigned long va);
void static inline
@@ -1357,6 +1380,8 @@
}
shadow_linear_pg_table[l1_linear_offset(va)] = mk_l1_pgentry(new_spte);
+
+ shadow_update_min_max(sl2e >> PAGE_SHIFT, l1_table_offset(va));
}
/************************************************************************/
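Putting the two halves together: shadow_set_l1e calls shadow_update_min_max each time it installs a shadow PTE, widening the cached window on demand. The same widening logic, sketched standalone on top of the encode/decode helpers from the first sketch above (update_window is an illustrative name):

    /* Widen a cached [min, max] window to cover a newly valid index,
     * mirroring shadow_update_min_max. Reuses encode_min_max,
     * decode_min, and decode_max from the earlier sketch. */
    static unsigned long update_window(unsigned long enc, int index)
    {
        int min = decode_min(enc);
        int max = decode_max(enc);

        if ( index < min )
            min = index;
        if ( index > max )
            max = index;

        return encode_min_max(min, max);   /* unchanged if already covered */
    }

For example, starting from the empty sentinel encode_min_max(1, 0), installing index 0 gives [0, 0], and installing index 7 after that widens the window to [0, 7]. The window only ever grows, so it may over-approximate the set of valid entries; that is safe, since resync merely compares a few extra slots against the snapshot.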