# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID ba50c9d1271e8d32a80c24eb87f7d04c68ca832f
# Parent dca4893b0b9f14a99ea46abcadcefc3246707585
Clean up the mark_dirty() implementation to check log-dirty
status internally, with the shadow_lock held for consistency.
Callers need not check log-dirty status themselves.
Also, add dirty logging to alloc_page_type/free_page_type, so
that we see page tables coming and going during live relocation.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
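In short, the new contract (condensed from the xen/include/asm-x86/shadow.h
hunk below; a sketch for illustration, not a verbatim source excerpt):

    static inline void mark_dirty(struct domain *d, unsigned int mfn)
    {
        /* Cheap unlocked filter: log-dirty mode is the uncommon case. */
        if ( unlikely(shadow_mode_log_dirty(d)) )
        {
            shadow_lock(d);
            __mark_dirty(d, mfn);  /* re-checks log-dirty under the lock */
            shadow_unlock(d);
        }
    }

so call sites such as the gnttab_log_dirty() macro collapse from

    if ( unlikely(shadow_mode_log_dirty(d)) )
        mark_dirty(d, mfn);

to a plain, unconditional

    mark_dirty(d, mfn);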
diff -r dca4893b0b9f -r ba50c9d1271e xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Thu Nov 24 14:08:27 2005
+++ b/xen/arch/x86/mm.c Thu Nov 24 14:38:33 2005
@@ -1289,6 +1289,11 @@
int alloc_page_type(struct pfn_info *page, unsigned long type)
{
+ struct domain *owner = page_get_owner(page);
+
+ if ( owner != NULL )
+ mark_dirty(owner, page_to_pfn(page));
+
switch ( type & PGT_type_mask )
{
case PGT_l1_page_table:
@@ -1318,16 +1323,14 @@
struct domain *owner = page_get_owner(page);
unsigned long gpfn;
- if ( owner != NULL )
- {
+ if ( unlikely((owner != NULL) && shadow_mode_enabled(owner)) )
+ {
+ mark_dirty(owner, page_to_pfn(page));
if ( unlikely(shadow_mode_refcounts(owner)) )
return;
- if ( unlikely(shadow_mode_enabled(owner)) )
- {
- gpfn = __mfn_to_gpfn(owner, page_to_pfn(page));
- ASSERT(VALID_M2P(gpfn));
- remove_shadow(owner, gpfn, type & PGT_type_mask);
- }
+ gpfn = __mfn_to_gpfn(owner, page_to_pfn(page));
+ ASSERT(VALID_M2P(gpfn));
+ remove_shadow(owner, gpfn, type & PGT_type_mask);
}
switch ( type & PGT_type_mask )
@@ -2203,8 +2206,7 @@
{
shadow_lock(d);
- if ( shadow_mode_log_dirty(d) )
- __mark_dirty(d, mfn);
+ __mark_dirty(d, mfn);
if ( page_is_page_table(page) &&
!page_out_of_sync(page) )
@@ -2263,13 +2265,7 @@
set_pfn_from_mfn(mfn, gpfn);
okay = 1;
- /*
- * If in log-dirty mode, mark the corresponding
- * page as dirty.
- */
- if ( unlikely(shadow_mode_log_dirty(FOREIGNDOM)) &&
- mark_dirty(FOREIGNDOM, mfn) )
- FOREIGNDOM->arch.shadow_dirty_block_count++;
+ mark_dirty(FOREIGNDOM, mfn);
put_page(&frame_table[mfn]);
break;
@@ -2841,8 +2837,7 @@
{
shadow_lock(dom);
- if ( shadow_mode_log_dirty(dom) )
- __mark_dirty(dom, mfn);
+ __mark_dirty(dom, mfn);
if ( page_is_page_table(page) && !page_out_of_sync(page) )
shadow_mark_mfn_out_of_sync(current, gpfn, mfn);
diff -r dca4893b0b9f -r ba50c9d1271e xen/arch/x86/shadow.c
--- a/xen/arch/x86/shadow.c Thu Nov 24 14:08:27 2005
+++ b/xen/arch/x86/shadow.c Thu Nov 24 14:38:33 2005
@@ -1858,8 +1858,7 @@
SH_VVLOG("l1pte_write_fault: updating spte=0x%" PRIpte " gpte=0x%" PRIpte,
l1e_get_intpte(spte), l1e_get_intpte(gpte));
- if ( shadow_mode_log_dirty(d) )
- __mark_dirty(d, gmfn);
+ __mark_dirty(d, gmfn);
if ( mfn_is_page_table(gmfn) )
shadow_mark_va_out_of_sync(v, gpfn, gmfn, va);
@@ -2021,9 +2020,7 @@
domain_crash_synchronous();
}
- // if necessary, record the page table page as dirty
- if ( unlikely(shadow_mode_log_dirty(d)) )
- __mark_dirty(d, __gpfn_to_mfn(d, l2e_get_pfn(gpde)));
+ __mark_dirty(d, __gpfn_to_mfn(d, l2e_get_pfn(gpde)));
}
shadow_set_l1e(va, spte, 1);
@@ -2082,8 +2079,7 @@
* the PTE in the PT-holding page. We need the machine frame number
* for this.
*/
- if ( shadow_mode_log_dirty(d) )
- __mark_dirty(d, va_to_l1mfn(v, va));
+ __mark_dirty(d, va_to_l1mfn(v, va));
shadow_unlock(d);
@@ -3189,11 +3185,7 @@
l1e_remove_flags(sl1e, _PAGE_RW);
}
} else {
- /* log dirty*/
- /*
- if ( shadow_mode_log_dirty(d) )
- __mark_dirty(d, gmfn);
- */
+ /* __mark_dirty(d, gmfn); */
}
// printk("<%s> gpfn: %lx, mfn: %lx, sl1e: %lx\n", __func__, gpfn, mfn, l1e_get_intpte(sl1e));
/* The shadow entries need to be set up before shadow_mark_va_out_of_sync() */
@@ -3476,9 +3468,7 @@
if (unlikely(!__guest_set_l1e(v, va, &gl1e)))
domain_crash_synchronous();
- // if necessary, record the page table page as dirty
- if ( unlikely(shadow_mode_log_dirty(d)) )
- __mark_dirty(d, __gpfn_to_mfn(d, l2e_get_pfn(gl2e)));
+ __mark_dirty(d, __gpfn_to_mfn(d, l2e_get_pfn(gl2e)));
}
shadow_set_l1e_64(va, (pgentry_64_t *)&sl1e, 1);
diff -r dca4893b0b9f -r ba50c9d1271e xen/arch/x86/shadow32.c
--- a/xen/arch/x86/shadow32.c Thu Nov 24 14:08:27 2005
+++ b/xen/arch/x86/shadow32.c Thu Nov 24 14:38:33 2005
@@ -1274,8 +1274,6 @@
d->arch.shadow_fault_count = 0;
d->arch.shadow_dirty_count = 0;
- d->arch.shadow_dirty_net_count = 0;
- d->arch.shadow_dirty_block_count = 0;
break;
@@ -1284,13 +1282,9 @@
sc->stats.fault_count = d->arch.shadow_fault_count;
sc->stats.dirty_count = d->arch.shadow_dirty_count;
- sc->stats.dirty_net_count = d->arch.shadow_dirty_net_count;
- sc->stats.dirty_block_count = d->arch.shadow_dirty_block_count;
d->arch.shadow_fault_count = 0;
d->arch.shadow_dirty_count = 0;
- d->arch.shadow_dirty_net_count = 0;
- d->arch.shadow_dirty_block_count = 0;
if ( (sc->dirty_bitmap == NULL) ||
(d->arch.shadow_dirty_bitmap == NULL) )
@@ -1327,9 +1321,6 @@
case DOM0_SHADOW_CONTROL_OP_PEEK:
sc->stats.fault_count = d->arch.shadow_fault_count;
sc->stats.dirty_count = d->arch.shadow_dirty_count;
- sc->stats.dirty_net_count = d->arch.shadow_dirty_net_count;
- sc->stats.dirty_block_count = d->arch.shadow_dirty_block_count;
-
if ( (sc->dirty_bitmap == NULL) ||
(d->arch.shadow_dirty_bitmap == NULL) )
@@ -2738,9 +2729,7 @@
domain_crash_synchronous();
}
- // if necessary, record the page table page as dirty
- if ( unlikely(shadow_mode_log_dirty(d)) )
- __mark_dirty(d, __gpfn_to_mfn(d, l2e_get_pfn(gpde)));
+ __mark_dirty(d, __gpfn_to_mfn(d, l2e_get_pfn(gpde)));
}
shadow_set_l1e(va, spte, 1);
@@ -2837,8 +2826,6 @@
shadow_lock(d);
- //printk("%s(va=%p, val=%p)\n", __func__, (void *)va, (void *)l1e_get_intpte(val));
-
// This is actually overkill - we don't need to sync the L1 itself,
// just everything involved in getting to this L1 (i.e. we need
// linear_pg_table[l1_linear_offset(va)] to be in sync)...
@@ -2853,10 +2840,8 @@
* the PTE in the PT-holding page. We need the machine frame number
* for this.
*/
- if ( shadow_mode_log_dirty(d) )
- __mark_dirty(d, va_to_l1mfn(v, va));
-
-// out:
+ __mark_dirty(d, va_to_l1mfn(v, va));
+
shadow_unlock(d);
return rc;
diff -r dca4893b0b9f -r ba50c9d1271e xen/arch/x86/shadow_public.c
--- a/xen/arch/x86/shadow_public.c Thu Nov 24 14:08:27 2005
+++ b/xen/arch/x86/shadow_public.c Thu Nov 24 14:38:33 2005
@@ -1183,8 +1183,6 @@
d->arch.shadow_fault_count = 0;
d->arch.shadow_dirty_count = 0;
- d->arch.shadow_dirty_net_count = 0;
- d->arch.shadow_dirty_block_count = 0;
break;
@@ -1193,15 +1191,10 @@
sc->stats.fault_count = d->arch.shadow_fault_count;
sc->stats.dirty_count = d->arch.shadow_dirty_count;
- sc->stats.dirty_net_count = d->arch.shadow_dirty_net_count;
- sc->stats.dirty_block_count = d->arch.shadow_dirty_block_count;
d->arch.shadow_fault_count = 0;
d->arch.shadow_dirty_count = 0;
- d->arch.shadow_dirty_net_count = 0;
- d->arch.shadow_dirty_block_count = 0;
-
if ( (sc->dirty_bitmap == NULL) ||
(d->arch.shadow_dirty_bitmap == NULL) )
{
@@ -1236,8 +1229,6 @@
case DOM0_SHADOW_CONTROL_OP_PEEK:
sc->stats.fault_count = d->arch.shadow_fault_count;
sc->stats.dirty_count = d->arch.shadow_dirty_count;
- sc->stats.dirty_net_count = d->arch.shadow_dirty_net_count;
- sc->stats.dirty_block_count = d->arch.shadow_dirty_block_count;
if ( (sc->dirty_bitmap == NULL) ||
(d->arch.shadow_dirty_bitmap == NULL) )
diff -r dca4893b0b9f -r ba50c9d1271e xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h Thu Nov 24 14:08:27 2005
+++ b/xen/include/asm-x86/domain.h Thu Nov 24 14:38:33 2005
@@ -49,8 +49,6 @@
unsigned int shadow_fault_count;
unsigned int shadow_dirty_count;
- unsigned int shadow_dirty_net_count;
- unsigned int shadow_dirty_block_count;
/* full shadow mode */
struct out_of_sync_entry *out_of_sync; /* list of out-of-sync pages */
diff -r dca4893b0b9f -r ba50c9d1271e xen/include/asm-x86/grant_table.h
--- a/xen/include/asm-x86/grant_table.h Thu Nov 24 14:08:27 2005
+++ b/xen/include/asm-x86/grant_table.h Thu Nov 24 14:38:33 2005
@@ -33,10 +33,6 @@
#define gnttab_shared_mfn(d, t, i) \
((virt_to_phys((t)->shared) >> PAGE_SHIFT) + (i))
-#define gnttab_log_dirty(d, f) \
- do { \
- if ( unlikely(shadow_mode_log_dirty((d))) ) \
- mark_dirty((d), (f)); \
- } while ( 0 )
+#define gnttab_log_dirty(d, f) mark_dirty((d), (f))
#endif /* __ASM_GRANT_TABLE_H__ */
diff -r dca4893b0b9f -r ba50c9d1271e xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h Thu Nov 24 14:08:27 2005
+++ b/xen/include/asm-x86/shadow.h Thu Nov 24 14:38:33 2005
@@ -468,21 +468,18 @@
/************************************************************************/
-static inline int __mark_dirty(struct domain *d, unsigned long mfn)
+static inline void __mark_dirty(struct domain *d, unsigned long mfn)
{
unsigned long pfn;
- int rc = 0;
ASSERT(shadow_lock_is_acquired(d));
+
+ if ( likely(!shadow_mode_log_dirty(d)) || !VALID_MFN(mfn) )
+ return;
+
ASSERT(d->arch.shadow_dirty_bitmap != NULL);
- if ( !VALID_MFN(mfn) )
- return rc;
-
- // N.B. This doesn't use __mfn_to_gpfn().
- // This wants the nice compact set of PFNs from 0..domain's max,
- // which __mfn_to_gpfn() only returns for translated domains.
- //
+ /* We /really/ mean PFN here, even for non-translated guests. */
pfn = get_pfn_from_mfn(mfn);
/*
@@ -491,16 +488,13 @@
* Nothing to do here...
*/
if ( unlikely(IS_INVALID_M2P_ENTRY(pfn)) )
- return rc;
-
- if ( likely(pfn < d->arch.shadow_dirty_bitmap_size) )
- {
- /* N.B. Can use non-atomic TAS because protected by shadow_lock. */
- if ( !__test_and_set_bit(pfn, d->arch.shadow_dirty_bitmap) )
- {
- d->arch.shadow_dirty_count++;
- rc = 1;
- }
+ return;
+
+ /* N.B. Can use non-atomic TAS because protected by shadow_lock. */
+ if ( likely(pfn < d->arch.shadow_dirty_bitmap_size) &&
+ !__test_and_set_bit(pfn, d->arch.shadow_dirty_bitmap) )
+ {
+ d->arch.shadow_dirty_count++;
}
#ifndef NDEBUG
else if ( mfn < max_page )
@@ -513,18 +507,17 @@
frame_table[mfn].u.inuse.type_info );
}
#endif
-
- return rc;
-}
-
-
-static inline int mark_dirty(struct domain *d, unsigned int mfn)
-{
- int rc;
- shadow_lock(d);
- rc = __mark_dirty(d, mfn);
- shadow_unlock(d);
- return rc;
+}
+
+
+static inline void mark_dirty(struct domain *d, unsigned int mfn)
+{
+ if ( unlikely(shadow_mode_log_dirty(d)) )
+ {
+ shadow_lock(d);
+ __mark_dirty(d, mfn);
+ shadow_unlock(d);
+ }
}
@@ -566,8 +559,7 @@
if ( unlikely(shadow_mode_translate(d)) )
update_hl2e(v, va);
- if ( unlikely(shadow_mode_log_dirty(d)) )
- __mark_dirty(d, pagetable_get_pfn(v->arch.guest_table));
+ __mark_dirty(d, pagetable_get_pfn(v->arch.guest_table));
}
static inline void
@@ -787,8 +779,7 @@
SH_VVLOG("l1pte_write_fault: updating spte=0x%" PRIpte " gpte=0x%" PRIpte,
l1e_get_intpte(spte), l1e_get_intpte(gpte));
- if ( shadow_mode_log_dirty(d) )
- __mark_dirty(d, gmfn);
+ __mark_dirty(d, gmfn);
if ( mfn_is_page_table(gmfn) )
shadow_mark_va_out_of_sync(v, gpfn, gmfn, va);
@@ -1325,46 +1316,6 @@
return pttype;
}
-/*
- * N.B. We can make this locking more fine grained (e.g., per shadow page) if
- * it ever becomes a problem, but since we need a spin lock on the hash table
- * anyway it's probably not worth being too clever.
- */
-static inline unsigned long get_shadow_status(
- struct domain *d, unsigned long gpfn, unsigned long stype)
-{
- unsigned long res;
-
- ASSERT(shadow_mode_enabled(d));
-
- /*
- * If we get here we know that some sort of update has happened to the
- * underlying page table page: either a PTE has been updated, or the page
- * has changed type. If we're in log dirty mode, we should set the
- * appropriate bit in the dirty bitmap.
- * N.B. The VA update path doesn't use this and is handled independently.
- *
- * XXX need to think this through for vmx guests, but probably OK
- */
-
- shadow_lock(d);
-
- if ( shadow_mode_log_dirty(d) )
- __mark_dirty(d, __gpfn_to_mfn(d, gpfn));
-
- if ( !(res = __shadow_status(d, gpfn, stype)) )
- shadow_unlock(d);
-
- return res;
-}
-
-
-static inline void put_shadow_status(struct domain *d)
-{
- shadow_unlock(d);
-}
-
-
static inline void delete_shadow_status(
struct domain *d, unsigned long gpfn, unsigned long gmfn, unsigned int stype)
{