[PATCH v2 4/9] x86/shadow: drop a few uses of mfn_valid()
v->arch.paging.shadow.shadow_table[], v->arch.paging.shadow.oos[],
v->arch.paging.shadow.oos_{snapshot[],fixup[].smfn[]} as well as the
hash table are all only ever written with valid MFNs or INVALID_MFN.
Avoid the somewhat expensive mfn_valid() when checking MFNs coming from
these arrays.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
There are many more uses which can likely be replaced, but I think
we're better off doing this in piecemeal fashion.

--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -171,7 +171,7 @@ static void sh_oos_audit(struct domain *
         for ( idx = 0; idx < SHADOW_OOS_PAGES; idx++ )
         {
             mfn_t *oos = v->arch.paging.shadow.oos;
-            if ( !mfn_valid(oos[idx]) )
+            if ( mfn_eq(oos[idx], INVALID_MFN) )
                 continue;
 
             expected_idx = mfn_x(oos[idx]) % SHADOW_OOS_PAGES;
@@ -327,8 +327,7 @@ void oos_fixup_add(struct domain *d, mfn
         int i;
         for ( i = 0; i < SHADOW_OOS_FIXUPS; i++ )
         {
-            if ( mfn_valid(oos_fixup[idx].smfn[i])
-                 && mfn_eq(oos_fixup[idx].smfn[i], smfn)
+            if ( mfn_eq(oos_fixup[idx].smfn[i], smfn)
                  && (oos_fixup[idx].off[i] == off) )
                 return;
         }
@@ -461,7 +460,7 @@ static void oos_hash_add(struct vcpu *v,
     idx = mfn_x(gmfn) % SHADOW_OOS_PAGES;
     oidx = idx;
 
-    if ( mfn_valid(oos[idx])
+    if ( !mfn_eq(oos[idx], INVALID_MFN)
          && (mfn_x(oos[idx]) % SHADOW_OOS_PAGES) == idx )
     {
         /* Punt the current occupant into the next slot */
@@ -470,8 +469,8 @@ static void oos_hash_add(struct vcpu *v,
         swap = 1;
         idx = (idx + 1) % SHADOW_OOS_PAGES;
     }
-    if ( mfn_valid(oos[idx]) )
-    {
+    if ( !mfn_eq(oos[idx], INVALID_MFN) )
+    {
         /* Crush the current occupant. */
         _sh_resync(v, oos[idx], &oos_fixup[idx], oos_snapshot[idx]);
         perfc_incr(shadow_unsync_evict);
@@ -607,7 +606,7 @@ void sh_resync_all(struct vcpu *v, int s
 
     /* First: resync all of this vcpu's oos pages */
     for ( idx = 0; idx < SHADOW_OOS_PAGES; idx++ )
-        if ( mfn_valid(oos[idx]) )
+        if ( !mfn_eq(oos[idx], INVALID_MFN) )
        {
            /* Write-protect and sync contents */
            _sh_resync(v, oos[idx], &oos_fixup[idx], oos_snapshot[idx]);
@@ -630,7 +629,7 @@ void sh_resync_all(struct vcpu *v, int s
 
         for ( idx = 0; idx < SHADOW_OOS_PAGES; idx++ )
         {
-            if ( !mfn_valid(oos[idx]) )
+            if ( mfn_eq(oos[idx], INVALID_MFN) )
                 continue;
 
             if ( skip )
@@ -2183,7 +2182,7 @@ void sh_remove_shadows(struct domain *d,
              !(pg->shadow_flags & (1 << t)) )                          \
             break;                                                     \
         smfn = shadow_hash_lookup(d, mfn_x(gmfn), t);                  \
-        if ( unlikely(!mfn_valid(smfn)) )                              \
+        if ( unlikely(mfn_eq(smfn, INVALID_MFN)) )                     \
         {                                                              \
             printk(XENLOG_G_ERR "gmfn %"PRI_mfn" has flags %#x"        \
                    " but no type-%#x shadow\n",                        \
@@ -2751,7 +2750,7 @@ void shadow_teardown(struct domain *d, b
             int i;
             mfn_t *oos_snapshot = v->arch.paging.shadow.oos_snapshot;
             for ( i = 0; i < SHADOW_OOS_PAGES; i++ )
-                if ( mfn_valid(oos_snapshot[i]) )
+                if ( !mfn_eq(oos_snapshot[i], INVALID_MFN) )
                 {
                     shadow_free(d, oos_snapshot[i]);
                     oos_snapshot[i] = INVALID_MFN;
@@ -2934,7 +2933,7 @@ static int shadow_one_bit_disable(struct
             int i;
             mfn_t *oos_snapshot = v->arch.paging.shadow.oos_snapshot;
             for ( i = 0; i < SHADOW_OOS_PAGES; i++ )
-                if ( mfn_valid(oos_snapshot[i]) )
+                if ( !mfn_eq(oos_snapshot[i], INVALID_MFN) )
                 {
                     shadow_free(d, oos_snapshot[i]);
                     oos_snapshot[i] = INVALID_MFN;
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -110,7 +110,7 @@ get_fl1_shadow_status(struct domain *d,
 /* Look for FL1 shadows in the hash table */
 {
     mfn_t smfn = shadow_hash_lookup(d, gfn_x(gfn), SH_type_fl1_shadow);
-    ASSERT(!mfn_valid(smfn) || mfn_to_page(smfn)->u.sh.head);
+    ASSERT(mfn_eq(smfn, INVALID_MFN) || mfn_to_page(smfn)->u.sh.head);
     return smfn;
 }
@@ -2680,7 +2680,7 @@ static int cf_check sh_page_fault(
                 mfn_t smfn = pagetable_get_mfn(
                     v->arch.paging.shadow.shadow_table[i]);
 
-                if ( mfn_valid(smfn) && (mfn_x(smfn) != 0) )
+                if ( mfn_x(smfn) )
                 {
                     used |= (mfn_to_page(smfn)->v.sh.back == mfn_x(gmfn));
@@ -3824,7 +3824,7 @@ static void cf_check sh_pagetable_dying(
                  : shadow_hash_lookup(d, mfn_x(gmfn), SH_type_l2_pae_shadow);
         }
 
-        if ( mfn_valid(smfn) )
+        if ( !mfn_eq(smfn, INVALID_MFN) )
         {
             gmfn = _mfn(mfn_to_page(smfn)->v.sh.back);
             mfn_to_page(gmfn)->pagetable_dying = true;
@@ -3867,7 +3867,7 @@ static void cf_check sh_pagetable_dying(
     smfn = shadow_hash_lookup(d, mfn_x(gmfn), SH_type_l4_64_shadow);
 #endif
 
-    if ( mfn_valid(smfn) )
+    if ( !mfn_eq(smfn, INVALID_MFN) )
     {
         mfn_to_page(gmfn)->pagetable_dying = true;
         shadow_unhook_mappings(d, smfn, 1/* user pages only */);
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -769,8 +769,10 @@ get_shadow_status(struct domain *d, mfn_
     /* Look for shadows in the hash table */
     {
         mfn_t smfn = shadow_hash_lookup(d, mfn_x(gmfn), shadow_type);
-        ASSERT(!mfn_valid(smfn) || mfn_to_page(smfn)->u.sh.head);
+
+        ASSERT(mfn_eq(smfn, INVALID_MFN) || mfn_to_page(smfn)->u.sh.head);
         perfc_incr(shadow_get_shadow_status);
+
         return smfn;
     }
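For context on why the change is a pure win at these call sites: mfn_valid()
has to establish whether an MFN lies within the frame table (a bounds/bitmap
lookup), while the arrays touched here hold, by construction, either a real
shadow MFN or the INVALID_MFN sentinel, so a plain equality test suffices.
Below is a minimal, self-contained C sketch of that distinction; the stub
mfn_t plumbing, the max_page cost model and the slot_populated() helper are
illustrative assumptions, not Xen's actual implementation.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for Xen's typesafe MFN handling (illustrative only). */
    typedef struct { uint64_t v; } mfn_t;
    #define _mfn(x)      ((mfn_t){ .v = (x) })
    #define mfn_x(m)     ((m).v)
    #define INVALID_MFN  _mfn(~0ULL)

    static bool mfn_eq(mfn_t a, mfn_t b) { return a.v == b.v; }

    /* Crude model of mfn_valid(): a range check against the frame table
     * (the real thing additionally consults PDX compression state). */
    static uint64_t max_page = 1u << 20;
    static bool mfn_valid(mfn_t m) { return mfn_x(m) < max_page; }

    /*
     * Arrays such as oos[] are only ever written with a valid MFN or
     * INVALID_MFN, so "is this slot populated?" is exactly a sentinel test:
     * one comparison, no frame-table lookup.
     */
    static bool slot_populated(mfn_t slot)
    {
        return !mfn_eq(slot, INVALID_MFN);
    }

    int main(void)
    {
        mfn_t oos[2] = { _mfn(0x1234), INVALID_MFN };

        for ( unsigned int i = 0; i < 2; i++ )
            printf("slot %u: populated=%d, mfn_valid=%d\n",
                   i, slot_populated(oos[i]), mfn_valid(oos[i]));
        return 0;
    }

Beyond being cheaper, the sentinel comparison also states the invariant the
commit message relies on more precisely: a slot is either populated or
explicitly empty, whereas mfn_valid() answers the unrelated question of
whether the MFN is covered by the frame table.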