diff -r fa1ad484bf0b xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c	Wed Mar 31 14:28:31 2010 +0200
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Wed Mar 31 14:44:39 2010 +0200
@@ -92,7 +92,7 @@ static int vmx_domain_initialise(struct 
 
 static void vmx_domain_destroy(struct domain *d)
 {
-    if ( d->arch.hvm_domain.hap_enabled )
+    if ( paging_mode_hap(d) )
         on_each_cpu(__ept_sync_domain, d, 1);
     vmx_free_vlapic_mapping(d);
 }
@@ -678,7 +678,7 @@ static void vmx_ctxt_switch_to(struct vc
     if ( old_cr4 != new_cr4 )
         write_cr4(new_cr4);
 
-    if ( d->arch.hvm_domain.hap_enabled )
+    if ( paging_mode_hap(d) )
     {
         unsigned int cpu = smp_processor_id();
         /* Test-and-test-and-set this CPU in the EPT-is-synced mask. */
@@ -1222,7 +1222,7 @@ static void __ept_sync_domain(void *info
 void ept_sync_domain(struct domain *d)
 {
     /* Only if using EPT and this domain has some VCPUs to dirty. */
-    if ( !d->arch.hvm_domain.hap_enabled || !d->vcpu || !d->vcpu[0] )
+    if ( !paging_mode_hap(d) || !d->vcpu || !d->vcpu[0] )
         return;
 
     ASSERT(local_irq_is_enabled());
diff -r fa1ad484bf0b xen/arch/x86/mm/hap/hap.c
--- a/xen/arch/x86/mm/hap/hap.c	Wed Mar 31 14:28:31 2010 +0200
+++ b/xen/arch/x86/mm/hap/hap.c	Wed Mar 31 14:44:39 2010 +0200
@@ -550,8 +550,13 @@ int hap_enable(struct domain *d, u32 mod
 {
     unsigned int old_pages;
     int rv = 0;
+    uint32_t oldmode;
 
     domain_pause(d);
+
+    oldmode = d->arch.paging.mode;
+    d->arch.paging.mode = mode | PG_HAP_enable;
+
     /* error check */
     if ( (d == current->domain) )
     {
@@ -582,9 +587,9 @@ int hap_enable(struct domain *d, u32 mod
         goto out;
     }
 
-    d->arch.paging.mode = mode | PG_HAP_enable;
-
 out:
+    if (rv)
+        d->arch.paging.mode = oldmode;
     domain_unpause(d);
     return rv;
 }
diff -r fa1ad484bf0b xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c	Wed Mar 31 14:28:31 2010 +0200
+++ b/xen/arch/x86/mm/mem_sharing.c	Wed Mar 31 14:44:39 2010 +0200
@@ -44,7 +44,7 @@ static void mem_sharing_audit(void);
 
 #define hap_enabled(d) \
-    (is_hvm_domain(d) && (d)->arch.hvm_domain.hap_enabled)
+    (is_hvm_domain(d) && paging_mode_hap(d))
 #define mem_sharing_enabled(d) \
     (is_hvm_domain(d) && (d)->arch.hvm_domain.mem_sharing_enabled)
 
 
diff -r fa1ad484bf0b xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c	Wed Mar 31 14:28:31 2010 +0200
+++ b/xen/arch/x86/mm/p2m.c	Wed Mar 31 14:44:39 2010 +0200
@@ -1231,7 +1231,7 @@ p2m_set_entry(struct domain *d, unsigned
         if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn,
                              L3_PAGETABLE_SHIFT - PAGE_SHIFT,
                              ((CONFIG_PAGING_LEVELS == 3)
-                              ? (d->arch.hvm_domain.hap_enabled ? 4 : 8)
+                              ? (paging_mode_hap(d) ? 4 : 8)
                               : L3_PAGETABLE_ENTRIES),
                              PGT_l2_page_table) )
             goto out;
@@ -1568,7 +1568,7 @@ int p2m_init(struct domain *d)
     p2m->get_entry_current = p2m_gfn_to_mfn_current;
     p2m->change_entry_type_global = p2m_change_type_global;
 
-    if ( is_hvm_domain(d) && d->arch.hvm_domain.hap_enabled &&
+    if ( is_hvm_domain(d) && paging_mode_hap(d) &&
         (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) )
         ept_p2m_init(d);
 
@@ -1595,7 +1595,7 @@ int set_p2m_entry(struct domain *d, unsi
 
     while ( todo )
     {
-        if ( is_hvm_domain(d) && d->arch.hvm_domain.hap_enabled )
+        if ( is_hvm_domain(d) && paging_mode_hap(d) )
             order = (((gfn | mfn_x(mfn) | todo) & (SUPERPAGE_PAGES - 1)) == 0) ?
                 9 : 0;
         else