# HG changeset patch
# User cegger
# Date 1311681725 -7200
support 1GB pages for guests

Signed-off-by: Christoph Egger

diff -r 4f2c59fb28e6 -r 6d15152fb59a xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2389,6 +2389,7 @@ void hvm_cpuid(unsigned int input, unsig
                unsigned int *ecx, unsigned int *edx)
 {
     struct vcpu *v = current;
+    struct domain *d = v->domain;
     unsigned int count = *ecx;
 
     if ( cpuid_viridian_leaves(input, eax, ebx, ecx, edx) )
@@ -2397,7 +2398,7 @@ void hvm_cpuid(unsigned int input, unsig
     if ( cpuid_hypervisor_leaves(input, count, eax, ebx, ecx, edx) )
         return;
 
-    domain_cpuid(v->domain, input, *ecx, eax, ebx, ecx, edx);
+    domain_cpuid(d, input, *ecx, eax, ebx, ecx, edx);
 
     switch ( input )
     {
@@ -2433,7 +2434,7 @@ void hvm_cpuid(unsigned int input, unsig
         {
             if ( !(v->arch.xcr0 & (1ULL << sub_leaf)) )
                 continue;
-            domain_cpuid(v->domain, input, sub_leaf, &_eax, &_ebx, &_ecx,
+            domain_cpuid(d, input, sub_leaf, &_eax, &_ebx, &_ecx,
                          &_edx);
             if ( (_eax + _ebx) > *ebx )
                 *ebx = _eax + _ebx;
@@ -2444,9 +2445,13 @@ void hvm_cpuid(unsigned int input, unsig
     case 0x80000001:
         /* We expose RDTSCP feature to guest only when
            tsc_mode == TSC_MODE_DEFAULT and host_tsc_is_safe() returns 1 */
-        if ( v->domain->arch.tsc_mode != TSC_MODE_DEFAULT ||
+        if ( d->arch.tsc_mode != TSC_MODE_DEFAULT ||
              !host_tsc_is_safe() )
             *edx &= ~cpufeat_mask(X86_FEATURE_RDTSCP);
+        /* Expose the 1GB page feature to HVM HAP guests when hardware
+         * support is available. */
+        if ( hvm_pse1gb_supported(d) )
+            *edx |= cpufeat_mask(X86_FEATURE_PAGE1GB);
         break;
     }
 }
diff -r 4f2c59fb28e6 -r 6d15152fb59a xen/arch/x86/mm/guest_walk.c
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -134,7 +134,8 @@ guest_walk_tables(struct vcpu *v, struct
     guest_l4e_t *l4p;
 #endif
     uint32_t gflags, mflags, iflags, rc = 0;
-    int pse, smep;
+    int smep;
+    bool_t pse1G = 0, pse2M = 0;
 
     perfc_incr(guest_walk);
     memset(gw, 0, sizeof(*gw));
@@ -182,6 +183,27 @@ guest_walk_tables(struct vcpu *v, struct
     if ( rc & _PAGE_PRESENT )
         goto out;
 
+    pse1G = (guest_supports_1G_superpages(v) &&
+             (guest_l3e_get_flags(gw->l3e) & _PAGE_PSE));
+
+    if ( pse1G )
+    {
+        /* Shadow paging doesn't support 1GB pages, so a fake
+         * shadow l1 table entry is not needed. */
+        gfn_t start = guest_l3e_get_gfn(gw->l3e);
+
+#define GUEST_L3_GFN_ALIGN (1 << (GUEST_L3_PAGETABLE_SHIFT - \
+                                  GUEST_L2_PAGETABLE_SHIFT))
+        if ( gfn_x(start) & (GUEST_L3_GFN_ALIGN - 1) & ~0x1 )
+        {
+            rc |= _PAGE_INVALID_BITS;
+        }
+#undef GUEST_L3_GFN_ALIGN
+
+        gw->l2mfn = gw->l1mfn = _mfn(INVALID_MFN);
+        goto set_ad;
+    }
+
 #else /* PAE only... */
 
     /* Get the l3e and check its flag */
@@ -219,10 +241,10 @@ guest_walk_tables(struct vcpu *v, struct
     if ( rc & _PAGE_PRESENT )
         goto out;
 
-    pse = (guest_supports_superpages(v) &&
+    pse2M = (guest_supports_superpages(v) &&
            (guest_l2e_get_flags(gw->l2e) & _PAGE_PSE));
 
-    if ( pse )
+    if ( pse2M )
     {
         /* Special case: this guest VA is in a PSE superpage, so there's
          * no guest l1e.  We make one up so that the propagation code
@@ -282,6 +304,9 @@ guest_walk_tables(struct vcpu *v, struct
         rc |= ((gflags & mflags) ^ mflags);
     }
 
+#if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
+set_ad:
+#endif
     /* Now re-invert the user-mode requirement for SMEP. */
     if ( smep )
         rc ^= _PAGE_USER;
@@ -295,17 +320,21 @@ guest_walk_tables(struct vcpu *v, struct
 #if GUEST_PAGING_LEVELS == 4 /* 64-bit only... */
         if ( set_ad_bits(l4p + guest_l4_table_offset(va), &gw->l4e, 0) )
             paging_mark_dirty(d, mfn_x(gw->l4mfn));
-        if ( set_ad_bits(l3p + guest_l3_table_offset(va), &gw->l3e, 0) )
+        if ( set_ad_bits(l3p + guest_l3_table_offset(va), &gw->l3e,
+                         (pse1G && (pfec & PFEC_write_access))) )
             paging_mark_dirty(d, mfn_x(gw->l3mfn));
 #endif
-        if ( set_ad_bits(l2p + guest_l2_table_offset(va), &gw->l2e,
-                         (pse && (pfec & PFEC_write_access))) )
-            paging_mark_dirty(d, mfn_x(gw->l2mfn));
-        if ( !pse )
+        if ( !pse1G )
         {
-            if ( set_ad_bits(l1p + guest_l1_table_offset(va), &gw->l1e,
-                             (pfec & PFEC_write_access)) )
-                paging_mark_dirty(d, mfn_x(gw->l1mfn));
+            if ( set_ad_bits(l2p + guest_l2_table_offset(va), &gw->l2e,
+                             (pse2M && (pfec & PFEC_write_access))) )
+                paging_mark_dirty(d, mfn_x(gw->l2mfn));
+            if ( !pse2M )
+            {
+                if ( set_ad_bits(l1p + guest_l1_table_offset(va), &gw->l1e,
+                                 (pfec & PFEC_write_access)) )
+                    paging_mark_dirty(d, mfn_x(gw->l1mfn));
+            }
         }
     }
 
diff -r 4f2c59fb28e6 -r 6d15152fb59a xen/include/asm-x86/guest_pt.h
--- a/xen/include/asm-x86/guest_pt.h
+++ b/xen/include/asm-x86/guest_pt.h
@@ -194,6 +194,17 @@ guest_supports_superpages(struct vcpu *v
 }
 
 static inline int
+guest_supports_1G_superpages(struct vcpu *v)
+{
+    if ( !guest_supports_superpages(v) )
+        return 0;
+
+    return (GUEST_PAGING_LEVELS >= 4
+            && hvm_pse1gb_supported(v->domain)
+            && hvm_long_mode_enabled(v));
+}
+
+static inline int
 guest_supports_nx(struct vcpu *v)
 {
     if ( GUEST_PAGING_LEVELS == 2 || !cpu_has_nx )
diff -r 4f2c59fb28e6 -r 6d15152fb59a xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -224,6 +224,9 @@ int hvm_girq_dest_2_vcpu_id(struct domai
 #define hvm_hap_has_2mb(d) \
     (hvm_funcs.hap_capabilities & HVM_HAP_SUPERPAGE_2MB)
 
+#define hvm_pse1gb_supported(d) \
+    (cpu_has_page1gb && hvm_hap_has_1gb(d) && paging_mode_hap(d))
+
 #ifdef __x86_64__
 #define hvm_long_mode_enabled(v) \
     ((v)->arch.hvm_vcpu.guest_efer & EFER_LMA)
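The GUEST_L3_GFN_ALIGN check in the guest_walk.c hunk deserves a word: bit 0 of
the gfn corresponds to physical address bit 12, which PSE mappings repurpose as
the PAT bit, so it is masked out of the reserved-bit test with "& ~0x1". The
standalone sketch below (illustrative only, not part of the change) reproduces
the same arithmetic outside of Xen; the shift values are the architectural ones
for 4-level paging and the sample gfns are invented for the example.

#include <stdio.h>
#include <stdint.h>

/* Architectural shifts for 4-level paging: an l2e maps 2MB (2^21),
 * an l3e maps 1GB (2^30). */
#define GUEST_L2_PAGETABLE_SHIFT 21
#define GUEST_L3_PAGETABLE_SHIFT 30

/* Same expression as the patch: the number of 2MB slots covered by
 * one 1GB mapping, i.e. 1 << 9 == 512. */
#define GUEST_L3_GFN_ALIGN (1 << (GUEST_L3_PAGETABLE_SHIFT - \
                                  GUEST_L2_PAGETABLE_SHIFT))

int main(void)
{
    /* gfns are in 4k frames; gfn bit 0 is address bit 12 (the PSE
     * PAT bit), so it alone must not trip the check. */
    uint64_t gfns[] = {
        0x40000,   /* 1GB-aligned: passes */
        0x40001,   /* only the PAT bit set: still passes */
        0x40010,   /* low reserved bits set: _PAGE_INVALID_BITS */
    };

    for ( unsigned int i = 0; i < sizeof(gfns) / sizeof(gfns[0]); i++ )
    {
        int invalid = (gfns[i] & (GUEST_L3_GFN_ALIGN - 1) & ~0x1ULL) != 0;
        printf("gfn 0x%llx -> %s\n", (unsigned long long)gfns[i],
               invalid ? "rc |= _PAGE_INVALID_BITS" : "ok");
    }
    return 0;
}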
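A second aside on the restructured set_ad_bits() calls: the l2/l1 updates now
nest under !pse1G because a 1GB walk jumps to set_ad with l2mfn/l1mfn set to
INVALID_MFN, so only the entry that actually maps the address receives the
dirty bit on a write. A minimal sketch of that decision (the helper name is
made up for the example):

#include <stdio.h>

/* Pick the entry that receives the dirty bit on a write fault,
 * mirroring the nesting in guest_walk_tables(): l3e for a 1GB
 * superpage, l2e for 2MB (no guest l1e exists), l1e otherwise. */
static const char *dirty_target(int pse1G, int pse2M)
{
    if ( pse1G )
        return "l3e";
    if ( pse2M )
        return "l2e";
    return "l1e";
}

int main(void)
{
    printf("1GB superpage -> mark %s dirty\n", dirty_target(1, 0));
    printf("2MB superpage -> mark %s dirty\n", dirty_target(0, 1));
    printf("4k page       -> mark %s dirty\n", dirty_target(0, 0));
    return 0;
}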