diff -r 7926538a6332 xen/arch/x86/hvm/asid.c --- a/xen/arch/x86/hvm/asid.c Tue Jan 11 11:41:39 2011 +0000 +++ b/xen/arch/x86/hvm/asid.c Tue Jan 11 14:18:19 2011 +0100 @@ -52,11 +52,19 @@ struct hvm_asid_data { u32 next_asid; u32 max_asid; bool_t disabled; + bool_t flush_by_asid; }; + +/* ASID generations 0 and 1 are reserved for special purposes */ +#define FLUSH_TLB_BY_OWN_ASID 0 +#define FLUSH_TLB_BY_NEW_ASID 1 +#define FIRST_VALID_ASID_GEN 2 static DEFINE_PER_CPU(struct hvm_asid_data, hvm_asid_data); -void hvm_asid_init(int nasids) +/* Caller needs to provide two parameters: # of ASIDs and whether the CPU + * supports flush by ASID. */ +void hvm_asid_init(int nasids, bool_t flush_by_asid) { static s8 g_disabled = -1; struct hvm_asid_data *data = &this_cpu(hvm_asid_data); @@ -66,21 +74,37 @@ void hvm_asid_init(int nasids) if ( g_disabled != data->disabled ) { - printk("HVM: ASIDs %sabled.\n", data->disabled ? "dis" : "en"); + printk("HVM: ASIDs %sabled", data->disabled ? "dis" : "en"); + if ( data->disabled ) + printk("\n"); + else + printk(" (# of ASIDs = 0x%x, flush by ASID = %s)\n", nasids, + flush_by_asid ? "true" : "false"); + if ( g_disabled < 0 ) g_disabled = data->disabled; } - /* Zero indicates 'invalid generation', so we start the count at one. */ - data->core_asid_generation = 1; + data->flush_by_asid = flush_by_asid; + + /* 0 and 1 are reserved. They indicate 'invalid generation' for special + * purposes. So we start the count at 2. */ + data->core_asid_generation = FIRST_VALID_ASID_GEN; /* Zero indicates 'ASIDs disabled', so we start the count at one. */ data->next_asid = 1; } -void hvm_asid_flush_vcpu(struct vcpu *v) +/* There are two ways to flush a VCPU with ASID: (1) assign a new ASID or (2) + * flush by ASID if the CPU supports this feature. The second parameter of this + * function specifies whether the caller wants a new ASID. For instance, if the guest + * changes its CR3 or paging mode, this parameter is FALSE. For VCPU migration, + * this is TRUE. 
+ */ +void hvm_asid_flush_vcpu(struct vcpu *v, bool_t need_new_asid) { - v->arch.hvm_vcpu.asid_generation = 0; + v->arch.hvm_vcpu.asid_generation = need_new_asid ? FLUSH_TLB_BY_NEW_ASID : + FLUSH_TLB_BY_OWN_ASID; } void hvm_asid_flush_core(void) @@ -102,7 +126,7 @@ void hvm_asid_flush_core(void) data->disabled = 1; } -bool_t hvm_asid_handle_vmenter(void) +asid_action_t hvm_asid_handle_vmenter(void) { struct vcpu *curr = current; struct hvm_asid_data *data = &this_cpu(hvm_asid_data); @@ -114,7 +138,16 @@ bool_t hvm_asid_handle_vmenter(void) /* Test if VCPU has valid ASID. */ if ( curr->arch.hvm_vcpu.asid_generation == data->core_asid_generation ) - return 0; + return asid_flush_none; + + /* If the VCPU doesn't need a new ASID and the CPU supports flush by ASID, we + * skip the ASID increment and return directly from here. */ + if ( curr->arch.hvm_vcpu.asid_generation == FLUSH_TLB_BY_OWN_ASID && + data->flush_by_asid ) + { + curr->arch.hvm_vcpu.asid_generation = data->core_asid_generation; + return asid_flush_itself; + } /* If there are no free ASIDs, need to go to a new generation */ if ( unlikely(data->next_asid > data->max_asid) ) @@ -131,13 +164,14 @@ bool_t hvm_asid_handle_vmenter(void) /* * When we assign ASID 1, flush all TLB entries as we are starting a new - * generation, and all old ASID allocations are now stale. + * generation, and all old ASID allocations are now stale. Otherwise, + * nothing needs to be flushed. */ - return (curr->arch.hvm_vcpu.asid == 1); + return (curr->arch.hvm_vcpu.asid == 1) ? 
asid_flush_all : asid_flush_none; disabled: curr->arch.hvm_vcpu.asid = 0; - return 0; + return asid_flush_none; } /* diff -r 7926538a6332 xen/arch/x86/hvm/hvm.c --- a/xen/arch/x86/hvm/hvm.c Tue Jan 11 11:41:39 2011 +0000 +++ b/xen/arch/x86/hvm/hvm.c Tue Jan 11 14:18:19 2011 +0100 @@ -935,7 +935,7 @@ int hvm_vcpu_initialise(struct vcpu *v) { int rc; - hvm_asid_flush_vcpu(v); + hvm_asid_flush_vcpu(v, 1); if ( (rc = vlapic_init(v)) != 0 ) goto fail1; diff -r 7926538a6332 xen/arch/x86/hvm/svm/asid.c --- a/xen/arch/x86/hvm/svm/asid.c Tue Jan 11 11:41:39 2011 +0000 +++ b/xen/arch/x86/hvm/svm/asid.c Tue Jan 11 14:18:19 2011 +0100 @@ -31,7 +31,7 @@ void svm_asid_init(struct cpuinfo_x86 *c if ( !cpu_has_amd_erratum(c, AMD_ERRATUM_170) ) nasids = cpuid_ebx(0x8000000A); - hvm_asid_init(nasids); + hvm_asid_init(nasids, 0); } /* @@ -42,7 +42,7 @@ asmlinkage void svm_asid_handle_vmrun(vo { struct vcpu *curr = current; struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb; - bool_t need_flush = hvm_asid_handle_vmenter(); + bool_t need_flush = !!hvm_asid_handle_vmenter(); /* ASID 0 indicates that ASIDs are disabled. */ if ( curr->arch.hvm_vcpu.asid == 0 ) diff -r 7926538a6332 xen/arch/x86/hvm/svm/svm.c --- a/xen/arch/x86/hvm/svm/svm.c Tue Jan 11 11:41:39 2011 +0000 +++ b/xen/arch/x86/hvm/svm/svm.c Tue Jan 11 14:18:19 2011 +0100 @@ -409,7 +409,7 @@ static void svm_update_guest_cr(struct v break; case 3: vmcb_set_cr3(vmcb, v->arch.hvm_vcpu.hw_cr[3]); - hvm_asid_flush_vcpu(v); + hvm_asid_flush_vcpu(v, 0); break; case 4: value = HVM_CR4_HOST_MASK; @@ -707,7 +707,7 @@ static void svm_do_resume(struct vcpu *v hvm_migrate_timers(v); /* Migrating to another ASID domain. Request a new ASID. 
*/ - hvm_asid_flush_vcpu(v); + hvm_asid_flush_vcpu(v, 1); } /* Reflect the vlapic's TPR in the hardware vtpr */ diff -r 7926538a6332 xen/arch/x86/hvm/vmx/vmcs.c --- a/xen/arch/x86/hvm/vmx/vmcs.c Tue Jan 11 11:41:39 2011 +0000 +++ b/xen/arch/x86/hvm/vmx/vmcs.c Tue Jan 11 14:18:19 2011 +0100 @@ -522,7 +522,7 @@ int vmx_cpu_up(void) BUG(); } - hvm_asid_init(cpu_has_vmx_vpid ? (1u << VMCS_VPID_WIDTH) : 0); + hvm_asid_init(cpu_has_vmx_vpid ? (1u << VMCS_VPID_WIDTH) : 0, 0); if ( cpu_has_vmx_ept ) ept_sync_all(); @@ -1079,7 +1079,7 @@ void vmx_do_resume(struct vcpu *v) hvm_migrate_timers(v); hvm_migrate_pirqs(v); vmx_set_host_env(v); - hvm_asid_flush_vcpu(v); + hvm_asid_flush_vcpu(v, 1); } debug_state = v->domain->debugger_attached diff -r 7926538a6332 xen/arch/x86/hvm/vmx/vmx.c --- a/xen/arch/x86/hvm/vmx/vmx.c Tue Jan 11 11:41:39 2011 +0000 +++ b/xen/arch/x86/hvm/vmx/vmx.c Tue Jan 11 14:18:19 2011 +0100 @@ -1150,7 +1150,7 @@ static void vmx_update_guest_cr(struct v } __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr[3]); - hvm_asid_flush_vcpu(v); + hvm_asid_flush_vcpu(v, 0); break; case 4: v->arch.hvm_vcpu.hw_cr[4] = HVM_CR4_HOST_MASK; @@ -1335,7 +1335,7 @@ static void vmx_set_uc_mode(struct vcpu if ( paging_mode_hap(v->domain) ) ept_change_entry_emt_with_range( v->domain, 0, p2m_get_hostp2m(v->domain)->max_mapped_pfn); - hvm_asid_flush_vcpu(v); + hvm_asid_flush_vcpu(v, 0); } static void vmx_set_info_guest(struct vcpu *v) @@ -2665,7 +2665,7 @@ asmlinkage void vmx_vmenter_helper(void) goto out; old_asid = curr->arch.hvm_vcpu.asid; - need_flush = hvm_asid_handle_vmenter(); + need_flush = !!hvm_asid_handle_vmenter(); /* convert to boolean */ new_asid = curr->arch.hvm_vcpu.asid; if ( unlikely(new_asid != old_asid) ) diff -r 7926538a6332 xen/include/asm-x86/hvm/asid.h --- a/xen/include/asm-x86/hvm/asid.h Tue Jan 11 11:41:39 2011 +0000 +++ b/xen/include/asm-x86/hvm/asid.h Tue Jan 11 14:18:19 2011 +0100 @@ -24,18 +24,25 @@ struct vcpu; +typedef enum { + asid_flush_none = 0x0, /* 
don't need to flush any TLBs */ + asid_flush_all = 0x1, /* flush TLBs with all ASIDs */ + asid_flush_itself = 0x3, /* only flush the ASID's own TLBs. Note 3 is used + * here (from AMD's flush by ASID feature). */ +} asid_action_t; + /* Initialise ASID management for the current physical CPU. */ -void hvm_asid_init(int nasids); +void hvm_asid_init(int nasids, bool_t flush_by_asid); -/* Invalidate a VCPU's current ASID allocation: forces re-allocation. */ -void hvm_asid_flush_vcpu(struct vcpu *v); +/* Invalidate a VCPU's current ASID. */ +void hvm_asid_flush_vcpu(struct vcpu *v, bool_t need_new_asid); /* Flush all ASIDs on this processor core. */ void hvm_asid_flush_core(void); -/* Called before entry to guest context. Checks ASID allocation, returns a - * boolean indicating whether all ASIDs must be flushed. */ -bool_t hvm_asid_handle_vmenter(void); +/* Called before entry to guest context. Checks ASID allocation; returns an + * asid_action_t indicating which TLB flush, if any, is required. */ +asid_action_t hvm_asid_handle_vmenter(void); #endif /* __ASM_X86_HVM_ASID_H__ */ diff -r 7926538a6332 xen/include/asm-x86/hvm/svm/asid.h --- a/xen/include/asm-x86/hvm/svm/asid.h Tue Jan 11 11:41:39 2011 +0000 +++ b/xen/include/asm-x86/hvm/svm/asid.h Tue Jan 11 14:18:19 2011 +0100 @@ -41,7 +41,7 @@ static inline void svm_asid_g_invlpg(str #endif /* Safe fallback. Take a new ASID. */ - hvm_asid_flush_vcpu(v); + hvm_asid_flush_vcpu(v, 0); } #endif /* __ASM_X86_HVM_SVM_ASID_H__ */