[Xen-devel] [PATCH 1/2] x86/hvm: improve performance of HVMOP_flush_tlbs
There's no need to call paging_update_cr3 unless CR3 trapping is enabled,
and that's only the case when running with shadow paging or when CR3 write
events have been requested for introspection purposes. In all other cases
there's no need to pause all the vCPUs of the domain in order to perform
the flush.

Check whether CR3 trapping is currently in use in order to decide whether
the vCPUs should be paused; otherwise just perform the flush.

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c | 55 ++++++++++++++++++++++++++++--------------
 1 file changed, 37 insertions(+), 18 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 4dfaf35566..7dcc16afc6 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3985,25 +3985,36 @@ bool hvm_flush_vcpu_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
     static DEFINE_PER_CPU(cpumask_t, flush_cpumask);
     cpumask_t *mask = &this_cpu(flush_cpumask);
     struct domain *d = current->domain;
+    /*
+     * CR3 trapping is only enabled when running with shadow paging or when
+     * requested for introspection purposes, otherwise there's no need to call
+     * paging_update_cr3 and hence pause all vCPUs.
+     */
+    bool trap_cr3 = !paging_mode_hap(d) ||
+                    (d->arch.monitor.write_ctrlreg_enabled &
+                     monitor_ctrlreg_bitmask(VM_EVENT_X86_CR3));
     struct vcpu *v;
 
-    /* Avoid deadlock if more than one vcpu tries this at the same time. */
-    if ( !spin_trylock(&d->hypercall_deadlock_mutex) )
-        return false;
+    if ( trap_cr3 )
+    {
+        /* Avoid deadlock if more than one vcpu tries this at the same time. */
+        if ( !spin_trylock(&d->hypercall_deadlock_mutex) )
+            return false;
 
-    /* Pause all other vcpus. */
-    for_each_vcpu ( d, v )
-        if ( v != current && flush_vcpu(ctxt, v) )
-            vcpu_pause_nosync(v);
+        /* Pause all other vcpus. */
+        for_each_vcpu ( d, v )
+            if ( v != current && flush_vcpu(ctxt, v) )
+                vcpu_pause_nosync(v);
 
-    /* Now that all VCPUs are signalled to deschedule, we wait... */
-    for_each_vcpu ( d, v )
-        if ( v != current && flush_vcpu(ctxt, v) )
-            while ( !vcpu_runnable(v) && v->is_running )
-                cpu_relax();
+        /* Now that all VCPUs are signalled to deschedule, we wait... */
+        for_each_vcpu ( d, v )
+            if ( v != current && flush_vcpu(ctxt, v) )
+                while ( !vcpu_runnable(v) && v->is_running )
+                    cpu_relax();
 
-    /* All other vcpus are paused, safe to unlock now. */
-    spin_unlock(&d->hypercall_deadlock_mutex);
+        /* All other vcpus are paused, safe to unlock now. */
+        spin_unlock(&d->hypercall_deadlock_mutex);
+    }
 
     cpumask_clear(mask);
 
@@ -4015,8 +4026,15 @@ bool hvm_flush_vcpu_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
         if ( !flush_vcpu(ctxt, v) )
             continue;
 
-        paging_update_cr3(v, false);
+        if ( trap_cr3 )
+            paging_update_cr3(v, false);
 
+        /*
+         * It's correct to do this flush without pausing the vCPUs: any vCPU
+         * context switch will already flush the TLB, and the worst that could
+         * happen is that Xen ends up performing flushes on pCPUs that are no
+         * longer running the target vCPUs.
+         */
         cpu = read_atomic(&v->dirty_cpu);
         if ( is_vcpu_dirty_cpu(cpu) )
            __cpumask_set_cpu(cpu, mask);
@@ -4026,9 +4044,10 @@ bool hvm_flush_vcpu_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
     flush_tlb_mask(mask);
 
     /* Done. */
-    for_each_vcpu ( d, v )
-        if ( v != current && flush_vcpu(ctxt, v) )
-            vcpu_unpause(v);
+    if ( trap_cr3 )
+        for_each_vcpu ( d, v )
+            if ( v != current && flush_vcpu(ctxt, v) )
+                vcpu_unpause(v);
 
     return true;
 }
--
2.24.1
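
To make the gating condition easier to follow outside of the Xen tree, here
is a minimal standalone sketch of the decision the patch introduces. The
struct, its fields and MONITOR_CR3_BIT below are simplified stand-ins
invented for illustration only; in Xen the real checks are paging_mode_hap(d),
d->arch.monitor.write_ctrlreg_enabled and
monitor_ctrlreg_bitmask(VM_EVENT_X86_CR3).

/* Standalone sketch; names below are illustrative stand-ins, not Xen code. */
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for monitor_ctrlreg_bitmask(VM_EVENT_X86_CR3). */
#define MONITOR_CR3_BIT (1u << 3)

struct fake_domain {
    bool hap_enabled;                   /* HAP (EPT/NPT) vs. shadow paging */
    unsigned int write_ctrlreg_enabled; /* monitored control-register bitmap */
};

/*
 * CR3 trapping is in effect when the domain uses shadow paging or when an
 * introspection agent monitors CR3 writes; only then must every vCPU be
 * paused so that paging_update_cr3() can run safely.
 */
static bool needs_cr3_trap(const struct fake_domain *d)
{
    return !d->hap_enabled ||
           (d->write_ctrlreg_enabled & MONITOR_CR3_BIT);
}

int main(void)
{
    struct fake_domain hap = { true, 0 };
    struct fake_domain shadow = { false, 0 };
    struct fake_domain monitored = { true, MONITOR_CR3_BIT };

    printf("hap:       pause vCPUs? %d\n", needs_cr3_trap(&hap));       /* 0 */
    printf("shadow:    pause vCPUs? %d\n", needs_cr3_trap(&shadow));    /* 1 */
    printf("monitored: pause vCPUs? %d\n", needs_cr3_trap(&monitored)); /* 1 */
    return 0;
}

Only the plain HAP domain takes the new fast path: for it the hypercall skips
the pause/unpause loops and paging_update_cr3() entirely and goes straight to
building the dirty-pCPU mask for flush_tlb_mask().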