Re: [Xen-devel] [PATCH v2 1/3] replace vCPU's dirty CPU mask by numeric ID
Hi Jan,

On 23/01/18 10:12, Jan Beulich wrote:

At most one bit can be set in the masks, so especially on larger systems
it's quite a bit of unnecessary memory and processing overhead to track
the information as a mask. Store the numeric ID of the respective CPU
instead, or VCPU_CPU_CLEAN if no dirty state exists.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

Reviewed-by: Julien Grall <julien.grall@xxxxxxxxxx>

Cheers,

---
ARM adjustments compile tested only.
---
v2: Introduce VCPU_CPU_CLEAN and vcpu_cpu_dirty(). Re-word comments.
    Re-base.
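The shape of the change, distilled: a per-vCPU cpumask in which at most one
bit could ever be set becomes a plain integer holding the dirty CPU's ID,
with ~0u serving as the "clean" sentinel. A minimal compilable sketch of
that representation, using a hypothetical toy_vcpu type rather than Xen's
real struct vcpu:

/* Sketch only: toy_vcpu stands in for Xen's struct vcpu. */
#include <stdbool.h>
#include <stdio.h>

#define VCPU_CPU_CLEAN (~0u)   /* sentinel: no CPU holds this vCPU's state */

struct toy_vcpu {
    unsigned int dirty_cpu;    /* replaces the old cpumask_var_t vcpu_dirty_cpumask */
};

/* Mirrors the vcpu_cpu_dirty() helper the patch introduces. */
static bool toy_vcpu_cpu_dirty(const struct toy_vcpu *v)
{
    return v->dirty_cpu != VCPU_CPU_CLEAN;
}

int main(void)
{
    struct toy_vcpu v = { .dirty_cpu = VCPU_CPU_CLEAN };  /* as in alloc_vcpu() */

    printf("dirty: %d\n", toy_vcpu_cpu_dirty(&v));        /* 0: clean */

    v.dirty_cpu = 3;               /* context switched in on CPU 3 */
    printf("dirty: %d on cpu %u\n", toy_vcpu_cpu_dirty(&v), v.dirty_cpu);

    v.dirty_cpu = VCPU_CPU_CLEAN;  /* state synced back out */
    printf("dirty: %d\n", toy_vcpu_cpu_dirty(&v));

    return 0;
}

A single integer comparison thus replaces the cpumask_test_cpu()/
cpumask_empty() pair, and since ~0u can never be a valid CPU number the
per-vCPU mask allocation disappears entirely.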
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -330,7 +330,7 @@ void context_switch(struct vcpu *prev, s
 {
     ASSERT(local_irq_is_enabled());
     ASSERT(prev != next);
-    ASSERT(cpumask_empty(next->vcpu_dirty_cpumask));
+    ASSERT(!vcpu_cpu_dirty(next));
 
     if ( prev != next )
         update_runstate_area(prev);
@@ -471,7 +471,7 @@ void startup_cpu_idle_loop(void)
     ASSERT(is_idle_vcpu(v));
     /* TODO
        cpumask_set_cpu(v->processor, v->domain->domain_dirty_cpumask);
-       cpumask_set_cpu(v->processor, v->vcpu_dirty_cpumask);
+       v->dirty_cpu = v->processor;
     */
 
     reset_stack_and_jump(idle_loop);
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -146,7 +146,7 @@ void startup_cpu_idle_loop(void)
 
     ASSERT(is_idle_vcpu(v));
     cpumask_set_cpu(v->processor, v->domain->domain_dirty_cpumask);
-    cpumask_set_cpu(v->processor, v->vcpu_dirty_cpumask);
+    v->dirty_cpu = v->processor;
 
     reset_stack_and_jump(idle_loop);
 }
@@ -1602,7 +1602,7 @@ static void __context_switch(void)
     struct desc_ptr       gdt_desc;
 
     ASSERT(p != n);
-    ASSERT(cpumask_empty(n->vcpu_dirty_cpumask));
+    ASSERT(!vcpu_cpu_dirty(n));
 
     if ( !is_idle_domain(pd) )
     {
@@ -1618,7 +1618,7 @@ static void __context_switch(void)
      */
     if ( pd != nd )
         cpumask_set_cpu(cpu, nd->domain_dirty_cpumask);
-    cpumask_set_cpu(cpu, n->vcpu_dirty_cpumask);
+    n->dirty_cpu = cpu;
 
     if ( !is_idle_domain(nd) )
     {
@@ -1674,7 +1674,7 @@ static void __context_switch(void)
 
     if ( pd != nd )
         cpumask_clear_cpu(cpu, pd->domain_dirty_cpumask);
-    cpumask_clear_cpu(cpu, p->vcpu_dirty_cpumask);
+    p->dirty_cpu = VCPU_CPU_CLEAN;
 
     per_cpu(curr_vcpu, cpu) = n;
 }
@@ -1684,20 +1684,16 @@ void context_switch(struct vcpu *prev, s
 {
     unsigned int cpu = smp_processor_id();
     const struct domain *prevd = prev->domain, *nextd = next->domain;
-    cpumask_t dirty_mask;
+    unsigned int dirty_cpu = next->dirty_cpu;
 
     ASSERT(local_irq_is_enabled());
 
     get_cpu_info()->xen_cr3 = 0;
 
-    cpumask_copy(&dirty_mask, next->vcpu_dirty_cpumask);
-    /* Allow at most one CPU at a time to be dirty. */
-    ASSERT(cpumask_weight(&dirty_mask) <= 1);
-    if ( unlikely(!cpumask_test_cpu(cpu, &dirty_mask) &&
-                  !cpumask_empty(&dirty_mask)) )
+    if ( unlikely(dirty_cpu != cpu) && dirty_cpu != VCPU_CPU_CLEAN )
     {
-        /* Other cpus call __sync_local_execstate from flush ipi handler. */
-        flush_mask(&dirty_mask, FLUSH_TLB | FLUSH_VCPU_STATE);
+        /* Remote CPU calls __sync_local_execstate() from flush IPI handler. */
+        flush_mask(cpumask_of(dirty_cpu), FLUSH_TLB | FLUSH_VCPU_STATE);
     }
 
     if ( prev != next )
@@ -1802,11 +1798,14 @@ void sync_local_execstate(void)
 
 void sync_vcpu_execstate(struct vcpu *v)
 {
-    if ( cpumask_test_cpu(smp_processor_id(), v->vcpu_dirty_cpumask) )
+    if ( v->dirty_cpu == smp_processor_id() )
         sync_local_execstate();
 
-    /* Other cpus call __sync_local_execstate from flush ipi handler. */
-    flush_mask(v->vcpu_dirty_cpumask, FLUSH_TLB | FLUSH_VCPU_STATE);
+    if ( vcpu_cpu_dirty(v) )
+    {
+        /* Remote CPU calls __sync_local_execstate() from flush IPI handler. */
+        flush_mask(cpumask_of(v->dirty_cpu), FLUSH_TLB | FLUSH_VCPU_STATE);
+    }
 }
 
 static int relinquish_memory(
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1212,7 +1212,7 @@ void put_page_from_l1e(l1_pgentry_t l1e,
         for_each_vcpu ( pg_owner, v )
         {
             if ( pv_destroy_ldt(v) )
-                flush_tlb_mask(v->vcpu_dirty_cpumask);
+                flush_tlb_mask(cpumask_of(v->dirty_cpu));
         }
     }
     put_page(page);
@@ -2937,8 +2937,8 @@ static inline int vcpumask_to_pcpumask(
             vcpu_id += vcpu_bias;
             if ( (vcpu_id >= d->max_vcpus) )
                 return 0;
-            if ( ((v = d->vcpu[vcpu_id]) != NULL) )
-                cpumask_or(pmask, pmask, v->vcpu_dirty_cpumask);
+            if ( ((v = d->vcpu[vcpu_id]) != NULL) && vcpu_cpu_dirty(v) )
+                __cpumask_set_cpu(v->dirty_cpu, pmask);
         }
     }
 }
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -135,6 +135,7 @@ struct vcpu *alloc_vcpu(
 
     v->domain = d;
     v->vcpu_id = vcpu_id;
+    v->dirty_cpu = VCPU_CPU_CLEAN;
 
     spin_lock_init(&v->virq_lock);
@@ -145,8 +146,7 @@ struct vcpu *alloc_vcpu(
     if ( !zalloc_cpumask_var(&v->cpu_hard_affinity) ||
          !zalloc_cpumask_var(&v->cpu_hard_affinity_tmp) ||
          !zalloc_cpumask_var(&v->cpu_hard_affinity_saved) ||
-         !zalloc_cpumask_var(&v->cpu_soft_affinity) ||
-         !zalloc_cpumask_var(&v->vcpu_dirty_cpumask) )
+         !zalloc_cpumask_var(&v->cpu_soft_affinity) )
         goto fail_free;
 
     if ( is_idle_domain(d) )
@@ -175,7 +175,6 @@ struct vcpu *alloc_vcpu(
     free_cpumask_var(v->cpu_hard_affinity_tmp);
     free_cpumask_var(v->cpu_hard_affinity_saved);
     free_cpumask_var(v->cpu_soft_affinity);
-    free_cpumask_var(v->vcpu_dirty_cpumask);
     free_vcpu_struct(v);
     return NULL;
 }
@@ -863,7 +862,6 @@ static void complete_domain_destroy(stru
         free_cpumask_var(v->cpu_hard_affinity_tmp);
         free_cpumask_var(v->cpu_hard_affinity_saved);
         free_cpumask_var(v->cpu_soft_affinity);
-        free_cpumask_var(v->vcpu_dirty_cpumask);
         free_vcpu_struct(v);
     }
--- a/xen/common/keyhandler.c
+++ b/xen/common/keyhandler.c
@@ -340,8 +340,9 @@ static void dump_domains(unsigned char k
                    v->is_running ? 'T':'F', v->poll_evtchn,
                    vcpu_info(v, evtchn_upcall_pending),
                    !vcpu_event_delivery_is_enabled(v));
-            cpuset_print(tmpstr, sizeof(tmpstr), v->vcpu_dirty_cpumask);
-            printk("dirty_cpus=%s\n", tmpstr);
+            if ( vcpu_cpu_dirty(v) )
+                printk("dirty_cpu=%u", v->dirty_cpu);
+            printk("\n");
             cpuset_print(tmpstr, sizeof(tmpstr), v->cpu_hard_affinity);
             printk(" cpu_hard_affinity=%s ", tmpstr);
             cpuset_print(tmpstr, sizeof(tmpstr), v->cpu_soft_affinity);
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -210,6 +210,9 @@ struct vcpu
     bool             hcall_compat;
 #endif
 
+    /* The CPU, if any, which is holding onto this VCPU's state. */
+#define VCPU_CPU_CLEAN (~0u)
+    unsigned int     dirty_cpu;
 
     /*
      * > 0: a single port is being polled;
@@ -248,9 +251,6 @@ struct vcpu
     /* Bitmask of CPUs on which this VCPU prefers to run. */
     cpumask_var_t    cpu_soft_affinity;
 
-    /* Bitmask of CPUs which are holding onto this VCPU's state. */
-    cpumask_var_t    vcpu_dirty_cpumask;
-
    /* Tasklet for continue_hypercall_on_cpu(). */
     struct tasklet   continue_hypercall_tasklet;
@@ -803,6 +803,11 @@ static inline int vcpu_runnable(struct v
             atomic_read(&v->domain->pause_count));
 }
 
+static inline bool vcpu_cpu_dirty(const struct vcpu *v)
+{
+    return v->dirty_cpu != VCPU_CPU_CLEAN;
+}
+
 void vcpu_block(void);
 void vcpu_unblock(struct vcpu *v);
 void vcpu_pause(struct vcpu *v);

-- 
Julien Grall

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel