[PATCH] x86: idle domains don't have a domain-page mapcache
First and foremost correct a comment implying the opposite. Then, to make
things more clear PV-vs-HVM-wise, move the PV check earlier in the
function, making it unnecessary for both callers to perform the check
individually. Finally return NULL from the function when using the idle
domain's page tables, allowing a dcache->inuse check to also become an
assertion.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/xen/arch/x86/domain_page.c
+++ b/xen/arch/x86/domain_page.c
@@ -28,8 +28,11 @@ static inline struct vcpu *mapcache_curr
     /*
      * When current isn't properly set up yet, this is equivalent to
      * running in an idle vCPU (callers must check for NULL).
+     *
+     * Non-PV domains don't have any mapcache. For idle domains (which
+     * appear to be PV but also have no mapcache) see below.
      */
-    if ( !v )
+    if ( !v || !is_pv_vcpu(v) )
         return NULL;

     /*
@@ -41,19 +44,22 @@ static inline struct vcpu *mapcache_curr
         return NULL;

     /*
-     * If guest_table is NULL, and we are running a paravirtualised guest,
-     * then it means we are running on the idle domain's page table and must
-     * therefore use its mapcache.
+     * If guest_table is NULL for a PV domain (which includes IDLE), then it
+     * means we are running on the idle domain's page tables and therefore
+     * must not use any mapcache.
      */
-    if ( unlikely(pagetable_is_null(v->arch.guest_table)) && is_pv_vcpu(v) )
+    if ( unlikely(pagetable_is_null(v->arch.guest_table)) )
     {
         /* If we really are idling, perform lazy context switch now. */
-        if ( (v = idle_vcpu[smp_processor_id()]) == current )
+        if ( idle_vcpu[smp_processor_id()] == current )
             sync_local_execstate();
         /* We must now be running on the idle page table. */
         ASSERT(cr3_pa(read_cr3()) == __pa(idle_pg_table));
+        return NULL;
     }

+    ASSERT(!is_idle_vcpu(v));
+
     return v;
 }

@@ -82,13 +88,12 @@ void *map_domain_page(mfn_t mfn)
 #endif

     v = mapcache_current_vcpu();
-    if ( !v || !is_pv_vcpu(v) )
+    if ( !v )
         return mfn_to_virt(mfn_x(mfn));

     dcache = &v->domain->arch.pv.mapcache;
     vcache = &v->arch.pv.mapcache;
-    if ( !dcache->inuse )
-        return mfn_to_virt(mfn_x(mfn));
+    ASSERT(dcache->inuse);

     perfc_incr(map_domain_page_count);

@@ -187,7 +192,7 @@ void unmap_domain_page(const void *ptr)
     ASSERT(va >= MAPCACHE_VIRT_START && va < MAPCACHE_VIRT_END);

     v = mapcache_current_vcpu();
-    ASSERT(v && is_pv_vcpu(v));
+    ASSERT(v);

     dcache = &v->domain->arch.pv.mapcache;
     ASSERT(dcache->inuse);
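
For readers skimming the archive, below is a rough sketch of how the central
helper reads once the patch is applied, pieced together from the hunks above.
The full function name (the hunk headers truncate it to "mapcache_curr"), the
declaration of v, and the early-exit check sitting between the first two hunks
are not shown in the diff and are only assumed here for illustration.

static inline struct vcpu *mapcache_current_vcpu(void) /* name assumed from truncated hunk header */
{
    struct vcpu *v = current;   /* assumed declaration; not visible in the diff context */

    /* Neither a not-yet-set-up "current" nor a non-PV vCPU has a mapcache. */
    if ( !v || !is_pv_vcpu(v) )
        return NULL;

    /* ... unrelated early-exit check elided (falls between the first two hunks) ... */

    /*
     * A NULL guest_table on a PV (including idle) vCPU means we are running
     * on the idle domain's page tables, which have no mapcache either.
     */
    if ( unlikely(pagetable_is_null(v->arch.guest_table)) )
    {
        /* If we really are idling, perform lazy context switch now. */
        if ( idle_vcpu[smp_processor_id()] == current )
            sync_local_execstate();
        /* We must now be running on the idle page table. */
        ASSERT(cr3_pa(read_cr3()) == __pa(idle_pg_table));
        return NULL;
    }

    ASSERT(!is_idle_vcpu(v));

    return v;
}

With NULL now covering every no-mapcache case, map_domain_page() only needs
the single !v check before falling back to mfn_to_virt(), the former
dcache->inuse bail-out can indeed become an assertion, and unmap_domain_page()
can likewise drop the is_pv_vcpu() part of its assertion.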