x86: don't use destroy_xen_mappings() for vunmap()

Its attempt to tear down intermediate page table levels may race with
map_pages_to_xen() establishing them, and now that
map_domain_page_global() is backed by vmap() this teardown is also
wasteful (as it's very likely to need the same address space populated
again within foreseeable time).

As the race between vmap() and vunmap(), according to the latest stage
tester logs, doesn't appear to be the only one still left, the patch
also adds logging for vmap() and vunmap() uses (there shouldn't be too
many of them, so logs shouldn't get flooded). These are supposed to get
removed (and are made to stand out clearly) as soon as we're certain
that there's no issue left.

Signed-off-by: Jan Beulich

--- a/xen/common/vmap.c
+++ b/xen/common/vmap.c
@@ -177,6 +177,7 @@ void *__vmap(const unsigned long *mfn, u
     void *va = vm_alloc(nr * granularity, align);
     unsigned long cur = (unsigned long)va;
 
+printk("vmap(%p:%#x)\n", va, nr * granularity);//temp
     for ( ; va && nr--; ++mfn, cur += PAGE_SIZE * granularity )
     {
         if ( map_pages_to_xen(cur, *mfn, granularity, flags) )
@@ -196,9 +197,14 @@ void *vmap(const unsigned long *mfn, uns
 
 void vunmap(const void *va)
 {
+#ifndef _PAGE_NONE
     unsigned long addr = (unsigned long)va;
 
     destroy_xen_mappings(addr, addr + PAGE_SIZE * vm_size(va));
+#else /* Avoid tearing down intermediate page tables. */
+printk("vunmap(%p:%#x)\n", va, vm_size(va));//temp
+    map_pages_to_xen((unsigned long)va, 0, vm_size(va), _PAGE_NONE);
+#endif
     vm_free(va);
 }
 #endif
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -288,6 +288,7 @@ extern l1_pgentry_t l1_identmap[L1_PAGET
 void paging_init(void);
 #endif /* !defined(__ASSEMBLY__) */
 
+#define _PAGE_NONE     _AC(0x000,U)
 #define _PAGE_PRESENT  _AC(0x001,U)
 #define _PAGE_RW       _AC(0x002,U)
 #define _PAGE_USER     _AC(0x004,U)
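
For reference, here is a minimal standalone sketch of the distinction the
patch relies on. This is plain C, not Xen code: the toy two-level table,
PTES_PER_TABLE, and the helpers teardown() / zap_leaves() are all invented
for illustration. The point it shows is that zapping leaf entries (the
_PAGE_NONE approach) leaves the intermediate table reachable for a
concurrent mapper, whereas freeing it (the destroy_xen_mappings() style)
opens exactly the re-allocate/re-link window the race above falls into.

#include <stdio.h>
#include <stdlib.h>

#define PTES_PER_TABLE 4

struct l1_table {                  /* leaf level: PTEs */
    unsigned long pte[PTES_PER_TABLE];
};

struct l2_table {                  /* intermediate level: pointers to L1s */
    struct l1_table *l1[PTES_PER_TABLE];
};

/* destroy_xen_mappings()-style teardown: frees the intermediate L1, so a
 * racing mapper must re-allocate and re-link it. */
static void teardown(struct l2_table *l2, unsigned int slot)
{
    free(l2->l1[slot]);
    l2->l1[slot] = NULL;
}

/* _PAGE_NONE-style unmap: clear the leaf PTEs (non-present), but keep the
 * L1 table in place for future re-use. */
static void zap_leaves(struct l2_table *l2, unsigned int slot)
{
    for (unsigned int i = 0; i < PTES_PER_TABLE; i++)
        l2->l1[slot]->pte[i] = 0;
}

int main(void)
{
    struct l2_table l2 = { .l1 = { calloc(1, sizeof(struct l1_table)) } };

    l2.l1[0]->pte[0] = 0x1000 | 1; /* pretend mapping, present bit set */

    zap_leaves(&l2, 0);
    printf("after zap: l1 kept=%d, pte[0]=%#lx\n",
           l2.l1[0] != NULL, l2.l1[0]->pte[0]);

    teardown(&l2, 0);
    printf("after teardown: l1 kept=%d\n", l2.l1[0] != NULL);
    return 0;
}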