[Xen-devel] Ping: [PATCH] fix off-by-one mistakes in vm_alloc()
Ping? (This is expected to fix recurring testsuite failures.)
>>> On 18.07.13 at 11:59, "Jan Beulich" <JBeulich@xxxxxxxx> wrote:
> Also add another pair of assertions to catch any further cases of
> incorrect accounting, and remove the temporary debugging messages
> which commit 68caac7f ("x86: don't use destroy_xen_mappings() for
> vunmap()") added.
>
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
>
> --- a/xen/common/vmap.c
> +++ b/xen/common/vmap.c
> @@ -57,8 +57,8 @@ void *vm_alloc(unsigned int nr, unsigned
>      {
>          struct page_info *pg;
>
> -        ASSERT(!test_bit(vm_low, vm_bitmap));
> -        for ( start = vm_low; ; )
> +        ASSERT(vm_low == vm_top || !test_bit(vm_low, vm_bitmap));
> +        for ( start = vm_low; start < vm_top; )
>          {
>              bit = find_next_bit(vm_bitmap, vm_top, start + 1);
>              if ( bit > vm_top )
> @@ -68,12 +68,18 @@ void *vm_alloc(unsigned int nr, unsigned
>               * corresponding page a guard one.
>               */
>              start = (start + align) & ~(align - 1);
> -            if ( start + nr <= bit )
> -                break;
> -            start = bit < vm_top ?
> -                    find_next_zero_bit(vm_bitmap, vm_top, bit + 1) : bit;
> -            if ( start >= vm_top )
> -                break;
> +            if ( bit < vm_top )
> +            {
> +                if ( start + nr < bit )
> +                    break;
> +                start = find_next_zero_bit(vm_bitmap, vm_top, bit + 1);
> +            }
> +            else
> +            {
> +                if ( start + nr <= bit )
> +                    break;
> +                start = bit;
> +            }
>          }
>
>          if ( start < vm_top )
> @@ -115,6 +121,10 @@ void *vm_alloc(unsigned int nr, unsigned
>
>      for ( bit = start; bit < start + nr; ++bit )
>          __set_bit(bit, vm_bitmap);
> +    if ( bit < vm_top )
> +        ASSERT(!test_bit(bit, vm_bitmap));
> +    else
> +        ASSERT(bit == vm_top);
>      if ( start <= vm_low + 2 )
>          vm_low = bit;
>      spin_unlock(&vm_lock);
> @@ -177,7 +187,6 @@ void *__vmap(const unsigned long *mfn, u
>      void *va = vm_alloc(nr * granularity, align);
>      unsigned long cur = (unsigned long)va;
>
> -printk("vmap(%p:%#x)\n", va, nr * granularity);//temp
>      for ( ; va && nr--; ++mfn, cur += PAGE_SIZE * granularity )
>      {
>          if ( map_pages_to_xen(cur, *mfn, granularity, flags) )
> @@ -202,7 +211,6 @@ void vunmap(const void *va)
>
>      destroy_xen_mappings(addr, addr + PAGE_SIZE * vm_size(va));
>  #else /* Avoid tearing down intermediate page tables. */
> -printk("vunmap(%p:%#x)\n", va, vm_size(va));//temp
>      map_pages_to_xen((unsigned long)va, 0, vm_size(va), _PAGE_NONE);
>  #endif
>      vm_free(va);
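
For anyone eyeballing the boundary changes, below is a minimal, self-contained
user-space sketch of the fixed scan (an illustration only, not the Xen code:
VM_TOP, bitmap, next_set(), next_clear() and alloc() are invented stand-ins,
find_next_bit()'s capping at vm_top is folded into the helper, and the outer
loop that grows the area on failure is omitted). It shows why the check has to
be strict ("start + nr < bit") when the next set bit lies below vm_top, while
"<=" remains correct when the scan ran all the way up to vm_top, and it carries
the same invariant as the two new ASSERT()s: the bit right after a fresh
allocation is either clear or exactly vm_top.

/*
 * Minimal user-space model of the fixed vm_alloc() scan - illustration
 * only.  The names below (VM_TOP, bitmap, next_set, next_clear, alloc)
 * are invented for this sketch; 'align' must be a power of two.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define VM_TOP 32u                 /* toy vmap area of 32 pages */

static bool bitmap[VM_TOP];        /* bitmap[i] set => page i allocated */
static unsigned int vm_low;        /* lowest possibly free page */

/* find_next_bit() stand-in: first set index in [from, VM_TOP), else VM_TOP
 * (the real code's capping of 'bit' at vm_top is folded in here). */
static unsigned int next_set(unsigned int from)
{
    while ( from < VM_TOP && !bitmap[from] )
        ++from;
    return from;
}

/* find_next_zero_bit() stand-in: first clear index in [from, VM_TOP). */
static unsigned int next_clear(unsigned int from)
{
    while ( from < VM_TOP && bitmap[from] )
        ++from;
    return from;
}

/* Scan with the corrected boundary checks; returns VM_TOP on failure. */
static unsigned int alloc(unsigned int nr, unsigned int align)
{
    unsigned int start, bit;

    assert(vm_low == VM_TOP || !bitmap[vm_low]);
    for ( start = vm_low; start < VM_TOP; )
    {
        bit = next_set(start + 1);
        /* Skip the first page so it remains a guard page. */
        start = (start + align) & ~(align - 1);
        if ( bit < VM_TOP )
        {
            if ( start + nr < bit )   /* strict: keep a clear bit before 'bit' */
                break;
            start = next_clear(bit + 1);
        }
        else
        {
            if ( start + nr <= bit )  /* may end exactly at the top */
                break;
            start = bit;
        }
    }

    if ( start >= VM_TOP || start + nr > VM_TOP )
        return VM_TOP;               /* the real code would grow the area */

    for ( bit = start; bit < start + nr; ++bit )
        bitmap[bit] = true;
    /* The invariant the patch's new ASSERT()s encode. */
    if ( bit < VM_TOP )
        assert(!bitmap[bit]);
    else
        assert(bit == VM_TOP);
    if ( start <= vm_low + 2 )
        vm_low = bit;
    return start;
}

int main(void)
{
    unsigned int a = alloc(8, 1);    /* pages 1-8 (page 0 stays a guard) */
    unsigned int b = alloc(4, 4);    /* pages 12-15 (4-aligned) */
    /*
     * With the old "start + nr <= bit" check this request would land on
     * pages 10-11, ending flush against the range starting at page 12,
     * which is exactly the situation the new ASSERT()s catch.  The strict
     * check skips past that range instead.
     */
    unsigned int c = alloc(2, 1);    /* pages 17-18 */
    printf("a=%u b=%u c=%u\n", a, b, c);
    return 0;
}

Built with any C99 compiler (e.g. cc -o vm-model vm-model.c, file name
arbitrary) this prints a=1 b=12 c=17; changing the strict check back to "<="
makes the assert() in alloc() fire on the third request, mirroring the kind
of off-by-one accounting slip the patch fixes.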