Re: [Xen-devel] [PATCH v4 16/19] mini-os: map page allocator's bitmap to virtual kernel area for ballooning
Juergen Gross, on Thu 11 Aug 2016 13:06:36 +0200, wrote:
> In case of CONFIG_BALLOON the page allocator's bitmap needs some space
> to be able to grow. Remap it to kernel virtual area if the preallocated
> area isn't large enough.
>
> Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
Reviewed-by: Samuel Thibault <samuel.thibault@xxxxxxxxxxxx>
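
For context: the allocator tracks pages with one bit each, so a bitmap sized
for the boot-time reservation cannot describe the extra pages a ballooned-up
domain may gain; the remap below gives it room to grow up to nr_max_pages.
A self-contained illustration of that one-bit-per-page bookkeeping (a sketch,
not code from the tree):

    #include <stdio.h>

    #define PAGES_PER_MAPWORD (sizeof(unsigned long) * 8)

    /* One bit per page: word index = pfn / bits-per-word,
     * bit offset = pfn % bits-per-word (same layout as mm_alloc_bitmap). */
    static unsigned long bitmap[4];   /* enough for 256 pages on 64-bit */

    static int page_allocated(unsigned long pfn)
    {
        return !!(bitmap[pfn / PAGES_PER_MAPWORD] &
                  (1UL << (pfn & (PAGES_PER_MAPWORD - 1))));
    }

    int main(void)
    {
        bitmap[69 / PAGES_PER_MAPWORD] |= 1UL << (69 % PAGES_PER_MAPWORD);
        printf("pfn 69 allocated: %d, pfn 70 allocated: %d\n",
               page_allocated(69), page_allocated(70));
        return 0;
    }

On a 64-bit build this reports pfn 69 as allocated and pfn 70 as free, which
is exactly the lookup the allocated_in_map() macro performs in mm.c below.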
> ---
> V4: - mm_bitmap* -> mm_alloc_bitmap* as requested by Samuel Thibault
>
> V3: - add assertion as requested by Samuel Thibault
> - rename functions to have mm_ prefix as requested by Samuel Thibault
> ---
> balloon.c | 18 ++++++++++++++++++
> include/balloon.h | 2 ++
> include/mm.h | 6 ++++++
> mm.c | 44 +++++++++++++++++++++++---------------------
> 4 files changed, 49 insertions(+), 21 deletions(-)
>
> diff --git a/balloon.c b/balloon.c
> index 1ec113d..0a3342c 100644
> --- a/balloon.c
> +++ b/balloon.c
> @@ -44,3 +44,21 @@ void get_max_pages(void)
> nr_max_pages = ret;
> printk("Maximum memory size: %ld pages\n", nr_max_pages);
> }
> +
> +void mm_alloc_bitmap_remap(void)
> +{
> + unsigned long i;
> +
> + if ( mm_alloc_bitmap_size >= ((nr_max_pages + 1) >> (PAGE_SHIFT + 3)) )
> + return;
> +
> + for ( i = 0; i < mm_alloc_bitmap_size; i += PAGE_SIZE )
> + {
> + map_frame_rw(virt_kernel_area_end + i,
> + virt_to_mfn((unsigned long)(mm_alloc_bitmap) + i));
> + }
> +
> + mm_alloc_bitmap = (unsigned long *)virt_kernel_area_end;
> + virt_kernel_area_end += round_pgup((nr_max_pages + 1) >> (PAGE_SHIFT + 3));
> + ASSERT(virt_kernel_area_end <= VIRT_DEMAND_AREA);
> +}
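
The loop above aliases the frames that already back the bitmap at its new
virtual address before the pointer is switched, so the recorded allocation
state survives without any copying; virt_kernel_area_end is only advanced
afterwards to reserve headroom for the fully grown bitmap. The same aliasing
pattern in isolation, as a sketch built on the helpers visible in the hunk
(not code from the tree):

    /* Sketch: make an existing, physically backed buffer visible at a
     * second virtual address, one page at a time.  Assumes the
     * map_frame_rw()/virt_to_mfn() helpers used in the hunk above. */
    static void *alias_at(void *buf, unsigned long size, unsigned long new_va)
    {
        unsigned long off;

        for ( off = 0; off < size; off += PAGE_SIZE )
            map_frame_rw(new_va + off, virt_to_mfn((unsigned long)buf + off));

        return (void *)new_va;   /* the old mapping remains valid as well */
    }

The old virtual mapping stays usable; the patch simply stops referring to it
once mm_alloc_bitmap points at the new area.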
> diff --git a/include/balloon.h b/include/balloon.h
> index b0d0ebf..9154f44 100644
> --- a/include/balloon.h
> +++ b/include/balloon.h
> @@ -31,11 +31,13 @@ extern unsigned long virt_kernel_area_end;
>
> void get_max_pages(void);
> void arch_remap_p2m(unsigned long max_pfn);
> +void mm_alloc_bitmap_remap(void);
>
> #else /* CONFIG_BALLOON */
>
> static inline void get_max_pages(void) { }
> static inline void arch_remap_p2m(unsigned long max_pfn) { }
> +static inline void mm_alloc_bitmap_remap(void) { }
>
> #endif /* CONFIG_BALLOON */
> #endif /* _BALLOON_H_ */
> diff --git a/include/mm.h b/include/mm.h
> index 6add683..fc3128b 100644
> --- a/include/mm.h
> +++ b/include/mm.h
> @@ -42,8 +42,14 @@
> #define STACK_SIZE_PAGE_ORDER __STACK_SIZE_PAGE_ORDER
> #define STACK_SIZE __STACK_SIZE
>
> +#define round_pgdown(_p) ((_p) & PAGE_MASK)
> +#define round_pgup(_p) (((_p) + (PAGE_SIZE - 1)) & PAGE_MASK)
> +
> extern unsigned long nr_free_pages;
>
> +extern unsigned long *mm_alloc_bitmap;
> +extern unsigned long mm_alloc_bitmap_size;
> +
> void init_mm(void);
> unsigned long alloc_pages(int order);
> #define alloc_page() alloc_pages(0)
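
Moving round_pgdown()/round_pgup() from mm.c into mm.h lets balloon.c reuse
round_pgup() above. Their effect, assuming the usual 4 KiB pages (illustrative
values only):

    /* With PAGE_SIZE = 0x1000 and PAGE_MASK = ~0xfffUL:
     *   round_pgdown(0x12345) = 0x12000   -- truncate to the page boundary
     *   round_pgup(0x12345)   = 0x13000   -- round up to the next boundary
     *   round_pgup(0x12000)   = 0x12000   -- already aligned, unchanged
     */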
> diff --git a/mm.c b/mm.c
> index 707a3e0..9e3a479 100644
> --- a/mm.c
> +++ b/mm.c
> @@ -48,11 +48,14 @@
> * One bit per page of memory. Bit set => page is allocated.
> */
>
> -static unsigned long *alloc_bitmap;
> +unsigned long *mm_alloc_bitmap;
> +unsigned long mm_alloc_bitmap_size;
> +
> #define PAGES_PER_MAPWORD (sizeof(unsigned long) * 8)
>
> #define allocated_in_map(_pn) \
> -(alloc_bitmap[(_pn)/PAGES_PER_MAPWORD] & (1UL<<((_pn)&(PAGES_PER_MAPWORD-1))))
> + (mm_alloc_bitmap[(_pn) / PAGES_PER_MAPWORD] & \
> + (1UL << ((_pn) & (PAGES_PER_MAPWORD - 1))))
>
> unsigned long nr_free_pages;
>
> @@ -61,8 +64,8 @@ unsigned long nr_free_pages;
> * -(1<<n) sets all bits >= n.
> * (1<<n)-1 sets all bits < n.
> * Variable names in map_{alloc,free}:
> - * *_idx == Index into `alloc_bitmap' array.
> - * *_off == Bit offset within an element of the `alloc_bitmap' array.
> + * *_idx == Index into `mm_alloc_bitmap' array.
> + * *_off == Bit offset within an element of the `mm_alloc_bitmap' array.
> */
>
> static void map_alloc(unsigned long first_page, unsigned long nr_pages)
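
The two mask identities in the comment above are what let map_alloc() and
map_free() mark a whole range of bits per word; a quick worked illustration on
an 8-bit view (not code from the tree):

    /* For n = 3:
     *   -(1UL << 3)     = ...11111000   -> sets all bits >= 3
     *   (1UL << 3) - 1  = ...00000111   -> sets all bits <  3
     * Marking pages [start_off, end_off) within one bitmap word is then
     *   word |= ((1UL << end_off) - 1) & -(1UL << start_off);
     * e.g. start_off = 2, end_off = 5 yields 00011100, i.e. bits 2..4. */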
> @@ -76,13 +79,13 @@ static void map_alloc(unsigned long first_page, unsigned long nr_pages)
>
> if ( curr_idx == end_idx )
> {
> - alloc_bitmap[curr_idx] |= ((1UL<<end_off)-1) & -(1UL<<start_off);
> + mm_alloc_bitmap[curr_idx] |= ((1UL<<end_off)-1) & -(1UL<<start_off);
> }
> else
> {
> - alloc_bitmap[curr_idx] |= -(1UL<<start_off);
> - while ( ++curr_idx < end_idx ) alloc_bitmap[curr_idx] = ~0UL;
> - alloc_bitmap[curr_idx] |= (1UL<<end_off)-1;
> + mm_alloc_bitmap[curr_idx] |= -(1UL<<start_off);
> + while ( ++curr_idx < end_idx ) mm_alloc_bitmap[curr_idx] = ~0UL;
> + mm_alloc_bitmap[curr_idx] |= (1UL<<end_off)-1;
> }
>
> nr_free_pages -= nr_pages;
> @@ -102,13 +105,13 @@ static void map_free(unsigned long first_page, unsigned long nr_pages)
>
> if ( curr_idx == end_idx )
> {
> - alloc_bitmap[curr_idx] &= -(1UL<<end_off) | ((1UL<<start_off)-1);
> + mm_alloc_bitmap[curr_idx] &= -(1UL<<end_off) | ((1UL<<start_off)-1);
> }
> else
> {
> - alloc_bitmap[curr_idx] &= (1UL<<start_off)-1;
> - while ( ++curr_idx != end_idx ) alloc_bitmap[curr_idx] = 0;
> - alloc_bitmap[curr_idx] &= -(1UL<<end_off);
> + mm_alloc_bitmap[curr_idx] &= (1UL<<start_off)-1;
> + while ( ++curr_idx != end_idx ) mm_alloc_bitmap[curr_idx] = 0;
> + mm_alloc_bitmap[curr_idx] &= -(1UL<<end_off);
> }
> }
>
> @@ -137,9 +140,6 @@ static chunk_head_t *free_head[FREELIST_SIZE];
> static chunk_head_t free_tail[FREELIST_SIZE];
> #define FREELIST_EMPTY(_l) ((_l)->next == NULL)
>
> -#define round_pgdown(_p) ((_p)&PAGE_MASK)
> -#define round_pgup(_p) (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
> -
> /*
> * Initialise allocator, placing addresses [@min,@max] in free pool.
> * @min and @max are PHYSICAL addresses.
> @@ -147,7 +147,7 @@ static chunk_head_t free_tail[FREELIST_SIZE];
> static void init_page_allocator(unsigned long min, unsigned long max)
> {
> int i;
> - unsigned long range, bitmap_size;
> + unsigned long range;
> chunk_head_t *ch;
> chunk_tail_t *ct;
> for ( i = 0; i < FREELIST_SIZE; i++ )
> @@ -161,14 +161,14 @@ static void init_page_allocator(unsigned long min, unsigned long max)
> max = round_pgdown(max);
>
> /* Allocate space for the allocation bitmap. */
> - bitmap_size = (max+1) >> (PAGE_SHIFT+3);
> - bitmap_size = round_pgup(bitmap_size);
> - alloc_bitmap = (unsigned long *)to_virt(min);
> - min += bitmap_size;
> + mm_alloc_bitmap_size = (max + 1) >> (PAGE_SHIFT + 3);
> + mm_alloc_bitmap_size = round_pgup(mm_alloc_bitmap_size);
> + mm_alloc_bitmap = (unsigned long *)to_virt(min);
> + min += mm_alloc_bitmap_size;
> range = max - min;
>
> /* All allocated by default. */
> - memset(alloc_bitmap, ~0, bitmap_size);
> + memset(mm_alloc_bitmap, ~0, mm_alloc_bitmap_size);
> /* Free up the memory we've been given to play with. */
> map_free(PHYS_PFN(min), range>>PAGE_SHIFT);
>
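
To make the sizes concrete, a worked example with illustrative numbers
(4 KiB pages, PAGE_SHIFT = 12 assumed):

    /* min = 0x00100000 (1 MiB), max = 0x04000000 (64 MiB):
     *   mm_alloc_bitmap_size = (max + 1) >> (PAGE_SHIFT + 3) ~= 0x800 bytes
     *                          (one bit for each of the 0x4000 pages)
     *   round_pgup(0x800)    = 0x1000  -> the bitmap occupies one page
     * The bitmap lives at to_virt(min), and since min is advanced past it
     * before map_free(), the bitmap page itself is never handed out. */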
> @@ -198,6 +198,8 @@ static void init_page_allocator(unsigned long min, unsigned long max)
> free_head[i] = ch;
> ct->level = i;
> }
> +
> + mm_alloc_bitmap_remap();
> }
>
>
> --
> 2.6.6
>
--
Samuel
"...Deep Hack Mode--that mysterious and frightening state of
consciousness where Mortal Users fear to tread."
(By Matt Welsh)
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel