[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v3 2/9] mm: Place unscrubbed pages at the end of pagelist



>>> On 14.04.17 at 17:37, <boris.ostrovsky@xxxxxxxxxx> wrote:
> @@ -678,6 +680,20 @@ static void check_low_mem_virq(void)
>      }
>  }
>  
> +/* Pages that need scrub are added to tail, otherwise to head. */
> +static void page_list_add_scrub(struct page_info *pg, unsigned int node,
> +                                unsigned int zone, unsigned int order,
> +                                bool need_scrub)
> +{
> +    PFN_ORDER(pg) = order;
> +    pg->u.free.dirty_head = need_scrub;
> +
> +    if ( need_scrub )
> +        page_list_add_tail(pg, &heap(node, zone, order));
> +    else
> +        page_list_add(pg, &heap(node, zone, order));
> +}
> +
>  /* Allocate 2^@order contiguous pages. */
>  static struct page_info *alloc_heap_pages(
>      unsigned int zone_lo, unsigned int zone_hi,
> @@ -802,7 +818,7 @@ static struct page_info *alloc_heap_pages(
>      while ( j != order )
>      {
>          PFN_ORDER(pg) = --j;
> -        page_list_add_tail(pg, &heap(node, zone, j));
> +        page_list_add(pg, &heap(node, zone, j));

Don't you need to replicate pg->u.free.dirty_head (and hence use
page_list_add_scrub()) here too?

> @@ -851,11 +867,14 @@ static int reserve_offlined_page(struct page_info *head)
>      int zone = page_to_zone(head), i, head_order = PFN_ORDER(head), count = 0;
>      struct page_info *cur_head;
>      int cur_order;
> +    bool need_scrub;

Please put this in the narrowest scope in which it's needed.

>      ASSERT(spin_is_locked(&heap_lock));
>  
>      cur_head = head;
>  
> +    head->u.free.dirty_head = false;
> +
>      page_list_del(head, &heap(node, zone, head_order));
>  
>      while ( cur_head < (head + (1 << head_order)) )
> @@ -892,8 +911,16 @@ static int reserve_offlined_page(struct page_info *head)
>              {
>              merge:
>                  /* We don't consider merging outside the head_order. */
> -                page_list_add_tail(cur_head, &heap(node, zone, cur_order));
> -                PFN_ORDER(cur_head) = cur_order;
> +
> +                /* See if any of the pages need scrubbing. */
> +                need_scrub = false;
> +                for ( i = 0; i < (1 << cur_order); i++ )
> +                    if ( test_bit(_PGC_need_scrub, &cur_head[i].count_info) )
> +                    {
> +                        need_scrub = true;
> +                        break;
> +                    }

Can't you skip this loop when the incoming chunk has
->u.free.dirty_head clear?

> +static void scrub_free_pages(unsigned int node)
> +{
> +    struct page_info *pg;
> +    unsigned int zone, order;
> +    unsigned long i;

Here I would similarly appreciate it if the local variables were moved
into the scopes in which they're actually needed.

> +    ASSERT(spin_is_locked(&heap_lock));
> +
> +    if ( !node_need_scrub[node] )
> +        return;
> +
> +    for ( zone = 0; zone < NR_ZONES; zone++ )
> +    {
> +        order = MAX_ORDER;
> +        do {
> +            while ( !page_list_empty(&heap(node, zone, order)) )
> +            {
> +                /* Unscrubbed pages are always at the end of the list. */
> +                pg = page_list_last(&heap(node, zone, order));
> +                if ( !pg->u.free.dirty_head )
> +                    break;
> +
> +                for ( i = 0; i < (1UL << order); i++)
> +                {
> +                    if ( test_bit(_PGC_need_scrub, &pg[i].count_info) )
> +                    {
> +                        scrub_one_page(&pg[i]);
> +                        pg[i].count_info &= ~PGC_need_scrub;
> +                        node_need_scrub[node]--;
> +                    }
> +                }
> +
> +                page_list_del(pg, &heap(node, zone, order));
> +                merge_and_free_buddy(pg, node, zone, order, false);

Is there actually any merging involved here, i.e. can't you
simply put back the buddy at the list head?

> --- a/xen/include/asm-arm/mm.h
> +++ b/xen/include/asm-arm/mm.h
> @@ -45,6 +45,8 @@ struct page_info
>          struct {
>              /* Do TLBs need flushing for safety before next page use? */
>              bool_t need_tlbflush;
> +            /* Set on a buddy head if the buddy has unscrubbed pages. */
> +            bool dirty_head;
>          } free;
>  
>      } u;
> @@ -115,6 +117,10 @@ struct page_info
>  #define PGC_count_width   PG_shift(9)
>  #define PGC_count_mask    ((1UL<<PGC_count_width)-1)
>  
> +/* Page needs to be scrubbed */
> +#define _PGC_need_scrub   PG_shift(10)
> +#define PGC_need_scrub    PG_mask(1, 10)

This is at least dangerous: You're borrowing a bit from
PGC_count_mask. That's presumably okay because you only
ever set the bit on free pages (and it is being zapped by
alloc_heap_pages() setting ->count_info to PGC_state_inuse),
but I think it would be more explicit that you borrow another bit
if you re-used one of the existing ones (like PGC_allocated), and
then did so by using a straight #define to that other value.

Jan

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.