[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v2 2/6] x86: distinguish CPU offlining from CPU removal



On Wed, Jul 18, 2018 at 02:19:29AM -0600, Jan Beulich wrote:
> --- a/xen/arch/x86/genapic/x2apic.c
> +++ b/xen/arch/x86/genapic/x2apic.c
> @@ -201,18 +201,21 @@ static int update_clusterinfo(
>          if ( !cluster_cpus_spare )
>              cluster_cpus_spare = xzalloc(cpumask_t);
>          if ( !cluster_cpus_spare ||
> -             !alloc_cpumask_var(&per_cpu(scratch_mask, cpu)) )
> +             !cond_alloc_cpumask_var(&per_cpu(scratch_mask, cpu)) )
>              err = -ENOMEM;
>          break;
>      case CPU_UP_CANCELED:
>      case CPU_DEAD:
> +    case CPU_REMOVE:
> +        if ( park_offline_cpus == (action != CPU_REMOVE) )
> +            break;

I think this would be clearer as:

case CPU_UP_CANCELED:
case CPU_DEAD:
    if ( park_offline_cpus )
        break;

    /* fallthrough */
case CPU_REMOVE:
    if ( per_cpu...

If it's safe to do so (similar to what you do in cpu_callback).

>          if ( per_cpu(cluster_cpus, cpu) )
>          {
>              cpumask_clear_cpu(cpu, per_cpu(cluster_cpus, cpu));
>              if ( cpumask_empty(per_cpu(cluster_cpus, cpu)) )
> -                xfree(per_cpu(cluster_cpus, cpu));
> +                XFREE(per_cpu(cluster_cpus, cpu));
>          }
> -        free_cpumask_var(per_cpu(scratch_mask, cpu));
> +        FREE_CPUMASK_VAR(per_cpu(scratch_mask, cpu));
>          break;
>      }
>  
> --- a/xen/arch/x86/percpu.c
> +++ b/xen/arch/x86/percpu.c
> @@ -28,7 +28,7 @@ static int init_percpu_area(unsigned int
>      char *p;
>  
>      if ( __per_cpu_offset[cpu] != INVALID_PERCPU_AREA )
> -        return -EBUSY;
> +        return 0;
>  
>      if ( (p = alloc_xenheap_pages(PERCPU_ORDER, 0)) == NULL )
>          return -ENOMEM;
> @@ -76,9 +76,12 @@ static int cpu_percpu_callback(
>          break;
>      case CPU_UP_CANCELED:
>      case CPU_DEAD:
> -        free_percpu_area(cpu);
> +        if ( !park_offline_cpus )
> +            free_percpu_area(cpu);
>          break;
> -    default:
> +    case CPU_REMOVE:
> +        if ( park_offline_cpus )
> +            free_percpu_area(cpu);
>          break;
>      }
>  
> --- a/xen/arch/x86/smpboot.c
> +++ b/xen/arch/x86/smpboot.c
> @@ -63,6 +63,8 @@ static cpumask_t scratch_cpu0mask;
>  cpumask_t cpu_online_map __read_mostly;
>  EXPORT_SYMBOL(cpu_online_map);
>  
> +bool __read_mostly park_offline_cpus;

park_offline_cpus seems to be left at its initial value (false) in
this patch. It might be good to mention in the commit message that
further patches will allow setting this value (I haven't looked yet,
but I assume so).

> @@ -955,15 +970,19 @@ static int cpu_smpboot_alloc(unsigned in
>      if ( node != NUMA_NO_NODE )
>          memflags = MEMF_node(node);
>  
> -    stack_base[cpu] = alloc_xenheap_pages(STACK_ORDER, memflags);
> +    if ( stack_base[cpu] == NULL )
> +        stack_base[cpu] = alloc_xenheap_pages(STACK_ORDER, memflags);
>      if ( stack_base[cpu] == NULL )
>          goto out;
>      memguard_guard_stack(stack_base[cpu]);
>  
>      order = get_order_from_pages(NR_RESERVED_GDT_PAGES);
> -    per_cpu(gdt_table, cpu) = gdt = alloc_xenheap_pages(order, memflags);
> +    gdt = per_cpu(gdt_table, cpu);
> +    if ( gdt == NULL )
> +        gdt = alloc_xenheap_pages(order, memflags);

gdt = per_cpu(gdt_table, cpu) ?: alloc_xenheap_pages(order, memflags);

Is equivalent and shorter AFAICT.

> @@ -368,16 +387,20 @@ static inline bool_t alloc_cpumask_var(c
>  {
>       return 1;
>  }
> +#define cond_alloc_cpumask_var alloc_cpumask_var
>  
>  static inline bool_t zalloc_cpumask_var(cpumask_var_t *mask)
>  {
>       cpumask_clear(*mask);
>       return 1;
>  }
> +#define cond_zalloc_cpumask_var zalloc_cpumask_var
>  
>  static inline void free_cpumask_var(cpumask_var_t mask)
>  {
>  }
> +
> +#define FREE_CPUMASK_VAR(m) ((void)(m))

For correctness, shouldn't this call free_cpumask_var?

>  #endif
>  
>  #if NR_CPUS > 1
> --- a/xen/include/xen/mm.h
> +++ b/xen/include/xen/mm.h
> @@ -162,6 +162,14 @@ void free_xenheap_pages(void *v, unsigne
>  bool scrub_free_pages(void);
>  #define alloc_xenheap_page() (alloc_xenheap_pages(0,0))
>  #define free_xenheap_page(v) (free_xenheap_pages(v,0))
> +
> +/* Free an allocation, and zero the pointer to it. */
> +#define FREE_XENHEAP_PAGES(p, o) do { \
> +    free_xenheap_pages(p, o);         \
> +    (p) = NULL;                       \
> +} while ( false )
> +#define FREE_XENHEAP_PAGE(p) FREE_XENHEAP_PAGES(p, 0)
> +
>  /* Map machine page range in Xen virtual address space. */
>  int map_pages_to_xen(
>      unsigned long virt,
> --- a/xen/include/xen/xmalloc.h
> +++ b/xen/include/xen/xmalloc.h
> @@ -42,6 +42,12 @@
>  /* Free any of the above. */
>  extern void xfree(void *);
>  
> +/* Free an allocation, and zero the pointer to it. */
> +#define XFREE(p) do { \
> +    xfree(p);         \
> +    (p) = NULL;       \
> +} while ( false )

Do you need the do { ... } while construct here? Isn't it enough to
use ({ ... })?

Thanks, Roger.

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.