
Re: [Xen-devel] [PATCH, v2] reduce 'd' debug key's global impact



Pick something rare like SPURIOUS_APIC_VECTOR. In that particular handler
you can even put your check on the unlikely path that checks for and acks a
real vectored interrupt.
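
A minimal sketch of that idea (illustrative only -- the handler and helper
names below are hypothetical, not the actual Xen code): the sender sets a
per-CPU flag before kicking the CPU, and the receiving handler tests that
flag on the same rarely-taken path that acks a genuinely vectored interrupt.

    /* Illustrative sketch only; handler and helper names are made up. */
    static DEFINE_PER_CPU(bool_t, dump_state_pending);

    static void spurious_vector_handler(struct cpu_user_regs *regs)
    {
        /* Unlikely path: the vector was genuinely delivered, so it has
         * to be acked -- piggy-back the dump check here. */
        if ( apic_isr_bit_set(SPURIOUS_APIC_VECTOR) )  /* hypothetical helper */
        {
            ack_APIC_irq();
            if ( unlikely(this_cpu(dump_state_pending)) )
            {
                this_cpu(dump_state_pending) = 0;
                show_execution_state(regs);
            }
        }
        /* Genuinely spurious interrupts are not acked and need no extra work. */
    }

Since that branch is only taken when a real vector was delivered, the extra
test costs nothing in the common (truly spurious) case.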

 K.

On 06/05/2010 07:46, "Jan Beulich" <JBeulich@xxxxxxxxxx> wrote:

> Actually I have to withdraw this version too - calling on_selected_cpus()
> from a call-function handler just isn't a valid thing to do.
> 
> The best alternative I can currently think of is to hook this as an
> unlikely code path into an existing IPI handler (e.g. for x86 the
> event check one), since sending a simple IPI doesn't have similar
> problems.
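> 
> A rough sketch of how that could look (illustrative only; the mask and
> handler names below are made up, and the exact send_IPI_mask() signature
> is assumed):
> 
>     /* Illustrative sketch, not part of this patch. */
>     static cpumask_t dump_pending_mask;     /* CPUs that still have to dump */
> 
>     /* Sender: mark the target CPUs, then kick them with a plain IPI. */
>     static void trigger_remote_dumps(void)
>     {
>         cpus_andnot(dump_pending_mask, cpu_online_map,
>                     cpumask_of_cpu(smp_processor_id()));
>         send_IPI_mask(&dump_pending_mask, EVENT_CHECK_VECTOR);  /* signature assumed */
>     }
> 
>     /* Receiver: unlikely extra path in the existing event-check handler. */
>     static void event_check_handler(struct cpu_user_regs *regs)
>     {
>         ack_APIC_irq();
>         if ( unlikely(cpu_isset(smp_processor_id(), dump_pending_mask)) )
>         {
>             cpu_clear(smp_processor_id(), dump_pending_mask);
>             show_execution_state(regs);
>         }
>     }
> 
> Because a bare IPI carries no call-function bookkeeping, the receiving CPU
> remains free to send further IPIs (e.g. to chain the dump to the next CPU)
> from within its handler.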
> 
> Jan
> 
>>>> "Jan Beulich" <JBeulich@xxxxxxxxxx> 05.05.10 17:34 >>>
> On large systems, dumping state may stall time management for so long
> that it cannot recover. Therefore alter the state dumping logic so
> that, when the alternative key handling toggle introduced by an earlier
> patch is enabled, each CPU is blocked only while it prints its own
> state, rather than a single CPU being blocked for a very long time.
> Also avoid printing useless data (e.g. the hypervisor context of the
> interrupt used to trigger the printing, which isn't part of the context
> that is actually interesting).
> 
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
> 
> --- 2010-05-04.orig/xen/arch/ia64/linux-xen/smp.c 2010-05-05 16:42:36.000000000 +0200
> +++ 2010-05-04/xen/arch/ia64/linux-xen/smp.c 2010-05-04 13:22:27.000000000 +0200
> @@ -189,7 +189,7 @@ handle_IPI (int irq, void *dev_id, struc
>       * At this point the structure may be gone unless
>       * wait is true.
>       */
> -          (*func)(info);
> +          (*func)(info ?: regs);
>  
>      /* Notify the sending CPU that the task is done.  */
>      mb();
> --- 2010-05-04.orig/xen/arch/x86/smp.c 2010-05-05 16:42:36.000000000 +0200
> +++ 2010-05-04/xen/arch/x86/smp.c 2010-05-04 13:22:27.000000000 +0200
> @@ -395,7 +395,7 @@ static void __smp_call_function_interrup
>  
>      if ( call_data.wait )
>      {
> -        (*func)(info);
> +        (*func)(info ?: get_irq_regs());
>          mb();
>          atomic_inc(&call_data.finished);
>      }
> @@ -403,7 +403,7 @@ static void __smp_call_function_interrup
>      {
>          mb();
>          atomic_inc(&call_data.started);
> -        (*func)(info);
> +        (*func)(info ?: get_irq_regs());
>      }
>  
>      irq_exit();
> --- 2010-05-04.orig/xen/common/keyhandler.c 2010-05-04 13:21:53.000000000 +0200
> +++ 2010-05-04/xen/common/keyhandler.c 2010-05-05 16:49:24.000000000 +0200
> @@ -71,14 +71,44 @@ static struct keyhandler show_handlers_k
>      .desc = "show this message"
>  };
>  
> -static void __dump_execstate(void *unused)
> +static cpumask_t dump_execstate_mask;
> +
> +static void __dump_execstate(void *_regs)
>  {
> -    dump_execution_state();
> -    printk("*** Dumping CPU%d guest state: ***\n", smp_processor_id());
> +    struct cpu_user_regs *regs = _regs;
> +    unsigned int cpu = smp_processor_id();
> +
> +    if ( !guest_mode(regs) )
> +    {
> +        printk("\n*** Dumping CPU%u host state: ***\n", cpu);
> +        show_execution_state(regs);
> +    }
>      if ( is_idle_vcpu(current) )
> -        printk("No guest context (CPU is idle).\n");
> +        printk("No guest context (CPU%u is idle).\n", cpu);
>      else
> +    {
> +        printk("*** Dumping CPU%u guest state (d%d:v%d): ***\n",
> +               smp_processor_id(), current->domain->domain_id,
> +               current->vcpu_id);
>          show_execution_state(guest_cpu_user_regs());
> +    }
> +
> +    if ( !alt_key_handling )
> +        return;
> +
> +    cpu = cycle_cpu(cpu, dump_execstate_mask);
> +    if ( cpu < NR_CPUS )
> +    {
> +        cpu_clear(cpu, dump_execstate_mask);
> +        on_selected_cpus(cpumask_of(cpu), __dump_execstate, NULL, 0);
> +    }
> +    else
> +    {
> +        printk("\n");
> +
> +        console_end_sync();
> +        watchdog_enable();
> +    }
>  }
>  
>  static void dump_registers(unsigned char key, struct cpu_user_regs *regs)
> @@ -91,15 +121,20 @@ static void dump_registers(unsigned char
>  
>      printk("'%c' pressed -> dumping registers\n", key);
>  
> +    if ( alt_key_handling )
> +        cpus_andnot(dump_execstate_mask, cpu_online_map,
> +                    cpumask_of_cpu(smp_processor_id()));
> +
>      /* Get local execution state out immediately, in case we get stuck. */
> -    printk("\n*** Dumping CPU%d host state: ***\n", smp_processor_id());
> -    __dump_execstate(NULL);
> +    __dump_execstate(regs);
> +
> +    if ( alt_key_handling )
> +        return;
>  
>      for_each_online_cpu ( cpu )
>      {
>          if ( cpu == smp_processor_id() )
>              continue;
> -        printk("\n*** Dumping CPU%d host state: ***\n", cpu);
>          on_selected_cpus(cpumask_of(cpu), __dump_execstate, NULL, 1);
>      }
>  
> --- 2010-05-04.orig/xen/include/asm-ia64/linux-xen/asm/ptrace.h 2010-05-05 16:42:36.000000000 +0200
> +++ 2010-05-04/xen/include/asm-ia64/linux-xen/asm/ptrace.h 2010-05-04 13:22:27.000000000 +0200
> @@ -280,7 +280,7 @@ struct switch_stack {
>  # define ia64_task_regs(t)  (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
>  # define ia64_psr(regs)   ((struct ia64_psr *) &(regs)->cr_ipsr)
>  #ifdef XEN
> -# define guest_mode(regs)  (ia64_psr(regs)->cpl != 0)
> +# define guest_mode(regs)  (ia64_psr(regs)->cpl && !ia64_psr(regs)->vm)
>  # define guest_kernel_mode(regs) (ia64_psr(regs)->cpl == CONFIG_CPL0_EMUL)
>  # define vmx_guest_kernel_mode(regs) (ia64_psr(regs)->cpl == 0)
>  # define regs_increment_iip(regs)     \
> 
> 
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@xxxxxxxxxxxxxxxxxxx
> http://lists.xensource.com/xen-devel


