
Re: [Xen-devel] Debugging Dom0 kernel over serial



What kernel version?

Below is a patch against 3.2.y that allows debugging with kdb over hvc.

Some bits of this were recently upstreamed as part of some perf-tools
work in pvops, but the debug_core.c and hvc_console.c changes didn't
make it.

With this patch, you can add the kernel parameter:
kgdboc=hvc0

Alternatively, you can set it at runtime by doing the following:
echo hvc0 > /sys/module/kgdboc/parameters/kgdboc
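
For the boot-time route, the parameter just goes on the dom0 kernel line
in your bootloader config. A GRUB legacy entry might look roughly like
this (the image names, root device and serial settings below are only
placeholders for whatever your setup actually uses):

title Xen / Linux 3.2.y dom0 (kdb on hvc0)
    root (hd0,0)
    kernel /boot/xen.gz console=com1 com1=115200,8n1
    module /boot/vmlinuz-3.2.y root=/dev/sda1 ro console=hvc0 kgdboc=hvc0
    module /boot/initrd.img-3.2.y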

To break into the debugger, press the magic SysRq key sequence
"Alt+SysRq+g".

While you are in the debugger, the console is in "polling" mode, so there
seems to be a limit on how fast you can type commands. I found that when
I typed two letters too quickly, it just printed the help text again.
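
For reference, a minimal kdb session on hvc0 looks something like this
(the banner, addresses and pid are abbreviated/illustrative):

Entering kdb (current=0xffff8800..., pid ...) due to Keyboard Entry
kdb> bt        # backtrace of the CPU that took the break
kdb> ps        # list tasks
kdb> go        # resume the kernel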



diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index d5e0e0a..88815a1 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -65,6 +65,7 @@

 #include "xen-ops.h"
 #include "mmu.h"
+#include "smp.h"
 #include "multicalls.h"

 EXPORT_SYMBOL_GPL(hypercall_page);
@@ -768,6 +769,12 @@ static void set_xen_basic_apic_ops(void)
  apic->icr_write = xen_apic_icr_write;
  apic->wait_icr_idle = xen_apic_wait_icr_idle;
  apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
+
+ apic->send_IPI_allbutself = xen_send_IPI_allbutself;
+ apic->send_IPI_mask_allbutself = xen_send_IPI_mask_allbutself;
+ apic->send_IPI_mask = xen_send_IPI_mask;
+ apic->send_IPI_all = xen_send_IPI_all;
+ apic->send_IPI_self = xen_send_IPI_self;
 }

 #endif
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 3061244..d8928a1 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -436,8 +436,8 @@ static void xen_smp_send_reschedule(int cpu)
  xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
 }

-static void xen_send_IPI_mask(const struct cpumask *mask,
-      enum ipi_vector vector)
+void xen_send_IPI_mask(const struct cpumask *mask,
+      int vector)
 {
  unsigned cpu;

@@ -466,6 +466,39 @@ static void xen_smp_send_call_function_single_ipi(int cpu)
   XEN_CALL_FUNCTION_SINGLE_VECTOR);
 }

+void xen_send_IPI_all(int vector)
+{
+ xen_send_IPI_mask(cpu_online_mask, vector);
+}
+
+void xen_send_IPI_self(int vector)
+{
+ xen_send_IPI_one(smp_processor_id(), vector);
+}
+
+void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
+ int vector)
+{
+ unsigned cpu;
+ unsigned int this_cpu = smp_processor_id();
+
+ if (!(num_online_cpus() > 1))
+ return;
+
+ for_each_cpu_and(cpu, mask, cpu_online_mask) {
+ if (this_cpu == cpu)
+ continue;
+
+ xen_smp_send_call_function_single_ipi(cpu);
+ }
+}
+
+void xen_send_IPI_allbutself(int vector)
+{
+ xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
+}
+
+
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
 {
  irq_enter();
diff --git a/arch/x86/xen/smp.h b/arch/x86/xen/smp.h
new file mode 100644
index 0000000..8981a76
--- /dev/null
+++ b/arch/x86/xen/smp.h
@@ -0,0 +1,13 @@
+#ifndef _XEN_SMP_H
+#define _XEN_SMP_H
+
+extern void xen_send_IPI_mask(const struct cpumask *mask,
+      int vector);
+extern void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
+ int vector);
+extern void xen_send_IPI_allbutself(int vector);
+extern void physflat_send_IPI_allbutself(int vector);
+extern void xen_send_IPI_all(int vector);
+extern void xen_send_IPI_self(int vector);
+
+#endif
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index 58ca7ce..4addc80 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -754,13 +754,10 @@ int hvc_poll_init(struct tty_driver *driver, int line, char *options)

 static int hvc_poll_get_char(struct tty_driver *driver, int line)
 {
- struct tty_struct *tty = driver->ttys[0];
- struct hvc_struct *hp = tty->driver_data;
  int n;
  char ch;

- n = hp->ops->get_chars(hp->vtermno, &ch, 1);
-
+ n = cons_ops[last_hvc]->get_chars(vtermnos[last_hvc], &ch, 1);
  if (n == 0)
  return NO_POLL_CHAR;

@@ -769,12 +766,10 @@ static int hvc_poll_get_char(struct tty_driver *driver, int line)

 static void hvc_poll_put_char(struct tty_driver *driver, int line, char ch)
 {
- struct tty_struct *tty = driver->ttys[0];
- struct hvc_struct *hp = tty->driver_data;
  int n;

  do {
- n = hp->ops->put_chars(hp->vtermno, &ch, 1);
+ n = cons_ops[last_hvc]->put_chars(vtermnos[last_hvc], &ch, 1);
  } while (n <= 0);
 }
 #endif
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index cefd4a1..df904a5 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -581,12 +581,14 @@ return_normal:
  kgdb_roundup_cpus(flags);
 #endif

+#ifndef CONFIG_XEN
  /*
  * Wait for the other CPUs to be notified and be waiting for us:
  */
  while (kgdb_do_roundup && (atomic_read(&masters_in_kgdb) +
  atomic_read(&slaves_in_kgdb)) != online_cpus)
  cpu_relax();
+#endif

  /*
  * At this point the primary processor is completely





On Wed, Jun 6, 2012 at 3:55 PM, James Paton <paton@xxxxxxxxxxx> wrote:
>
> Thanks everyone for your help.
>
> I tried the suggestion of leaving ttyS1 out of the Xen boot options and using 
> that. I confirmed that I can echo things back and forth through that 
> connection from the host OS. I compiled a custom version of gdb to target 
> x86_64-linux-gnu so I could do remote debugging from the host. No luck. I 
> also tried setting console=ttyS1 for the dom0 kernel -- same outcome.
>
> Next, using exactly the same settings, I booted the dom0 kernel bare (no Xen 
> this time) and tried the same thing. This time, it worked. I can connect the 
> debugger and it breaks as expected. So I'm thinking debugging over serial while 
> running Xen is a dead end, unless I figure out a way to do it over hvc. I 
> think for now I'll just use printk.
>
> -- Jim

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

