
Re: [Xen-devel] xen 4 only seeing one keyboard and mouse



On Fri, Sep 03, 2010 at 07:50:06PM +0100, M A Young wrote:
> On Tue, 31 Aug 2010, Konrad Rzeszutek Wilk wrote:
> 
> >If you could, can you instrument it to print the cfg->domain before
> >'vector_allocation_domain' is called, and also instrument
> >assign_irq_vector similarly to what you did with Xen?
> >
> >And also instrument the 'dest' value. Basically the idea is to get an
> >idea of what per_cpu(vector) gets set to during bootup for the legacy
> >IRQs, similarly to what you did with Xen.
> 
> The kernel code I was working with (2.6.32) doesn't have the
> vector_allocation_domain section. I am attaching the debugging
> output I did get and the patch I used.

OK, so based on the output, the IO APIC pins for your two IRQs should have
the destination set to 1.

.. snip..
setup_IO_APIC_irq: cfg->domain=-1
setup_IO_APIC_irq: dest=1
IOAPIC[0]: Set routing entry (2-1 -> 0x31 -> IRQ 1 Mode:0 Active:0)
setup_IO_APIC_irq: cfg->domain=-1
setup_IO_APIC_irq: dest=1
IOAPIC[0]: Set routing entry (2-12 -> 0x3c -> IRQ 12 Mode:0 Active:0)

BUT when the IO APIC routing table is printed later (print_IO_APIC):

 NR Dst Mask Trig IRR Pol Stat Dmod Deli Vect:
 01 003 0    0    0   0   0    1    1    31
 0c 003 0    0    0   0   0    1    1    3C


They are set to dest = 3!?

Somehow the IO APIC is being programmed without going through setup_IO_APIC_irq
and its friends. Also, per_cpu(vector_irq, 1)[0x31] = 1 is set.
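
(For reference: vector_irq is the per-CPU vector-to-IRQ table. From
arch/x86/include/asm/hw_irq.h in this era, if I am reading it right:

	/* one int per vector, per CPU: vector -> IRQ number, or -1 if free */
	typedef int vector_irq_t[NR_VECTORS];
	DECLARE_PER_CPU(vector_irq_t, vector_irq);

so per_cpu(vector_irq, 1)[0x31] = 1 says that on CPU1 vector 0x31 is wired
to IRQ 1, the keyboard.)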

So, for the second problem, I think __setup_vector_irq is the one that
sets the vectors on the second CPU to correspond to the right IRQs.
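
(For context, a rough paraphrase -- not a verbatim copy -- of what
__setup_vector_irq does in this kernel when a CPU is brought up:

	/* Populate the new CPU's vector_irq[] table from the existing
	 * per-IRQ configuration. */
	void __setup_vector_irq(int cpu)
	{
		int irq;
		struct irq_cfg *cfg;
		struct irq_desc *desc;

		/* Claim the vector of every IRQ whose domain includes us. */
		for_each_irq_desc(irq, desc) {
			cfg = desc->chip_data;
			if (!cpumask_test_cpu(cpu, cfg->domain))
				continue;
			per_cpu(vector_irq, cpu)[cfg->vector] = irq;
		}
		/* ... then mark the vectors not routed here as free (-1). */
	}

which would explain the right vectors showing up on CPU1 without
assign_irq_vector ever running there.)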

But I am not sure how the IOAPIC pins for all IRQs below 16 get set to '3'.
Something happens between the initial call to init the IO_APIC IRQs and the
time the table is printed that sets the destination to a new value.
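
(One guess, from reading the 2.6.32 sources rather than from this run: after
all the CPUs are up, native_smp_cpus_done() does roughly

	#ifdef CONFIG_X86_IO_APIC
		setup_ioapic_dest();
	#endif

and setup_ioapic_dest() re-targets every IO APIC pin at apic->target_cpus();
with two CPUs in flat logical mode that mask would come out as 0x3, i.e.
dest = 3. The dump_stack() below should confirm or rule that out.)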

I've piggybacked on your debug patch and added some extra instrumentation to
see whether __setup_vector_irq is responsible for setting the new per_cpu
entries. Those printk's _might_ not work, as all of that runs on a secondary
CPU that is still being initialized..?

For the IO APIC programming I added a printk/dump_stack next to
ioapic_write_entry to see who sets those pins on the IOAPIC to 3, and when.

Here is the patch:

diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index ec4e874..37482fe 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -69,7 +69,7 @@
 #include <asm/xen/pci.h>
 
 #include <asm/xen/pci.h>
-
+#include <linux/kernel.h>
 #define __apicdebuginit(type) static type __init
 #define for_each_irq_pin(entry, head) \
        for (entry = head; entry; entry = entry->next)
@@ -486,6 +486,11 @@ __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
 void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
 {
        unsigned long flags;
+       if (pin == 1 || pin == 0xc) {
+               printk(KERN_INFO "Reprogramming PIN%d, dest=%d\n", pin, e.dest);
+               if (e.dest > 1)
+                       dump_stack();
+       }
        spin_lock_irqsave(&ioapic_lock, flags);
        __ioapic_write_entry(apic, pin, e);
        spin_unlock_irqrestore(&ioapic_lock, flags);
@@ -1198,6 +1203,7 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
        if (old_vector) {
                cpumask_and(tmp_mask, mask, cpu_online_mask);
                cpumask_and(tmp_mask, cfg->domain, tmp_mask);
+               printk(KERN_INFO "old_vector: %d  mask: %lx\n", old_vector, tmp_mask->bits[0]);
                if (!cpumask_empty(tmp_mask)) {
                        free_cpumask_var(tmp_mask);
                        return 0;
@@ -1214,6 +1220,8 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 
                vector = current_vector;
                offset = current_offset;
+               printk(KERN_INFO "vector: %d, mask: %lx, cpu: %d per_cpu:%x\n",
+                       vector, tmp_mask->bits[0], cpu, per_cpu(vector_irq, cpu)[vector]);
 next:
                vector += 8;
                if (vector >= first_system_vector) {
@@ -1237,8 +1245,11 @@ next:
                        cfg->move_in_progress = 1;
                        cpumask_copy(cfg->old_domain, cfg->domain);
                }
-               for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
+               for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) {
                        per_cpu(vector_irq, new_cpu)[vector] = irq;
+                       printk(KERN_WARNING "__assign_irq_vector: setting vector_irq[%d]=%d for cpu=%d\n",
+                               vector, irq, new_cpu);
+               }
                cfg->vector = vector;
                cpumask_copy(cfg->domain, tmp_mask);
                err = 0;
@@ -1304,6 +1315,8 @@ void __setup_vector_irq(int cpu)
                if (!cpumask_test_cpu(cpu, cfg->domain))
                        continue;
                vector = cfg->vector;
+               printk(KERN_INFO "%s: vector: %d on CPU %d set to IRQ: %d\n",
+                               __FUNCTION__, vector, cpu, irq);
                per_cpu(vector_irq, cpu)[vector] = irq;
        }
        /* Mark the free vectors */
@@ -1313,8 +1326,11 @@ void __setup_vector_irq(int cpu)
                        continue;
 
                cfg = irq_cfg(irq);
-               if (!cpumask_test_cpu(cpu, cfg->domain))
+               if (!cpumask_test_cpu(cpu, cfg->domain)) {
+                       printk(KERN_INFO "%s: vector %d on CPU %d reset b/c not in affinity mask (%lx)\n",
+                                       __FUNCTION__, vector, cpu, cfg->domain->bits[0]);
                        per_cpu(vector_irq, cpu)[vector] = -1;
+               }
        }
 }
 
@@ -1452,7 +1468,20 @@ int setup_ioapic_entry(int apic_id, int irq,
                entry->mask = 1;
        return 0;
 }
-
+static void dump_vectors(const char *prefix) {
+       int cpu;
+       int vector;
+
+       for (vector = 0x30; vector < 0x3f; vector++) {
+               for_each_online_cpu(cpu) {
+                       if (per_cpu(vector_irq, cpu)[vector] != -1)
+                               printk(KERN_INFO "%s [vec:%d,cpu:%d] = irq:%d\n",
+                                       prefix,
+                                       vector, cpu,
+                                       per_cpu(vector_irq, cpu)[vector]);
+               }
+       }
+}
 static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq_desc *desc,
                               int trigger, int polarity)
 {
@@ -1465,10 +1494,15 @@ static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq
 
        cfg = desc->chip_data;
 
+       printk(KERN_WARNING "setup_IO_APIC_irq: cfg->domain=%lx (vector: %d)\n", cfg->domain->bits[0], cfg->vector);
+
+       dump_vectors("PRE");
        if (assign_irq_vector(irq, cfg, apic->target_cpus()))
                return;
+       dump_vectors("PAST");
 
        dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
+       printk(KERN_WARNING "setup_IO_APIC_irq: dest=%d\n", dest);
 
        apic_printk(APIC_VERBOSE,KERN_DEBUG
                    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "

Attachment: debug-ioapic-irq-2-12.patch

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel

 

