
[Xen-devel] [PATCH v6 4/5] xen/arm: physical irq follow virtual irq



Migrate physical irqs to the same physical cpu that is running the vcpu
expected to receive them. This is done when irqs are enabled, when the
guest writes to GICD_ITARGETSR, and when Xen migrates a vcpu to a
different pcpu.

Introduce a new arch-specific function, arch_move_irqs, that is empty on
x86 and implements the vgic irq migration code on ARM.
arch_move_irqs is going to be called from sched.c.
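
The sched.c caller is not part of this patch; a minimal sketch of how the
scheduler is expected to invoke the new hook when it commits a vcpu to a
new pcpu could look like the following (the surrounding function and its
name are illustrative, only arch_move_irqs comes from this series):

    /* Illustrative only: a scheduler path that moves a vcpu to a new pcpu. */
    static void sched_move_vcpu(struct vcpu *v, unsigned int new_cpu)
    {
        if ( v->processor == new_cpu )
            return;

        v->processor = new_cpu;

        /* Re-route the physical irqs targeting this vcpu to the new pcpu. */
        arch_move_irqs(v);
    }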

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
Acked-by: Jan Beulich <jbeulich@xxxxxxxx>

---

Changes in v6:
- use vgic_get_target_vcpu instead of _vgic_get_target_vcpu in
arch_move_irqs.

Changes in v5:
- prettify vgic_move_irqs;
- rename vgic_move_irqs to arch_move_irqs;
- introduce helper function irq_set_affinity.
---
 xen/arch/arm/gic.c        |   18 ++++++++++++++++--
 xen/arch/arm/vgic.c       |   30 ++++++++++++++++++++++++++++++
 xen/include/asm-arm/gic.h |    1 +
 xen/include/asm-x86/irq.h |    2 ++
 4 files changed, 49 insertions(+), 2 deletions(-)

diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
index 54610ce..1965f86 100644
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -192,9 +192,23 @@ static void gic_guest_irq_end(struct irq_desc *desc)
     /* Deactivation happens in maintenance interrupt / via GICV */
 }
 
-static void gic_irq_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
+static void gic_irq_set_affinity(struct irq_desc *desc, const cpumask_t *cpu_mask)
 {
-    BUG();
+    volatile unsigned char *bytereg;
+    unsigned int mask;
+
+    if ( desc == NULL || cpumask_empty(cpu_mask) )
+        return;
+
+    spin_lock(&gic.lock);
+
+    mask = gic_cpu_mask(cpu_mask);
+
+    /* Set target CPU mask (RAZ/WI on uniprocessor) */
+    bytereg = (unsigned char *) (GICD + GICD_ITARGETSR);
+    bytereg[desc->irq] = mask;
+
+    spin_unlock(&gic.lock);
 }
 
 /* XXX different for level vs edge */
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index b2f922c..5a504ad 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -438,6 +438,32 @@ static void vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int ir
     spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
 }
 
+static inline void irq_set_affinity(struct irq_desc *desc,
+                                    const cpumask_t *cpu_mask)
+{
+    if ( desc != NULL )
+        desc->handler->set_affinity(desc, cpu_mask);
+}
+
+void arch_move_irqs(struct vcpu *v)
+{
+    const cpumask_t *cpu_mask = cpumask_of(v->processor);
+    struct domain *d = v->domain;
+    struct pending_irq *p;
+    struct vcpu *v_target;
+    int i;
+
+    for ( i = 32; i < d->arch.vgic.nr_lines; i++ )
+    {
+        v_target = vgic_get_target_vcpu(v, i);
+        if ( v_target == v )
+        {
+            p = irq_to_pending(v, i);
+            irq_set_affinity(p->desc, cpu_mask);
+        }
+    }
+}
+
 static void vgic_disable_irqs(struct vcpu *v, uint32_t r, int n)
 {
     const unsigned long mask = r;
@@ -493,6 +519,7 @@ static void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n)
         }
         if ( p->desc != NULL )
         {
+            irq_set_affinity(p->desc, cpumask_of(v_target->processor));
             spin_lock_irqsave(&p->desc->lock, flags);
             p->desc->handler->enable(p->desc);
             spin_unlock_irqrestore(&p->desc->lock, flags);
@@ -681,6 +708,7 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
             unsigned int irq, target, old_target;
             unsigned long old_target_mask;
             struct vcpu *v_target, *v_old;
+            struct pending_irq *p;
 
             target = i % 8;
             old_target_mask = byte_read(rank->itargets[REG_RANK_INDEX(8, gicd_reg - GICD_ITARGETSR)], 0, i/8);
@@ -692,6 +720,8 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
                 v_target = v->domain->vcpu[target];
                 v_old = v->domain->vcpu[old_target];
                 vgic_migrate_irq(v_old, v_target, irq);
+                p = irq_to_pending(v_target, irq);
+                irq_set_affinity(p->desc, cpumask_of(v_target->processor));
             }
             i += 8 - target;
         }
diff --git a/xen/include/asm-arm/gic.h b/xen/include/asm-arm/gic.h
index ba9ba9b..dcc2f1c 100644
--- a/xen/include/asm-arm/gic.h
+++ b/xen/include/asm-arm/gic.h
@@ -228,6 +228,7 @@ int gic_irq_xlate(const u32 *intspec, unsigned int intsize,
 void gic_clear_lrs(struct vcpu *v);
 
 struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int irq);
+void arch_move_irqs(struct vcpu *v);
 
 #endif /* __ASSEMBLY__ */
 #endif
diff --git a/xen/include/asm-x86/irq.h b/xen/include/asm-x86/irq.h
index 9066d38..d3c55f3 100644
--- a/xen/include/asm-x86/irq.h
+++ b/xen/include/asm-x86/irq.h
@@ -197,4 +197,6 @@ void cleanup_domain_irq_mapping(struct domain *);
 
 bool_t cpu_has_pending_apic_eoi(void);
 
+static inline void arch_move_irqs(struct vcpu *v) { }
+
 #endif /* _ASM_HW_IRQ_H */
-- 
1.7.10.4

