[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 1/4] convert more literal uses of cpumask_t to pointers



This is particularly relevant as the number of supported CPUs increases
(as recently happened with the default NR_CPUS): passing cpumask_t by
value copies the entire mask on every call, and that copy grows with the
CPU count, whereas passing a pointer stays constant-size.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/xen/arch/ia64/linux-xen/iosapic.c
+++ b/xen/arch/ia64/linux-xen/iosapic.c
@@ -352,7 +352,7 @@ unmask_irq (unsigned int irq)
 
 
 static void
-iosapic_set_affinity (unsigned int irq, cpumask_t mask)
+iosapic_set_affinity (unsigned int irq, const cpumask_t *mask)
 {
 #ifdef CONFIG_SMP
        unsigned long flags;
@@ -366,10 +366,10 @@ iosapic_set_affinity (unsigned int irq, 
        irq &= (~IA64_IRQ_REDIRECTED);
        vec = irq_to_vector(irq);
 
-       if (cpus_empty(mask))
+       if (cpumask_empty(mask))
                return;
 
-       dest = cpu_physical_id(first_cpu(mask));
+       dest = cpu_physical_id(cpumask_first(mask));
 
        if (list_empty(&iosapic_intr_info[vec].rtes))
                return;                 /* not an IOSAPIC interrupt */
--- a/xen/arch/ia64/linux-xen/mca.c
+++ b/xen/arch/ia64/linux-xen/mca.c
@@ -1312,7 +1312,7 @@ ia64_mca_cmc_int_handler(int cmc_irq, vo
 #ifndef XEN    /* XXX FIXME */
                        schedule_work(&cmc_disable_work);
 #else
-                       cpumask_raise_softirq(cpu_online_map,
+                       cpumask_raise_softirq(&cpu_online_map,
                                              CMC_DISABLE_SOFTIRQ);
 #endif
 
@@ -1383,7 +1383,7 @@ ia64_mca_cmc_int_caller(int cmc_irq, voi
 #ifndef XEN    /* XXX FIXME */
                        schedule_work(&cmc_enable_work);
 #else
-                       cpumask_raise_softirq(cpu_online_map,
+                       cpumask_raise_softirq(&cpu_online_map,
                                              CMC_ENABLE_SOFTIRQ);
 #endif
                        cmc_polling_enabled = 0;
@@ -1904,7 +1904,7 @@ ia64_mca_late_init(void)
 #ifndef XEN    /* XXX FIXME */
        schedule_work(&cmc_enable_work);
 #else
-       cpumask_raise_softirq(cpu_online_map, CMC_ENABLE_SOFTIRQ);
+       cpumask_raise_softirq(&cpu_online_map, CMC_ENABLE_SOFTIRQ);
 #endif
 
        IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __FUNCTION__);
--- a/xen/arch/x86/hpet.c
+++ b/xen/arch/x86/hpet.c
@@ -154,16 +154,16 @@ static int reprogram_hpet_evt_channel(
     return ret;
 }
 
-static void evt_do_broadcast(cpumask_t mask)
+static void evt_do_broadcast(cpumask_t *mask)
 {
     unsigned int cpu = smp_processor_id();
 
-    if ( cpu_test_and_clear(cpu, mask) )
+    if ( cpumask_test_and_clear_cpu(cpu, mask) )
         raise_softirq(TIMER_SOFTIRQ);
 
-    cpuidle_wakeup_mwait(&mask);
+    cpuidle_wakeup_mwait(mask);
 
-    if ( !cpus_empty(mask) )
+    if ( !cpumask_empty(mask) )
        cpumask_raise_softirq(mask, TIMER_SOFTIRQ);
 }
 
@@ -202,7 +202,7 @@ again:
     }
 
     /* wakeup the cpus which have an expired event. */
-    evt_do_broadcast(mask);
+    evt_do_broadcast(&mask);
 
     if ( next_event != STIME_MAX )
     {
@@ -305,14 +305,14 @@ static void hpet_msi_end(unsigned int ir
 {
 }
 
-static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
+static void hpet_msi_set_affinity(unsigned int irq, const cpumask_t *mask)
 {
     struct msi_msg msg;
     unsigned int dest;
     struct irq_desc * desc = irq_to_desc(irq);
     struct irq_cfg *cfg= desc->chip_data;
 
-    dest = set_desc_affinity(desc, &mask);
+    dest = set_desc_affinity(desc, mask);
     if (dest == BAD_APICID)
         return;
 
@@ -471,9 +471,7 @@ static void hpet_attach_channel(unsigned
     if ( ch->cpu != cpu )
         return;
 
-    /* set irq affinity */
-    irq_desc[ch->irq].handler->
-        set_affinity(ch->irq, cpumask_of_cpu(ch->cpu));
+    hpet_msi_set_affinity(ch->irq, cpumask_of(ch->cpu));
 }
 
 static void hpet_detach_channel(unsigned int cpu,
@@ -495,9 +493,7 @@ static void hpet_detach_channel(unsigned
     }
 
     ch->cpu = first_cpu(ch->cpumask);
-    /* set irq affinity */
-    irq_desc[ch->irq].handler->
-        set_affinity(ch->irq, cpumask_of_cpu(ch->cpu));
+    hpet_msi_set_affinity(ch->irq, cpumask_of(ch->cpu));
 }
 
 #include <asm/mc146818rtc.h>
--- a/xen/arch/x86/io_apic.c
+++ b/xen/arch/x86/io_apic.c
@@ -697,13 +697,13 @@ set_ioapic_affinity_irq_desc(struct irq_
 }
 
 static void
-set_ioapic_affinity_irq(unsigned int irq, const struct cpumask mask)
+set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask)
 {
     struct irq_desc *desc;
 
     desc = irq_to_desc(irq);
 
-    set_ioapic_affinity_irq_desc(desc, &mask);
+    set_ioapic_affinity_irq_desc(desc, mask);
 }
 #endif /* CONFIG_SMP */
 
@@ -802,7 +802,7 @@ void /*__init*/ setup_ioapic_dest(void)
             irq = pin_2_irq(irq_entry, ioapic, pin);
             cfg = irq_cfg(irq);
             BUG_ON(cpus_empty(cfg->cpu_mask));
-            set_ioapic_affinity_irq(irq, cfg->cpu_mask);
+            set_ioapic_affinity_irq(irq, &cfg->cpu_mask);
         }
 
     }
@@ -2063,7 +2063,7 @@ static void __init check_timer(void)
     vector = FIRST_HIPRIORITY_VECTOR;
     clear_irq_vector(0);
 
-    if ((ret = bind_irq_vector(0, vector, mask_all)))
+    if ((ret = bind_irq_vector(0, vector, &mask_all)))
         printk(KERN_ERR"..IRQ0 is not set correctly with ioapic!!!, err:%d\n", 
ret);
     
     irq_desc[0].status &= ~IRQ_DISABLED;
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -108,7 +108,7 @@ static void trace_irq_mask(u32 event, in
     trace_var(event, 1, sizeof(d), &d);
 }
 
-static int __init __bind_irq_vector(int irq, int vector, cpumask_t cpu_mask)
+static int __init __bind_irq_vector(int irq, int vector, const cpumask_t 
*cpu_mask)
 {
     cpumask_t online_mask;
     int cpu;
@@ -117,7 +117,7 @@ static int __init __bind_irq_vector(int 
     BUG_ON((unsigned)irq >= nr_irqs);
     BUG_ON((unsigned)vector >= NR_VECTORS);
 
-    cpus_and(online_mask, cpu_mask, cpu_online_map);
+    cpus_and(online_mask, *cpu_mask, cpu_online_map);
     if (cpus_empty(online_mask))
         return -EINVAL;
     if ((cfg->vector == vector) && cpus_equal(cfg->cpu_mask, online_mask))
@@ -140,7 +140,7 @@ static int __init __bind_irq_vector(int 
     return 0;
 }
 
-int __init bind_irq_vector(int irq, int vector, cpumask_t cpu_mask)
+int __init bind_irq_vector(int irq, int vector, const cpumask_t *cpu_mask)
 {
     unsigned long flags;
     int ret;
@@ -582,7 +582,7 @@ void move_masked_irq(int irq)
      * For correct operation this depends on the caller masking the irqs.
      */
     if (likely(cpus_intersects(desc->pending_mask, cpu_online_map)))
-        desc->handler->set_affinity(irq, desc->pending_mask);
+        desc->handler->set_affinity(irq, &desc->pending_mask);
 
     cpus_clear(desc->pending_mask);
 }
@@ -1409,7 +1409,7 @@ int pirq_guest_bind(struct vcpu *v, stru
         /* Attempt to bind the interrupt target to the correct CPU. */
         cpu_set(v->processor, cpumask);
         if ( !opt_noirqbalance && (desc->handler->set_affinity != NULL) )
-            desc->handler->set_affinity(irq, cpumask);
+            desc->handler->set_affinity(irq, &cpumask);
     }
     else if ( !will_share || !action->shareable )
     {
@@ -1963,7 +1963,7 @@ void fixup_irqs(void)
             desc->handler->disable(irq);
 
         if ( desc->handler->set_affinity )
-            desc->handler->set_affinity(irq, affinity);
+            desc->handler->set_affinity(irq, &affinity);
         else if ( !(warned++) )
             set_affinity = 0;
 
--- a/xen/arch/x86/msi.c
+++ b/xen/arch/x86/msi.c
@@ -266,7 +266,7 @@ static void write_msi_msg(struct msi_des
     }
 }
 
-void set_msi_affinity(unsigned int irq, cpumask_t mask)
+void set_msi_affinity(unsigned int irq, const cpumask_t *mask)
 {
     struct msi_msg msg;
     unsigned int dest;
@@ -274,7 +274,7 @@ void set_msi_affinity(unsigned int irq, 
     struct msi_desc *msi_desc = desc->msi_desc;
     struct irq_cfg *cfg = desc->chip_data;
 
-    dest = set_desc_affinity(desc, &mask);
+    dest = set_desc_affinity(desc, mask);
     if (dest == BAD_APICID || !msi_desc)
         return;
 
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -195,7 +195,7 @@ static void smp_send_timer_broadcast_ipi
 
     if ( !cpus_empty(mask) )
     {
-        cpumask_raise_softirq(mask, TIMER_SOFTIRQ);
+        cpumask_raise_softirq(&mask, TIMER_SOFTIRQ);
     }
 }
 
--- a/xen/common/keyhandler.c
+++ b/xen/common/keyhandler.c
@@ -207,10 +207,10 @@ static struct keyhandler reboot_machine_
     .desc = "reboot machine"
 };
 
-static void cpuset_print(char *set, int size, cpumask_t mask)
+static void cpuset_print(char *set, int size, const cpumask_t *mask)
 {
     *set++ = '{';
-    set += cpulist_scnprintf(set, size-2, mask);
+    set += cpulist_scnprintf(set, size-2, *mask);
     *set++ = '}';
     *set++ = '\0';
 }
@@ -244,7 +244,7 @@ static void dump_domains(unsigned char k
     {
         unsigned int i;
         printk("General information for domain %u:\n", d->domain_id);
-        cpuset_print(tmpstr, sizeof(tmpstr), *d->domain_dirty_cpumask);
+        cpuset_print(tmpstr, sizeof(tmpstr), d->domain_dirty_cpumask);
         printk("    refcnt=%d dying=%d nr_pages=%d xenheap_pages=%d "
                "dirty_cpus=%s max_pages=%u\n",
                atomic_read(&d->refcnt), d->is_dying,
@@ -278,9 +278,9 @@ static void dump_domains(unsigned char k
                    v->pause_flags, v->poll_evtchn,
                    vcpu_info(v, evtchn_upcall_pending),
                    vcpu_info(v, evtchn_upcall_mask));
-            cpuset_print(tmpstr, sizeof(tmpstr), *v->vcpu_dirty_cpumask);
+            cpuset_print(tmpstr, sizeof(tmpstr), v->vcpu_dirty_cpumask);
             printk("dirty_cpus=%s ", tmpstr);
-            cpuset_print(tmpstr, sizeof(tmpstr), *v->cpu_affinity);
+            cpuset_print(tmpstr, sizeof(tmpstr), v->cpu_affinity);
             printk("cpu_affinity=%s\n", tmpstr);
             arch_dump_vcpu_info(v);
             periodic_timer_print(tmpstr, sizeof(tmpstr), v->periodic_period);
--- a/xen/common/rcupdate.c
+++ b/xen/common/rcupdate.c
@@ -117,7 +117,7 @@ static void force_quiescent_state(struct
          */
         cpumask = rcp->cpumask;
         cpu_clear(rdp->cpu, cpumask);
-        cpumask_raise_softirq(cpumask, SCHEDULE_SOFTIRQ);
+        cpumask_raise_softirq(&cpumask, SCHEDULE_SOFTIRQ);
     }
 }
 
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -310,7 +310,7 @@ __runq_tickle(unsigned int cpu, struct c
 
     /* Send scheduler interrupts to designated CPUs */
     if ( !cpus_empty(mask) )
-        cpumask_raise_softirq(mask, SCHEDULE_SOFTIRQ);
+        cpumask_raise_softirq(&mask, SCHEDULE_SOFTIRQ);
 }
 
 static void
--- a/xen/common/softirq.c
+++ b/xen/common/softirq.c
@@ -68,15 +68,16 @@ void open_softirq(int nr, softirq_handle
     softirq_handlers[nr] = handler;
 }
 
-void cpumask_raise_softirq(cpumask_t mask, unsigned int nr)
+void cpumask_raise_softirq(const cpumask_t *mask, unsigned int nr)
 {
     int cpu;
+    cpumask_t send_mask = CPU_MASK_NONE;
 
-    for_each_cpu_mask(cpu, mask)
-        if ( test_and_set_bit(nr, &softirq_pending(cpu)) )
-            cpu_clear(cpu, mask);
+    for_each_cpu_mask(cpu, *mask)
+        if ( !test_and_set_bit(nr, &softirq_pending(cpu)) )
+            cpu_set(cpu, send_mask);
 
-    smp_send_event_check_mask(&mask);
+    smp_send_event_check_mask(&send_mask);
 }
 
 void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
--- a/xen/drivers/passthrough/amd/iommu_init.c
+++ b/xen/drivers/passthrough/amd/iommu_init.c
@@ -344,7 +344,7 @@ static void amd_iommu_reset_event_log(st
     set_iommu_event_log_control(iommu, IOMMU_CONTROL_ENABLED);
 }
 
-static void iommu_msi_set_affinity(unsigned int irq, cpumask_t mask)
+static void iommu_msi_set_affinity(unsigned int irq, const cpumask_t *mask)
 {
     struct msi_msg msg;
     unsigned int dest;
@@ -355,7 +355,7 @@ static void iommu_msi_set_affinity(unsig
     u8 dev = PCI_SLOT(iommu->bdf & 0xff);
     u8 func = PCI_FUNC(iommu->bdf & 0xff);
 
-    dest = set_desc_affinity(desc, &mask);
+    dest = set_desc_affinity(desc, mask);
 
     if ( dest == BAD_APICID )
     {
@@ -591,7 +591,7 @@ static void enable_iommu(struct amd_iomm
     register_iommu_event_log_in_mmio_space(iommu);
     register_iommu_exclusion_range(iommu);
 
-    iommu_msi_set_affinity(iommu->irq, cpu_online_map);
+    iommu_msi_set_affinity(iommu->irq, &cpu_online_map);
     amd_iommu_msi_enable(iommu, IOMMU_CONTROL_ENABLED);
 
     set_iommu_command_buffer_control(iommu, IOMMU_CONTROL_ENABLED);
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -998,7 +998,7 @@ static void dma_msi_end(unsigned int irq
     ack_APIC_irq();
 }
 
-static void dma_msi_set_affinity(unsigned int irq, cpumask_t mask)
+static void dma_msi_set_affinity(unsigned int irq, const cpumask_t *mask)
 {
     struct msi_msg msg;
     unsigned int dest;
@@ -1009,7 +1009,7 @@ static void dma_msi_set_affinity(unsigne
     struct irq_cfg *cfg = desc->chip_data;
 
 #ifdef CONFIG_X86
-    dest = set_desc_affinity(desc, &mask);
+    dest = set_desc_affinity(desc, mask);
     if (dest == BAD_APICID){
         dprintk(XENLOG_ERR VTDPREFIX, "Set iommu interrupt affinity error!\n");
         return;
@@ -1984,7 +1984,7 @@ static int init_vtd_hw(void)
         iommu = drhd->iommu;
 
         cfg = irq_cfg(iommu->irq);
-        dma_msi_set_affinity(iommu->irq, cfg->cpu_mask);
+        dma_msi_set_affinity(iommu->irq, &cfg->cpu_mask);
 
         clear_fault_bits(iommu);
 
--- a/xen/include/asm-x86/irq.h
+++ b/xen/include/asm-x86/irq.h
@@ -176,7 +176,7 @@ void move_masked_irq(int irq);
 
 int __assign_irq_vector(int irq, struct irq_cfg *, const cpumask_t *);
 
-int bind_irq_vector(int irq, int vector, cpumask_t domain);
+int bind_irq_vector(int irq, int vector, const cpumask_t *);
 
 void irq_set_affinity(struct irq_desc *, const cpumask_t *mask);
 
--- a/xen/include/asm-x86/msi.h
+++ b/xen/include/asm-x86/msi.h
@@ -77,7 +77,7 @@ struct msi_desc;
 /* Helper functions */
 extern void mask_msi_irq(unsigned int irq);
 extern void unmask_msi_irq(unsigned int irq);
-extern void set_msi_affinity(unsigned int vector, cpumask_t mask);
+extern void set_msi_affinity(unsigned int vector, const cpumask_t *);
 extern int pci_enable_msi(struct msi_info *msi, struct msi_desc **desc);
 extern void pci_disable_msi(struct msi_desc *desc);
 extern void pci_cleanup_msi(struct pci_dev *pdev);
--- a/xen/include/xen/irq.h
+++ b/xen/include/xen/irq.h
@@ -45,7 +45,7 @@ struct hw_interrupt_type {
     void (*disable)(unsigned int irq);
     void (*ack)(unsigned int irq);
     void (*end)(unsigned int irq, u8 vector);
-    void (*set_affinity)(unsigned int irq, cpumask_t mask);
+    void (*set_affinity)(unsigned int irq, const cpumask_t *);
 };
 
 typedef const struct hw_interrupt_type hw_irq_controller;
@@ -176,11 +176,6 @@ static inline void set_native_irq_info(u
     irq_desc[irq].affinity = *mask;
 }
 
-static inline void set_irq_info(int irq, cpumask_t mask)
-{
-    set_native_irq_info(irq, &mask);
-}
-
 unsigned int set_desc_affinity(struct irq_desc *, const cpumask_t *);
 
 #endif /* __XEN_IRQ_H__ */
--- a/xen/include/xen/softirq.h
+++ b/xen/include/xen/softirq.h
@@ -27,7 +27,7 @@ asmlinkage void do_softirq(void);
 void open_softirq(int nr, softirq_handler handler);
 void softirq_init(void);
 
-void cpumask_raise_softirq(cpumask_t mask, unsigned int nr);
+void cpumask_raise_softirq(const cpumask_t *, unsigned int nr);
 void cpu_raise_softirq(unsigned int cpu, unsigned int nr);
 void raise_softirq(unsigned int nr);
 


Attachment: cpumask-pointer.patch
Description: Text document

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.