[Xen-changelog] [xen-unstable] Rename irq_cfg->domain to irq_cfg->cpu_mask

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] Rename irq_cfg->domain to irq_cfg->cpu_mask
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 03 Sep 2010 09:50:19 -0700
Delivery-date: Fri, 03 Sep 2010 09:50:43 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1283153906 -3600
# Node ID 20920c12bc4815b1f755786c0924393809664807
# Parent  1a2731fb447ee8345e0b7e73b549f2d7ba1d82f4
Rename irq_cfg->domain to irq_cfg->cpu_mask

From: Sheng Yang <sheng.yang@xxxxxxxxx>
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/genapic/delivery.c              |    4 +-
 xen/arch/x86/genapic/x2apic.c                |    2 -
 xen/arch/x86/i8259.c                         |    2 -
 xen/arch/x86/io_apic.c                       |   16 +++++-----
 xen/arch/x86/irq.c                           |   40 +++++++++++++--------------
 xen/arch/x86/msi.c                           |    2 -
 xen/arch/x86/smpboot.c                       |    2 -
 xen/drivers/passthrough/vtd/iommu.c          |    2 -
 xen/include/asm-x86/genapic.h                |   16 +++++-----
 xen/include/asm-x86/irq.h                    |    4 +-
 xen/include/asm-x86/mach-generic/mach_apic.h |    2 -
 xen/include/xen/irq.h                        |    2 -
 12 files changed, 47 insertions(+), 47 deletions(-)
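
Note (not part of the patch): the field keeps its meaning, i.e. the set of CPUs the irq's vector is bound to; only the name changes, presumably to avoid colliding with Xen's guest "domain" terminology. Below is a minimal sketch of the renamed structure and of the online-CPU intersection that callers such as __bind_irq_vector() perform. It reuses the cpumask helpers already visible in this changeset (cpus_and, cpus_empty, cpu_online_map); the wrapper function itself is hypothetical.

    /* Illustrative sketch only; mirrors the x86 struct irq_cfg after the rename. */
    struct irq_cfg {
        int       vector;            /* allocated vector, or IRQ_VECTOR_UNASSIGNED */
        cpumask_t cpu_mask;          /* CPUs the vector is currently bound to */
        cpumask_t old_cpu_mask;      /* previous binding, kept while a move is in progress */
        unsigned  move_cleanup_count;
        u8        move_in_progress : 1;
    };

    /* Hypothetical helper: restrict a requested mask to online CPUs before
     * recording it in cfg->cpu_mask, as __bind_irq_vector() does below. */
    static int bind_to_online_cpus(struct irq_cfg *cfg, cpumask_t requested)
    {
        cpumask_t online_mask;

        cpus_and(online_mask, requested, cpu_online_map);
        if ( cpus_empty(online_mask) )
            return -EINVAL;

        cfg->cpu_mask = online_mask;
        return 0;
    }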

diff -r 1a2731fb447e -r 20920c12bc48 xen/arch/x86/genapic/delivery.c
--- a/xen/arch/x86/genapic/delivery.c   Mon Aug 30 08:31:57 2010 +0100
+++ b/xen/arch/x86/genapic/delivery.c   Mon Aug 30 08:38:26 2010 +0100
@@ -31,7 +31,7 @@ cpumask_t target_cpus_flat(void)
        return cpu_online_map;
 }
 
-cpumask_t vector_allocation_domain_flat(int cpu)
+cpumask_t vector_allocation_cpumask_flat(int cpu)
 {
        return cpu_online_map;
 } 
@@ -64,7 +64,7 @@ cpumask_t target_cpus_phys(void)
        return cpu_online_map;
 }
 
-cpumask_t vector_allocation_domain_phys(int cpu)
+cpumask_t vector_allocation_cpumask_phys(int cpu)
 {
        return cpumask_of_cpu(cpu);
 }
diff -r 1a2731fb447e -r 20920c12bc48 xen/arch/x86/genapic/x2apic.c
--- a/xen/arch/x86/genapic/x2apic.c     Mon Aug 30 08:31:57 2010 +0100
+++ b/xen/arch/x86/genapic/x2apic.c     Mon Aug 30 08:38:26 2010 +0100
@@ -94,7 +94,7 @@ cpumask_t target_cpus_x2apic(void)
     return cpu_online_map;
 }
 
-cpumask_t vector_allocation_domain_x2apic(int cpu)
+cpumask_t vector_allocation_cpumask_x2apic(int cpu)
 {
     return cpumask_of_cpu(cpu);
 }
diff -r 1a2731fb447e -r 20920c12bc48 xen/arch/x86/i8259.c
--- a/xen/arch/x86/i8259.c      Mon Aug 30 08:31:57 2010 +0100
+++ b/xen/arch/x86/i8259.c      Mon Aug 30 08:38:26 2010 +0100
@@ -392,7 +392,7 @@ void __init init_IRQ(void)
         
         desc->handler = &i8259A_irq_type;
         per_cpu(vector_irq, cpu)[FIRST_LEGACY_VECTOR + irq] = irq;
-        cfg->domain = cpumask_of_cpu(cpu);
+        cfg->cpu_mask= cpumask_of_cpu(cpu);
         cfg->vector = FIRST_LEGACY_VECTOR + irq;
     }
     
diff -r 1a2731fb447e -r 20920c12bc48 xen/arch/x86/io_apic.c
--- a/xen/arch/x86/io_apic.c    Mon Aug 30 08:31:57 2010 +0100
+++ b/xen/arch/x86/io_apic.c    Mon Aug 30 08:38:26 2010 +0100
@@ -412,7 +412,7 @@ fastcall void smp_irq_move_cleanup_inter
         if (!cfg->move_cleanup_count)
             goto unlock;
 
-        if (vector == cfg->vector && cpu_isset(me, cfg->domain))
+        if (vector == cfg->vector && cpu_isset(me, cfg->cpu_mask))
             goto unlock;
 
         irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
@@ -441,7 +441,7 @@ static void send_cleanup_vector(struct i
 {
     cpumask_t cleanup_mask;
 
-    cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
+    cpus_and(cleanup_mask, cfg->old_cpu_mask, cpu_online_map);
     cfg->move_cleanup_count = cpus_weight(cleanup_mask);
     genapic->send_IPI_mask(&cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
 
@@ -460,7 +460,7 @@ void irq_complete_move(struct irq_desc *
     vector = get_irq_regs()->entry_vector;
     me = smp_processor_id();
 
-    if (vector == cfg->vector && cpu_isset(me, cfg->domain))
+    if (vector == cfg->vector && cpu_isset(me, cfg->cpu_mask))
         send_cleanup_vector(cfg);
 }
 
@@ -488,7 +488,7 @@ unsigned int set_desc_affinity(struct ir
         return BAD_APICID;
 
     cpus_copy(desc->affinity, mask);
-    cpus_and(dest_mask, desc->affinity, cfg->domain);
+    cpus_and(dest_mask, desc->affinity, cfg->cpu_mask);
 
     return cpu_mask_to_apicid(dest_mask);
 }
@@ -638,8 +638,8 @@ void /*__init*/ setup_ioapic_dest(void)
                 continue;
             irq = pin_2_irq(irq_entry, ioapic, pin);
             cfg = irq_cfg(irq);
-            BUG_ON(cpus_empty(cfg->domain));
-            set_ioapic_affinity_irq(irq, cfg->domain);
+            BUG_ON(cpus_empty(cfg->cpu_mask));
+            set_ioapic_affinity_irq(irq, cfg->cpu_mask);
         }
 
     }
@@ -1003,7 +1003,7 @@ static void __init setup_IO_APIC_irqs(vo
             }
             cfg = irq_cfg(irq);
             SET_DEST(entry.dest.dest32, entry.dest.logical.logical_dest,
-                cpu_mask_to_apicid(cfg->domain));
+                cpu_mask_to_apicid(cfg->cpu_mask));
             spin_lock_irqsave(&ioapic_lock, flags);
             io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
             io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
@@ -2446,7 +2446,7 @@ int ioapic_guest_write(unsigned long phy
     rte.vector = cfg->vector;
 
     SET_DEST(rte.dest.dest32, rte.dest.logical.logical_dest,
-        cpu_mask_to_apicid(cfg->domain));
+        cpu_mask_to_apicid(cfg->cpu_mask));
 
     io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&rte) + 0));
     io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&rte) + 1));
diff -r 1a2731fb447e -r 20920c12bc48 xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c        Mon Aug 30 08:31:57 2010 +0100
+++ b/xen/arch/x86/irq.c        Mon Aug 30 08:38:26 2010 +0100
@@ -74,39 +74,39 @@ void unlock_vector_lock(void)
     spin_unlock(&vector_lock);
 }
 
-static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
-{
-    cpumask_t mask;
+static int __bind_irq_vector(int irq, int vector, cpumask_t cpu_mask)
+{
+    cpumask_t online_mask;
     int cpu;
     struct irq_cfg *cfg = irq_cfg(irq);
 
     BUG_ON((unsigned)irq >= nr_irqs);
     BUG_ON((unsigned)vector >= NR_VECTORS);
 
-    cpus_and(mask, domain, cpu_online_map);
-    if (cpus_empty(mask))
+    cpus_and(online_mask, cpu_mask, cpu_online_map);
+    if (cpus_empty(online_mask))
         return -EINVAL;
-    if ((cfg->vector == vector) && cpus_equal(cfg->domain, mask))
+    if ((cfg->vector == vector) && cpus_equal(cfg->cpu_mask, online_mask))
         return 0;
     if (cfg->vector != IRQ_VECTOR_UNASSIGNED) 
         return -EBUSY;
-    for_each_cpu_mask(cpu, mask)
+    for_each_cpu_mask(cpu, online_mask)
         per_cpu(vector_irq, cpu)[vector] = irq;
     cfg->vector = vector;
-    cfg->domain = mask;
+    cfg->cpu_mask = online_mask;
     irq_status[irq] = IRQ_USED;
     if (IO_APIC_IRQ(irq))
         irq_vector[irq] = vector;
     return 0;
 }
 
-int bind_irq_vector(int irq, int vector, cpumask_t domain)
+int bind_irq_vector(int irq, int vector, cpumask_t cpu_mask)
 {
     unsigned long flags;
     int ret;
 
     spin_lock_irqsave(&vector_lock, flags);
-    ret = __bind_irq_vector(irq, vector, domain);
+    ret = __bind_irq_vector(irq, vector, cpu_mask);
     spin_unlock_irqrestore(&vector_lock, flags);
     return ret;
 }
@@ -179,13 +179,13 @@ static void __clear_irq_vector(int irq)
     BUG_ON(!cfg->vector);
 
     vector = cfg->vector;
-    cpus_and(tmp_mask, cfg->domain, cpu_online_map);
+    cpus_and(tmp_mask, cfg->cpu_mask, cpu_online_map);
 
     for_each_cpu_mask(cpu, tmp_mask)
         per_cpu(vector_irq, cpu)[vector] = -1;
 
     cfg->vector = IRQ_VECTOR_UNASSIGNED;
-    cpus_clear(cfg->domain);
+    cpus_clear(cfg->cpu_mask);
     init_one_irq_status(irq);
 
     if (likely(!cfg->move_in_progress))
@@ -257,8 +257,8 @@ static void init_one_irq_cfg(struct irq_
 static void init_one_irq_cfg(struct irq_cfg *cfg)
 {
     cfg->vector = IRQ_VECTOR_UNASSIGNED;
-    cpus_clear(cfg->domain);
-    cpus_clear(cfg->old_domain);
+    cpus_clear(cfg->cpu_mask);
+    cpus_clear(cfg->old_cpu_mask);
 }
 
 int init_irq_data(void)
@@ -354,7 +354,7 @@ int __assign_irq_vector(int irq, struct 
     old_vector = irq_to_vector(irq);
     if (old_vector) {
         cpus_and(tmp_mask, mask, cpu_online_map);
-        cpus_and(tmp_mask, cfg->domain, tmp_mask);
+        cpus_and(tmp_mask, cfg->cpu_mask, tmp_mask);
         if (!cpus_empty(tmp_mask)) {
             cfg->vector = old_vector;
             return 0;
@@ -369,7 +369,7 @@ int __assign_irq_vector(int irq, struct 
         int new_cpu;
         int vector, offset;
 
-        tmp_mask = vector_allocation_domain(cpu);
+        tmp_mask = vector_allocation_cpumask(cpu);
         cpus_and(tmp_mask, tmp_mask, cpu_online_map);
 
         vector = current_vector;
@@ -395,12 +395,12 @@ next:
         current_offset = offset;
         if (old_vector) {
             cfg->move_in_progress = 1;
-            cpus_copy(cfg->old_domain, cfg->domain);
+            cpus_copy(cfg->old_cpu_mask, cfg->cpu_mask);
         }
         for_each_cpu_mask(new_cpu, tmp_mask)
             per_cpu(vector_irq, new_cpu)[vector] = irq;
         cfg->vector = vector;
-        cpus_copy(cfg->domain, tmp_mask);
+        cpus_copy(cfg->cpu_mask, tmp_mask);
 
         irq_status[irq] = IRQ_USED;
             if (IO_APIC_IRQ(irq))
@@ -424,7 +424,7 @@ int assign_irq_vector(int irq)
     ret = __assign_irq_vector(irq, cfg, TARGET_CPUS);
     if (!ret) {
         ret = cfg->vector;
-        cpus_copy(desc->affinity, cfg->domain);
+        cpus_copy(desc->affinity, cfg->cpu_mask);
     }
     spin_unlock_irqrestore(&vector_lock, flags);
     return ret;
@@ -445,7 +445,7 @@ void __setup_vector_irq(int cpu)
     /* Mark the inuse vectors */
     for (irq = 0; irq < nr_irqs; ++irq) {
         cfg = irq_cfg(irq);
-        if (!cpu_isset(cpu, cfg->domain))
+        if (!cpu_isset(cpu, cfg->cpu_mask))
             continue;
         vector = irq_to_vector(irq);
         per_cpu(vector_irq, cpu)[vector] = irq;
diff -r 1a2731fb447e -r 20920c12bc48 xen/arch/x86/msi.c
--- a/xen/arch/x86/msi.c        Mon Aug 30 08:31:57 2010 +0100
+++ b/xen/arch/x86/msi.c        Mon Aug 30 08:38:26 2010 +0100
@@ -124,7 +124,7 @@ void msi_compose_msg(struct pci_dev *pde
     cpumask_t domain;
     struct irq_cfg *cfg = irq_cfg(irq);
     int vector = cfg->vector;
-    domain = cfg->domain;
+    domain = cfg->cpu_mask;
 
     if ( cpus_empty( domain ) ) {
         dprintk(XENLOG_ERR,"%s, compose msi message error!!\n", __func__);
diff -r 1a2731fb447e -r 20920c12bc48 xen/arch/x86/smpboot.c
--- a/xen/arch/x86/smpboot.c    Mon Aug 30 08:31:57 2010 +0100
+++ b/xen/arch/x86/smpboot.c    Mon Aug 30 08:38:26 2010 +0100
@@ -1015,7 +1015,7 @@ void __init smp_intr_init(void)
         irq_vector[irq] = FIRST_HIPRIORITY_VECTOR + seridx + 1;
         per_cpu(vector_irq, cpu)[FIRST_HIPRIORITY_VECTOR + seridx + 1] = irq;
         irq_cfg[irq].vector = FIRST_HIPRIORITY_VECTOR + seridx + 1;
-        irq_cfg[irq].domain = (cpumask_t)CPU_MASK_ALL;
+        irq_cfg[irq].cpu_mask = (cpumask_t)CPU_MASK_ALL;
     }
 
     /* IPI for cleanuping vectors after irq move */
diff -r 1a2731fb447e -r 20920c12bc48 xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c       Mon Aug 30 08:31:57 2010 +0100
+++ b/xen/drivers/passthrough/vtd/iommu.c       Mon Aug 30 08:38:26 2010 +0100
@@ -1837,7 +1837,7 @@ static int init_vtd_hw(void)
         }
 
         cfg = irq_cfg(iommu->irq);
-        dma_msi_set_affinity(iommu->irq, cfg->domain);
+        dma_msi_set_affinity(iommu->irq, cfg->cpu_mask);
 
         clear_fault_bits(iommu);
 
diff -r 1a2731fb447e -r 20920c12bc48 xen/include/asm-x86/genapic.h
--- a/xen/include/asm-x86/genapic.h     Mon Aug 30 08:31:57 2010 +0100
+++ b/xen/include/asm-x86/genapic.h     Mon Aug 30 08:38:26 2010 +0100
@@ -34,7 +34,7 @@ struct genapic {
        void (*init_apic_ldr)(void);
        void (*clustered_apic_check)(void);
        cpumask_t (*target_cpus)(void);
-       cpumask_t (*vector_allocation_domain)(int cpu);
+       cpumask_t (*vector_allocation_cpumask)(int cpu);
        unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
        void (*send_IPI_mask)(const cpumask_t *mask, int vector);
     void (*send_IPI_self)(int vector);
@@ -58,14 +58,14 @@ unsigned int cpu_mask_to_apicid_flat(cpu
 unsigned int cpu_mask_to_apicid_flat(cpumask_t cpumask);
 void send_IPI_mask_flat(const cpumask_t *mask, int vector);
 void send_IPI_self_flat(int vector);
-cpumask_t vector_allocation_domain_flat(int cpu);
+cpumask_t vector_allocation_cpumask_flat(int cpu);
 #define GENAPIC_FLAT \
        .int_delivery_mode = dest_LowestPrio, \
        .int_dest_mode = 1 /* logical delivery */, \
        .init_apic_ldr = init_apic_ldr_flat, \
        .clustered_apic_check = clustered_apic_check_flat, \
        .target_cpus = target_cpus_flat, \
-       .vector_allocation_domain = vector_allocation_domain_flat, \
+       .vector_allocation_cpumask = vector_allocation_cpumask_flat, \
        .cpu_mask_to_apicid = cpu_mask_to_apicid_flat, \
        .send_IPI_mask = send_IPI_mask_flat, \
        .send_IPI_self = send_IPI_self_flat
@@ -80,14 +80,14 @@ void send_IPI_mask_x2apic_phys(const cpu
 void send_IPI_mask_x2apic_phys(const cpumask_t *mask, int vector);
 void send_IPI_mask_x2apic_cluster(const cpumask_t *mask, int vector);
 void send_IPI_self_x2apic(int vector);
-cpumask_t vector_allocation_domain_x2apic(int cpu);
+cpumask_t vector_allocation_cpumask_x2apic(int cpu);
 #define GENAPIC_X2APIC_PHYS \
        .int_delivery_mode = dest_Fixed, \
        .int_dest_mode = 0 /* physical delivery */, \
        .init_apic_ldr = init_apic_ldr_x2apic_phys, \
        .clustered_apic_check = clustered_apic_check_x2apic, \
        .target_cpus = target_cpus_x2apic, \
-       .vector_allocation_domain = vector_allocation_domain_x2apic, \
+       .vector_allocation_cpumask = vector_allocation_cpumask_x2apic, \
        .cpu_mask_to_apicid = cpu_mask_to_apicid_x2apic_phys, \
        .send_IPI_mask = send_IPI_mask_x2apic_phys,       \
        .send_IPI_self = send_IPI_self_x2apic
@@ -98,7 +98,7 @@ cpumask_t vector_allocation_domain_x2api
     .init_apic_ldr = init_apic_ldr_x2apic_cluster, \
     .clustered_apic_check = clustered_apic_check_x2apic, \
     .target_cpus = target_cpus_x2apic, \
-    .vector_allocation_domain = vector_allocation_domain_x2apic, \
+    .vector_allocation_cpumask = vector_allocation_cpumask_x2apic, \
     .cpu_mask_to_apicid = cpu_mask_to_apicid_x2apic_cluster, \
     .send_IPI_mask = send_IPI_mask_x2apic_cluster,       \
     .send_IPI_self = send_IPI_self_x2apic
@@ -109,14 +109,14 @@ unsigned int cpu_mask_to_apicid_phys(cpu
 unsigned int cpu_mask_to_apicid_phys(cpumask_t cpumask);
 void send_IPI_mask_phys(const cpumask_t *mask, int vector);
 void send_IPI_self_phys(int vector);
-cpumask_t vector_allocation_domain_phys(int cpu);
+cpumask_t vector_allocation_cpumask_phys(int cpu);
 #define GENAPIC_PHYS \
        .int_delivery_mode = dest_Fixed, \
        .int_dest_mode = 0 /* physical delivery */, \
        .init_apic_ldr = init_apic_ldr_phys, \
        .clustered_apic_check = clustered_apic_check_phys, \
        .target_cpus = target_cpus_phys, \
-       .vector_allocation_domain = vector_allocation_domain_phys, \
+       .vector_allocation_cpumask = vector_allocation_cpumask_phys, \
        .cpu_mask_to_apicid = cpu_mask_to_apicid_phys, \
        .send_IPI_mask = send_IPI_mask_phys, \
        .send_IPI_self = send_IPI_self_phys
diff -r 1a2731fb447e -r 20920c12bc48 xen/include/asm-x86/irq.h
--- a/xen/include/asm-x86/irq.h Mon Aug 30 08:31:57 2010 +0100
+++ b/xen/include/asm-x86/irq.h Mon Aug 30 08:38:26 2010 +0100
@@ -25,8 +25,8 @@
 
 struct irq_cfg {
         int  vector;
-        cpumask_t domain;
-        cpumask_t old_domain;
+        cpumask_t cpu_mask;
+        cpumask_t old_cpu_mask;
         unsigned move_cleanup_count;
         u8 move_in_progress : 1;
 };
diff -r 1a2731fb447e -r 20920c12bc48 xen/include/asm-x86/mach-generic/mach_apic.h
--- a/xen/include/asm-x86/mach-generic/mach_apic.h      Mon Aug 30 08:31:57 2010 +0100
+++ b/xen/include/asm-x86/mach-generic/mach_apic.h      Mon Aug 30 08:38:26 2010 +0100
@@ -16,7 +16,7 @@
 #define init_apic_ldr (genapic->init_apic_ldr)
 #define clustered_apic_check (genapic->clustered_apic_check) 
 #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
-#define vector_allocation_domain(cpu) (genapic->vector_allocation_domain(cpu))
+#define vector_allocation_cpumask(cpu) (genapic->vector_allocation_cpumask(cpu))
 
 static inline void enable_apic_mode(void)
 {
diff -r 1a2731fb447e -r 20920c12bc48 xen/include/xen/irq.h
--- a/xen/include/xen/irq.h     Mon Aug 30 08:31:57 2010 +0100
+++ b/xen/include/xen/irq.h     Mon Aug 30 08:38:26 2010 +0100
@@ -117,7 +117,7 @@ extern int request_irq_vector(unsigned i
 
 struct irq_cfg {
         int  vector;
-        cpumask_t domain;
+        cpumask_t cpu_mask;
 };
 
 extern struct irq_cfg irq_cfg[];

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
