WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-devel

[Xen-devel] [PATCH 3/4] pass struct irq_desc * to all other IRQ accessors

To: "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH 3/4] pass struct irq_desc * to all other IRQ accessors
From: "Jan Beulich" <JBeulich@xxxxxxxx>
Date: Tue, 20 Sep 2011 16:44:00 +0100
Delivery-date: Tue, 20 Sep 2011 08:46:16 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
This is again because the descriptor is generally more useful (with
the IRQ number being accessible in it if necessary) and going forward
will hopefully allow the removal of all direct accesses to the IRQ
descriptor array, in turn making it possible to make this some other,
more efficient data structure.

This additionally makes the .end() accessor optional, noting that in a
number of cases the functions were empty.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/xen/arch/ia64/linux-xen/iosapic.c
+++ b/xen/arch/ia64/linux-xen/iosapic.c
@@ -276,7 +276,7 @@ set_rte (unsigned int gsi, unsigned int 
 }
 
 static void
-nop (unsigned int vector)
+nop (struct irq_desc *desc)
 {
        /* do nothing... */
 }
@@ -300,13 +300,13 @@ kexec_disable_iosapic(void)
 }
 
 static void
-mask_irq (unsigned int irq)
+mask_irq (struct irq_desc *desc)
 {
        unsigned long flags;
        char __iomem *addr;
        u32 low32;
        int rte_index;
-       ia64_vector vec = irq_to_vector(irq);
+       ia64_vector vec = irq_to_vector(desc->irq);
        struct iosapic_rte_info *rte;
 
        if (list_empty(&iosapic_intr_info[vec].rtes))
@@ -326,13 +326,13 @@ mask_irq (unsigned int irq)
 }
 
 static void
-unmask_irq (unsigned int irq)
+unmask_irq (struct irq_desc *desc)
 {
        unsigned long flags;
        char __iomem *addr;
        u32 low32;
        int rte_index;
-       ia64_vector vec = irq_to_vector(irq);
+       ia64_vector vec = irq_to_vector(desc->irq);
        struct iosapic_rte_info *rte;
 
        if (list_empty(&iosapic_intr_info[vec].rtes))
@@ -408,19 +408,19 @@ iosapic_set_affinity (struct irq_desc *d
  */
 
 static unsigned int
-iosapic_startup_level_irq (unsigned int irq)
+iosapic_startup_level_irq (struct irq_desc *desc)
 {
-       unmask_irq(irq);
+       unmask_irq(desc);
        return 0;
 }
 
 static void
-iosapic_end_level_irq (unsigned int irq)
+iosapic_end_level_irq (struct irq_desc *desc)
 {
-       ia64_vector vec = irq_to_vector(irq);
+       ia64_vector vec = irq_to_vector(desc->irq);
        struct iosapic_rte_info *rte;
 
-       move_irq(irq);
+       move_irq(desc->irq);
        list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list)
                iosapic_eoi(rte->addr, vec);
 }
--- a/xen/arch/x86/hpet.c
+++ b/xen/arch/x86/hpet.c
@@ -49,9 +49,6 @@ static unsigned int __read_mostly num_hp
 
 DEFINE_PER_CPU(struct hpet_event_channel *, cpu_bc_channel);
 
-static unsigned int *__read_mostly irq_channel;
-#define irq_to_channel(irq)   irq_channel[irq]
-
 unsigned long __read_mostly hpet_address;
 
 /*
@@ -232,26 +229,20 @@ static void hpet_interrupt_handler(int i
     ch->event_handler(ch);
 }
 
-static void hpet_msi_unmask(unsigned int irq)
+static void hpet_msi_unmask(struct irq_desc *desc)
 {
     u32 cfg;
-    unsigned int ch_idx = irq_to_channel(irq);
-    struct hpet_event_channel *ch = hpet_events + ch_idx;
-
-    BUG_ON(ch_idx >= num_hpets_used);
+    struct hpet_event_channel *ch = desc->action->dev_id;
 
     cfg = hpet_read32(HPET_Tn_CFG(ch->idx));
     cfg |= HPET_TN_FSB;
     hpet_write32(cfg, HPET_Tn_CFG(ch->idx));
 }
 
-static void hpet_msi_mask(unsigned int irq)
+static void hpet_msi_mask(struct irq_desc *desc)
 {
     u32 cfg;
-    unsigned int ch_idx = irq_to_channel(irq);
-    struct hpet_event_channel *ch = hpet_events + ch_idx;
-
-    BUG_ON(ch_idx >= num_hpets_used);
+    struct hpet_event_channel *ch = desc->action->dev_id;
 
     cfg = hpet_read32(HPET_Tn_CFG(ch->idx));
     cfg &= ~HPET_TN_FSB;
@@ -271,30 +262,21 @@ static void hpet_msi_read(struct hpet_ev
     msg->address_hi = 0;
 }
 
-static unsigned int hpet_msi_startup(unsigned int irq)
+static unsigned int hpet_msi_startup(struct irq_desc *desc)
 {
-    hpet_msi_unmask(irq);
+    hpet_msi_unmask(desc);
     return 0;
 }
 
-static void hpet_msi_shutdown(unsigned int irq)
-{
-    hpet_msi_mask(irq);
-}
+#define hpet_msi_shutdown hpet_msi_mask
 
-static void hpet_msi_ack(unsigned int irq)
+static void hpet_msi_ack(struct irq_desc *desc)
 {
-    struct irq_desc *desc = irq_to_desc(irq);
-
     irq_complete_move(desc);
-    move_native_irq(irq);
+    move_native_irq(desc);
     ack_APIC_irq();
 }
 
-static void hpet_msi_end(unsigned int irq, u8 vector)
-{
-}
-
 static void hpet_msi_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
 {
     struct msi_msg msg;
@@ -323,7 +305,6 @@ static hw_irq_controller hpet_msi_type =
     .enable        = hpet_msi_unmask,
     .disable    = hpet_msi_mask,
     .ack        = hpet_msi_ack,
-    .end        = hpet_msi_end,
     .set_affinity   = hpet_msi_set_affinity,
 };
 
@@ -335,14 +316,13 @@ static void __hpet_setup_msi_irq(struct 
     hpet_msi_write(desc->action->dev_id, &msg);
 }
 
-static int __init hpet_setup_msi_irq(unsigned int irq)
+static int __init hpet_setup_msi_irq(unsigned int irq, struct hpet_event_channel *ch)
 {
     int ret;
     irq_desc_t *desc = irq_to_desc(irq);
 
     desc->handler = &hpet_msi_type;
-    ret = request_irq(irq, hpet_interrupt_handler,
-                      0, "HPET", hpet_events + irq_channel[irq]);
+    ret = request_irq(irq, hpet_interrupt_handler, 0, "HPET", ch);
     if ( ret < 0 )
         return ret;
 
@@ -358,12 +338,9 @@ static int __init hpet_assign_irq(unsign
     if ( (irq = create_irq()) < 0 )
         return irq;
 
-    irq_channel[irq] = idx;
-
-    if ( hpet_setup_msi_irq(irq) )
+    if ( hpet_setup_msi_irq(irq, hpet_events + idx) )
     {
         destroy_irq(irq);
-        irq_channel[irq] = -1;
         return -EINVAL;
     }
 
@@ -511,11 +488,6 @@ void __init hpet_broadcast_init(void)
     if ( hpet_rate == 0 )
         return;
 
-    irq_channel = xmalloc_array(unsigned int, nr_irqs);
-    BUG_ON(irq_channel == NULL);
-    for ( i = 0; i < nr_irqs; i++ )
-        irq_channel[i] = -1;
-
     cfg = hpet_read32(HPET_CFG);
 
     hpet_fsb_cap_lookup();
@@ -527,9 +499,6 @@ void __init hpet_broadcast_init(void)
     }
     else
     {
-        xfree(irq_channel);
-        irq_channel = NULL;
-
         hpet_id = hpet_read32(HPET_ID);
         if ( !(hpet_id & HPET_ID_LEGSUP) )
             return;
--- a/xen/arch/x86/i8259.c
+++ b/xen/arch/x86/i8259.c
@@ -85,18 +85,18 @@ BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd) BU
 
 static DEFINE_SPINLOCK(i8259A_lock);
 
-static void mask_and_ack_8259A_irq(unsigned int irq);
+static void mask_and_ack_8259A_irq(struct irq_desc *);
 
-static unsigned int startup_8259A_irq(unsigned int irq)
+static unsigned int startup_8259A_irq(struct irq_desc *desc)
 {
-    enable_8259A_irq(irq);
+    enable_8259A_irq(desc);
     return 0; /* never anything pending */
 }
 
-static void end_8259A_irq(unsigned int irq, u8 vector)
+static void end_8259A_irq(struct irq_desc *desc, u8 vector)
 {
-    if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-        enable_8259A_irq(irq);
+    if (!(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+        enable_8259A_irq(desc);
 }
 
 static struct hw_interrupt_type __read_mostly i8259A_irq_type = {
@@ -133,28 +133,28 @@ static unsigned int cached_irq_mask = 0x
  */
 unsigned int __read_mostly io_apic_irqs;
 
-void disable_8259A_irq(unsigned int irq)
+void disable_8259A_irq(struct irq_desc *desc)
 {
-    unsigned int mask = 1 << irq;
+    unsigned int mask = 1 << desc->irq;
     unsigned long flags;
 
     spin_lock_irqsave(&i8259A_lock, flags);
     cached_irq_mask |= mask;
-    if (irq & 8)
+    if (desc->irq & 8)
         outb(cached_A1,0xA1);
     else
         outb(cached_21,0x21);
     spin_unlock_irqrestore(&i8259A_lock, flags);
 }
 
-void enable_8259A_irq(unsigned int irq)
+void enable_8259A_irq(struct irq_desc *desc)
 {
-    unsigned int mask = ~(1 << irq);
+    unsigned int mask = ~(1 << desc->irq);
     unsigned long flags;
 
     spin_lock_irqsave(&i8259A_lock, flags);
     cached_irq_mask &= mask;
-    if (irq & 8)
+    if (desc->irq & 8)
         outb(cached_A1,0xA1);
     else
         outb(cached_21,0x21);
@@ -226,9 +226,9 @@ static inline int i8259A_irq_real(unsign
  * first, _then_ send the EOI, and the order of EOI
  * to the two 8259s is important!
  */
-static void mask_and_ack_8259A_irq(unsigned int irq)
+static void mask_and_ack_8259A_irq(struct irq_desc *desc)
 {
-    unsigned int irqmask = 1 << irq;
+    unsigned int irqmask = 1 << desc->irq;
     unsigned long flags;
 
     spin_lock_irqsave(&i8259A_lock, flags);
@@ -252,15 +252,15 @@ static void mask_and_ack_8259A_irq(unsig
     cached_irq_mask |= irqmask;
 
  handle_real_irq:
-    if (irq & 8) {
+    if (desc->irq & 8) {
         inb(0xA1);              /* DUMMY - (do we need this?) */
         outb(cached_A1,0xA1);
-        outb(0x60+(irq&7),0xA0);/* 'Specific EOI' to slave */
+        outb(0x60 + (desc->irq & 7), 0xA0);/* 'Specific EOI' to slave */
         outb(0x62,0x20);        /* 'Specific EOI' to master-IRQ2 */
     } else {
         inb(0x21);              /* DUMMY - (do we need this?) */
         outb(cached_21,0x21);
-        outb(0x60+irq,0x20);    /* 'Specific EOI' to master */
+        outb(0x60 + desc->irq, 0x20);/* 'Specific EOI' to master */
     }
     spin_unlock_irqrestore(&i8259A_lock, flags);
     return;
@@ -269,7 +269,7 @@ static void mask_and_ack_8259A_irq(unsig
     /*
      * this is the slow path - should happen rarely.
      */
-    if (i8259A_irq_real(irq))
+    if (i8259A_irq_real(desc->irq))
         /*
          * oops, the IRQ _is_ in service according to the
          * 8259A - not spurious, go handle it.
@@ -283,7 +283,7 @@ static void mask_and_ack_8259A_irq(unsig
          * lets ACK and report it. [once per IRQ]
          */
         if (!(spurious_irq_mask & irqmask)) {
-            printk("spurious 8259A interrupt: IRQ%d.\n", irq);
+            printk("spurious 8259A interrupt: IRQ%d.\n", desc->irq);
             spurious_irq_mask |= irqmask;
         }
         /*
--- a/xen/arch/x86/io_apic.c
+++ b/xen/arch/x86/io_apic.c
@@ -436,21 +436,21 @@ static void __level_IO_APIC_irq (unsigne
     __modify_IO_APIC_irq(irq, 0x00008000, 0);
 }
 
-static void mask_IO_APIC_irq (unsigned int irq)
+static void mask_IO_APIC_irq(struct irq_desc *desc)
 {
     unsigned long flags;
 
     spin_lock_irqsave(&ioapic_lock, flags);
-    __mask_IO_APIC_irq(irq);
+    __mask_IO_APIC_irq(desc->irq);
     spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
-static void unmask_IO_APIC_irq (unsigned int irq)
+static void unmask_IO_APIC_irq(struct irq_desc *desc)
 {
     unsigned long flags;
 
     spin_lock_irqsave(&ioapic_lock, flags);
-    __unmask_IO_APIC_irq(irq);
+    __unmask_IO_APIC_irq(desc->irq);
     spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
@@ -1145,7 +1145,7 @@ static void __init setup_IO_APIC_irqs(vo
                 ioapic_register_intr(irq, IOAPIC_AUTO);
 
                 if (!apic && platform_legacy_irq(irq))
-                    disable_8259A_irq(irq);
+                    disable_8259A_irq(irq_to_desc(irq));
             }
             cfg = irq_cfg(irq);
             SET_DEST(entry.dest.dest32, entry.dest.logical.logical_dest,
@@ -1170,7 +1170,7 @@ static void __init setup_ExtINT_IRQ0_pin
 
     memset(&entry,0,sizeof(entry));
 
-    disable_8259A_irq(0);
+    disable_8259A_irq(irq_to_desc(0));
 
     /* mask LVT0 */
     apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
@@ -1199,7 +1199,7 @@ static void __init setup_ExtINT_IRQ0_pin
      */
     ioapic_write_entry(apic, pin, 0, entry);
 
-    enable_8259A_irq(0);
+    enable_8259A_irq(irq_to_desc(0));
 }
 
 static inline void UNEXPECTED_IO_APIC(void)
@@ -1627,18 +1627,18 @@ static int __init timer_irq_works(void)
  * This is not complete - we should be able to fake
  * an edge even if it isn't on the 8259A...
  */
-static unsigned int startup_edge_ioapic_irq(unsigned int irq)
+static unsigned int startup_edge_ioapic_irq(struct irq_desc *desc)
 {
     int was_pending = 0;
     unsigned long flags;
 
     spin_lock_irqsave(&ioapic_lock, flags);
-    if (platform_legacy_irq(irq)) {
-        disable_8259A_irq(irq);
-        if (i8259A_irq_pending(irq))
+    if (platform_legacy_irq(desc->irq)) {
+        disable_8259A_irq(desc);
+        if (i8259A_irq_pending(desc->irq))
             was_pending = 1;
     }
-    __unmask_IO_APIC_irq(irq);
+    __unmask_IO_APIC_irq(desc->irq);
     spin_unlock_irqrestore(&ioapic_lock, flags);
 
     return was_pending;
@@ -1649,16 +1649,14 @@ static unsigned int startup_edge_ioapic_
  * interrupt for real. This prevents IRQ storms from unhandled
  * devices.
  */
-static void ack_edge_ioapic_irq(unsigned int irq)
+static void ack_edge_ioapic_irq(struct irq_desc *desc)
 {
-    struct irq_desc *desc = irq_to_desc(irq);
-    
     irq_complete_move(desc);
-    move_native_irq(irq);
+    move_native_irq(desc);
 
     if ((desc->status & (IRQ_PENDING | IRQ_DISABLED))
         == (IRQ_PENDING | IRQ_DISABLED))
-        mask_IO_APIC_irq(irq);
+        mask_IO_APIC_irq(desc);
     ack_APIC_irq();
 }
 
@@ -1676,9 +1674,9 @@ static void ack_edge_ioapic_irq(unsigned
  * generic IRQ layer and by the fact that an unacked local
  * APIC does not accept IRQs.
  */
-static unsigned int startup_level_ioapic_irq (unsigned int irq)
+static unsigned int startup_level_ioapic_irq(struct irq_desc *desc)
 {
-    unmask_IO_APIC_irq(irq);
+    unmask_IO_APIC_irq(desc);
 
     return 0; /* don't check for pending */
 }
@@ -1726,11 +1724,10 @@ static bool_t io_apic_level_ack_pending(
     return 0;
 }
 
-static void mask_and_ack_level_ioapic_irq (unsigned int irq)
+static void mask_and_ack_level_ioapic_irq(struct irq_desc *desc)
 {
     unsigned long v;
     int i;
-    struct irq_desc *desc = irq_to_desc(irq);
 
     irq_complete_move(desc);
 
@@ -1738,7 +1735,7 @@ static void mask_and_ack_level_ioapic_ir
         return;
 
     if ( !directed_eoi_enabled )
-        mask_IO_APIC_irq(irq);
+        mask_IO_APIC_irq(desc);
 
 /*
  * It appears there is an erratum which affects at least version 0x11
@@ -1759,7 +1756,7 @@ static void mask_and_ack_level_ioapic_ir
  * operation to prevent an edge-triggered interrupt escaping meanwhile.
  * The idea is from Manfred Spraul.  --macro
  */
-    i = IO_APIC_VECTOR(irq);
+    i = IO_APIC_VECTOR(desc->irq);
 
     v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
 
@@ -1768,19 +1765,19 @@ static void mask_and_ack_level_ioapic_ir
     if ( directed_eoi_enabled )
         return;
 
-    if ((irq_desc[irq].status & IRQ_MOVE_PENDING) &&
-       !io_apic_level_ack_pending(irq))
+    if ((desc->status & IRQ_MOVE_PENDING) &&
+       !io_apic_level_ack_pending(desc->irq))
         move_masked_irq(desc);
 
     if ( !(v & (1 << (i & 0x1f))) ) {
         spin_lock(&ioapic_lock);
-        __edge_IO_APIC_irq(irq);
-        __level_IO_APIC_irq(irq);
+        __edge_IO_APIC_irq(desc->irq);
+        __level_IO_APIC_irq(desc->irq);
         spin_unlock(&ioapic_lock);
     }
 }
 
-static void end_level_ioapic_irq (unsigned int irq, u8 vector)
+static void end_level_ioapic_irq(struct irq_desc *desc, u8 vector)
 {
     unsigned long v;
     int i;
@@ -1789,23 +1786,21 @@ static void end_level_ioapic_irq (unsign
     {
         if ( directed_eoi_enabled )
         {
-            struct irq_desc *desc = irq_to_desc(irq);
-
             if ( !(desc->status & (IRQ_DISABLED|IRQ_MOVE_PENDING)) )
             {
-                eoi_IO_APIC_irq(irq);
+                eoi_IO_APIC_irq(desc->irq);
                 return;
             }
 
-            mask_IO_APIC_irq(irq);
-            eoi_IO_APIC_irq(irq);
+            mask_IO_APIC_irq(desc);
+            eoi_IO_APIC_irq(desc->irq);
             if ( (desc->status & IRQ_MOVE_PENDING) &&
-                 !io_apic_level_ack_pending(irq) )
+                 !io_apic_level_ack_pending(desc->irq) )
                 move_masked_irq(desc);
         }
 
-        if ( !(irq_desc[irq].status & IRQ_DISABLED) )
-            unmask_IO_APIC_irq(irq);
+        if ( !(desc->status & IRQ_DISABLED) )
+            unmask_IO_APIC_irq(desc);
 
         return;
     }
@@ -1829,7 +1824,7 @@ static void end_level_ioapic_irq (unsign
  * operation to prevent an edge-triggered interrupt escaping meanwhile.
  * The idea is from Manfred Spraul.  --macro
  */
-    i = IO_APIC_VECTOR(irq);
+    i = IO_APIC_VECTOR(desc->irq);
 
     /* Manually EOI the old vector if we are moving to the new */
     if ( vector && i != vector )
@@ -1843,30 +1838,21 @@ static void end_level_ioapic_irq (unsign
 
     ack_APIC_irq();
 
-    if ((irq_desc[irq].status & IRQ_MOVE_PENDING) &&
-            !io_apic_level_ack_pending(irq))
-        move_native_irq(irq);
+    if ( (desc->status & IRQ_MOVE_PENDING) &&
+         !io_apic_level_ack_pending(desc->irq) )
+        move_native_irq(desc);
 
     if (!(v & (1 << (i & 0x1f)))) {
         spin_lock(&ioapic_lock);
-        __mask_IO_APIC_irq(irq);
-        __edge_IO_APIC_irq(irq);
-        __level_IO_APIC_irq(irq);
-        if ( !(irq_desc[irq].status & IRQ_DISABLED) )
-            __unmask_IO_APIC_irq(irq);
+        __mask_IO_APIC_irq(desc->irq);
+        __edge_IO_APIC_irq(desc->irq);
+        __level_IO_APIC_irq(desc->irq);
+        if ( !(desc->status & IRQ_DISABLED) )
+            __unmask_IO_APIC_irq(desc->irq);
         spin_unlock(&ioapic_lock);
     }
 }
 
-static void disable_edge_ioapic_irq(unsigned int irq)
-{
-}
-
-static void end_edge_ioapic_irq(unsigned int irq, u8 vector)
-{
-}
-
-
 /*
  * Level and edge triggered IO-APIC interrupts need different handling,
  * so we use two separate IRQ descriptors. Edge triggered IRQs can be
@@ -1878,11 +1864,10 @@ static void end_edge_ioapic_irq(unsigned
 static hw_irq_controller ioapic_edge_type = {
     .typename  = "IO-APIC-edge",
     .startup   = startup_edge_ioapic_irq,
-    .shutdown  = disable_edge_ioapic_irq,
+    .shutdown  = irq_shutdown_none,
     .enable    = unmask_IO_APIC_irq,
-    .disable   = disable_edge_ioapic_irq,
+    .disable   = irq_disable_none,
     .ack               = ack_edge_ioapic_irq,
-    .end               = end_edge_ioapic_irq,
     .set_affinity      = set_ioapic_affinity_irq,
 };
 
@@ -1897,26 +1882,24 @@ static hw_irq_controller ioapic_level_ty
     .set_affinity      = set_ioapic_affinity_irq,
 };
 
-static unsigned int startup_msi_irq(unsigned int irq)
+static unsigned int startup_msi_irq(struct irq_desc *desc)
 {
-    unmask_msi_irq(irq);
+    unmask_msi_irq(desc);
     return 0;
 }
 
-static void ack_msi_irq(unsigned int irq)
+static void ack_msi_irq(struct irq_desc *desc)
 {
-    struct irq_desc *desc = irq_to_desc(irq);
-
     irq_complete_move(desc);
-    move_native_irq(irq);
+    move_native_irq(desc);
 
     if ( msi_maskable_irq(desc->msi_desc) )
         ack_APIC_irq(); /* ACKTYPE_NONE */
 }
 
-static void end_msi_irq(unsigned int irq, u8 vector)
+static void end_msi_irq(struct irq_desc *desc, u8 vector)
 {
-    if ( !msi_maskable_irq(irq_desc[irq].msi_desc) )
+    if ( !msi_maskable_irq(desc->msi_desc) )
         ack_APIC_irq(); /* ACKTYPE_EOI */
 }
 
@@ -1946,7 +1929,7 @@ static inline void init_IO_APIC_traps(vo
             make_8259A_irq(irq);
 }
 
-static void enable_lapic_irq(unsigned int irq)
+static void enable_lapic_irq(struct irq_desc *desc)
 {
     unsigned long v;
 
@@ -1954,7 +1937,7 @@ static void enable_lapic_irq(unsigned in
     apic_write_around(APIC_LVT0, v & ~APIC_LVT_MASKED);
 }
 
-static void disable_lapic_irq(unsigned int irq)
+static void disable_lapic_irq(struct irq_desc *desc)
 {
     unsigned long v;
 
@@ -1962,13 +1945,11 @@ static void disable_lapic_irq(unsigned i
     apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
 }
 
-static void ack_lapic_irq(unsigned int irq)
+static void ack_lapic_irq(struct irq_desc *desc)
 {
     ack_APIC_irq();
 }
 
-#define end_lapic_irq end_edge_ioapic_irq
-
 static hw_irq_controller lapic_irq_type = {
     .typename  = "local-APIC-edge",
     .startup   = NULL, /* startup_irq() not used for IRQ0 */
@@ -1976,7 +1957,6 @@ static hw_irq_controller lapic_irq_type 
     .enable    = enable_lapic_irq,
     .disable   = disable_lapic_irq,
     .ack               = ack_lapic_irq,
-    .end               = end_lapic_irq,
 };
 
 /*
@@ -2051,7 +2031,7 @@ static void __init check_timer(void)
     /*
      * get/set the timer IRQ vector:
      */
-    disable_8259A_irq(0);
+    disable_8259A_irq(irq_to_desc(0));
     vector = FIRST_HIPRIORITY_VECTOR;
     clear_irq_vector(0);
 
@@ -2071,7 +2051,7 @@ static void __init check_timer(void)
     init_8259A(1);
     /* XEN: Ripped out the legacy missed-tick logic, so below is not needed. */
     /*timer_ack = 1;*/
-    /*enable_8259A_irq(0);*/
+    /*enable_8259A_irq(irq_to_desc(0));*/
 
     pin1  = find_isa_irq_pin(0, mp_INT);
     apic1 = find_isa_irq_apic(0, mp_INT);
@@ -2085,7 +2065,7 @@ static void __init check_timer(void)
         /*
          * Ok, does IRQ0 through the IOAPIC work?
          */
-        unmask_IO_APIC_irq(0);
+        unmask_IO_APIC_irq(irq_to_desc(0));
         if (timer_irq_works()) {
             local_irq_restore(flags);
             return;
@@ -2125,10 +2105,10 @@ static void __init check_timer(void)
 
     printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
 
-    disable_8259A_irq(0);
+    disable_8259A_irq(irq_to_desc(0));
     irq_desc[0].handler = &lapic_irq_type;
     apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector);      /* Fixed mode */
-    enable_8259A_irq(0);
+    enable_8259A_irq(irq_to_desc(0));
 
     if (timer_irq_works()) {
         local_irq_restore(flags);
@@ -2401,7 +2381,7 @@ int io_apic_set_pci_routing (int ioapic,
     ioapic_register_intr(irq, edge_level);
 
     if (!ioapic && platform_legacy_irq(irq))
-        disable_8259A_irq(irq);
+        disable_8259A_irq(desc);
 
     spin_lock_irqsave(&ioapic_lock, flags);
     __ioapic_write_entry(ioapic, pin, 0, entry);
@@ -2410,7 +2390,7 @@ int io_apic_set_pci_routing (int ioapic,
 
     spin_lock(&desc->lock);
     if (!(desc->status & (IRQ_DISABLED | IRQ_GUEST)))
-        desc->handler->startup(irq);
+        desc->handler->startup(desc);
     spin_unlock_irqrestore(&desc->lock, flags);
 
     return 0;
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -192,7 +192,7 @@ static void dynamic_irq_cleanup(unsigned
 
     spin_lock_irqsave(&desc->lock, flags);
     desc->status  |= IRQ_DISABLED;
-    desc->handler->shutdown(irq);
+    desc->handler->shutdown(desc);
     action = desc->action;
     desc->action  = NULL;
     desc->msi_desc = NULL;
@@ -347,25 +347,20 @@ static void __do_IRQ_guest(int vector);
 
 void no_action(int cpl, void *dev_id, struct cpu_user_regs *regs) { }
 
-static void enable_none(unsigned int vector) { }
-static void end_none(unsigned int irq, u8 vector) { }
-static unsigned int startup_none(unsigned int vector) { return 0; }
-static void disable_none(unsigned int vector) { }
-static void ack_none(unsigned int irq)
+void irq_actor_none(struct irq_desc *desc) { }
+unsigned int irq_startup_none(struct irq_desc *desc) { return 0; }
+static void ack_none(struct irq_desc *desc)
 {
-    ack_bad_irq(irq);
+    ack_bad_irq(desc->irq);
 }
 
-#define shutdown_none   disable_none
-
 hw_irq_controller no_irq_type = {
     "none",
-    startup_none,
-    shutdown_none,
-    enable_none,
-    disable_none,
+    irq_startup_none,
+    irq_shutdown_none,
+    irq_enable_none,
+    irq_disable_none,
     ack_none,
-    end_none
 };
 
 static vmask_t *irq_get_used_vector_mask(int irq)
@@ -585,19 +580,17 @@ void move_masked_irq(struct irq_desc *de
     cpus_clear(desc->pending_mask);
 }
 
-void move_native_irq(int irq)
+void move_native_irq(struct irq_desc *desc)
 {
-    struct irq_desc *desc = irq_to_desc(irq);
-
     if (likely(!(desc->status & IRQ_MOVE_PENDING)))
         return;
 
     if (unlikely(desc->status & IRQ_DISABLED))
         return;
 
-    desc->handler->disable(irq);
+    desc->handler->disable(desc);
     move_masked_irq(desc);
-    desc->handler->enable(irq);
+    desc->handler->enable(desc);
 }
 
 /* For re-setting irq interrupt affinity for specific irq */
@@ -654,7 +647,7 @@ asmlinkage void do_IRQ(struct cpu_user_r
     desc = irq_to_desc(irq);
 
     spin_lock(&desc->lock);
-    desc->handler->ack(irq);
+    desc->handler->ack(desc);
 
     if ( likely(desc->status & IRQ_GUEST) )
     {
@@ -664,7 +657,7 @@ asmlinkage void do_IRQ(struct cpu_user_r
             s_time_t now = NOW();
             if ( now < (desc->rl_quantum_start + MILLISECS(10)) )
             {
-                desc->handler->disable(irq);
+                desc->handler->disable(desc);
                 /*
                  * If handler->disable doesn't actually mask the interrupt, a 
                 * disabled irq still can fire. This check also avoids possible 
@@ -716,7 +709,8 @@ asmlinkage void do_IRQ(struct cpu_user_r
     desc->status &= ~IRQ_INPROGRESS;
 
  out:
-    desc->handler->end(irq, regs->entry_vector);
+    if ( desc->handler->end )
+        desc->handler->end(desc, regs->entry_vector);
  out_no_end:
     spin_unlock(&desc->lock);
     irq_exit();
@@ -733,7 +727,7 @@ static void irq_ratelimit_timer_fn(void 
     list_for_each_entry_safe ( desc, tmp, &irq_ratelimit_list, rl_link )
     {
         spin_lock(&desc->lock);
-        desc->handler->enable(desc->irq);
+        desc->handler->enable(desc);
         list_del(&desc->rl_link);
         INIT_LIST_HEAD(&desc->rl_link);
         spin_unlock(&desc->lock);
@@ -796,7 +790,7 @@ void __init release_irq(unsigned int irq
     action = desc->action;
     desc->action  = NULL;
     desc->status |= IRQ_DISABLED;
-    desc->handler->shutdown(irq);
+    desc->handler->shutdown(desc);
     spin_unlock_irqrestore(&desc->lock,flags);
 
     /* Wait to make sure it's not being used on another CPU */
@@ -823,7 +817,7 @@ int __init setup_irq(unsigned int irq, s
 
     desc->action  = new;
     desc->status &= ~IRQ_DISABLED;
-    desc->handler->startup(irq);
+    desc->handler->startup(desc);
 
     spin_unlock_irqrestore(&desc->lock,flags);
 
@@ -914,7 +908,8 @@ static void irq_guest_eoi_timer_fn(void 
     switch ( action->ack_type )
     {
     case ACKTYPE_UNMASK:
-        desc->handler->end(irq, 0);
+        if ( desc->handler->end )
+            desc->handler->end(desc, 0);
         break;
     case ACKTYPE_EOI:
         cpu_eoi_map = action->cpu_eoi_map;
@@ -942,7 +937,8 @@ static void __do_IRQ_guest(int irq)
         /* An interrupt may slip through while freeing an ACKTYPE_EOI irq. */
         ASSERT(action->ack_type == ACKTYPE_EOI);
         ASSERT(desc->status & IRQ_DISABLED);
-        desc->handler->end(irq, vector);
+        if ( desc->handler->end )
+            desc->handler->end(desc, vector);
         return;
     }
 
@@ -1156,7 +1152,8 @@ static void flush_ready_eoi(void)
         ASSERT(irq > 0);
         desc = irq_to_desc(irq);
         spin_lock(&desc->lock);
-        desc->handler->end(irq, peoi[sp].vector);
+        if ( desc->handler->end )
+            desc->handler->end(desc, peoi[sp].vector);
         spin_unlock(&desc->lock);
     }
 
@@ -1234,7 +1231,8 @@ void desc_guest_eoi(struct irq_desc *des
     if ( action->ack_type == ACKTYPE_UNMASK )
     {
         ASSERT(cpus_empty(action->cpu_eoi_map));
-        desc->handler->end(irq, 0);
+        if ( desc->handler->end )
+            desc->handler->end(desc, 0);
         spin_unlock_irq(&desc->lock);
         return;
     }
@@ -1402,7 +1400,7 @@ int pirq_guest_bind(struct vcpu *v, stru
 
         desc->status |= IRQ_GUEST;
         desc->status &= ~IRQ_DISABLED;
-        desc->handler->startup(irq);
+        desc->handler->startup(desc);
 
         /* Attempt to bind the interrupt target to the correct CPU. */
         cpu_set(v->processor, cpumask);
@@ -1486,8 +1484,9 @@ static irq_guest_action_t *__pirq_guest_
     {
     case ACKTYPE_UNMASK:
         if ( test_and_clear_bool(pirq->masked) &&
-             (--action->in_flight == 0) )
-            desc->handler->end(irq, 0);
+             (--action->in_flight == 0) &&
+             desc->handler->end )
+                desc->handler->end(desc, 0);
         break;
     case ACKTYPE_EOI:
         /* NB. If #guests == 0 then we clear the eoi_map later on. */
@@ -1516,7 +1515,7 @@ static irq_guest_action_t *__pirq_guest_
 
     /* Disabling IRQ before releasing the desc_lock avoids an IRQ storm. */
     desc->status |= IRQ_DISABLED;
-    desc->handler->disable(irq);
+    desc->handler->disable(desc);
 
     /*
      * Mark any remaining pending EOIs as ready to flush.
@@ -1538,7 +1537,7 @@ static irq_guest_action_t *__pirq_guest_
 
     desc->action = NULL;
     desc->status &= ~(IRQ_GUEST|IRQ_INPROGRESS);
-    desc->handler->shutdown(irq);
+    desc->handler->shutdown(desc);
 
     /* Caller frees the old guest descriptor block. */
     return action;
@@ -1958,7 +1957,7 @@ void fixup_irqs(void)
         }
 
         if ( desc->handler->disable )
-            desc->handler->disable(irq);
+            desc->handler->disable(desc);
 
         if ( desc->handler->set_affinity )
             desc->handler->set_affinity(desc, &affinity);
@@ -1966,7 +1965,7 @@ void fixup_irqs(void)
             set_affinity = 0;
 
         if ( desc->handler->enable )
-            desc->handler->enable(irq);
+            desc->handler->enable(desc);
 
         spin_unlock(&desc->lock);
 
--- a/xen/arch/x86/msi.c
+++ b/xen/arch/x86/msi.c
@@ -336,11 +336,11 @@ int msi_maskable_irq(const struct msi_de
            || entry->msi_attrib.maskbit;
 }
 
-static void msi_set_mask_bit(unsigned int irq, int flag)
+static void msi_set_mask_bit(struct irq_desc *desc, int flag)
 {
-    struct msi_desc *entry = irq_desc[irq].msi_desc;
+    struct msi_desc *entry = desc->msi_desc;
 
-    ASSERT(spin_is_locked(&irq_desc[irq].lock));
+    ASSERT(spin_is_locked(&desc->lock));
     BUG_ON(!entry || !entry->dev);
     switch (entry->msi_attrib.type) {
     case PCI_CAP_ID_MSI:
@@ -387,14 +387,14 @@ static int msi_get_mask_bit(const struct
     return -1;
 }
 
-void mask_msi_irq(unsigned int irq)
+void mask_msi_irq(struct irq_desc *desc)
 {
-    msi_set_mask_bit(irq, 1);
+    msi_set_mask_bit(desc, 1);
 }
 
-void unmask_msi_irq(unsigned int irq)
+void unmask_msi_irq(struct irq_desc *desc)
 {
-    msi_set_mask_bit(irq, 0);
+    msi_set_mask_bit(desc, 0);
 }
 
 static struct msi_desc* alloc_msi_entry(void)
@@ -974,7 +974,7 @@ int pci_restore_msi_state(struct pci_dev
 
         write_msi_msg(entry, &entry->msg);
 
-        msi_set_mask_bit(irq, entry->msi_attrib.masked);
+        msi_set_mask_bit(desc, entry->msi_attrib.masked);
 
         if ( entry->msi_attrib.type == PCI_CAP_ID_MSI )
             msi_set_enable(pdev, 1);
--- a/xen/drivers/passthrough/amd/iommu_init.c
+++ b/xen/drivers/passthrough/amd/iommu_init.c
@@ -29,7 +29,6 @@
 #include <asm-x86/fixmap.h>
 #include <mach_apic.h>
 
-static struct amd_iommu **__read_mostly irq_to_iommu;
 static int __initdata nr_amd_iommus;
 
 unsigned short ivrs_bdf_entries;
@@ -403,10 +402,10 @@ static void amd_iommu_msi_enable(struct 
         iommu->msi_cap + PCI_MSI_FLAGS, control);
 }
 
-static void iommu_msi_unmask(unsigned int irq)
+static void iommu_msi_unmask(struct irq_desc *desc)
 {
     unsigned long flags;
-    struct amd_iommu *iommu = irq_to_iommu[irq];
+    struct amd_iommu *iommu = desc->action->dev_id;
 
     /* FIXME: do not support mask bits at the moment */
     if ( iommu->maskbit )
@@ -417,11 +416,10 @@ static void iommu_msi_unmask(unsigned in
     spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
-static void iommu_msi_mask(unsigned int irq)
+static void iommu_msi_mask(struct irq_desc *desc)
 {
     unsigned long flags;
-    struct amd_iommu *iommu = irq_to_iommu[irq];
-    struct irq_desc *desc = irq_to_desc(irq);
+    struct amd_iommu *iommu = desc->action->dev_id;
 
     irq_complete_move(desc);
 
@@ -434,15 +432,15 @@ static void iommu_msi_mask(unsigned int 
     spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
-static unsigned int iommu_msi_startup(unsigned int irq)
+static unsigned int iommu_msi_startup(struct irq_desc *desc)
 {
-    iommu_msi_unmask(irq);
+    iommu_msi_unmask(desc);
     return 0;
 }
 
-static void iommu_msi_end(unsigned int irq, u8 vector)
+static void iommu_msi_end(struct irq_desc *desc, u8 vector)
 {
-    iommu_msi_unmask(irq);
+    iommu_msi_unmask(desc);
     ack_APIC_irq();
 }
 
@@ -557,13 +555,11 @@ static int __init set_iommu_interrupt_ha
     }
     
     irq_desc[irq].handler = &iommu_msi_type;
-    irq_to_iommu[irq] = iommu;
     ret = request_irq(irq, amd_iommu_page_fault, 0,
                              "amd_iommu", iommu);
     if ( ret )
     {
         irq_desc[irq].handler = &no_irq_type;
-        irq_to_iommu[irq] = NULL;
         destroy_irq(irq);
         AMD_IOMMU_DEBUG("can't request irq\n");
         return 0;
@@ -728,13 +724,6 @@ static void __init amd_iommu_init_cleanu
         ivrs_mappings = NULL;
     }
 
-    /* free irq_to_iommu[] */
-    if ( irq_to_iommu )
-    {
-        xfree(irq_to_iommu);
-        irq_to_iommu = NULL;
-    }
-
     iommu_enabled = 0;
     iommu_passthrough = 0;
     iommu_intremap = 0;
@@ -838,11 +827,6 @@ int __init amd_iommu_init(void)
 
     BUG_ON( !iommu_found() );
 
-    irq_to_iommu = xmalloc_array(struct amd_iommu *, nr_irqs);
-    if ( irq_to_iommu == NULL )
-        goto error_out;
-    memset(irq_to_iommu, 0, nr_irqs * sizeof(struct iommu*));
-
     ivrs_bdf_entries = amd_iommu_get_ivrs_dev_entries();
 
     if ( !ivrs_bdf_entries )
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -829,7 +829,6 @@ static const char *iommu_get_fault_reaso
     }
 }
 
-static struct iommu **irq_to_iommu;
 static int iommu_page_fault_do_one(struct iommu *iommu, int type,
                                    u8 fault_reason, u16 source_id, u64 addr)
 {
@@ -961,9 +960,9 @@ clear_overflow:
     }
 }
 
-static void dma_msi_unmask(unsigned int irq)
+static void dma_msi_unmask(struct irq_desc *desc)
 {
-    struct iommu *iommu = irq_to_iommu[irq];
+    struct iommu *iommu = desc->action->dev_id;
     unsigned long flags;
 
     /* unmask it */
@@ -972,11 +971,10 @@ static void dma_msi_unmask(unsigned int 
     spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
-static void dma_msi_mask(unsigned int irq)
+static void dma_msi_mask(struct irq_desc *desc)
 {
     unsigned long flags;
-    struct iommu *iommu = irq_to_iommu[irq];
-    struct irq_desc *desc = irq_to_desc(irq);
+    struct iommu *iommu = desc->action->dev_id;
 
     irq_complete_move(desc);
 
@@ -986,15 +984,15 @@ static void dma_msi_mask(unsigned int ir
     spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
-static unsigned int dma_msi_startup(unsigned int irq)
+static unsigned int dma_msi_startup(struct irq_desc *desc)
 {
-    dma_msi_unmask(irq);
+    dma_msi_unmask(desc);
     return 0;
 }
 
-static void dma_msi_end(unsigned int irq, u8 vector)
+static void dma_msi_end(struct irq_desc *desc, u8 vector)
 {
-    dma_msi_unmask(irq);
+    dma_msi_unmask(desc);
     ack_APIC_irq();
 }
 
@@ -1071,7 +1069,6 @@ static int __init iommu_set_interrupt(st
     }
 
     irq_desc[irq].handler = &dma_msi_type;
-    irq_to_iommu[irq] = iommu;
 #ifdef CONFIG_X86
     ret = request_irq(irq, iommu_page_fault, 0, "dmar", iommu);
 #else
@@ -1080,7 +1077,6 @@ static int __init iommu_set_interrupt(st
     if ( ret )
     {
         irq_desc[irq].handler = &no_irq_type;
-        irq_to_iommu[irq] = NULL;
         destroy_irq(irq);
         dprintk(XENLOG_ERR VTDPREFIX, "IOMMU: can't request irq\n");
         return ret;
@@ -2091,13 +2087,6 @@ int __init intel_vtd_setup(void)
 
     platform_quirks_init();
 
-    irq_to_iommu = xmalloc_array(struct iommu*, nr_irqs);
-    BUG_ON(!irq_to_iommu);
-    memset(irq_to_iommu, 0, nr_irqs * sizeof(struct iommu*));
-
-    if(!irq_to_iommu)
-        return -ENOMEM;
-
     /* We enable the following features only if they are supported by all VT-d
      * engines: Snoop Control, DMA passthrough, Queued Invalidation and
      * Interrupt Remapping.
--- a/xen/include/asm-x86/irq.h
+++ b/xen/include/asm-x86/irq.h
@@ -27,6 +27,8 @@ typedef struct {
     DECLARE_BITMAP(_bits,NR_VECTORS);
 } vmask_t;
 
+struct irq_desc;
+
 struct irq_cfg {
         s16 vector;                  /* vector itself is only 8 bits, */
         s16 old_vector;              /* but we use -1 for unassigned  */
@@ -107,8 +109,8 @@ fastcall void smp_irq_move_cleanup_inter
 
 asmlinkage void do_IRQ(struct cpu_user_regs *regs);
 
-void disable_8259A_irq(unsigned int irq);
-void enable_8259A_irq(unsigned int irq);
+void disable_8259A_irq(struct irq_desc *);
+void enable_8259A_irq(struct irq_desc *);
 int i8259A_irq_pending(unsigned int irq);
 void mask_8259A(void);
 void unmask_8259A(void);
@@ -161,7 +163,6 @@ int irq_to_vector(int irq);
 int create_irq(void);
 void destroy_irq(unsigned int irq);
 
-struct irq_desc;
 extern void irq_complete_move(struct irq_desc *);
 
 extern struct irq_desc *irq_desc;
@@ -171,7 +172,7 @@ void unlock_vector_lock(void);
 
 void __setup_vector_irq(int cpu);
 
-void move_native_irq(int irq);
+void move_native_irq(struct irq_desc *);
 void move_masked_irq(struct irq_desc *);
 
 int __assign_irq_vector(int irq, struct irq_cfg *, const cpumask_t *);
--- a/xen/include/asm-x86/msi.h
+++ b/xen/include/asm-x86/msi.h
@@ -75,8 +75,8 @@ struct msi_msg {
 
 struct msi_desc;
 /* Helper functions */
-extern void mask_msi_irq(unsigned int irq);
-extern void unmask_msi_irq(unsigned int irq);
+extern void mask_msi_irq(struct irq_desc *);
+extern void unmask_msi_irq(struct irq_desc *);
 extern void set_msi_affinity(struct irq_desc *, const cpumask_t *);
 extern int pci_enable_msi(struct msi_info *msi, struct msi_desc **desc);
 extern void pci_disable_msi(struct msi_desc *desc);
--- a/xen/include/xen/irq.h
+++ b/xen/include/xen/irq.h
@@ -41,12 +41,12 @@ struct irq_desc;
  */
 struct hw_interrupt_type {
     const char *typename;
-    unsigned int (*startup)(unsigned int irq);
-    void (*shutdown)(unsigned int irq);
-    void (*enable)(unsigned int irq);
-    void (*disable)(unsigned int irq);
-    void (*ack)(unsigned int irq);
-    void (*end)(unsigned int irq, u8 vector);
+    unsigned int (*startup)(struct irq_desc *);
+    void (*shutdown)(struct irq_desc *);
+    void (*enable)(struct irq_desc *);
+    void (*disable)(struct irq_desc *);
+    void (*ack)(struct irq_desc *);
+    void (*end)(struct irq_desc *, u8 vector);
     void (*set_affinity)(struct irq_desc *, const cpumask_t *);
 };
 
@@ -133,6 +133,11 @@ extern int request_irq(unsigned int irq,
 
 extern hw_irq_controller no_irq_type;
 extern void no_action(int cpl, void *dev_id, struct cpu_user_regs *regs);
+extern unsigned int irq_startup_none(struct irq_desc *);
+extern void irq_actor_none(struct irq_desc *);
+#define irq_shutdown_none irq_actor_none
+#define irq_disable_none irq_actor_none
+#define irq_enable_none irq_actor_none
 
 struct domain;
 struct vcpu;


Attachment: irq-controller-actions-desc.patch
Description: Text document

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
[Prev in Thread] Current Thread [Next in Thread]