To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] x86: replace nr_irqs sized per-domain arrays with radix trees
From: Xen patchbot-unstable <patchbot@xxxxxxx>
Date: Wed, 11 May 2011 04:40:11 +0100
Delivery-date: Tue, 10 May 2011 20:45:55 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxxxx>
# Date 1304929662 -3600
# Node ID c822888f36568f26e95f9844c7f0c5e06df7aa20
# Parent  44bfebf40b2bb7f219333ef5bf97eb7493592cdc
x86: replace nr_irqs sized per-domain arrays with radix trees

It would seem possible to fold the two trees into one (storing e.g. the
emuirq bits in the upper half of the pointer), but I'm not certain that's
worth it, as it would make deletion of entries more cumbersome. Unless
pirqs and emuirqs were mutually exclusive...
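
(A purely hypothetical aside, not something this patch implements: folding
the two trees could look roughly like the sketch below, keeping the pirq in
the lower half and the emuirq in the upper half of the stored pointer. All
identifiers are illustrative; negative sentinels such as IRQ_UNBOUND and
the rule against storing NULL would need extra care in practice.)

#define HALF_BITS (sizeof(unsigned long) * 4)

static inline void *pack_pirq_emuirq(unsigned int pirq, unsigned int emuirq)
{
    /* pirq in the low half, emuirq in the high half of one pointer value. */
    return (void *)(((unsigned long)emuirq << HALF_BITS) | pirq);
}

static inline unsigned int unpack_pirq(const void *p)
{
    /* Mask off the high half to recover the pirq. */
    return (unsigned long)p & ((1UL << HALF_BITS) - 1);
}

static inline unsigned int unpack_emuirq(const void *p)
{
    /* The emuirq lives in the high half. */
    return (unsigned long)p >> HALF_BITS;
}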

v2: Split setup/teardown into two stages - (de-)allocation (tree node
(de-)population) is done with just d->event_lock held (and hence
interrupts enabled), while actual insertion/removal of translation
data gets done with irq_desc's lock held (and interrupts disabled).
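
(As an illustration of the two stages, here is a condensed, non-verbatim
rendering of the map_domain_pirq() changes further down; error paths, MSI
handling and the surrounding logic are omitted, and example_map_pirq() is
just an illustrative wrapper name, not a function added by this patch.)

static int example_map_pirq(struct domain *d, int irq, int pirq)
{
    struct irq_desc *desc;
    unsigned long flags;
    int ret;

    ASSERT(spin_is_locked(&d->event_lock));

    /* Stage 1: populate the tree node. Only d->event_lock is held, so
     * interrupts remain enabled and the allocation can fail gracefully. */
    ret = prepare_domain_irq_pirq(d, irq, pirq);
    if ( ret )
        return ret;

    /* Stage 2: insert the actual translation data under irq_desc's lock,
     * with interrupts disabled; no allocation happens at this point. */
    desc = irq_to_desc(irq);
    spin_lock_irqsave(&desc->lock, flags);
    set_domain_irq_pirq(d, irq, pirq);
    spin_unlock_irqrestore(&desc->lock, flags);

    return 0;
}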

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>

Fix up for the new radix-tree implementation. In particular, we should
never insert NULL into a radix tree, as that denotes an empty slot (which
can be reclaimed during a deletion). Make use of
radix_tree_int_to_ptr() (and its inverse) to hide some of these
details.
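
(For readers unfamiliar with those helpers, the general int-in-pointer
technique is sketched below with an illustrative shift/tag encoding; see
the radix-tree header for the authoritative definitions. The key property
is that even the integer 0 encodes to a non-NULL pointer, so a stored
value can never be mistaken for an empty, reclaimable slot.)

static inline void *example_int_to_ptr(int val)
{
    /* Shift the value up and set a low tag bit; the result is never NULL. */
    return (void *)(((long)val << 2) | 0x2);
}

static inline int example_ptr_to_int(void *ptr)
{
    /* Drop the tag bits and recover the original (possibly negative) value. */
    return (int)((long)ptr >> 2);
}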

Signed-off-by: Keir Fraser <keir@xxxxxxx>
---


diff -r 44bfebf40b2b -r c822888f3656 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Mon May 09 09:25:23 2011 +0100
+++ b/xen/arch/x86/domain.c     Mon May 09 09:27:42 2011 +0100
@@ -614,26 +614,16 @@
         memset(d->arch.pirq_irq, 0,
                d->nr_pirqs * sizeof(*d->arch.pirq_irq));
 
-        d->arch.irq_pirq = xmalloc_array(int, nr_irqs);
-        if ( !d->arch.irq_pirq )
+        if ( (rc = init_domain_irq_mapping(d)) != 0 )
             goto fail;
-        memset(d->arch.irq_pirq, 0,
-               nr_irqs * sizeof(*d->arch.irq_pirq));
-
-        for ( i = 1; platform_legacy_irq(i); ++i )
-            if ( !IO_APIC_IRQ(i) )
-                d->arch.irq_pirq[i] = d->arch.pirq_irq[i] = i;
 
         if ( is_hvm_domain(d) )
         {
             d->arch.pirq_emuirq = xmalloc_array(int, d->nr_pirqs);
-            d->arch.emuirq_pirq = xmalloc_array(int, nr_irqs);
-            if ( !d->arch.pirq_emuirq || !d->arch.emuirq_pirq )
+            if ( !d->arch.pirq_emuirq )
                 goto fail;
             for (i = 0; i < d->nr_pirqs; i++)
                 d->arch.pirq_emuirq[i] = IRQ_UNBOUND;
-            for (i = 0; i < nr_irqs; i++)
-                d->arch.emuirq_pirq[i] = IRQ_UNBOUND;
         }
 
 
@@ -671,9 +661,8 @@
     d->is_dying = DOMDYING_dead;
     vmce_destroy_msr(d);
     xfree(d->arch.pirq_irq);
-    xfree(d->arch.irq_pirq);
     xfree(d->arch.pirq_emuirq);
-    xfree(d->arch.emuirq_pirq);
+    cleanup_domain_irq_mapping(d);
     free_xenheap_page(d->shared_info);
     if ( paging_initialised )
         paging_final_teardown(d);
@@ -726,9 +715,8 @@
 
     free_xenheap_page(d->shared_info);
     xfree(d->arch.pirq_irq);
-    xfree(d->arch.irq_pirq);
     xfree(d->arch.pirq_emuirq);
-    xfree(d->arch.emuirq_pirq);
+    cleanup_domain_irq_mapping(d);
 }
 
 unsigned long pv_guest_cr4_fixup(const struct vcpu *v, unsigned long guest_cr4)
diff -r 44bfebf40b2b -r c822888f3656 xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c        Mon May 09 09:25:23 2011 +0100
+++ b/xen/arch/x86/irq.c        Mon May 09 09:27:42 2011 +0100
@@ -950,6 +950,65 @@
     return desc;
 }
 
+static int prepare_domain_irq_pirq(struct domain *d, int irq, int pirq)
+{
+    int err = radix_tree_insert(&d->arch.irq_pirq, irq,
+                                radix_tree_int_to_ptr(0));
+    return (err != -EEXIST) ? err : 0;
+}
+
+static void set_domain_irq_pirq(struct domain *d, int irq, int pirq)
+{
+    radix_tree_replace_slot(
+        radix_tree_lookup_slot(&d->arch.irq_pirq, irq),
+        radix_tree_int_to_ptr(pirq));
+    d->arch.pirq_irq[pirq] = irq;
+}
+
+static void clear_domain_irq_pirq(struct domain *d, int irq, int pirq)
+{
+    d->arch.pirq_irq[pirq] = 0;
+    radix_tree_replace_slot(
+        radix_tree_lookup_slot(&d->arch.irq_pirq, irq),
+        radix_tree_int_to_ptr(0));
+}
+
+static void cleanup_domain_irq_pirq(struct domain *d, int irq, int pirq)
+{
+    radix_tree_delete(&d->arch.irq_pirq, irq);
+}
+
+int init_domain_irq_mapping(struct domain *d)
+{
+    unsigned int i;
+    int err = 0;
+
+    radix_tree_init(&d->arch.irq_pirq);
+    if ( is_hvm_domain(d) )
+        radix_tree_init(&d->arch.hvm_domain.emuirq_pirq);
+
+    for ( i = 1; platform_legacy_irq(i); ++i )
+    {
+        if ( IO_APIC_IRQ(i) )
+            continue;
+        err = prepare_domain_irq_pirq(d, i, i);
+        if ( err )
+            break;
+        set_domain_irq_pirq(d, i, i);
+    }
+
+    if ( err )
+        cleanup_domain_irq_mapping(d);
+    return err;
+}
+
+void cleanup_domain_irq_mapping(struct domain *d)
+{
+    radix_tree_destroy(&d->arch.irq_pirq, NULL);
+    if ( is_hvm_domain(d) )
+        radix_tree_destroy(&d->arch.hvm_domain.emuirq_pirq, NULL);
+}
+
 /* Flush all ready EOIs from the top of this CPU's pending-EOI stack. */
 static void flush_ready_eoi(void)
 {
@@ -1373,7 +1432,7 @@
 {
     irq_guest_action_t *oldaction = NULL;
     struct irq_desc *desc;
-    int irq;
+    int irq = 0;
 
     WARN_ON(!spin_is_locked(&d->event_lock));
 
@@ -1386,7 +1445,7 @@
         BUG_ON(irq <= 0);
         desc = irq_to_desc(irq);
         spin_lock_irq(&desc->lock);
-        d->arch.pirq_irq[pirq] = d->arch.irq_pirq[irq] = 0;
+        clear_domain_irq_pirq(d, irq, pirq);
     }
     else
     {
@@ -1400,6 +1459,8 @@
         kill_timer(&oldaction->eoi_timer);
         xfree(oldaction);
     }
+    else if ( irq > 0 )
+        cleanup_domain_irq_pirq(d, irq, pirq);
 }
 
 static int pirq_guest_force_unbind(struct domain *d, int irq)
@@ -1523,6 +1584,10 @@
         return ret;
     }
 
+    ret = prepare_domain_irq_pirq(d, irq, pirq);
+    if ( ret )
+        return ret;
+
     desc = irq_to_desc(irq);
 
     if ( type == MAP_PIRQ_TYPE_MSI )
@@ -1544,19 +1609,20 @@
             dprintk(XENLOG_G_ERR, "dom%d: irq %d in use\n",
               d->domain_id, irq);
         desc->handler = &pci_msi_type;
-        d->arch.pirq_irq[pirq] = irq;
-        d->arch.irq_pirq[irq] = pirq;
+        set_domain_irq_pirq(d, irq, pirq);
         setup_msi_irq(pdev, msi_desc, irq);
         spin_unlock_irqrestore(&desc->lock, flags);
-    } else
+    }
+    else
     {
         spin_lock_irqsave(&desc->lock, flags);
-        d->arch.pirq_irq[pirq] = irq;
-        d->arch.irq_pirq[irq] = pirq;
+        set_domain_irq_pirq(d, irq, pirq);
         spin_unlock_irqrestore(&desc->lock, flags);
     }
 
  done:
+    if ( ret )
+        cleanup_domain_irq_pirq(d, irq, pirq);
     return ret;
 }
 
@@ -1599,20 +1665,22 @@
     BUG_ON(irq != domain_pirq_to_irq(d, pirq));
 
     if ( !forced_unbind )
-    {
-        d->arch.pirq_irq[pirq] = 0;
-        d->arch.irq_pirq[irq] = 0;
-    }
+        clear_domain_irq_pirq(d, irq, pirq);
     else
     {
         d->arch.pirq_irq[pirq] = -irq;
-        d->arch.irq_pirq[irq] = -pirq;
+        radix_tree_replace_slot(
+            radix_tree_lookup_slot(&d->arch.irq_pirq, irq),
+            radix_tree_int_to_ptr(-pirq));
     }
 
     spin_unlock_irqrestore(&desc->lock, flags);
     if (msi_desc)
         msi_free_irq(msi_desc);
 
+    if ( !forced_unbind )
+        cleanup_domain_irq_pirq(d, irq, pirq);
+
     ret = irq_deny_access(d, pirq);
     if ( ret )
         dprintk(XENLOG_G_ERR, "dom%d: could not deny access to irq %d\n",
@@ -1829,10 +1897,27 @@
         return 0;
     }
 
-    d->arch.pirq_emuirq[pirq] = emuirq;
     /* do not store emuirq mappings for pt devices */
     if ( emuirq != IRQ_PT )
-        d->arch.emuirq_pirq[emuirq] = pirq;
+    {
+        int err = radix_tree_insert(&d->arch.hvm_domain.emuirq_pirq, emuirq,
+                                    radix_tree_int_to_ptr(pirq));
+
+        switch ( err )
+        {
+        case 0:
+            break;
+        case -EEXIST:
+            radix_tree_replace_slot(
+                radix_tree_lookup_slot(
+                    &d->arch.hvm_domain.emuirq_pirq, emuirq),
+                radix_tree_int_to_ptr(pirq));
+            break;
+        default:
+            return err;
+        }
+    }
+    d->arch.pirq_emuirq[pirq] = emuirq;
 
     return 0;
 }
@@ -1860,7 +1945,7 @@
 
     d->arch.pirq_emuirq[pirq] = IRQ_UNBOUND;
     if ( emuirq != IRQ_PT )
-        d->arch.emuirq_pirq[emuirq] = IRQ_UNBOUND;
+        radix_tree_delete(&d->arch.hvm_domain.emuirq_pirq, emuirq);
 
  done:
     return ret;
diff -r 44bfebf40b2b -r c822888f3656 xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h      Mon May 09 09:25:23 2011 +0100
+++ b/xen/include/asm-x86/domain.h      Mon May 09 09:27:42 2011 +0100
@@ -3,6 +3,7 @@
 
 #include <xen/config.h>
 #include <xen/mm.h>
+#include <xen/radix-tree.h>
 #include <asm/hvm/vcpu.h>
 #include <asm/hvm/domain.h>
 #include <asm/e820.h>
@@ -284,10 +285,9 @@
     const char *nested_p2m_function;
 
     /* NB. protected by d->event_lock and by irq_desc[irq].lock */
-    int *irq_pirq;
+    struct radix_tree_root irq_pirq;
     int *pirq_irq;
-    /* pirq to emulated irq and vice versa */
-    int *emuirq_pirq;
+    /* pirq to emulated irq */
     int *pirq_emuirq;
 
     /* Maximum physical-address bitwidth supported by this guest. */
diff -r 44bfebf40b2b -r c822888f3656 xen/include/asm-x86/hvm/domain.h
--- a/xen/include/asm-x86/hvm/domain.h  Mon May 09 09:25:23 2011 +0100
+++ b/xen/include/asm-x86/hvm/domain.h  Mon May 09 09:27:42 2011 +0100
@@ -59,6 +59,9 @@
     /* VCPU which is current target for 8259 interrupts. */
     struct vcpu           *i8259_target;
 
+    /* emulated irq to pirq */
+    struct radix_tree_root emuirq_pirq;
+
     /* hvm_print_line() logging. */
 #define HVM_PBUF_SIZE 80
     char                  *pbuf;
diff -r 44bfebf40b2b -r c822888f3656 xen/include/asm-x86/irq.h
--- a/xen/include/asm-x86/irq.h Mon May 09 09:25:23 2011 +0100
+++ b/xen/include/asm-x86/irq.h Mon May 09 09:27:42 2011 +0100
@@ -143,11 +143,21 @@
 
 void irq_set_affinity(struct irq_desc *, const cpumask_t *mask);
 
+int init_domain_irq_mapping(struct domain *);
+void cleanup_domain_irq_mapping(struct domain *);
+
 #define domain_pirq_to_irq(d, pirq) ((d)->arch.pirq_irq[pirq])
-#define domain_irq_to_pirq(d, irq) ((d)->arch.irq_pirq[irq])
+#define domain_irq_to_pirq(d, irq) ({                           \
+    void *__ret = radix_tree_lookup(&(d)->arch.irq_pirq, irq);  \
+    __ret ? radix_tree_ptr_to_int(__ret) : 0;                   \
+})
 #define PIRQ_ALLOCATED -1
 #define domain_pirq_to_emuirq(d, pirq) ((d)->arch.pirq_emuirq[pirq])
-#define domain_emuirq_to_pirq(d, emuirq) ((d)->arch.emuirq_pirq[emuirq])
+#define domain_emuirq_to_pirq(d, emuirq) ({                             \
+    void *__ret = radix_tree_lookup(&(d)->arch.hvm_domain.emuirq_pirq,  \
+                                    emuirq);                            \
+    __ret ? radix_tree_ptr_to_int(__ret) : IRQ_UNBOUND;                 \
+})
 #define IRQ_UNBOUND -1
 #define IRQ_PT -2
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
