
[Xen-devel] [PATCH 17/22] vixen: setup infrastructure to receive event channel notifications



From: Anthony Liguori <aliguori@xxxxxxxxxx>

This patch registers an interrupt handler using either the per-VCPU
evtchn_upcall_vector, HVM_PARAM_CALLBACK_IRQ vector delivery, or an
INTx interrupt from the Xen platform PCI device, depending on what
the parent hypervisor supports.

The event channel polling code comes from Linux's 2-level event
channel implementation but uses Xen's internal event channel
infrastructure for delivery into the guest.

Finally, this infrastructure has to be initialized per-VCPU, so hook
arch_set_info_guest() to do that.

Signed-off-by: KarimAllah Ahmed <karahmed@xxxxxxxxx>
Signed-off-by: Jan H. Schönherr <jschoenh@xxxxxxxxx>
Signed-off-by: Anthony Liguori <aliguori@xxxxxxxxxx>
---
 xen/arch/x86/domain.c             |   3 +
 xen/arch/x86/guest/vixen.c        | 264 ++++++++++++++++++++++++++++++++++++++
 xen/arch/x86/setup.c              |   3 +
 xen/include/asm-x86/guest/vixen.h |   6 +
 4 files changed, 276 insertions(+)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index da1bf1a..3e9c5be 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1147,6 +1147,9 @@ int arch_set_info_guest(
 
     update_cr3(v);
 
+    if ( is_vixen() )
+        vixen_vcpu_initialize(v);
+
  out:
     if ( flags & VGCF_online )
         clear_bit(_VPF_down, &v->pause_flags);
diff --git a/xen/arch/x86/guest/vixen.c b/xen/arch/x86/guest/vixen.c
index 1816ece..76d9638 100644
--- a/xen/arch/x86/guest/vixen.c
+++ b/xen/arch/x86/guest/vixen.c
@@ -21,10 +21,16 @@
 
 #include <asm/guest/vixen.h>
 #include <public/version.h>
+#include <xen/event.h>
+#include <asm/apic.h>
 
 static int in_vixen;
 static uint8_t global_si_data[4 << 10] __attribute__((aligned(4096)));
 static shared_info_t *global_si = (void *)global_si_data;
+static bool vixen_per_cpu_notifications = true;
+static uint8_t vixen_evtchn_vector;
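+/* Cleared once a delivery mechanism that needs no explicit APIC EOI is active. */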
+static bool vixen_needs_apic_ack = true;
+struct irqaction vixen_irqaction;
 
 void __init init_vixen(void)
 {
@@ -94,3 +100,261 @@ u64 vixen_get_cpu_freq(void)
        return imm >> time.tsc_shift;
     }
 }
+
+/*
+ * Make a bitmask (i.e. unsigned long *) of a xen_ulong_t
+ * array. Primarily to avoid long lines (hence the terse name).
+ */
+#define BM(x) (unsigned long *)(x)
+/* Find the first set bit in an evtchn mask */
+#define EVTCHN_FIRST_BIT(w) find_first_bit(BM(&(w)), BITS_PER_XEN_ULONG)
+
+/*
+ * Mask out the i least significant bits of w
+ */
+#define MASK_LSBS(w, i) (w & ((~((xen_ulong_t)0UL)) << i))
+
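+/*
+ * Per-CPU cursors recording where the previous poll stopped, so that
+ * each pass resumes after the last handled port and busy low-numbered
+ * ports cannot starve higher ones (as in Linux's events_2l.c).
+ */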
+static DEFINE_PER_CPU(unsigned int, current_word_idx);
+static DEFINE_PER_CPU(unsigned int, current_bit_idx);
+
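+/* Events that are both pending and unmasked in word idx of the shared info. */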
+static inline xen_ulong_t active_evtchns(unsigned int cpu,
+                                         shared_info_t *sh,
+                                         unsigned int idx)
+{
+    return sh->native.evtchn_pending[idx] &
+           ~sh->native.evtchn_mask[idx];
+}
+
+static void vixen_evtchn_poll_one(size_t cpu)
+{
+    shared_info_t *s = global_si;
+    struct vcpu_info *vcpu_info = &s->native.vcpu_info[cpu];
+    xen_ulong_t pending_words;
+    xen_ulong_t pending_bits;
+    int start_word_idx, start_bit_idx;
+    int word_idx, bit_idx, i;
+
+    /*
+     * Master flag must be cleared /before/ clearing
+     * selector flag. The xchg() below acts as a full
+     * barrier, providing the required ordering.
+     */
+    pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
+
+    start_word_idx = this_cpu(current_word_idx);
+    start_bit_idx = this_cpu(current_bit_idx);
+
+    word_idx = start_word_idx;
+
+    for (i = 0; pending_words != 0; i++) {
+        xen_ulong_t words;
+
+        words = MASK_LSBS(pending_words, word_idx);
+
+        /*
+         * If we masked out all events, wrap to beginning.
+         */
+        if (words == 0) {
+            word_idx = 0;
+            bit_idx = 0;
+            continue;
+        }
+        word_idx = EVTCHN_FIRST_BIT(words);
+
+        pending_bits = active_evtchns(cpu, s, word_idx);
+        bit_idx = 0; /* usually scan entire word from start */
+        /*
+         * We scan the starting word in two parts.
+         *
+         * 1st time: start in the middle, scanning the
+         * upper bits.
+         *
+         * 2nd time: scan the whole word (not just the
+         * parts skipped in the first pass) -- if an
+         * event in the previously scanned bits is
+         * pending again it would just be scanned on
+         * the next loop anyway.
+         */
+        if (word_idx == start_word_idx) {
+            if (i == 0)
+                bit_idx = start_bit_idx;
+        }
+
+        do {
+            struct evtchn *chn;
+            xen_ulong_t bits;
+            int port;
+
+            bits = MASK_LSBS(pending_bits, bit_idx);
+
+            /* If we masked out all events, move on. */
+            if (bits == 0)
+                break;
+
+            bit_idx = EVTCHN_FIRST_BIT(bits);
+
+            /* Process port. */
+            port = (word_idx * BITS_PER_XEN_ULONG) + bit_idx;
+
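+            /*
+             * Forward the event: clear the pending bit in the
+             * parent's shared info and re-raise the event on the
+             * hardware domain's equivalent local port.
+             */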
+            chn = evtchn_from_port(hardware_domain, port);
+            clear_bit(port, s->native.evtchn_pending);
+            evtchn_port_set_pending(hardware_domain, chn->notify_vcpu_id, chn);
+
+            bit_idx = (bit_idx + 1) % BITS_PER_XEN_ULONG;
+
+            /* Next caller starts at last processed + 1 */
+            this_cpu(current_word_idx) = bit_idx ? word_idx :
+                                         (word_idx+1) % BITS_PER_XEN_ULONG;
+            this_cpu(current_bit_idx) = bit_idx;
+        } while (bit_idx != 0);
+
+        /* Scan start_word_idx twice; all others once. */
+        if ((word_idx != start_word_idx) || (i != 0))
+            pending_words &= ~(1UL << word_idx);
+
+        word_idx = (word_idx + 1) % BITS_PER_XEN_ULONG;
+    }
+}
+
+static void vixen_upcall(int cpu)
+{
+    shared_info_t *s = global_si;
+    struct vcpu_info *vcpu_info = &s->native.vcpu_info[cpu];
+
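+    /*
+     * Keep polling until the parent stops flagging new upcalls;
+     * events that arrive while we poll set evtchn_upcall_pending
+     * again.
+     */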
+    do {
+        vcpu_info->evtchn_upcall_pending = 0;
+        vixen_evtchn_poll_one(cpu);
+    } while (vcpu_info->evtchn_upcall_pending);
+}
+
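+/* Vector callback handler (upcall vector or callback-via-vector). */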
+static void vixen_evtchn_notify(struct cpu_user_regs *regs)
+{
+    if (vixen_needs_apic_ack)
+        ack_APIC_irq();
+
+    vixen_upcall(smp_processor_id());
+}
+
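+/* INTx handler: the line interrupt is acked by the normal IRQ path. */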
+static void vixen_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)
+{
+    vixen_upcall(smp_processor_id());
+}
+
+static int hvm_set_parameter(int idx, uint64_t value)
+{
+    struct xen_hvm_param xhv;
+    int r;
+
+    xhv.domid = DOMID_SELF;
+    xhv.index = idx;
+    xhv.value = value;
+    r = HYPERVISOR_hvm_op(HVMOP_set_param, &xhv);
+    if (r < 0)
+        printk("Cannot set hvm parameter %d: %d!\n", idx, r);
+    return r;
+}
+
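+/*
+ * Choose a notification mechanism, in order of preference:
+ *  1. per-vCPU upcall vector (HVMOP_set_evtchn_upcall_vector), which
+ *     is delivered as an ordinary vector and needs an APIC EOI;
+ *  2. global callback vector (HVM_PARAM_CALLBACK_TYPE_VECTOR),
+ *     injected without needing an EOI;
+ *  3. INTx from the Xen platform PCI device, acked through the
+ *     regular IRQ path (per-CPU notifications then unavailable).
+ */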
+void vixen_vcpu_initialize(struct vcpu *v)
+{
+    struct xen_hvm_evtchn_upcall_vector upcall;
+    long rc;
+
+    printk("VIXEN vcpu init VCPU%d\n", v->vcpu_id);
+
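+    /* Pin the vCPU 1:1 to the matching pCPU for event delivery. */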
+    vcpu_pin_override(v, v->vcpu_id);
+
+    if (!vixen_needs_apic_ack)
+        return;
+
+    printk("VIXEN vcpu init VCPU%d -- trying evtchn_upcall_vector\n", 
v->vcpu_id);
+
+    upcall.vcpu = v->vcpu_id;
+    upcall.vector = vixen_evtchn_vector;
+    rc = HYPERVISOR_hvm_op(HVMOP_set_evtchn_upcall_vector, &upcall);
+    if ( rc )
+    {
+        struct xen_feature_info fi;
+
+        printk("VIXEN vcpu init VCPU%d -- trying hvm_callback_vector\n", 
v->vcpu_id);
+
+        fi.submap_idx = 0;
+        rc = HYPERVISOR_xen_version(XENVER_get_features, &fi);
+        if ( !rc )
+        {
+            rc = -EINVAL;
+            if ( fi.submap & (1 << XENFEAT_hvm_callback_vector) )
+            {
+                rc = hvm_set_parameter(HVM_PARAM_CALLBACK_IRQ,
+                                       ((uint64_t)HVM_PARAM_CALLBACK_TYPE_VECTOR << 56) |
+                                       vixen_evtchn_vector);
+            }
+            if ( !rc )
+                vixen_needs_apic_ack = false;
+        }
+    }
+
+    if ( rc )
+    {
+        int slot;
+
+        vixen_per_cpu_notifications = false;
+
+        printk("VIXEN vcpu init VCPU%d -- trying pci_intx_callback\n", 
v->vcpu_id);
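+        /*
+         * Scan bus 0 for the Xen platform PCI device
+         * (vendor 0x5853 "XenSource", device 0x0001).
+         */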
+        for (slot = 2; slot < 32; slot++) {
+            uint16_t vendor, device;
+
+            vendor = pci_conf_read16(0, 0, slot, 0, PCI_VENDOR_ID);
+            device = pci_conf_read16(0, 0, slot, 0, PCI_DEVICE_ID);
+
+            if (vendor == 0x5853 && device == 0x0001) {
+                break;
+            }
+        }
+
+        if (slot != 32) {
+            int pin, line;
+
+            printk("Found Xen platform device at 0000:00:%02d.0\n", slot);
+            pin = pci_conf_read8(0, 0, slot, 0, PCI_INTERRUPT_PIN);
+            if (pin) {
+                line = pci_conf_read8(0, 0, slot, 0, PCI_INTERRUPT_LINE);
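+                /*
+                 * HVM_PARAM_CALLBACK_IRQ type 1 (PCI INTx):
+                 * val[63:56] = 1, device in val[15:11] (function 0),
+                 * INTx pin in val[1:0].
+                 */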
+                rc = hvm_set_parameter(HVM_PARAM_CALLBACK_IRQ,
+                                       (1ULL << 56) | (slot << 11) | (pin - 1));
+
+                if (rc) {
+                    printk("Failed to setup IRQ callback\n");
+                } else {
+                    vixen_irqaction.handler = vixen_interrupt;
+                    vixen_irqaction.name = "vixen";
+                    vixen_irqaction.dev_id = 0;
+                    rc = setup_irq(line, 0, &vixen_irqaction);
+                    if (rc) {
+                        printk("Setup IRQ failed!\n");
+                    } else {
+                        printk("Xen platform LNK mapped to line %d\n", line);
+                        vixen_needs_apic_ack = false;
+                    }
+                }
+            }
+        } else {
+            printk("Cannot find Platform device\n");
+        }
+    }
+}
+
+bool vixen_has_per_cpu_notifications(void)
+{
+    return vixen_per_cpu_notifications;
+}
+
+void __init
+vixen_transform(struct domain *dom0)
+{
+    /* Setup event channel forwarding */
+    alloc_direct_apic_vector(&vixen_evtchn_vector, vixen_evtchn_notify);
+    printk("Vixen evtchn vector is %d\n", vixen_evtchn_vector);
+
+    /* Initialize the first vCPU */
+    vixen_vcpu_initialize(dom0->vcpu[0]);
+}
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 07239c0..1b89844 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -1603,6 +1603,9 @@ void __init noreturn __start_xen(unsigned long mbi_p)
     dom0->is_privileged = 1;
     dom0->target = NULL;
 
+    if ( is_vixen() )
+        vixen_transform(dom0);
+
     /* Grab the DOM0 command line. */
     cmdline = (char *)(mod[0].string ? __va(mod[0].string) : NULL);
     if ( (cmdline != NULL) || (kextra != NULL) )
diff --git a/xen/include/asm-x86/guest/vixen.h b/xen/include/asm-x86/guest/vixen.h
index 28c4337..e486cc3 100644
--- a/xen/include/asm-x86/guest/vixen.h
+++ b/xen/include/asm-x86/guest/vixen.h
@@ -76,4 +76,10 @@ void __init early_vixen_init(void);
 
 u64 vixen_get_cpu_freq(void);
 
+bool vixen_has_per_cpu_notifications(void);
+
+void vixen_vcpu_initialize(struct vcpu *v);
+
+void __init vixen_transform(struct domain *dom0);
+
 #endif
-- 
1.9.1

