To: "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH 15/24] [xen-unstable.hg] use new xen-internal API which allows stubdom to be registered to handle VIRQ_DOM_EXC
From: Alex Zeffertt <alex.zeffertt@xxxxxxxxxxxxx>
Date: Mon, 23 Mar 2009 15:21:16 +0000
Delivery-date: Mon, 23 Mar 2009 08:33:21 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Thunderbird 2.0.0.19 (X11/20090105)


Converts most callers of send_guest_global_virq(dom0, ...) to the VIRQ
handler functions introduced in the previous patch.

I sent these to Keir in an earlier state; see the explanation
accompanying xen_virq_handler_api.

Signed-off-by: Diego Ongaro <diego.ongaro@xxxxxxxxxx>
Signed-off-by: Alex Zeffertt <alex.zeffertt@xxxxxxxxxxxxx>
---
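
For reference, the conversion is mechanical. Call sites that notified
dom0 unconditionally switch to send_handler_global_virq(); call sites
that first check guest_enabled_event() look the handler domain up
explicitly. A minimal sketch of both patterns, mirroring the hunks
below (VIRQ_FOO is a placeholder, not a real VIRQ):

    /* Before: recipient hard-coded to dom0. */
    if ( dom0 && guest_enabled_event(dom0->vcpu[0], VIRQ_FOO) )
        send_guest_global_virq(dom0, VIRQ_FOO);

    /* After: whichever domain is registered for the VIRQ (dom0 by default). */
    struct domain *handler = get_global_virq_handler(VIRQ_FOO);
    if ( handler && guest_enabled_event(handler->vcpu[0], VIRQ_FOO) )
        send_guest_global_virq(handler, VIRQ_FOO);
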

diff -r de77d76f8b67 xen/arch/x86/cpu/mcheck/amd_nonfatal.c
--- a/xen/arch/x86/cpu/mcheck/amd_nonfatal.c    Wed Mar 18 15:00:12 2009 +0000
+++ b/xen/arch/x86/cpu/mcheck/amd_nonfatal.c    Wed Mar 18 15:41:08 2009 +0000
@@ -167,7 +167,7 @@
                /* If Dom0 enabled the VIRQ_MCA event, then ... */
                if (event_enabled)
                        /* ... notify it. */
-                       send_guest_global_virq(dom0, VIRQ_MCA);
+                       send_handler_global_virq(VIRQ_MCA);
                else
                        /* ... or dump it */
                        x86_mcinfo_dump(mc_data);
diff -r de77d76f8b67 xen/arch/x86/cpu/mcheck/mce_intel.c
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c       Wed Mar 18 15:00:12 2009 +0000
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c       Wed Mar 18 15:41:08 2009 +0000
@@ -387,9 +387,11 @@
      */
     mi = machine_check_poll(MC_FLAG_CMCI);
     if (mi) {
+        struct domain *handler;
         x86_mcinfo_dump(mi);
-        if (dom0 && guest_enabled_event(dom0->vcpu[0], VIRQ_MCA))
-            send_guest_global_virq(dom0, VIRQ_MCA);
+        handler = get_global_virq_handler(VIRQ_MCA);
+        if (handler && guest_enabled_event(handler->vcpu[0], VIRQ_MCA))
+            send_guest_global_virq(handler, VIRQ_MCA);
     }
 
     printk(KERN_DEBUG "CMCI: CPU%d owner_map[%lx], no_cmci_map[%lx]\n", 
@@ -494,9 +496,11 @@
     printk(KERN_DEBUG "CMCI: cmci_intr happen on CPU%d\n", cpu);
     mi = machine_check_poll(MC_FLAG_CMCI);
     if (mi) {
+        struct domain *handler;
         x86_mcinfo_dump(mi);
-        if (dom0 && guest_enabled_event(dom0->vcpu[0], VIRQ_MCA))
-            send_guest_global_virq(dom0, VIRQ_MCA);
+        handler = get_global_virq_handler(VIRQ_MCA);
+        if (handler && guest_enabled_event(handler->vcpu[0], VIRQ_MCA))
+            send_guest_global_virq(handler, VIRQ_MCA);
     }
     irq_exit();
 }
@@ -608,10 +612,12 @@
     mi = machine_check_poll(MC_FLAG_POLLED);
     if (mi)
     {
+        struct domain *handler;
         x86_mcinfo_dump(mi);
         adjust++;
-        if (dom0 && guest_enabled_event(dom0->vcpu[0], VIRQ_MCA))
-            send_guest_global_virq(dom0, VIRQ_MCA);
+        handler = get_global_virq_handler(VIRQ_MCA);
+        if (handler && guest_enabled_event(handler->vcpu[0], VIRQ_MCA))
+            send_guest_global_virq(handler, VIRQ_MCA);
     }
 }
 
diff -r de77d76f8b67 xen/common/domain.c
--- a/xen/common/domain.c       Wed Mar 18 15:00:12 2009 +0000
+++ b/xen/common/domain.c       Wed Mar 18 15:41:08 2009 +0000
@@ -106,7 +106,7 @@
     if ( (d->shutdown_code == SHUTDOWN_suspend) && d->suspend_evtchn )
         evtchn_send(d, d->suspend_evtchn);
     else
-        send_guest_global_virq(dom0, VIRQ_DOM_EXC);
+        send_handler_global_virq(VIRQ_DOM_EXC);
 }
 
 static void vcpu_check_shutdown(struct vcpu *v)
@@ -388,7 +388,7 @@
         }
         d->is_dying = DOMDYING_dead;
         put_domain(d);
-        send_guest_global_virq(dom0, VIRQ_DOM_EXC);
+        send_handler_global_virq(VIRQ_DOM_EXC);
         /* fallthrough */
     case DOMDYING_dead:
         break;
@@ -538,7 +538,7 @@
     for_each_vcpu ( d, v )
         vcpu_sleep_nosync(v);
 
-    send_guest_global_virq(dom0, VIRQ_DEBUGGER);
+    send_handler_global_virq(VIRQ_DEBUGGER);
 }
 
 /* Complete domain destroy after RCU readers are not holding old references. */
@@ -579,7 +579,7 @@
     xsm_free_security_domain(d);
     free_domain_struct(d);
 
-    send_guest_global_virq(dom0, VIRQ_DOM_EXC);
+    send_handler_global_virq(VIRQ_DOM_EXC);
 }
 
 /* Release resources belonging to task @p. */
diff -r de77d76f8b67 xen/common/event_channel.c
--- a/xen/common/event_channel.c        Wed Mar 18 15:00:12 2009 +0000
+++ b/xen/common/event_channel.c        Wed Mar 18 15:41:08 2009 +0000
@@ -677,6 +677,14 @@
     send_guest_global_virq(_get_global_virq_handler(virq), virq);
 }
 
+struct domain *get_global_virq_handler(int virq)
+{
+    ASSERT(virq >= 0 && virq < NR_VIRQS);
+    ASSERT(virq_is_global(virq));
+
+    return _get_global_virq_handler(virq);
+}
+
 int set_global_virq_handler(struct domain *d, int virq)
 {
     struct domain *old;
diff -r de77d76f8b67 xen/common/trace.c
--- a/xen/common/trace.c        Wed Mar 18 15:00:12 2009 +0000
+++ b/xen/common/trace.c        Wed Mar 18 15:41:08 2009 +0000
@@ -409,7 +409,7 @@
  */
 static void trace_notify_dom0(unsigned long unused)
 {
-    send_guest_global_virq(dom0, VIRQ_TBUF);
+    send_handler_global_virq(VIRQ_TBUF);
 }
 static DECLARE_TASKLET(trace_notify_dom0_tasklet, trace_notify_dom0, 0);
 
diff -r de77d76f8b67 xen/drivers/char/console.c
--- a/xen/drivers/char/console.c        Wed Mar 18 15:00:12 2009 +0000
+++ b/xen/drivers/char/console.c        Wed Mar 18 15:41:08 2009 +0000
@@ -290,7 +290,7 @@
     if ( (serial_rx_prod-serial_rx_cons) != SERIAL_RX_SIZE )
         serial_rx_ring[SERIAL_RX_MASK(serial_rx_prod++)] = c;
     /* Always notify the guest: prevents receive path from getting stuck. */
-    send_guest_global_virq(dom0, VIRQ_CONSOLE);
+    send_handler_global_virq(VIRQ_CONSOLE);
 }
 
 static void serial_rx(char c, struct cpu_user_regs *regs)
@@ -317,7 +317,7 @@
 
 static void notify_dom0_con_ring(unsigned long unused)
 {
-    send_guest_global_virq(dom0, VIRQ_CON_RING);
+    send_handler_global_virq(VIRQ_CON_RING);
 }
 static DECLARE_TASKLET(notify_dom0_con_ring_tasklet, notify_dom0_con_ring, 0);
 
diff -r de77d76f8b67 xen/include/xen/event.h
--- a/xen/include/xen/event.h   Wed Mar 18 15:00:12 2009 +0000
+++ b/xen/include/xen/event.h   Wed Mar 18 15:41:08 2009 +0000
@@ -39,6 +39,14 @@
  * set_global_virq_handler), or dom0. 
  */
 void send_handler_global_virq(int virq);
+
+/*
+ * get_global_virq_handler: Get a global VIRQ handler.
+ *  @virq:     Virtual IRQ number (VIRQ_*), must be global
+ * Returns the explicitly set handler (see
+ * set_global_virq_handler), or dom0. 
+ */
+struct domain *get_global_virq_handler(int virq);
 
 /*
  * set_global_virq_handler: Set a global VIRQ handler.
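
For context, a minimal sketch of how a VIRQ could then be re-routed
away from dom0, assuming the set_global_virq_handler() interface from
the previous patch in this series (stubdom_id is a hypothetical
domid, not something this patch defines):

    /* Hypothetical registration path: route VIRQ_DOM_EXC to a stub domain. */
    struct domain *stubdom = get_domain_by_id(stubdom_id);

    if ( stubdom != NULL )
    {
        if ( set_global_virq_handler(stubdom, VIRQ_DOM_EXC) != 0 )
            printk("Failed to register VIRQ_DOM_EXC handler\n");
        put_domain(stubdom); /* drop the reference taken by get_domain_by_id() */
    }

Any later send_handler_global_virq(VIRQ_DOM_EXC) would then reach the
stub domain instead of dom0.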

