
[Xen-devel] [RFC][PATCH 01/13] Kemari: add ECS_TAP state to event channel



This patch implements event channel tapping.  When an inter-domain channel is
put into the ECS_TAP state, the registered redirect function is called whenever
an event is sent through the channel.
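
A minimal usage sketch (not part of this patch): how an in-hypervisor
component such as Kemari might tap an already-bound inter-domain channel
through the interface added below.  kemari_redirect() and
kemari_tap_channel() are hypothetical names, and the mode value is a
placeholder since the patch defines no mode constants; only
struct evtchn_bind_tap and evtchn_bind_tap() come from the patch.

    /* Hypothetical callback invoked by evtchn_send() on a tapped channel. */
    static long kemari_redirect(struct evtchn *lchn, struct evtchn *rchn)
    {
        /* Inspect or log the event before normal delivery continues. */
        return 0;
    }

    /* Hypothetical helper that taps an existing inter-domain port. */
    static long kemari_tap_channel(domid_t dom, uint32_t port)
    {
        struct evtchn_bind_tap bind_tap = {
            .tap_dom  = dom,
            .tap_port = port,
            .mode     = 0,                  /* placeholder: tap direction */
            .redirect = kemari_redirect,
        };

        return evtchn_bind_tap(&bind_tap);
    }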

Signed-off-by: Yoshi Tamura <tamura.yoshiaki@xxxxxxxxxxxxx>
Signed-off-by: Yoshisato Yanagisawa <yanagisawa.yoshisato@xxxxxxxxxxxxx>
---
 xen/common/event_channel.c |  150 ++++++++++++++++++++++++++++++++++++++++++++-
 xen/include/xen/event.h    |   14 ++++
 xen/include/xen/sched.h    |   10 +++
 3 files changed, 173 insertions(+), 1 deletion(-)
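
For symmetry, a sketch of the teardown path, again with a hypothetical
helper name: evtchn_unbind_tap() restores both ends to ECS_INTERDOMAIN
and clears the redirect pointers itself, so the caller mainly needs to
identify the tapped domain and port.

    /* Hypothetical helper that removes a previously established tap. */
    static long kemari_untap_channel(domid_t dom, uint32_t port)
    {
        struct evtchn_bind_tap bind_tap = {
            .tap_dom  = dom,
            .tap_port = port,
        };

        return evtchn_unbind_tap(&bind_tap);
    }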

diff -r 19201eebab16 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h   Thu Sep 25 13:33:50 2008 +0100
+++ b/xen/include/xen/sched.h   Wed Mar 04 17:04:30 2009 +0900
@@ -19,6 +19,7 @@
 #include <xen/xenoprof.h>
 #include <xen/rcupdate.h>
 #include <xen/irq.h>
+#include <xen/kemari.h>

 #ifdef CONFIG_COMPAT
 #include <compat/vcpu.h>
@@ -47,6 +48,7 @@
 #define ECS_PIRQ         4 /* Channel is bound to a physical IRQ line.       */
 #define ECS_VIRQ         5 /* Channel is bound to a virtual IRQ line.        */
 #define ECS_IPI          6 /* Channel is bound to a virtual IPI line.        */
+#define ECS_TAP          7 /* Channel is bound and tapped.                   */
     u8  state;             /* ECS_* */
     u8  consumer_is_xen;   /* Consumed by Xen or by guest? */
     u16 notify_vcpu_id;    /* VCPU for local delivery notification */
@@ -61,6 +63,11 @@
         u16 pirq;      /* state == ECS_PIRQ */
         u16 virq;      /* state == ECS_VIRQ */
     } u;
+    struct {
+        u8 mode;    /* Tap IN, OUT, or both. */
+        /* Function to call when an event is detected. */
+        long (*redirect) (struct evtchn *lchn, struct evtchn *rchn);
+    } tap;
 #ifdef FLASK_ENABLE
     void *ssid;
 #endif
@@ -249,6 +256,9 @@
     /* OProfile support. */
     struct xenoprof *xenoprof;
     int32_t time_offset_seconds;
+
+    /* Kemari support. */
+    struct kemari *kemari;

     struct rcu_head rcu;

diff -r 19201eebab16 xen/include/xen/event.h
--- a/xen/include/xen/event.h   Thu Sep 25 13:33:50 2008 +0100
+++ b/xen/include/xen/event.h   Wed Mar 04 17:04:30 2009 +0900
@@ -79,4 +79,18 @@
         mb(); /* set blocked status /then/ caller does his work */      \
     } while ( 0 )

+struct evtchn_bind_tap {
+    /* IN parameters. */
+    domid_t       tap_dom;
+    uint32_t      tap_port;
+    uint8_t       mode;
+    long          (*redirect) (struct evtchn *lchn, struct evtchn *rchn);
+};
+
+void notify_via_xen_evtchn_tap(struct domain *ld, int lport);
+
+long evtchn_bind_tap(struct evtchn_bind_tap *bind_tap);
+
+long evtchn_unbind_tap(struct evtchn_bind_tap *bind_tap);
+
 #endif /* __XEN_EVENT_H__ */

diff -r 19201eebab16 xen/common/event_channel.c
--- a/xen/common/event_channel.c        Thu Sep 25 13:33:50 2008 +0100
+++ b/xen/common/event_channel.c        Wed Mar 04 17:04:28 2009 +0900
@@ -201,7 +201,8 @@
     if ( !port_is_valid(rd, rport) )
         ERROR_EXIT_DOM(-EINVAL, rd);
     rchn = evtchn_from_port(rd, rport);
-    if ( (rchn->state != ECS_UNBOUND) ||
+    /* Kemari needs to reuse the rchn information. */
+    if ( (rchn->state != ECS_UNBOUND) &&
          (rchn->u.unbound.remote_domid != ld->domain_id) )
         ERROR_EXIT_DOM(-EINVAL, rd);

@@ -348,6 +349,113 @@
     return rc;
 }

+long evtchn_bind_tap(struct evtchn_bind_tap *bind_tap)
+{
+    struct evtchn *lchn, *rchn;
+    struct domain *ld, *rd;
+    int            lport = bind_tap->tap_port, rport;
+    domid_t        ldom = bind_tap->tap_dom;
+    long ret;
+
+    if ( (ld = rcu_lock_domain_by_id(ldom)) == NULL )
+        return -ESRCH;
+
+    spin_lock(&ld->evtchn_lock);
+
+    ret = -EINVAL;
+    if ( !port_is_valid(ld, lport) )
+        goto lchn_out;
+    lchn = evtchn_from_port(ld, lport);
+    if ( lchn->state != ECS_INTERDOMAIN )
+        goto lchn_out;
+
+    ret = -ESRCH;
+    rd = lchn->u.interdomain.remote_dom;
+    if ( rd == NULL )
+        goto lchn_out;
+
+    spin_lock(&rd->evtchn_lock);
+
+    rport = lchn->u.interdomain.remote_port;
+    if ( !port_is_valid(rd, rport) )
+        goto rchn_out;
+    rchn = evtchn_from_port(rd, rport);
+    if ( rchn->state != ECS_INTERDOMAIN )
+        goto rchn_out;
+
+    lchn->state = ECS_TAP;
+    lchn->tap.mode = bind_tap->mode;
+    lchn->tap.redirect = bind_tap->redirect;
+
+    rchn->state = ECS_TAP;
+    rchn->tap.redirect = bind_tap->redirect;
+
+    ret = 0;
+
+ rchn_out:
+    spin_unlock(&rd->evtchn_lock);
+
+ lchn_out:
+    spin_unlock(&ld->evtchn_lock);
+
+    rcu_unlock_domain(ld);
+
+    return ret;
+}
+
+long evtchn_unbind_tap(struct evtchn_bind_tap *bind_tap)
+{
+    struct evtchn *lchn, *rchn;
+    struct domain *ld, *rd;
+    int            lport = bind_tap->tap_port, rport;
+    domid_t        ldom = bind_tap->tap_dom;
+    long ret;
+
+    if ( (ld = rcu_lock_domain_by_id(ldom)) == NULL )
+        return -ESRCH;
+
+    spin_lock(&ld->evtchn_lock);
+
+    ret = -EINVAL;
+    if ( !port_is_valid(ld, lport) )
+        goto lchn_out;
+    lchn = evtchn_from_port(ld, lport);
+    if ( lchn->state != ECS_TAP )
+        goto lchn_out;
+
+    ret = -ESRCH;
+    rd = lchn->u.interdomain.remote_dom;
+    if ( rd == NULL )
+        goto lchn_out;
+
+    spin_lock(&rd->evtchn_lock);
+
+    rport = lchn->u.interdomain.remote_port;
+    if ( !port_is_valid(rd, rport) )
+        goto rchn_out;
+    rchn = evtchn_from_port(rd, rport);
+    if ( rchn->state != ECS_TAP )
+        goto rchn_out;
+
+    lchn->state = ECS_INTERDOMAIN;
+    lchn->tap.mode = bind_tap->mode;
+    lchn->tap.redirect = NULL;
+
+    rchn->state = ECS_INTERDOMAIN;
+    rchn->tap.redirect = NULL;
+
+    ret = 0;
+
+ rchn_out:
+    spin_unlock(&rd->evtchn_lock);
+
+ lchn_out:
+    spin_unlock(&ld->evtchn_lock);
+
+    rcu_unlock_domain(ld);
+
+    return ret;
+}

 static long __evtchn_close(struct domain *d1, int port1)
 {
@@ -403,6 +511,7 @@
     case ECS_IPI:
         break;

+    case ECS_TAP:
     case ECS_INTERDOMAIN:
         if ( d2 == NULL )
         {
@@ -440,6 +549,14 @@
         BUG_ON(!port_is_valid(d2, port2));

         chn2 = evtchn_from_port(d2, port2);
+
+        if ( chn1->state == ECS_TAP )
+        {
+            chn1->tap.redirect = NULL;
+            chn2->tap.redirect = NULL;
+            chn2->state = ECS_INTERDOMAIN;
+        }
+
         BUG_ON(chn2->state != ECS_INTERDOMAIN);
         BUG_ON(chn2->u.interdomain.remote_dom != d1);

@@ -509,6 +626,13 @@

     switch ( lchn->state )
     {
+    case ECS_TAP:
+        rd    = lchn->u.interdomain.remote_dom;
+        rport = lchn->u.interdomain.remote_port;
+        rchn  = evtchn_from_port(rd, rport);
+
+        lchn->tap.redirect(lchn, rchn);
+        /* Fall through to normal inter-domain delivery. */
     case ECS_INTERDOMAIN:
         rd    = lchn->u.interdomain.remote_dom;
         rport = lchn->u.interdomain.remote_port;
@@ -1037,6 +1161,30 @@
     spin_unlock(&ld->evtchn_lock);
 }

+void notify_via_xen_evtchn_tap(struct domain *ld, int lport)
+{
+    struct evtchn *lchn, *rchn;
+    struct domain *rd;
+    int            rport;
+
+    if ( ld != current->domain )
+        spin_lock(&ld->evtchn_lock);
+
+    ASSERT(port_is_valid(ld, lport));
+    lchn = evtchn_from_port(ld, lport);
+    ASSERT(lchn->consumer_is_xen);
+
+    if ( likely(lchn->state == ECS_INTERDOMAIN) )
+    {
+        rd    = lchn->u.interdomain.remote_dom;
+        rport = lchn->u.interdomain.remote_port;
+        rchn  = evtchn_from_port(rd, rport);
+        evtchn_set_pending(rd->vcpu[rchn->notify_vcpu_id], rport);
+    }
+
+    if ( ld != current->domain )
+        spin_unlock(&ld->evtchn_lock);
+}

 int evtchn_init(struct domain *d)
 {



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel