|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH 16/16] Implement 3-level event channel routines
Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
xen/common/event_channel.c | 110 ++++++++++++++++++++++++++++++++++++--------
1 file changed, 90 insertions(+), 20 deletions(-)
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index c448c60..a0bd00f 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -627,10 +627,33 @@ out:
return ret;
}
+static void __check_vcpu_polling(struct vcpu *v, int port)
+{
+ int vcpuid;
+ struct domain *d = v->domain;
+
+ /* Check if some VCPU might be polling for this event. */
+ if ( likely(bitmap_empty(d->poll_mask, d->max_vcpus)) )
+ return;
+
+ /* Wake any interested (or potentially interested) pollers. */
+ for ( vcpuid = find_first_bit(d->poll_mask, d->max_vcpus);
+ vcpuid < d->max_vcpus;
+ vcpuid = find_next_bit(d->poll_mask, d->max_vcpus, vcpuid+1) )
+ {
+ v = d->vcpu[vcpuid];
+ if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) &&
+ test_and_clear_bit(vcpuid, d->poll_mask) )
+ {
+ v->poll_evtchn = 0;
+ vcpu_unblock(v);
+ }
+ }
+}
+
static void evtchn_set_pending_l2(struct vcpu *v, int port)
{
struct domain *d = v->domain;
- int vcpuid;
/*
* The following bit operations must happen in strict order.
@@ -649,23 +672,35 @@ static void evtchn_set_pending_l2(struct vcpu *v, int port)
vcpu_mark_events_pending(v);
}
- /* Check if some VCPU might be polling for this event. */
- if ( likely(bitmap_empty(d->poll_mask, d->max_vcpus)) )
- return;
+ __check_vcpu_polling(v, port);
+}
- /* Wake any interested (or potentially interested) pollers. */
- for ( vcpuid = find_first_bit(d->poll_mask, d->max_vcpus);
- vcpuid < d->max_vcpus;
- vcpuid = find_next_bit(d->poll_mask, d->max_vcpus, vcpuid+1) )
+static void evtchn_set_pending_l3(struct vcpu *v, int port)
+{
+ struct domain *d = v->domain;
+ unsigned int page_no = EVTCHN_PAGE_NO(port);
+ unsigned int offset = EVTCHN_OFFSET_IN_PAGE(port);
+ unsigned int l1bit = port >> (EVTCHN_WORD_BITORDER(d) << 1);
+ unsigned int l2bit = port >> EVTCHN_WORD_BITORDER(d);
+
+ /*
+ * The following bit operations must happen in strict order.
+ * NB. On x86, the atomic bit operations also act as memory barriers.
+ * There is therefore sufficiently strict ordering for this architecture --
+ * others may require explicit memory barriers.
+ */
+
+ if ( test_and_set_bit(offset, d->evtchn_pending[page_no]) )
+ return;
+
+ if ( !test_bit(offset, d->evtchn_mask[page_no]) &&
+ !test_and_set_bit(l2bit, v->evtchn_pending_sel_l2) &&
+ !test_and_set_bit(l1bit, &vcpu_info(v, evtchn_pending_sel)) )
{
- v = d->vcpu[vcpuid];
- if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) &&
- test_and_clear_bit(vcpuid, d->poll_mask) )
- {
- v->poll_evtchn = 0;
- vcpu_unblock(v);
- }
+ vcpu_mark_events_pending(v);
}
+
+ __check_vcpu_polling(v, port);
}
static void evtchn_set_pending(struct vcpu *v, int port)
@@ -677,6 +712,9 @@ static void evtchn_set_pending(struct vcpu *v, int port)
case EVTCHN_2_LEVEL:
evtchn_set_pending_l2(v, port);
break;
+ case 3:
+ evtchn_set_pending_l3(v, port);
+ break;
default:
BUG();
}
@@ -981,6 +1019,37 @@ static int evtchn_unmask_l2(unsigned int port)
return 0;
}
+static int evtchn_unmask_l3(unsigned int port)
+{
+ struct domain *d = current->domain;
+ struct vcpu *v;
+ unsigned int page_no = EVTCHN_PAGE_NO(port);
+ unsigned int offset = EVTCHN_OFFSET_IN_PAGE(port);
+ unsigned int l1bit = port >> (EVTCHN_WORD_BITORDER(d) << 1);
+ unsigned int l2bit = port >> EVTCHN_WORD_BITORDER(d);
+
+ ASSERT(spin_is_locked(&d->event_lock));
+
+ if ( unlikely(!port_is_valid(d, port)) )
+ return -EINVAL;
+
+ v = d->vcpu[evtchn_from_port(d, port)->notify_vcpu_id];
+
+ /*
+ * These operations must happen in strict order. Based on
+ * include/xen/event.h:evtchn_set_pending().
+ */
+ if ( test_and_clear_bit(offset, d->evtchn_mask[page_no]) &&
+ test_bit (offset, d->evtchn_pending[page_no]) &&
+ !test_and_set_bit (l2bit, v->evtchn_pending_sel_l2) &&
+ !test_and_set_bit (l1bit, &vcpu_info(v, evtchn_pending_sel)) )
+ {
+ vcpu_mark_events_pending(v);
+ }
+
+ return 0;
+}
+
int evtchn_unmask(unsigned int port)
{
struct domain *d = current->domain;
@@ -991,6 +1060,9 @@ int evtchn_unmask(unsigned int port)
case EVTCHN_2_LEVEL:
rc = evtchn_unmask_l2(port);
break;
+ case 3:
+ rc = evtchn_unmask_l3(port);
+ break;
default:
BUG();
}
@@ -1390,10 +1462,6 @@ long do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
if ( copy_from_guest(&reg, arg, 1) != 0 )
return -EFAULT;
rc = evtchn_register_nlevel(&reg);
-
- /* XXX always fails this call because it is not yet completed */
- rc = -EINVAL;
-
break;
}
@@ -1602,8 +1670,10 @@ static void domain_dump_evtchn_info(struct domain *d)
bitmap_scnlistprintf(keyhandler_scratch, sizeof(keyhandler_scratch),
d->poll_mask, d->max_vcpus);
printk("Event channel information for domain %d:\n"
+ "Using %d-level event channel\n"
"Polling vCPUs: {%s}\n"
- " port [p/m]\n", d->domain_id, keyhandler_scratch);
+ " port [p/m]\n",
+ d->domain_id, d->evtchn_level, keyhandler_scratch);
spin_lock(&d->event_lock);
--
1.7.10.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |