|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v2 18/23] vixen: Introduce ECS_PROXY for event channel proxying
From: Jan H. Schönherr <jschoenh@xxxxxxxxx>
Previously, we would keep proxied event channels around as
ECS_INTERDOMAIN channels. This works for most things, but has the
problems that EVTCHNOP_status is broken, and that EVTCHNOP_close does
not mark an event channel as free.
Introduce a separate ECS_PROXY to denote event channels that are
forwarded to the hypervisor we're running under.
This makes the code more readable in many places.
Signed-off-by: Jan H. Schönherr <jschoenh@xxxxxxxxx>
Signed-off-by: Anthony Liguori <aliguori@xxxxxxxxxx>
---
xen/common/event_channel.c | 87 ++++++++++++++++++++++++++++++++++++++++------
xen/include/xen/event.h | 3 ++
xen/include/xen/sched.h | 1 +
3 files changed, 81 insertions(+), 10 deletions(-)
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index c69f9db..85ff7e0 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -30,6 +30,7 @@
#include <public/xen.h>
#include <public/event_channel.h>
#include <xsm/xsm.h>
+#include <asm/guest/vixen.h>
#define ERROR_EXIT(_errno) \
do { \
@@ -156,25 +157,25 @@ static void free_evtchn_bucket(struct domain *d, struct
evtchn *bucket)
xfree(bucket);
}
-static int get_free_port(struct domain *d)
+static int allocate_port(struct domain *d, int port)
{
struct evtchn *chn;
struct evtchn **grp;
- int port;
if ( d->is_dying )
return -EINVAL;
- for ( port = 0; port_is_valid(d, port); port++ )
+ if ( port_is_valid(d, port) )
{
if ( port > d->max_evtchn_port )
return -ENOSPC;
if ( evtchn_from_port(d, port)->state == ECS_FREE
&& !evtchn_port_is_busy(d, port) )
return port;
+ return -EINVAL;
}
- if ( port == d->max_evtchns || port > d->max_evtchn_port )
+ if ( port >= d->max_evtchns || port > d->max_evtchn_port )
return -ENOSPC;
if ( !group_from_port(d, port) )
@@ -185,16 +186,59 @@ static int get_free_port(struct domain *d)
group_from_port(d, port) = grp;
}
- chn = alloc_evtchn_bucket(d, port);
- if ( !chn )
- return -ENOMEM;
- bucket_from_port(d, port) = chn;
+ while ( d->valid_evtchns <= port )
+ {
+ chn = alloc_evtchn_bucket(d, d->valid_evtchns);
+ if ( !chn )
+ return -ENOMEM;
+ bucket_from_port(d, d->valid_evtchns) = chn;
- write_atomic(&d->valid_evtchns, d->valid_evtchns + EVTCHNS_PER_BUCKET);
+ write_atomic(&d->valid_evtchns, d->valid_evtchns + EVTCHNS_PER_BUCKET);
+ }
return port;
}
+static int get_free_port(struct domain *d)
+{
+ int port;
+
+ for ( port = 0; port_is_valid(d, port); port++ )
+ {
+ if ( port > d->max_evtchn_port )
+ return -ENOSPC;
+ if ( evtchn_from_port(d, port)->state == ECS_FREE
+ && !evtchn_port_is_busy(d, port) )
+ break;
+ }
+
+ return allocate_port(d, port);
+}
+
+int evtchn_alloc_proxy(struct domain *d, int port, u8 ecs)
+{
+ struct evtchn *chn;
+ int rc;
+
+ if ( !is_vixen() )
+ return -ENOSYS;
+
+ rc = allocate_port(d, port);
+ if ( rc < 0 )
+ return rc;
+
+ chn = evtchn_from_port(d, port);
+ spin_lock(&chn->lock);
+ chn->state = ECS_PROXY;
+ evtchn_port_init(d, chn);
+
+ if ( ecs == ECS_INTERDOMAIN )
+ evtchn_port_set_pending(d, chn->notify_vcpu_id, chn);
+ spin_unlock(&chn->lock);
+
+ return 0;
+}
+
static void free_evtchn(struct domain *d, struct evtchn *chn)
{
/* Clear pending event to avoid unexpected behavior on re-bind. */
@@ -628,6 +672,9 @@ static long evtchn_close(struct domain *d1, int port1,
bool_t guest)
goto out;
+ case ECS_PROXY:
+ break;
+
default:
BUG();
}
@@ -690,6 +737,14 @@ int evtchn_send(struct domain *ld, unsigned int lport)
case ECS_UNBOUND:
/* silently drop the notification */
break;
+ case ECS_PROXY:
+ ret = -EINVAL;
+ if ( is_vixen() )
+ {
+ struct evtchn_send send = { .port = lport };
+ ret = HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
+ }
+ break;
default:
ret = -EINVAL;
}
@@ -892,6 +947,10 @@ static long evtchn_status(evtchn_status_t *status)
case ECS_IPI:
status->status = EVTCHNSTAT_ipi;
break;
+ case ECS_PROXY:
+ BUG_ON(!is_vixen());
+ rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, status);
+ break;
default:
BUG();
}
@@ -944,6 +1003,14 @@ long evtchn_bind_vcpu(unsigned int port, unsigned int
vcpu_id)
case ECS_INTERDOMAIN:
chn->notify_vcpu_id = vcpu_id;
break;
+ case ECS_PROXY:
+ if ( is_vixen() && vixen_has_per_cpu_notifications() )
+ {
+ struct evtchn_bind_vcpu bind = { .port = port, .vcpu = vcpu_id };
+ HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind);
+ }
+ chn->notify_vcpu_id = vcpu_id;
+ break;
case ECS_PIRQ:
if ( chn->notify_vcpu_id == vcpu_id )
break;
@@ -1276,7 +1343,7 @@ int evtchn_init(struct domain *d)
d->valid_evtchns = EVTCHNS_PER_BUCKET;
spin_lock_init_prof(d, event_lock);
- if ( get_free_port(d) != 0 )
+ if ( allocate_port(d, 0) != 0 )
{
free_evtchn_bucket(d, d->evtchn);
return -EINVAL;
diff --git a/xen/include/xen/event.h b/xen/include/xen/event.h
index 87915ea..f3febe6 100644
--- a/xen/include/xen/event.h
+++ b/xen/include/xen/event.h
@@ -71,6 +71,9 @@ void notify_via_xen_event_channel(struct domain *ld, int
lport);
/* Inject an event channel notification into the guest */
void arch_evtchn_inject(struct vcpu *v);
+/* Allocate a specific event channel as proxy. */
+int evtchn_alloc_proxy(struct domain *d, int port, u8 ecs);
+
/*
* Internal event channel object storage.
*
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 5ddf6a2..f0a773b 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -93,6 +93,7 @@ struct evtchn
#define ECS_PIRQ 4 /* Channel is bound to a physical IRQ line. */
#define ECS_VIRQ 5 /* Channel is bound to a virtual IRQ line. */
#define ECS_IPI 6 /* Channel is bound to a virtual IPI line. */
+#define ECS_PROXY 7 /* Channel is proxied to parent hypervisor. */
u8 state; /* ECS_* */
u8 xen_consumer:XEN_CONSUMER_BITS; /* Consumer in Xen if nonzero */
u8 pending:1;
--
1.9.1
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.