
Re: [Xen-devel] [PATCH 1/2] xen/event: Add reference counting to event channel



On Tue, 2011-10-18 at 22:04 +0100, Daniel De Graaf wrote:
> Event channels exposed to userspace by the evtchn module may be used by
> other modules in an asynchronous manner, which requires that reference
> counting be used to prevent the event channel from being closed before
> the signals are delivered.
> 
> Signed-off-by: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
> ---
>  drivers/xen/events.c |   34 ++++++++++++++++++++++++++++++++++
>  include/xen/events.h |    6 ++++++
>  2 files changed, 40 insertions(+), 0 deletions(-)
> 
> diff --git a/drivers/xen/events.c b/drivers/xen/events.c
> index 7523719..36d3390 100644
> --- a/drivers/xen/events.c
> +++ b/drivers/xen/events.c
> @@ -88,6 +88,7 @@ enum xen_irq_type {
>  struct irq_info
>  {
>       struct list_head list;
> +     atomic_t refcount;
>       enum xen_irq_type type; /* type */
>       unsigned irq;
>       unsigned short evtchn;  /* event channel */
> @@ -407,6 +408,7 @@ static void xen_irq_init(unsigned irq)
>               panic("Unable to allocate metadata for IRQ%d\n", irq);
>  
>       info->type = IRQT_UNBOUND;
> +     atomic_set(&info->refcount, 1);
>  
>       irq_set_handler_data(irq, info);
>  
> @@ -469,6 +471,8 @@ static void xen_free_irq(unsigned irq)
>  
>       irq_set_handler_data(irq, NULL);
>  
> +     BUG_ON(atomic_read(&info->refcount) > 1);

Should this be > 0? If we get here with a reference count still held,
isn't that indicative of an issue?

Is this just because of the various error paths which call xen_free_irq
to clean up without also dropping the initial reference from
xen_irq_init? If so, I think either those paths should drop the
reference themselves, or the initial refcount should be 0, with the
initial reference taken only once the caller of xen_irq_init has
actually succeeded.
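
Concretely, something like this is what I have in mind for the second
option (untested sketch, just to illustrate):

	static void xen_irq_init(unsigned irq)
	{
		...
		info->type = IRQT_UNBOUND;
		/* No reference yet; the caller takes the initial one
		 * only once binding has actually succeeded. */
		atomic_set(&info->refcount, 0);
		...
	}

	static void xen_free_irq(unsigned irq)
	{
		...
		/* Every reference must be gone by now -- the error
		 * paths that call this to clean up a failed bind
		 * never took one in the first place. */
		BUG_ON(atomic_read(&info->refcount) > 0);

		kfree(info);
		...
	}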

> +
>       kfree(info);
>  
>       /* Legacy IRQ descriptors are managed by the arch. */
> @@ -912,6 +916,10 @@ static void unbind_from_irq(unsigned int irq)
>  {
>       struct evtchn_close close;
>       int evtchn = evtchn_from_irq(irq);
> +     struct irq_info *info = irq_get_handler_data(irq);
> +
> +     if (!atomic_dec_and_test(&info->refcount))
> +             return;
>  
>       mutex_lock(&irq_mapping_update_lock);
>  
> @@ -1038,6 +1046,32 @@ void unbind_from_irqhandler(unsigned int irq, void *dev_id)
>  }
>  EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
>  
> +int evtchn_get(unsigned int evtchn)
> +{
> +     int irq = evtchn_to_irq[evtchn];
> +     struct irq_info *info;
> +
> +     if (irq == -1)
> +             return -ENOENT;
> +
> +     info = irq_get_handler_data(irq);
> +
> +     if (!info)
> +             return -ENOENT;
> +
> +     atomic_inc(&info->refcount);
> +
> +     return 0;
> +}
> +EXPORT_SYMBOL_GPL(evtchn_get);
> +
> +void evtchn_put(unsigned int evtchn)
> +{
> +     int irq = evtchn_to_irq[evtchn];
> +     unbind_from_irq(irq);
> +}
> +EXPORT_SYMBOL_GPL(evtchn_put);
> +
>  void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
>  {
>       int irq = per_cpu(ipi_to_irq, cpu)[vector];
> diff --git a/include/xen/events.h b/include/xen/events.h
> index d287997..a459cca 100644
> --- a/include/xen/events.h
> +++ b/include/xen/events.h
> @@ -37,6 +37,12 @@ int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
>   */
>  void unbind_from_irqhandler(unsigned int irq, void *dev_id);
>  
> +/*
> + * Allow extra references to event channels exposed to userspace by evtchn
> + */
> +int evtchn_get(unsigned int evtchn);
> +void evtchn_put(unsigned int evtchn);
> +
>  void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector);
>  int resend_irq_on_evtchn(unsigned int irq);
>  void rebind_evtchn_irq(int evtchn, int irq);


