Re: [Xen-devel] [PATCH 10/13] xen/events: Refactor evtchn_to_irq array to be dynamically allocated
On Fri, Sep 13, 2013 at 05:59:58PM +0100, David Vrabel wrote:
> From: Malcolm Crossley <malcolm.crossley@xxxxxxxxxx>
>
> Refactor the static evtchn_to_irq array to be dynamically allocated by
> implementing get and set functions for accesses to the array.
>
> Two new port ops are added: max_channels (maximum supported number of
> event channels) and nr_channels (number of currently usable event
> channels). For the N-level ABI, these numbers are both the same as
> the shared data structure is a fixed size. For the FIFO ABI, these
^^ has
> will be different as the event array is expanded dynamically.
>
> This allows more than 65000 event channels, so an unsigned short is no
> longer sufficient for an event channel port number, and unsigned int is
> used instead.
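
As a side note on the width change, a throwaway userspace check (not part of the
patch; the 1 << 17 figure is my assumption about the port space the later FIFO
patches in this series target):

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int fifo_ports = 1u << 17;   /* assumed FIFO ABI port space */

        /* USHRT_MAX is 65535, so FIFO-scale port numbers cannot fit in a short. */
        printf("unsigned short max %u vs. %u ports\n",
               (unsigned int)USHRT_MAX, fifo_ports);
        return 0;
    }
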
>
> Signed-off-by: Malcolm Crossley <malcolm.crossley@xxxxxxxxxx>
> Signed-off-by: David Vrabel <david.vrabel@xxxxxxxxxx>
> ---
>  drivers/xen/events/events.c          | 129 +++++++++++++++++++++++++---------
>  drivers/xen/events/events_internal.h |  18 ++++-
>  drivers/xen/events/n-level.c         |  11 +++-
>  3 files changed, 121 insertions(+), 37 deletions(-)
>
> diff --git a/drivers/xen/events/events.c b/drivers/xen/events/events.c
> index cf1c7ba..8660459 100644
> --- a/drivers/xen/events/events.c
> +++ b/drivers/xen/events/events.c
> @@ -77,12 +77,16 @@ static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
> /* IRQ <-> IPI mapping */
> static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
>
> -int *evtchn_to_irq;
> +int **evtchn_to_irq;
> #ifdef CONFIG_X86
> static unsigned long *pirq_eoi_map;
> #endif
> static bool (*pirq_needs_eoi)(unsigned irq);
>
> +#define EVTCHN_ROW(e) (e / (PAGE_SIZE/sizeof(**evtchn_to_irq)))
> +#define EVTCHN_COL(e) (e % (PAGE_SIZE/sizeof(**evtchn_to_irq)))
> +#define EVTCHN_PER_ROW (PAGE_SIZE / sizeof(**evtchn_to_irq))
> +
> /* Xen will never allocate port zero for any purpose. */
> #define VALID_EVTCHN(chn) ((chn) != 0)
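
For anyone skimming the macros: sizeof(**evtchn_to_irq) is sizeof(int), so with
4 KiB pages each row holds 1024 port-to-irq entries, and e.g. port 150000 lands
in row 146, column 496. A standalone sketch of the same arithmetic (page size
and int width assumed, not taken from the patch):

    #include <stdio.h>

    #define PAGE_SIZE      4096u                      /* assumed 4 KiB pages */
    #define EVTCHN_PER_ROW (PAGE_SIZE / sizeof(int))  /* 1024 entries per row */
    #define EVTCHN_ROW(e)  ((e) / EVTCHN_PER_ROW)
    #define EVTCHN_COL(e)  ((e) % EVTCHN_PER_ROW)

    int main(void)
    {
        unsigned port = 150000;

        /* 150000 / 1024 = 146, 150000 % 1024 = 496 */
        printf("row %zu, col %zu\n",
               (size_t)EVTCHN_ROW(port), (size_t)EVTCHN_COL(port));
        return 0;
    }
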
>
> @@ -92,6 +96,61 @@ static struct irq_chip xen_pirq_chip;
> static void enable_dynirq(struct irq_data *data);
> static void disable_dynirq(struct irq_data *data);
>
> +static void clear_evtchn_to_irq_row(unsigned row)
> +{
> + unsigned col;
> +
> + for (col = 0; col < EVTCHN_PER_ROW; col++)
> + evtchn_to_irq[row][col] = -1;
> +}
> +
> +static void clear_evtchn_to_irq_all(void)
> +{
> + unsigned row;
> +
> + for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) {
> + if (evtchn_to_irq[row] == NULL)
> + continue;
> + clear_evtchn_to_irq_row(row);
> + }
> +}
> +
> +static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
> +{
> + unsigned row;
> + unsigned col;
> +
> + if (evtchn >= xen_evtchn_max_channels())
> + return -EINVAL;
> +
> + row = EVTCHN_ROW(evtchn);
> + col = EVTCHN_COL(evtchn);
> +
> + if (evtchn_to_irq[row] == NULL) {
> + /* Unallocated irq entries return -1 anyway */
^^^^^ - are
> + if (irq == -1)
> + return 0;
> +
> + evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL);
> + if (evtchn_to_irq[row] == NULL)
> + return -ENOMEM;
> +
> + clear_evtchn_to_irq_row(row);
> + }
> +
> + evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq;
> + return 0;
> +}
> +
> +int get_evtchn_to_irq(unsigned evtchn)
> +{
> + if (evtchn >= xen_evtchn_max_channels())
> + return -1;
> + if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
> + return -1;
> + return evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)];
> +}
> +
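
The semantics as I read them: a row page is only allocated on the first store of
a real irq (storing -1 into a still-unallocated row is a no-op), and lookups of
ports whose row was never allocated return -1, matching the old statically
initialised array. A small userspace model of that behaviour (calloc standing in
for get_zeroed_page; sizes assumed as above):

    #include <stdio.h>
    #include <stdlib.h>

    #define EVTCHN_PER_ROW 1024u   /* assumed: 4 KiB page / 4-byte int */
    #define MAX_ROWS       128u    /* assumed cap, enough for this model */

    static int *rows[MAX_ROWS];

    static int model_set(unsigned port, int irq)
    {
        unsigned row = port / EVTCHN_PER_ROW;
        unsigned col = port % EVTCHN_PER_ROW;
        unsigned i;

        if (rows[row] == NULL) {
            /* Unallocated rows already read back as -1; nothing to do. */
            if (irq == -1)
                return 0;
            rows[row] = calloc(EVTCHN_PER_ROW, sizeof(int));
            if (rows[row] == NULL)
                return -1;
            for (i = 0; i < EVTCHN_PER_ROW; i++)
                rows[row][i] = -1;
        }
        rows[row][col] = irq;
        return 0;
    }

    static int model_get(unsigned port)
    {
        unsigned row = port / EVTCHN_PER_ROW;

        if (rows[row] == NULL)
            return -1;
        return rows[row][port % EVTCHN_PER_ROW];
    }

    int main(void)
    {
        printf("%d\n", model_get(2000));    /* -1: row never allocated */
        model_set(2000, 42);
        printf("%d\n", model_get(2000));    /* 42 */
        printf("%d\n", model_get(2001));    /* -1: same row, cleared slot */
        return 0;
    }
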
> /* Get info for IRQ */
> struct irq_info *info_for_irq(unsigned irq)
> {
> @@ -102,7 +161,7 @@ struct irq_info *info_for_irq(unsigned irq)
> static int xen_irq_info_common_setup(struct irq_info *info,
> unsigned irq,
> enum xen_irq_type type,
> - unsigned short evtchn,
> + unsigned evtchn,
> unsigned short cpu)
> {
>
> @@ -113,7 +172,8 @@ static int xen_irq_info_common_setup(struct irq_info *info,
> info->evtchn = evtchn;
> info->cpu = cpu;
>
> - evtchn_to_irq[evtchn] = irq;
> + if (set_evtchn_to_irq(evtchn, irq))
> + return -ENOMEM;
>
> irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
>
> @@ -121,7 +181,7 @@ static int xen_irq_info_common_setup(struct irq_info *info,
> }
>
> static int xen_irq_info_evtchn_setup(unsigned irq,
> - unsigned short evtchn)
> + unsigned evtchn)
> {
> struct irq_info *info = info_for_irq(irq);
>
> @@ -130,7 +190,7 @@ static int xen_irq_info_evtchn_setup(unsigned irq,
>
> static int xen_irq_info_ipi_setup(unsigned cpu,
> unsigned irq,
> - unsigned short evtchn,
> + unsigned evtchn,
> enum ipi_vector ipi)
> {
> struct irq_info *info = info_for_irq(irq);
> @@ -144,8 +204,8 @@ static int xen_irq_info_ipi_setup(unsigned cpu,
>
> static int xen_irq_info_virq_setup(unsigned cpu,
> unsigned irq,
> - unsigned short evtchn,
> - unsigned short virq)
> + unsigned evtchn,
> + unsigned virq)
> {
> struct irq_info *info = info_for_irq(irq);
>
> @@ -157,9 +217,9 @@ static int xen_irq_info_virq_setup(unsigned cpu,
> }
>
> static int xen_irq_info_pirq_setup(unsigned irq,
> - unsigned short evtchn,
> - unsigned short pirq,
> - unsigned short gsi,
> + unsigned evtchn,
> + unsigned pirq,
> + unsigned gsi,
> uint16_t domid,
> unsigned char flags)
> {
> @@ -186,7 +246,7 @@ static unsigned int evtchn_from_irq(unsigned irq)
>
> unsigned irq_from_evtchn(unsigned int evtchn)
> {
> - return evtchn_to_irq[evtchn];
> + return get_evtchn_to_irq(evtchn);
> }
> EXPORT_SYMBOL_GPL(irq_from_evtchn);
>
> @@ -232,7 +292,7 @@ unsigned cpu_from_irq(unsigned irq)
>
> unsigned int cpu_from_evtchn(unsigned int evtchn)
> {
> - int irq = evtchn_to_irq[evtchn];
> + int irq = get_evtchn_to_irq(evtchn);
> unsigned ret = 0;
>
> if (irq != -1)
> @@ -258,7 +318,7 @@ static bool pirq_needs_eoi_flag(unsigned irq)
>
> static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
> {
> - int irq = evtchn_to_irq[chn];
> + int irq = get_evtchn_to_irq(chn);
> struct irq_info *info = info_for_irq(irq);
>
> BUG_ON(irq == -1);
> @@ -453,7 +513,9 @@ static unsigned int __startup_pirq(unsigned int irq)
>
> pirq_query_unmask(irq);
>
> - evtchn_to_irq[evtchn] = irq;
> + rc = set_evtchn_to_irq(evtchn, irq);
> + if (rc != 0)
> + return 0;
I think you also need to unroll the hypercall you did (EVTCHNOP_bind_pirq).
I think doing EVTCHNOP_close should suffice?
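
One possible shape for that error path, mirroring what shutdown_pirq() already
does with EVTCHNOP_close (a sketch only, not part of the patch; the message text
is made up):

    rc = set_evtchn_to_irq(evtchn, irq);
    if (rc != 0) {
        struct evtchn_close close = { .port = evtchn };

        pr_err("irq%d: failed to set port to irq mapping (%d)\n", irq, rc);
        /* Undo the EVTCHNOP_bind_pirq above so the port is not leaked. */
        if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
            BUG();
        return 0;
    }
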
> bind_evtchn_to_cpu(evtchn, 0);
> info->evtchn = evtchn;
>
> @@ -474,7 +536,7 @@ static void shutdown_pirq(struct irq_data *data)
> struct evtchn_close close;
> unsigned int irq = data->irq;
> struct irq_info *info = info_for_irq(irq);
> - int evtchn = evtchn_from_irq(irq);
> + int rc, evtchn = evtchn_from_irq(irq);
>
> BUG_ON(info->type != IRQT_PIRQ);
>
> @@ -488,7 +550,7 @@ static void shutdown_pirq(struct irq_data *data)
> BUG();
>
> bind_evtchn_to_cpu(evtchn, 0);
> - evtchn_to_irq[evtchn] = -1;
> + rc = set_evtchn_to_irq(evtchn, -1);
> info->evtchn = 0;
> }
>
> @@ -551,7 +613,7 @@ static void __unbind_from_irq(unsigned int irq)
> /* Closed ports are implicitly re-bound to VCPU0. */
> bind_evtchn_to_cpu(evtchn, 0);
>
> - evtchn_to_irq[evtchn] = -1;
> + set_evtchn_to_irq(evtchn, -1);
> }
>
> BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);
> @@ -755,9 +817,12 @@ int bind_evtchn_to_irq(unsigned int evtchn)
> int irq;
> int ret;
>
> + if (evtchn >= xen_evtchn_max_channels())
> + return -ENOMEM;
> +
> mutex_lock(&irq_mapping_update_lock);
>
> - irq = evtchn_to_irq[evtchn];
> + irq = get_evtchn_to_irq(evtchn);
>
> if (irq == -1) {
> irq = xen_allocate_irq_dynamic();
> @@ -847,7 +912,7 @@ static int find_virq(unsigned int virq, unsigned int cpu)
> int port, rc = -ENOENT;
>
> memset(&status, 0, sizeof(status));
> - for (port = 0; port <= NR_EVENT_CHANNELS; port++) {
> + for (port = 0; port < xen_evtchn_max_channels(); port++) {
> status.dom = DOMID_SELF;
> status.port = port;
> rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
> @@ -927,8 +992,9 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn,
> int irq, retval;
>
> irq = bind_evtchn_to_irq(evtchn);
> - if (irq < 0)
> + if (irq < 0){
> return irq;
> + }
> retval = request_irq(irq, handler, irqflags, devname, dev_id);
> if (retval != 0) {
> unbind_from_irq(irq);
> @@ -1017,7 +1083,7 @@ EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
>
> int evtchn_make_refcounted(unsigned int evtchn)
> {
> - int irq = evtchn_to_irq[evtchn];
> + int irq = get_evtchn_to_irq(evtchn);
> struct irq_info *info;
>
> if (irq == -1)
> @@ -1042,12 +1108,12 @@ int evtchn_get(unsigned int evtchn)
> struct irq_info *info;
> int err = -ENOENT;
>
> - if (evtchn >= NR_EVENT_CHANNELS)
> + if (evtchn >= xen_evtchn_max_channels())
> return -EINVAL;
>
> mutex_lock(&irq_mapping_update_lock);
>
> - irq = evtchn_to_irq[evtchn];
> + irq = get_evtchn_to_irq(evtchn);
> if (irq == -1)
> goto done;
>
> @@ -1071,7 +1137,7 @@ EXPORT_SYMBOL_GPL(evtchn_get);
>
> void evtchn_put(unsigned int evtchn)
> {
> - int irq = evtchn_to_irq[evtchn];
> + int irq = get_evtchn_to_irq(evtchn);
> if (WARN_ON(irq == -1))
> return;
> unbind_from_irq(irq);
> @@ -1158,7 +1224,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
> mutex_lock(&irq_mapping_update_lock);
>
> /* After resume the irq<->evtchn mappings are all cleared out */
> - BUG_ON(evtchn_to_irq[evtchn] != -1);
> + BUG_ON(get_evtchn_to_irq(evtchn) != -1);
> /* Expect irq to have been bound before,
> so there should be a proper type */
> BUG_ON(info->type == IRQT_UNBOUND);
> @@ -1443,15 +1509,14 @@ void xen_irq_resume(void)
> struct irq_info *info;
>
> /* New event-channel space is not 'live' yet. */
> - for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
> + for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
> mask_evtchn(evtchn);
>
> /* No IRQ <-> event-channel mappings. */
> list_for_each_entry(info, &xen_irq_list_head, list)
> info->evtchn = 0; /* zap event-channel binding */
>
> - for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
> - evtchn_to_irq[evtchn] = -1;
> + clear_evtchn_to_irq_all();
>
> for_each_possible_cpu(cpu) {
> restore_cpu_virqs(cpu);
> @@ -1548,14 +1613,12 @@ void __init xen_init_IRQ(void)
>
> xen_evtchn_init_nlevel();
>
> - evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
> - GFP_KERNEL);
> + evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()),
> + sizeof(*evtchn_to_irq), GFP_KERNEL);
> BUG_ON(!evtchn_to_irq);
> - for (i = 0; i < NR_EVENT_CHANNELS; i++)
> - evtchn_to_irq[i] = -1;
>
> /* No event channels are 'live' right now. */
> - for (i = 0; i < NR_EVENT_CHANNELS; i++)
> + for (i = 0; i < xen_evtchn_nr_channels(); i++)
> mask_evtchn(i);
>
> pirq_needs_eoi = pirq_needs_eoi_flag;
> diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h
> index 32cb928..9d8b70c 100644
> --- a/drivers/xen/events/events_internal.h
> +++ b/drivers/xen/events/events_internal.h
> @@ -35,7 +35,7 @@ struct irq_info {
> int refcnt;
> enum xen_irq_type type; /* type */
> unsigned irq;
> - unsigned short evtchn; /* event channel */
> + unsigned int evtchn; /* event channel */
> unsigned short cpu; /* cpu bound */
>
> union {
> @@ -55,6 +55,9 @@ struct irq_info {
> #define PIRQ_SHAREABLE (1 << 1)
>
> struct evtchn_ops {
> + unsigned (*max_channels)(void);
> + unsigned (*nr_channels)(void);
> +
> int (*setup)(struct irq_info *info);
> void (*bind_to_cpu)(struct irq_info *info, int cpu);
>
> @@ -70,12 +73,23 @@ struct evtchn_ops {
>
> extern const struct evtchn_ops *evtchn_ops;
>
> -extern int *evtchn_to_irq;
> +extern int **evtchn_to_irq;
> +int get_evtchn_to_irq(unsigned int evtchn);
>
> struct irq_info *info_for_irq(unsigned irq);
> unsigned cpu_from_irq(unsigned irq);
> unsigned cpu_from_evtchn(unsigned int evtchn);
>
> +static inline unsigned xen_evtchn_max_channels(void)
> +{
> + return evtchn_ops->max_channels();
> +}
> +
> +static inline unsigned xen_evtchn_nr_channels(void)
> +{
> + return evtchn_ops->nr_channels();
> +}
> +
> static inline int xen_evtchn_port_setup(struct irq_info *info)
> {
> if (evtchn_ops->setup)
> diff --git a/drivers/xen/events/n-level.c b/drivers/xen/events/n-level.c
> index 6c0662e..da8d0aa 100644
> --- a/drivers/xen/events/n-level.c
> +++ b/drivers/xen/events/n-level.c
> @@ -39,6 +39,11 @@
> static DEFINE_PER_CPU(xen_ulong_t [NR_EVENT_CHANNELS/BITS_PER_EVTCHN_WORD],
> cpu_evtchn_mask);
>
> +static unsigned nlevel_max_channels(void)
> +{
> + return NR_EVENT_CHANNELS;
> +}
> +
> static void nlevel_bind_to_cpu(struct irq_info *info, int cpu)
> {
> clear_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, info->cpu)));
> @@ -212,7 +217,7 @@ static void nlevel_handle_events(int cpu)
>
> /* Process port. */
> port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx;
> - irq = evtchn_to_irq[port];
> + irq = get_evtchn_to_irq(port);
>
> if (irq != -1) {
> desc = irq_to_desc(irq);
> @@ -306,7 +311,7 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
> int word_idx = i / BITS_PER_EVTCHN_WORD;
> printk(" %d: event %d -> irq %d%s%s%s\n",
> cpu_from_evtchn(i), i,
> - evtchn_to_irq[i],
> + get_evtchn_to_irq(i),
> sync_test_bit(word_idx,
> BM(&v->evtchn_pending_sel))
> ? "" : " l2-clear",
> !sync_test_bit(i, BM(sh->evtchn_mask))
> @@ -322,6 +327,8 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
> }
>
> static const struct evtchn_ops evtchn_ops_nlevel = {
> + .max_channels = nlevel_max_channels,
> + .nr_channels = nlevel_max_channels,
> .bind_to_cpu = nlevel_bind_to_cpu,
> .clear_pending = nlevel_clear_pending,
> .set_pending = nlevel_set_pending,
> --
> 1.7.2.5
>
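
One closing observation: the max_channels/nr_channels split only becomes
interesting once a backend can grow its event array at runtime. A rough guess at
how the FIFO side (later in this series) might wire these up — the names and the
1 << 17 limit are my assumptions, not taken from this patch:

    /*
     * Hypothetical FIFO backend: capacity is fixed by the ABI, but only the
     * event-array pages mapped so far are actually usable.
     */
    #define EVTCHN_FIFO_NR_CHANNELS (1 << 17)   /* assumed ABI maximum */
    #define EVENT_WORDS_PER_PAGE    (PAGE_SIZE / sizeof(uint32_t))

    static unsigned event_array_pages;  /* grown as higher ports are bound */

    static unsigned fifo_max_channels(void)
    {
        return EVTCHN_FIFO_NR_CHANNELS;
    }

    static unsigned fifo_nr_channels(void)
    {
        return event_array_pages * EVENT_WORDS_PER_PAGE;
    }

    static const struct evtchn_ops evtchn_ops_fifo = {
        .max_channels = fifo_max_channels,
        .nr_channels  = fifo_nr_channels,
        /* ... */
    };
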
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel