RE: [PATCH v2 05/11] x86/vioapic: switch to use the EOI callback mechanism
> -----Original Message-----
> From: Xen-devel <xen-devel-bounces@xxxxxxxxxxxxxxxxxxxx> On Behalf Of Roger Pau Monne
> Sent: 30 September 2020 11:41
> To: xen-devel@xxxxxxxxxxxxxxxxxxxx
> Cc: Roger Pau Monne <roger.pau@xxxxxxxxxx>; Jan Beulich <jbeulich@xxxxxxxx>; Andrew Cooper <andrew.cooper3@xxxxxxxxxx>; Wei Liu <wl@xxxxxxx>
> Subject: [PATCH v2 05/11] x86/vioapic: switch to use the EOI callback mechanism
>
> Switch the emulated IO-APIC code to use the local APIC EOI callback
> mechanism. This allows removing the last hardcoded callback from
> vlapic_handle_EOI. Removing the hardcoded vIO-APIC callback also makes
> it possible to stop setting the EOI exit bitmap based on the trigger
> mode, as now all users that require an EOI action use the newly
> introduced callback mechanism.
>
> Move and rename vioapic_update_EOI now that it can be made static.
>
> Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
> ---
> Changes since v1:
> - Remove the triggering check in the update_eoi_exit_bitmap call.
> - Register the vlapic callbacks when loading the vIO-APIC state.
> - Reduce scope of ent.
> ---
> xen/arch/x86/hvm/vioapic.c | 131 ++++++++++++++++++++++++-------------
> xen/arch/x86/hvm/vlapic.c | 5 +-
> 2 files changed, 86 insertions(+), 50 deletions(-)
>
> diff --git a/xen/arch/x86/hvm/vioapic.c b/xen/arch/x86/hvm/vioapic.c
> index 752fc410db..dce98b1479 100644
> --- a/xen/arch/x86/hvm/vioapic.c
> +++ b/xen/arch/x86/hvm/vioapic.c
> @@ -375,6 +375,50 @@ static const struct hvm_mmio_ops vioapic_mmio_ops = {
> .write = vioapic_write
> };
>
> +static void eoi_callback(unsigned int vector, void *data)
> +{
> + struct domain *d = current->domain;
> + struct hvm_irq *hvm_irq = hvm_domain_irq(d);
> + unsigned int i;
> +
> + ASSERT(has_vioapic(d));
> +
> + spin_lock(&d->arch.hvm.irq_lock);
> +
> + for ( i = 0; i < d->arch.hvm.nr_vioapics; i++ )
> + {
> + struct hvm_vioapic *vioapic = domain_vioapic(d, i);
> + unsigned int pin;
> +
> + for ( pin = 0; pin < vioapic->nr_pins; pin++ )
> + {
> + union vioapic_redir_entry *ent = &vioapic->redirtbl[pin];
> +
> + if ( ent->fields.vector != vector )
> + continue;
> +
> + ent->fields.remote_irr = 0;
> +
> + if ( is_iommu_enabled(d) )
> + {
> + spin_unlock(&d->arch.hvm.irq_lock);
> + hvm_dpci_eoi(vioapic->base_gsi + pin, ent);
> + spin_lock(&d->arch.hvm.irq_lock);
Is this safe? If so, why lock around the whole loop in the first place?

  Paul
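
To make the question concrete, here is a minimal stand-alone sketch of the
drop-and-reacquire pattern under discussion. It is not Xen code: a pthread
mutex stands in for irq_lock, a dummy notify_eoi() stands in for
hvm_dpci_eoi(), and all other names are made up for the illustration.

/*
 * Illustration only, not Xen code: a shared table protected by a lock,
 * where the lock is dropped around an external callback and then
 * re-acquired, mirroring the shape of eoi_callback() quoted above.
 */
#include <pthread.h>

#define NR_ENTRIES 24

struct entry {
    unsigned int vector;
    unsigned int remote_irr;
};

static struct entry table[NR_ENTRIES];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for hvm_dpci_eoi(): must be called without the lock held. */
static void notify_eoi(unsigned int idx)
{
    (void)idx;
}

static void handle_eoi(unsigned int vector)
{
    unsigned int i;

    pthread_mutex_lock(&table_lock);

    for ( i = 0; i < NR_ENTRIES; i++ )
    {
        if ( table[i].vector != vector )
            continue;

        table[i].remote_irr = 0;

        /* Drop the lock around the external callback ... */
        pthread_mutex_unlock(&table_lock);
        notify_eoi(i);
        pthread_mutex_lock(&table_lock);

        /*
         * ... but once the lock is re-acquired, table[i] (and the rest
         * of the table) may have been changed by another thread: the
         * entry could have been retargeted to a different vector,
         * masked, and so on.  Anything derived from the pre-unlock
         * snapshot has to be re-checked here before acting on it.
         */
        if ( table[i].vector != vector )
            continue;

        /* ... act on the re-validated entry ... */
    }

    pthread_mutex_unlock(&table_lock);
}

int main(void)
{
    table[3].vector = 0x30;
    handle_eoi(0x30);
    return 0;
}
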
> + }
> +
> + if ( (ent->fields.trig_mode == VIOAPIC_LEVEL_TRIG) &&
> + !ent->fields.mask &&
> + hvm_irq->gsi_assert_count[vioapic->base_gsi + pin] )
> + {
> + ent->fields.remote_irr = 1;
> + vioapic_deliver(vioapic, pin);
> + }
> + }
> + }
> +
> + spin_unlock(&d->arch.hvm.irq_lock);
> +}
> +
> static void ioapic_inj_irq(
> struct hvm_vioapic *vioapic,
> struct vlapic *target,
> @@ -388,7 +432,8 @@ static void ioapic_inj_irq(
> ASSERT((delivery_mode == dest_Fixed) ||
> (delivery_mode == dest_LowestPrio));
>
> - vlapic_set_irq(target, vector, trig_mode);
> + vlapic_set_irq_callback(target, vector, trig_mode,
> + trig_mode ? eoi_callback : NULL, NULL);
> }
>
> static void vioapic_deliver(struct hvm_vioapic *vioapic, unsigned int pin)
> @@ -495,50 +540,6 @@ void vioapic_irq_positive_edge(struct domain *d, unsigned int irq)
> }
> }
>
> -void vioapic_update_EOI(unsigned int vector)
> -{
> - struct domain *d = current->domain;
> - struct hvm_irq *hvm_irq = hvm_domain_irq(d);
> - union vioapic_redir_entry *ent;
> - unsigned int i;
> -
> - ASSERT(has_vioapic(d));
> -
> - spin_lock(&d->arch.hvm.irq_lock);
> -
> - for ( i = 0; i < d->arch.hvm.nr_vioapics; i++ )
> - {
> - struct hvm_vioapic *vioapic = domain_vioapic(d, i);
> - unsigned int pin;
> -
> - for ( pin = 0; pin < vioapic->nr_pins; pin++ )
> - {
> - ent = &vioapic->redirtbl[pin];
> - if ( ent->fields.vector != vector )
> - continue;
> -
> - ent->fields.remote_irr = 0;
> -
> - if ( is_iommu_enabled(d) )
> - {
> - spin_unlock(&d->arch.hvm.irq_lock);
> - hvm_dpci_eoi(vioapic->base_gsi + pin, ent);
> - spin_lock(&d->arch.hvm.irq_lock);
> - }
> -
> - if ( (ent->fields.trig_mode == VIOAPIC_LEVEL_TRIG) &&
> - !ent->fields.mask &&
> - hvm_irq->gsi_assert_count[vioapic->base_gsi + pin] )
> - {
> - ent->fields.remote_irr = 1;
> - vioapic_deliver(vioapic, pin);
> - }
> - }
> - }
> -
> - spin_unlock(&d->arch.hvm.irq_lock);
> -}
> -
> int vioapic_get_mask(const struct domain *d, unsigned int gsi)
> {
> unsigned int pin = 0; /* See gsi_vioapic */
> @@ -592,6 +593,8 @@ static int ioapic_save(struct vcpu *v, hvm_domain_context_t *h)
> static int ioapic_load(struct domain *d, hvm_domain_context_t *h)
> {
> struct hvm_vioapic *s;
> + unsigned int i;
> + int rc;
>
> if ( !has_vioapic(d) )
> return -ENODEV;
> @@ -602,7 +605,43 @@ static int ioapic_load(struct domain *d, hvm_domain_context_t *h)
> d->arch.hvm.nr_vioapics != 1 )
> return -EOPNOTSUPP;
>
> - return hvm_load_entry(IOAPIC, h, &s->domU);
> + rc = hvm_load_entry(IOAPIC, h, &s->domU);
> + if ( rc )
> + return rc;
> +
> + for ( i = 0; i < ARRAY_SIZE(s->domU.redirtbl); i++ )
> + {
> + const union vioapic_redir_entry *ent = &s->domU.redirtbl[i];
> + unsigned int vector = ent->fields.vector;
> + unsigned int delivery_mode = ent->fields.delivery_mode;
> + struct vcpu *v;
> +
> + /*
> + * Add a callback for each possible vector injected by a redirection
> + * entry.
> + */
> + if ( vector < 16 || !ent->fields.remote_irr ||
> + (delivery_mode != dest_LowestPrio && delivery_mode != dest_Fixed) )
> + continue;
> +
> + for_each_vcpu ( d, v )
> + {
> + struct vlapic *vlapic = vcpu_vlapic(v);
> +
> + /*
> + * NB: if the vlapic registers were restored before the vio-apic
> + * ones we could test whether the vector is set in the vlapic IRR
> + * or ISR registers before unconditionally setting the callback.
> + * This is harmless as eoi_callback is capable of dealing with
> + * spurious callbacks.
> + */
> + if ( vlapic_match_dest(vlapic, NULL, 0, ent->fields.dest_id,
> + ent->fields.dest_mode) )
> + vlapic_set_callback(vlapic, vector, eoi_callback, NULL);
> + }
> + }
> +
> + return 0;
> }
>
> HVM_REGISTER_SAVE_RESTORE(IOAPIC, ioapic_save, ioapic_load, 1, HVMSR_PER_DOM);
> diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
> index 8a18b33428..35f12e0909 100644
> --- a/xen/arch/x86/hvm/vlapic.c
> +++ b/xen/arch/x86/hvm/vlapic.c
> @@ -189,7 +189,7 @@ void vlapic_set_irq_callback(struct vlapic *vlapic, uint8_t vec, uint8_t trig,
>
> if ( hvm_funcs.update_eoi_exit_bitmap )
> alternative_vcall(hvm_funcs.update_eoi_exit_bitmap, target, vec,
> - trig || callback);
> + callback);
>
> if ( hvm_funcs.deliver_posted_intr )
> alternative_vcall(hvm_funcs.deliver_posted_intr, target, vec);
> @@ -493,9 +493,6 @@ void vlapic_handle_EOI(struct vlapic *vlapic, u8 vector)
> unsigned long flags;
> unsigned int index = vector - 16;
>
> - if ( vlapic_test_vector(vector, &vlapic->regs->data[APIC_TMR]) )
> - vioapic_update_EOI(vector);
> -
> spin_lock_irqsave(&vlapic->callback_lock, flags);
> callback = vlapic->callbacks[index].callback;
> vlapic->callbacks[index].callback = NULL;
> --
> 2.28.0
>
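
For anyone new to the series, below is a rough, generic sketch of the
per-vector EOI callback pattern the patch adopts: a callback is recorded
when an interrupt is injected, then looked up, cleared and run when the
guest EOIs that vector. It is illustration only and not the actual vlapic
implementation (the real vlapic_set_irq_callback()/vlapic_handle_EOI()
additionally deal with locking, vCPU targeting and the hardware EOI-exit
bitmap); every name in the sketch is hypothetical except those taken from
the patch.

/*
 * Generic sketch of a per-vector EOI callback table, illustration only.
 * Vectors below 16 are reserved, matching the "index = vector - 16"
 * convention visible in the quoted vlapic code.
 */
#include <stdio.h>

#define NR_VECTORS 256
#define FIRST_DYNAMIC_VECTOR 16

typedef void eoi_callback_t(unsigned int vector, void *data);

static struct {
    eoi_callback_t *callback;
    void *data;
} eoi_callbacks[NR_VECTORS - FIRST_DYNAMIC_VECTOR];

/* Remember, at injection time, what to do when the vector is EOI'd. */
static void set_irq_callback(unsigned int vector, eoi_callback_t *cb, void *data)
{
    if ( vector < FIRST_DYNAMIC_VECTOR || vector >= NR_VECTORS )
        return;

    eoi_callbacks[vector - FIRST_DYNAMIC_VECTOR].callback = cb;
    eoi_callbacks[vector - FIRST_DYNAMIC_VECTOR].data = data;
}

/* On EOI of the vector: fetch, clear and then run the callback. */
static void handle_eoi(unsigned int vector)
{
    unsigned int idx;
    eoi_callback_t *cb;
    void *data;

    if ( vector < FIRST_DYNAMIC_VECTOR || vector >= NR_VECTORS )
        return;

    idx = vector - FIRST_DYNAMIC_VECTOR;
    cb = eoi_callbacks[idx].callback;
    data = eoi_callbacks[idx].data;
    eoi_callbacks[idx].callback = NULL;
    eoi_callbacks[idx].data = NULL;

    if ( cb )
        cb(vector, data);
}

static void print_eoi(unsigned int vector, void *data)
{
    (void)data;
    printf("EOI for vector %#x\n", vector);
}

int main(void)
{
    set_irq_callback(0x30, print_eoi, NULL);
    handle_eoi(0x30);   /* runs and clears the callback */
    handle_eoi(0x30);   /* no-op: the callback was already consumed */
    return 0;
}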