|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [PATCH v2 2/6] xen/arm: ffa: Track hypervisor notifications in a bitmap
Hi Bertrand,
On Wed, Apr 29, 2026 at 7:44 AM Bertrand Marquis
<bertrand.marquis@xxxxxxx> wrote:
>
> Hypervisor notifications are currently tracked with a dedicated
> buff_full_pending boolean. The old RX-buffer-full path also exposed a
> pending indication indirectly via vm_pending, so
> FFA_NOTIFICATION_INFO_GET could clear that summary before the guest
> retrieved the Hypervisor notification bitmap with
> FFA_NOTIFICATION_GET.
>
> Replace the single boolean with a Hypervisor notification bitmap
> protected by notif_lock. INFO_GET reports pending when hyp_pending is
> non-zero, GET returns and clears the HYP bitmap under the lock, and
> RX-buffer-full now keeps notif_lock held across the local NPI
> decision. notif_irq_raised is only set when an NPI is actually
> injected, and is cleared once the local pending state is consumed.
>
> Initialize and clear the bitmap during domain lifecycle handling, and
> use ctx->ffa_id for bitmap create and destroy so the notification
> state stays tied to the cached FF-A endpoint ID.
>
> If the local injection attempt fails because no vCPU is online,
> hyp_pending remains set and notif_irq_raised remains clear. This
> keeps the RX-buffer-full notification pending until the guest
> retrieves it, without publishing a successful local IRQ state too
> early.
>
> Functional impact: RX-buffer-full remains pending in hyp_pending
> until FFA_NOTIFICATION_GET, and failed local NPI injection no longer
> leaves Xen thinking the interrupt was already raised.
>
> Signed-off-by: Bertrand Marquis <bertrand.marquis@xxxxxxx>
> ---
> Changes since v1:
> - clarify that v1 exposed RX-buffer-full indirectly via vm_pending
> - document that v2 keeps the HYP pending indication until
> FFA_NOTIFICATION_GET
> - keep RX-buffer-full pending state stable across failed local NPI
> injection attempts
> ---
> xen/arch/arm/tee/ffa_notif.c | 56 ++++++++++++++++++++++++++--------
> xen/arch/arm/tee/ffa_private.h | 15 +++++++--
> 2 files changed, 56 insertions(+), 15 deletions(-)
Looks good.
Reviewed-by: Jens Wiklander <jens.wiklander@xxxxxxxxxx>
Cheers,
Jens
>
>
> diff --git a/xen/arch/arm/tee/ffa_notif.c b/xen/arch/arm/tee/ffa_notif.c
> index 07bc5cb3a430..a631481e3815 100644
> --- a/xen/arch/arm/tee/ffa_notif.c
> +++ b/xen/arch/arm/tee/ffa_notif.c
> @@ -19,7 +19,7 @@
> static bool __ro_after_init fw_notif_enabled;
> static unsigned int __ro_after_init notif_sri_irq;
>
> -static void inject_notif_pending(struct domain *d)
> +static bool inject_notif_pending(struct domain *d)
> {
> struct vcpu *v;
>
> @@ -33,13 +33,15 @@ static void inject_notif_pending(struct domain *d)
> if ( is_vcpu_online(v) )
> {
> vgic_inject_irq(d, v, GUEST_FFA_NOTIF_PEND_INTR_ID, true);
> - return;
> + return true;
> }
> }
>
> if ( printk_ratelimit() )
> printk(XENLOG_G_DEBUG "%pd: ffa: can't inject NPI, all vCPUs
> offline\n",
> d);
> +
> + return false;
> }
>
> int32_t ffa_handle_notification_bind(struct cpu_user_regs *regs)
> @@ -94,8 +96,15 @@ void ffa_handle_notification_info_get(struct cpu_user_regs
> *regs)
>
> notif_pending = test_and_clear_bool(ctx->notif.secure_pending);
> if ( IS_ENABLED(CONFIG_FFA_VM_TO_VM) )
> + {
> notif_pending |= test_and_clear_bool(ctx->notif.vm_pending);
>
> + spin_lock(&ctx->notif.notif_lock);
> + if ( ctx->notif.hyp_pending )
> + notif_pending = true;
> + spin_unlock(&ctx->notif.notif_lock);
> + }
> +
> if ( notif_pending )
> {
> /* A pending global notification for the guest */
> @@ -174,12 +183,19 @@ void ffa_handle_notification_get(struct cpu_user_regs
> *regs)
> w6 = resp.a6;
> }
>
> - if ( IS_ENABLED(CONFIG_FFA_VM_TO_VM) &&
> - flags & FFA_NOTIF_FLAG_BITMAP_HYP &&
> - test_and_clear_bool(ctx->notif.buff_full_pending) )
> + if ( IS_ENABLED(CONFIG_FFA_VM_TO_VM) )
> {
> - ACCESS_ONCE(ctx->notif.vm_pending) = false;
> - w7 = FFA_NOTIF_RX_BUFFER_FULL;
> + spin_lock(&ctx->notif.notif_lock);
> +
> + if ( (flags & FFA_NOTIF_FLAG_BITMAP_HYP) && ctx->notif.hyp_pending )
> + {
> + w7 = ctx->notif.hyp_pending;
> + ctx->notif.hyp_pending = 0;
> + if ( !ctx->notif.vm_pending )
> + ctx->notif.notif_irq_raised = false;
> + }
> +
> + spin_unlock(&ctx->notif.notif_lock);
> }
>
> ffa_set_regs(regs, FFA_SUCCESS_32, 0, w2, w3, w4, w5, w6, w7);
> @@ -211,9 +227,12 @@ void ffa_raise_rx_buffer_full(struct domain *d)
> if ( !ctx )
> return;
>
> - ACCESS_ONCE(ctx->notif.buff_full_pending) = true;
> - if ( !test_and_set_bool(ctx->notif.vm_pending) )
> - inject_notif_pending(d);
> + spin_lock(&ctx->notif.notif_lock);
> + ctx->notif.hyp_pending |= FFA_NOTIF_RX_BUFFER_FULL;
> + if ( !ctx->notif.notif_irq_raised &&
> + inject_notif_pending(d) )
> + ctx->notif.notif_irq_raised = true;
> + spin_unlock(&ctx->notif.notif_lock);
> }
> #endif
>
> @@ -426,12 +445,16 @@ void ffa_notif_init(void)
>
> int ffa_notif_domain_init(struct domain *d)
> {
> + struct ffa_ctx *ctx = d->arch.tee;
> int32_t res;
>
> + spin_lock_init(&ctx->notif.notif_lock);
> + ctx->notif.notif_irq_raised = false;
> + ctx->notif.hyp_pending = 0;
> +
> if ( fw_notif_enabled )
> {
> -
> - res = ffa_notification_bitmap_create(ffa_get_vm_id(d), d->max_vcpus);
> + res = ffa_notification_bitmap_create(ctx->ffa_id, d->max_vcpus);
> if ( res )
> return -ENOMEM;
> }
> @@ -441,10 +464,17 @@ int ffa_notif_domain_init(struct domain *d)
>
> void ffa_notif_domain_destroy(struct domain *d)
> {
> + struct ffa_ctx *ctx = d->arch.tee;
> +
> + spin_lock(&ctx->notif.notif_lock);
> + ctx->notif.notif_irq_raised = false;
> + ctx->notif.hyp_pending = 0;
> + spin_unlock(&ctx->notif.notif_lock);
> +
> /*
> * Call bitmap_destroy even if bitmap create failed as the SPMC will
> * return a DENIED error that we will ignore.
> */
> if ( fw_notif_enabled )
> - ffa_notification_bitmap_destroy(ffa_get_vm_id(d));
> + ffa_notification_bitmap_destroy(ctx->ffa_id);
> }
> diff --git a/xen/arch/arm/tee/ffa_private.h b/xen/arch/arm/tee/ffa_private.h
> index c291f32b56ff..9ddda3f88986 100644
> --- a/xen/arch/arm/tee/ffa_private.h
> +++ b/xen/arch/arm/tee/ffa_private.h
> @@ -340,9 +340,20 @@ struct ffa_ctx_notif {
> bool vm_pending;
>
> /*
> - * True if domain has buffer full notification pending
> + * Lock protecting the hypervisor-managed notification state.
> */
> - bool buff_full_pending;
> + spinlock_t notif_lock;
> +
> + /*
> + * Tracks whether a local notification pending interrupt was raised.
> + * Protected by notif_lock.
> + */
> + bool notif_irq_raised;
> +
> + /*
> + * Bitmap of pending hypervisor notifications (for HYP bitmap queries).
> + */
> + uint32_t hyp_pending;
> };
>
> struct ffa_ctx {
> --
> 2.53.0
>
|
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.