[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [PATCH 6/6] xen/arm: ffa: Deliver VM-to-VM notifications locally


  • To: Bertrand Marquis <bertrand.marquis@xxxxxxx>
  • From: Jens Wiklander <jens.wiklander@xxxxxxxxxx>
  • Date: Wed, 22 Apr 2026 16:04:02 +0200
  • Arc-authentication-results: i=1; mx.google.com; arc=none
  • Arc-message-signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=google.com; s=arc-20240605; h=content-transfer-encoding:cc:to:subject:message-id:date:from :in-reply-to:references:mime-version:dkim-signature; bh=SZbVa42LBrVIvv19c71FIcjtgICz5v6MXlpF0LMZ+PM=; fh=wNLC6Hyb5Ukz/ErppBRQBwv8vwa/OMsdh6R8bnNsiPU=; b=awT2RvHwDPA++jI3ltppRrn3Y//tyF9xjOKjGNQixWYyxfnf1b04om8S8SkvfDcgWN St2OQBj/a6k8lh/JG8g1mD399z3JLjKsXwS7AlEDLuO0d2KWI3iohJ1Ht+C4FTyEO4Hk LEvRGWjp2H63V6fhi2u+R661k2OWaWnFVxM4Cm1ElnjEkEg38GLMoxCvmawRjphfkXXP /Nn2O5cM2OKbtR0Z2WIjTXwrCfJHEJ3qD4+fZsTJWYFrEN8XkrD1/R3I6CWyjbirFpg7 vuPP9cMw+TXVdVJ4CuUMR630ESzNEZv1x5lWsNEydM4pXAcAaupzyYQof4jT2cHP+X+N 5ahA==; darn=lists.xenproject.org
  • Arc-seal: i=1; a=rsa-sha256; t=1776866655; cv=none; d=google.com; s=arc-20240605; b=g5J3CY3eNpJci3mNW0sTvZ8cDVOXXcD7JfGDOMcqcweopxL4NPdmvnZebSlRc40fNG G8AIGLLn9H8NoM0qaI/6QwfHANoOdWwo937WXQh/jDNhSb4f5JOEjji5DiPPyYp1ikM9 k1zdXOaxiImq/LNJdjxpxJu/NMJS9yiUnC2N/jbs7m6Ac1AplvPST4op9RFId6+xtBW/ lNoGQf9UpoZGqZjwTPrz9QNGMZIszv7m2HaW7V8oP1AECziQjhd2yKO90ocn5GWOx1Oy sWwgQ1gQU7dJSdbVfkQjOAepipBpVmDNy7EOK4hbalkXeSuyEtGwePu5SDp19VkPP885 0bYQ==
  • Authentication-results: eu.smtp.expurgate.cloud; dkim=pass header.s=google header.d=linaro.org header.i="@linaro.org" header.h="Content-Transfer-Encoding:Cc:To:Subject:Message-ID:Date:From:In-Reply-To:References:MIME-Version"
  • Cc: xen-devel@xxxxxxxxxxxxxxxxxxxx, Volodymyr Babchuk <volodymyr_babchuk@xxxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>, Julien Grall <julien@xxxxxxx>, Michal Orzel <michal.orzel@xxxxxxx>
  • Delivery-date: Wed, 22 Apr 2026 14:04:24 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Hi Bertrand,

On Fri, Apr 17, 2026 at 3:41 PM Bertrand Marquis
<bertrand.marquis@xxxxxxx> wrote:
>
> VM notification binding and pending tracking exist for non-secure
> endpoints, but FFA_NOTIFICATION_SET still only forwards secure
> destinations to the SPMC. Non-secure VMs therefore cannot receive
> notifications from other VMs. Local NPI delivery also needs explicit
> re-arm tracking so repeated raises are not lost while the interrupt is
> already pending.
>
> Add a local VM notification delivery path for non-secure destinations.
> notification_set_vm() resolves the destination endpoint, verifies that
> every requested bit is bound to the sender, sets the receiver's
> vm_pending bitmap under notif_lock, and raises an NPI only when the
> receiver transitions from no local pending notifications to some.
>
> Track whether a local NPI is already armed with notif_irq_raised, clear
> that state once both VM and hypervisor pending bitmaps are drained, and
> roll back newly-added VM pending bits if no destination vCPU is online.
> Also expose firmware notification availability so FFA_FEATURES only
> advertises notification support when it is actually provided by the
> firmware or by CONFIG_FFA_VM_TO_VM.
>
> Functional impact: when CONFIG_FFA_VM_TO_VM is enabled, non-secure
> FFA_NOTIFICATION_SET delivers VM-to-VM notifications locally and keeps
> NPI delivery reliable across repeated raises.
>
> Signed-off-by: Bertrand Marquis <bertrand.marquis@xxxxxxx>
> ---
>  xen/arch/arm/tee/ffa.c         |  24 +++++--
>  xen/arch/arm/tee/ffa_notif.c   | 126 +++++++++++++++++++++++++++++++--
>  xen/arch/arm/tee/ffa_private.h |  11 ++-
>  3 files changed, 147 insertions(+), 14 deletions(-)
>
> diff --git a/xen/arch/arm/tee/ffa.c b/xen/arch/arm/tee/ffa.c
> index 1fe33f26454a..7fe021049cba 100644
> --- a/xen/arch/arm/tee/ffa.c
> +++ b/xen/arch/arm/tee/ffa.c
> @@ -39,8 +39,13 @@
>   * o FFA_MSG_SEND_DIRECT_REQ:
>   *   - only supported from a VM to an SP
>   * o FFA_NOTIFICATION_*:
> + *   - only supported when firmware notifications are enabled or VM-to-VM
> + *     support is built in
>   *   - only supports global notifications, that is, per vCPU notifications
> - *     are not supported
> + *     are not supported and secure per-vCPU notification information is
> + *     not forwarded
> + *   - the source endpoint ID reported for a notification may no longer
> + *     exist by the time the receiver consumes it
>   *   - doesn't support signalling the secondary scheduler of pending
>   *     notification for secure partitions
>   *   - doesn't support notifications for Xen itself
> @@ -245,6 +250,8 @@ static void handle_features(struct cpu_user_regs *regs)
>      uint32_t a1 = get_user_reg(regs, 1);
>      struct domain *d = current->domain;
>      struct ffa_ctx *ctx = d->arch.tee;
> +    bool notif_supported = IS_ENABLED(CONFIG_FFA_VM_TO_VM) ||
> +                           ffa_notif_fw_enabled();
>
>      /*
>       * FFA_FEATURES defines w2 as input properties only for specific
> @@ -343,10 +350,16 @@ static void handle_features(struct cpu_user_regs *regs)
>
>          break;
>      case FFA_FEATURE_NOTIF_PEND_INTR:
> -        ffa_set_regs_success(regs, GUEST_FFA_NOTIF_PEND_INTR_ID, 0);
> +        if ( notif_supported )
> +            ffa_set_regs_success(regs, GUEST_FFA_NOTIF_PEND_INTR_ID, 0);
> +        else
> +            ffa_set_regs_error(regs, FFA_RET_NOT_SUPPORTED);
>          break;
>      case FFA_FEATURE_SCHEDULE_RECV_INTR:
> -        ffa_set_regs_success(regs, GUEST_FFA_SCHEDULE_RECV_INTR_ID, 0);
> +        if ( notif_supported )
> +            ffa_set_regs_success(regs, GUEST_FFA_SCHEDULE_RECV_INTR_ID, 0);
> +        else
> +            ffa_set_regs_error(regs, FFA_RET_NOT_SUPPORTED);
>          break;
>      case FFA_PARTITION_INFO_GET_REGS:
>          if ( ACCESS_ONCE(ctx->guest_vers) >= FFA_VERSION_1_2 )
> @@ -361,7 +374,10 @@ static void handle_features(struct cpu_user_regs *regs)
>      case FFA_NOTIFICATION_SET:
>      case FFA_NOTIFICATION_INFO_GET_32:
>      case FFA_NOTIFICATION_INFO_GET_64:
> -        ffa_set_regs_success(regs, 0, 0);
> +        if ( notif_supported )
> +            ffa_set_regs_success(regs, 0, 0);
> +        else
> +            ffa_set_regs_error(regs, FFA_RET_NOT_SUPPORTED);
>          break;
>      default:
>          ffa_set_regs_error(regs, FFA_RET_NOT_SUPPORTED);
> diff --git a/xen/arch/arm/tee/ffa_notif.c b/xen/arch/arm/tee/ffa_notif.c
> index 4def701f0130..e77321244926 100644
> --- a/xen/arch/arm/tee/ffa_notif.c
> +++ b/xen/arch/arm/tee/ffa_notif.c
> @@ -20,7 +20,12 @@ static bool __ro_after_init fw_notif_enabled;
>  static unsigned int __ro_after_init notif_sri_irq;
>  static DEFINE_SPINLOCK(notif_info_lock);
>
> -static void inject_notif_pending(struct domain *d)
> +bool ffa_notif_fw_enabled(void)
> +{
> +    return fw_notif_enabled;
> +}
> +
> +static bool inject_notif_pending(struct domain *d)
>  {
>      struct vcpu *v;
>
> @@ -34,13 +39,15 @@ static void inject_notif_pending(struct domain *d)
>          if ( is_vcpu_online(v) )
>          {
>              vgic_inject_irq(d, v, GUEST_FFA_NOTIF_PEND_INTR_ID, true);
> -            return;
> +            return true;
>          }
>      }
>
>      if ( printk_ratelimit() )
>          printk(XENLOG_G_DEBUG "%pd: ffa: can't inject NPI, all vCPUs offline\n",
>                 d);
> +
> +    return false;
>  }
>
>  static int32_t ffa_notif_parse_params(uint16_t dom_id, uint16_t caller_id,
> @@ -104,6 +111,73 @@ out_unlock:
>      return ret;
>  }
>
> +/*
> + * Deliver a VM-to-VM notification. ctx->notif.notif_lock protects
> + * vm_bind/vm_pending so callers must not hold it already.
> + */
> +static int32_t notification_set_vm(uint16_t dst_id, uint16_t src_id,
> +                                   uint32_t flags, uint64_t bitmap)
> +{
> +    struct domain *dst_d;
> +    struct ffa_ctx *dst_ctx;
> +    unsigned int id;
> +    int32_t ret;
> +    uint64_t prev_bitmap = 0;
> +    uint64_t new_bitmap;
> +    bool inject = false;
> +
> +    if ( flags )
> +        return FFA_RET_INVALID_PARAMETERS;
> +
> +    ret = ffa_endpoint_domain_lookup(dst_id, &dst_d, &dst_ctx);
> +    if ( ret )
> +        return ret;
> +
> +    ret = FFA_RET_OK;
> +
> +    spin_lock(&dst_ctx->notif.notif_lock);
> +
> +    for ( id = 0; id < FFA_NUM_VM_NOTIF; id++ )
> +    {
> +        if ( !(bitmap & BIT(id, ULL)) )
> +            continue;
> +
> +        if ( dst_ctx->notif.vm_bind[id] != src_id )
> +        {
> +            ret = FFA_RET_DENIED;
> +            goto out_unlock;
> +        }
> +    }
> +
> +    prev_bitmap = dst_ctx->notif.vm_pending;
> +    dst_ctx->notif.vm_pending |= bitmap;
> +    if ( !dst_ctx->notif.notif_irq_raised &&
> +         (dst_ctx->notif.vm_pending || dst_ctx->notif.hyp_pending) )
> +    {
> +        dst_ctx->notif.notif_irq_raised = true;
> +        inject = true;
> +    }
> +
> +out_unlock:
> +    spin_unlock(&dst_ctx->notif.notif_lock);
> +
> +    new_bitmap = bitmap & ~prev_bitmap;
> +    if ( ret == FFA_RET_OK && inject && new_bitmap &&
> +         !inject_notif_pending(dst_d) )
> +    {
> +        spin_lock(&dst_ctx->notif.notif_lock);
> +        dst_ctx->notif.vm_pending &= ~new_bitmap;

There's a window above when dst_ctx->notif.notif_lock is unlocked.
What if another CPU has modified dst_ctx->notif.vm_pending during that
window?

Cheers,
Jens

> +        if ( !(dst_ctx->notif.vm_pending || dst_ctx->notif.hyp_pending) )
> +            dst_ctx->notif.notif_irq_raised = false;
> +        spin_unlock(&dst_ctx->notif.notif_lock);
> +        ret = FFA_RET_DENIED;
> +    }
> +
> +    rcu_unlock_domain(dst_d);
> +
> +    return ret;
> +}
> +
>  int32_t ffa_handle_notification_bind(struct cpu_user_regs *regs)
>  {
>      struct domain *d = current->domain;
> @@ -285,6 +359,8 @@ void ffa_handle_notification_get(struct cpu_user_regs *regs)
>
>      if ( IS_ENABLED(CONFIG_FFA_VM_TO_VM) )
>      {
> +        bool pending;
> +
>          spin_lock(&ctx->notif.notif_lock);
>
>          if ( (flags & FFA_NOTIF_FLAG_BITMAP_HYP) && ctx->notif.hyp_pending )
> @@ -293,6 +369,18 @@ void ffa_handle_notification_get(struct cpu_user_regs *regs)
>              ctx->notif.hyp_pending = 0;
>          }
>
> +        if ( (flags & FFA_NOTIF_FLAG_BITMAP_VM) && ctx->notif.vm_pending )
> +        {
> +            w4 = (uint32_t)(ctx->notif.vm_pending & GENMASK(31, 0));
> +            w5 = (uint32_t)((ctx->notif.vm_pending >> 32) & GENMASK(31, 0));
> +            ctx->notif.vm_pending = 0;
> +        }
> +
> +        pending = (ctx->notif.hyp_pending != 0) ||
> +                  (ctx->notif.vm_pending != 0);
> +        if ( !pending )
> +            ctx->notif.notif_irq_raised = false;
> +
>          spin_unlock(&ctx->notif.notif_lock);
>      }
>
> @@ -318,9 +406,17 @@ int32_t ffa_handle_notification_set(struct cpu_user_regs *regs)
>      if ( flags )
>          return FFA_RET_INVALID_PARAMETERS;
>
> -    if ( FFA_ID_IS_SECURE(dest_id) && fw_notif_enabled )
> -        return ffa_simple_call(FFA_NOTIFICATION_SET, src_dst, flags, bitmap_lo,
> -                               bitmap_hi);
> +    if ( FFA_ID_IS_SECURE(dest_id) )
> +    {
> +        if ( fw_notif_enabled )
> +            return ffa_simple_call(FFA_NOTIFICATION_SET, src_dst, flags,
> +                                   bitmap_lo, bitmap_hi);
> +    }
> +    else if ( IS_ENABLED(CONFIG_FFA_VM_TO_VM) )
> +    {
> +        return notification_set_vm(dest_id, caller_id, flags,
> +                                   ((uint64_t)bitmap_hi << 32) | bitmap_lo);
> +    }
>
>      return FFA_RET_NOT_SUPPORTED;
>  }
> @@ -330,6 +426,7 @@ void ffa_raise_rx_buffer_full(struct domain *d)
>  {
>      struct ffa_ctx *ctx = d->arch.tee;
>      uint32_t prev_bitmap;
> +    bool inject = false;
>
>      if ( !ctx )
>          return;
> @@ -337,10 +434,23 @@ void ffa_raise_rx_buffer_full(struct domain *d)
>      spin_lock(&ctx->notif.notif_lock);
>      prev_bitmap = ctx->notif.hyp_pending;
>      ctx->notif.hyp_pending |= FFA_NOTIF_RX_BUFFER_FULL;
> +    if ( !ctx->notif.notif_irq_raised &&
> +         (ctx->notif.vm_pending || ctx->notif.hyp_pending) )
> +    {
> +        ctx->notif.notif_irq_raised = true;
> +        inject = true;
> +    }
>      spin_unlock(&ctx->notif.notif_lock);
>
> -    if ( !(prev_bitmap & FFA_NOTIF_RX_BUFFER_FULL) )
> -        inject_notif_pending(d);
> +    if ( inject && !(prev_bitmap & FFA_NOTIF_RX_BUFFER_FULL) &&
> +         !inject_notif_pending(d) )
> +    {
> +        spin_lock(&ctx->notif.notif_lock);
> +        ctx->notif.hyp_pending &= ~FFA_NOTIF_RX_BUFFER_FULL;
> +        if ( !(ctx->notif.vm_pending || ctx->notif.hyp_pending) )
> +            ctx->notif.notif_irq_raised = false;
> +        spin_unlock(&ctx->notif.notif_lock);
> +    }
>  }
>  #endif
>
> @@ -572,6 +682,7 @@ int ffa_notif_domain_init(struct domain *d)
>      spin_lock_init(&ctx->notif.notif_lock);
>      ctx->notif.secure_pending = false;
>      ctx->notif.vm_pending = 0;
> +    ctx->notif.notif_irq_raised = false;
>      for ( i = 0; i < FFA_NUM_VM_NOTIF; i++ )
>          ctx->notif.vm_bind[i] = 0;
>      ctx->notif.hyp_pending = 0;
> @@ -594,6 +705,7 @@ void ffa_notif_domain_destroy(struct domain *d)
>      spin_lock(&ctx->notif.notif_lock);
>      ctx->notif.secure_pending = false;
>      ctx->notif.vm_pending = 0;
> +    ctx->notif.notif_irq_raised = false;
>      for ( i = 0; i < FFA_NUM_VM_NOTIF; i++ )
>          ctx->notif.vm_bind[i] = 0;
>      ctx->notif.hyp_pending = 0;
> diff --git a/xen/arch/arm/tee/ffa_private.h b/xen/arch/arm/tee/ffa_private.h
> index 6d83afb3d00a..5bb19bd11dd0 100644
> --- a/xen/arch/arm/tee/ffa_private.h
> +++ b/xen/arch/arm/tee/ffa_private.h
> @@ -344,13 +344,17 @@ struct ffa_ctx_notif {
>      uint64_t vm_pending;
>
>      /*
> -     * Source endpoint bound to each VM notification ID (0 means unbound).
> +     * Tracks whether an NPI has been raised for local pending notifications.
> +     * Protected by notif_lock.
>       */
> -    uint16_t vm_bind[FFA_NUM_VM_NOTIF];
> +    bool notif_irq_raised;
>
>      /*
> -     * Lock protecting the hypervisor-managed notification state.
> +     * Source endpoint bound to each VM notification ID (0 means unbound).
>       */
> +    uint16_t vm_bind[FFA_NUM_VM_NOTIF];
> +
> +    /* Lock protecting local notification state. */
>      spinlock_t notif_lock;
>
>      /*
> @@ -493,6 +497,7 @@ void ffa_notif_init(void);
>  void ffa_notif_init_interrupt(void);
>  int ffa_notif_domain_init(struct domain *d);
>  void ffa_notif_domain_destroy(struct domain *d);
> +bool ffa_notif_fw_enabled(void);
>
>  int32_t ffa_handle_notification_bind(struct cpu_user_regs *regs);
>  int32_t ffa_handle_notification_unbind(struct cpu_user_regs *regs);
> --
> 2.53.0
>



 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.