[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [PATCH v3 09/23] xen/arm: vsmmuv3: Add support for cmdqueue handling


  • To: Milan Djokic <milan_djokic@xxxxxxxx>
  • From: Luca Fancellu <Luca.Fancellu@xxxxxxx>
  • Date: Mon, 13 Apr 2026 08:48:05 +0000
  • Accept-language: en-GB, en-US
  • Arc-authentication-results: i=2; mx.microsoft.com 1; spf=pass (sender ip is 4.158.2.129) smtp.rcpttodomain=epam.com smtp.mailfrom=arm.com; dmarc=pass (p=none sp=none pct=100) action=none header.from=arm.com; dkim=pass (signature was verified) header.d=arm.com; arc=pass (0 oda=1 ltdi=1 spf=[1,1,smtp.mailfrom=arm.com] dkim=[1,1,header.d=arm.com] dmarc=[1,1,header.from=arm.com])
  • Arc-authentication-results: i=1; mx.microsoft.com 1; spf=pass smtp.mailfrom=arm.com; dmarc=pass action=none header.from=arm.com; dkim=pass header.d=arm.com; arc=none
  • Arc-message-signature: i=2; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com; s=arcselector10001; h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1; bh=uSyyuiyaulZe6vGzx6PQiq0FhYY6RBF0uFvWX1WEj3I=; b=zANlsrk+ZNzv5cowB1/4dD1489yw7eRCwqCR6/a8T4Q6z/xep1UeajGfkYi/FUhLr9WVEQvbxy/tv1duRI59HkHLXydbfWOFJxawPLJoXAp9JZk/jNMS7Pw5mW3JNcUxzG/mBdJO5P+3dXfQ32bAgV4Q2OpUEwZFr3pBjCpHKBdwKgTKTlHBOmjkC5UXC4qUxFoHk/aO3sFaihUzhcBgGBo9Pp12yYNZhuQ90jV4FUaGUHJkgaXYBuKCkRdJIpDYJsxrrhrEMOuvA177PK13IQFJXjMjjZDjq29gbWE3CeLnJgvJr3/Vc3pmsXeX//B9WsInUrZw5Rc2qMGw/L+1IQ==
  • Arc-message-signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com; s=arcselector10001; h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1; bh=uSyyuiyaulZe6vGzx6PQiq0FhYY6RBF0uFvWX1WEj3I=; b=aW+ufWqGNIybB1C9C1WGnh+rUSnMzRBdq0ti0btetjEvRQaHFBOs+fLEnzKMjoQbdQjqclaVnbsLGwARendaiyq8wJfFXRz5r51ejo/FJTkDjWerxjKK7yw2SSP+OJQllHaUicSn3sbtnE2GQuodq3p74caHwXRYxNAfG3r/yXdc2wftO51IzCTma2dj1XRogcgHX2qoDfSmJkjfY1W77zfwP2Ahuii4WBWhE4ScXu7EIkWly0CswQmegr52rgqI/z1qvCKGNFCMGR5rlH02rgSlbvAFpmJXCdIZcepx8CLUpeRB+0gjJBI/n1gklu1sFRZ6Hfj5PzKBeaXNvKQYVQ==
  • Arc-seal: i=2; a=rsa-sha256; s=arcselector10001; d=microsoft.com; cv=pass; b=Hrzm0UhGfKBAsdGQ9gD3L6mQpF6HZps3WIadIWqnopdO5OjpEp4YkZOc5qK7e+cEsU4848LYVqwCJsnpxSHqJ57lbQLUcVcWIWdpSR6/vHPQJQ8j6xIxHZx7gujYSrxLQ2TdIpaOVcgVp5Q1GBmRheSgl2UIA0DRLtfXpxcS9N5cWBT+AChwWlq273us2uFgI1uB3d6JZDgSkAhTgqSzyRRb7qum1d8vH36BVoPkw13M0/ciDTWVMqMNum/HBpzF8NYyX3bwDR6VIDXEmkUbz3wzQ5qhMwmIFqqBlYxmBggd8BTKsJNYE2V0Ud9MbU46227cQldhV05IOiAxtMv3CQ==
  • Arc-seal: i=1; a=rsa-sha256; s=arcselector10001; d=microsoft.com; cv=none; b=a98PXDywqD7slfSM/GTSPVfowivBvPMi9roq7NWBav4K3mv8foXl29mMS6bi7RS6QjbKTvOYTXpqZ+o0VzKca5pA1vkxmJavm1wxBmd3VDY8KhDbZZipJRbdxAaALXgBjg71hLnuE9WDkTFDiv/zrnELRKlH+4fuS7uvmezAA9SgDEbFBY0PyBG3duBZoj6JuuOUHodlytd0LW6e07BwD7HlB4xL+v9BIDeVuegzk5n125UyFeVNOQCGAeEq2UIcg+PzqTalUqLaziJCIsG5EEfF08eo9pwVKn4M2Y0nQP71ojKXBgG4uaqKfGvgJzlsGmTnIchhbon5HIYH9MMDKg==
  • Authentication-results: eu.smtp.expurgate.cloud; dkim=pass header.s=selector1 header.d=arm.com header.i="@arm.com" header.h="From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck"; dkim=pass header.s=selector1 header.d=arm.com header.i="@arm.com" header.h="From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck"
  • Authentication-results-original: dkim=none (message not signed) header.d=none;dmarc=none action=none header.from=arm.com;
  • Cc: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>, Rahul Singh <Rahul.Singh@xxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>, Julien Grall <julien@xxxxxxx>, Bertrand Marquis <Bertrand.Marquis@xxxxxxx>, Michal Orzel <michal.orzel@xxxxxxx>, Volodymyr Babchuk <Volodymyr_Babchuk@xxxxxxxx>
  • Delivery-date: Mon, 13 Apr 2026 08:49:22 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
  • Nodisclaimer: true
  • Thread-index: AQHcyyI+uCH3fkXed0iT4v0tOTag1A==
  • Thread-topic: [PATCH v3 09/23] xen/arm: vsmmuv3: Add support for cmdqueue handling

Hi Milan,

> On 31 Mar 2026, at 02:52, Milan Djokic <milan_djokic@xxxxxxxx> wrote:
> 
> From: Rahul Singh <rahul.singh@xxxxxxx>
> 
> Add support for virtual cmdqueue handling for guests
> 
> Signed-off-by: Rahul Singh <rahul.singh@xxxxxxx>
> Signed-off-by: Milan Djokic <milan_djokic@xxxxxxxx>
> ---
> xen/drivers/passthrough/arm/vsmmu-v3.c | 101 +++++++++++++++++++++++++
> 1 file changed, 101 insertions(+)
> 
> diff --git a/xen/drivers/passthrough/arm/vsmmu-v3.c 
> b/xen/drivers/passthrough/arm/vsmmu-v3.c
> index 3ae1e62a50..02fe6a4422 100644
> --- a/xen/drivers/passthrough/arm/vsmmu-v3.c
> +++ b/xen/drivers/passthrough/arm/vsmmu-v3.c
> @@ -1,5 +1,6 @@
> /* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) */
> 
> +#include <xen/guest_access.h>
> #include <xen/param.h>
> #include <xen/sched.h>
> #include <asm/mmio.h>
> @@ -25,6 +26,26 @@
> /* Struct to hold the vIOMMU ops and vIOMMU type */
> extern const struct viommu_desc __read_mostly *cur_viommu;
> 
> +/* SMMUv3 command definitions */
> +#define CMDQ_OP_PREFETCH_CFG    0x1
> +#define CMDQ_OP_CFGI_STE        0x3
> +#define CMDQ_OP_CFGI_ALL        0x4
> +#define CMDQ_OP_CFGI_CD         0x5
> +#define CMDQ_OP_CFGI_CD_ALL     0x6
> +#define CMDQ_OP_TLBI_NH_ASID    0x11
> +#define CMDQ_OP_TLBI_NH_VA      0x12
> +#define CMDQ_OP_TLBI_NSNH_ALL   0x30
> +#define CMDQ_OP_CMD_SYNC        0x46
> +
> +/* Queue Handling */
> +#define Q_BASE(q)       ((q)->q_base & Q_BASE_ADDR_MASK)
> +#define Q_CONS_ENT(q)   (Q_BASE(q) + Q_IDX(q, (q)->cons) * (q)->ent_size)
> +#define Q_PROD_ENT(q)   (Q_BASE(q) + Q_IDX(q, (q)->prod) * (q)->ent_size)
> +
> +/* Helper Macros */
> +#define smmu_get_cmdq_enabled(x)    FIELD_GET(CR0_CMDQEN, x)
> +#define smmu_cmd_get_command(x)     FIELD_GET(CMDQ_0_OP, x)
> +
> /* virtual smmu queue */
> struct arm_vsmmu_queue {
>     uint64_t    q_base; /* base register */
> @@ -49,8 +70,80 @@ struct virt_smmu {
>     uint64_t    gerror_irq_cfg0;
>     uint64_t    evtq_irq_cfg0;
>     struct      arm_vsmmu_queue evtq, cmdq;
> +    spinlock_t  cmd_queue_lock;
> };
> 
> +/* Queue manipulation functions */
> +static bool queue_empty(struct arm_vsmmu_queue *q)
> +{
> +    return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
> +           Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
> +}
> +
> +static void queue_inc_cons(struct arm_vsmmu_queue *q)
> +{
> +    uint32_t cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;
> +    q->cons = Q_OVF(q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
> +}
> +
> +static void dump_smmu_command(uint64_t *command)
> +{
> +    gdprintk(XENLOG_ERR, "cmd 0x%02llx: %016lx %016lx\n",

I think using PRIx64 (from <inttypes.h>) is preferred here — the "%llx"/"%lx" specifiers don't reliably match uint64_t across architectures, whereas PRIx64 always expands to the correct one.

> +             smmu_cmd_get_command(command[0]), command[0], command[1]);
> +}
> +static int arm_vsmmu_handle_cmds(struct virt_smmu *smmu)
> +{
> +    struct arm_vsmmu_queue *q = &smmu->cmdq;
> +    struct domain *d = smmu->d;
> +    uint64_t command[CMDQ_ENT_DWORDS];
> +    paddr_t addr;

can we declare ‘int ret = 0;’ here and ...

> +
> +    if ( !smmu_get_cmdq_enabled(smmu->cr[0]) )
> +        return 0;
> +
> +    while ( !queue_empty(q) )
> +    {
> +        int ret;

remove this

> +
> +        addr = Q_CONS_ENT(q);
> +        ret = access_guest_memory_by_gpa(d, addr, command,
> +                                         sizeof(command), false);
> +        if ( ret )
> +            return ret;

Here we should at least have queue_inc_cons(q); before returning, since
otherwise we will loop indefinitely on the same element.

> +
> +        switch ( smmu_cmd_get_command(command[0]) )
> +        {
> +        case CMDQ_OP_CFGI_STE:
> +            break;
> +        case CMDQ_OP_PREFETCH_CFG:
> +        case CMDQ_OP_CFGI_CD:
> +        case CMDQ_OP_CFGI_CD_ALL:
> +        case CMDQ_OP_CFGI_ALL:
> +        case CMDQ_OP_CMD_SYNC:
> +            break;
> +        case CMDQ_OP_TLBI_NH_ASID:
> +        case CMDQ_OP_TLBI_NSNH_ALL:
> +        case CMDQ_OP_TLBI_NH_VA:
> +            if ( !iommu_iotlb_flush_all(smmu->d, 1) )
> +                break;

ret = iommu_iotlb_flush_all(smmu->d, 1);
if ( !ret )
    break;

> +        default:
> +            gdprintk(XENLOG_ERR, "vSMMUv3: unhandled command\n");
> +            dump_smmu_command(command);
> +            break;
> +        }
> +
> +        if ( ret )
> +        {
> +            gdprintk(XENLOG_ERR,
> +                     "vSMMUv3: command error %d while handling command\n",
> +                     ret);
> +            dump_smmu_command(command);
> +        }
> +        queue_inc_cons(q);
> +    }
> +    return 0;

return ret;

In this way we don’t suppress the iotlb flush error but propagate it to the caller.

> +}
> +
> static int vsmmuv3_mmio_write(struct vcpu *v, mmio_info_t *info,
>                               register_t r, void *priv)
> {
> @@ -104,9 +197,15 @@ static int vsmmuv3_mmio_write(struct vcpu *v, 
> mmio_info_t *info,
>         break;
> 
>     case VREG32(ARM_SMMU_CMDQ_PROD):
> +        spin_lock(&smmu->cmd_queue_lock);
>         reg32 = smmu->cmdq.prod;
>         vreg_reg32_update(&reg32, r, info);
>         smmu->cmdq.prod = reg32;
> +
> +        if ( arm_vsmmu_handle_cmds(smmu) )
> +            gdprintk(XENLOG_ERR, "error handling vSMMUv3 commands\n");
> +
> +        spin_unlock(&smmu->cmd_queue_lock);
>         break;
> 
>     case VREG32(ARM_SMMU_CMDQ_CONS):
> @@ -326,6 +425,8 @@ static int vsmmuv3_init_single(struct domain *d, paddr_t 
> addr, paddr_t size)
>     smmu->evtq.q_base = FIELD_PREP(Q_BASE_LOG2SIZE, SMMU_EVTQS);
>     smmu->evtq.ent_size = EVTQ_ENT_DWORDS * DWORDS_BYTES;
> 
> +    spin_lock_init(&smmu->cmd_queue_lock);
> +
>     register_mmio_handler(d, &vsmmuv3_mmio_handler, addr, size, smmu);
> 
>     /* Register the vIOMMU to be able to clean it up later. */
> 

Cheers,
Luca



 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.