
[PATCH 18/20] xen/arm: vsmmuv3: Add support to send stage-1 event to guest


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Milan Djokic <milan_djokic@xxxxxxxx>
  • Date: Thu, 7 Aug 2025 16:59:33 +0000
  • Accept-language: en-US
  • Cc: Rahul Singh <rahul.singh@xxxxxxx>, Bertrand Marquis <bertrand.marquis@xxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>, Julien Grall <julien@xxxxxxx>, Michal Orzel <michal.orzel@xxxxxxx>, Volodymyr Babchuk <Volodymyr_Babchuk@xxxxxxxx>, Milan Djokic <milan_djokic@xxxxxxxx>
  • Delivery-date: Thu, 07 Aug 2025 18:31:54 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
  • Thread-topic: [PATCH 18/20] xen/arm: vsmmuv3: Add support to send stage-1 event to guest

From: Rahul Singh <rahul.singh@xxxxxxx>

Stage-1 translation is handled by the guest, therefore stage-1 faults have to
be forwarded to the guest.

When the physical SMMU reports a stage-1 translation, address size, access or
permission fault, look up the master for the faulting StreamID and hand the
raw event to the owning domain's virtual SMMU, so that it can be delivered
through the guest's event queue. Stage-2 faults and unsupported event types
keep the existing behaviour of being logged by Xen.

Signed-off-by: Rahul Singh <rahul.singh@xxxxxxx>
Signed-off-by: Milan Djokic <milan_djokic@xxxxxxxx>
---
 xen/drivers/passthrough/arm/smmu-v3.c  | 48 ++++++++++++++++++++++++--
 xen/drivers/passthrough/arm/vsmmu-v3.c | 45 ++++++++++++++++++++++++
 xen/drivers/passthrough/arm/vsmmu-v3.h | 12 +++++++
 3 files changed, 103 insertions(+), 2 deletions(-)
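In outline, the forwarding path added below looks roughly like this (a
condensed sketch of the new arm_smmu_handle_evt(), not the literal hunks;
the full fault-type switch and the streams_mutex locking are in the actual
smmu-v3.c changes):

    /* Condensed sketch of the hypervisor-side forwarding path. */
    static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
    {
        u32 sid = FIELD_GET(EVTQ_0_SID, evt[0]);
        struct arm_smmu_master *master;

        /*
         * Only stage-1 translation/address-size/access/permission faults
         * are forwarded; stage-2 faults stay with Xen and keep the
         * existing event dump.
         */
        if (evt[1] & EVTQ_1_S2)
            return -EFAULT;

        master = arm_smmu_find_master(smmu, sid);   /* under streams_mutex */
        if (!master)
            return -EINVAL;

        /*
         * The virtual SMMU copies the event into the guest event queue and
         * raises GERROR_EVTQ_ABT_ERR if the queue cannot accept it.
         */
        return arm_vsmmu_handle_evt(master->domain->d, smmu->dev, evt);
    }
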

diff --git a/xen/drivers/passthrough/arm/smmu-v3.c b/xen/drivers/passthrough/arm/smmu-v3.c
index 91bf72d420..4ea3e90c11 100644
--- a/xen/drivers/passthrough/arm/smmu-v3.c
+++ b/xen/drivers/passthrough/arm/smmu-v3.c
@@ -853,7 +853,6 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
        return 0;
 }
 
-__maybe_unused
 static struct arm_smmu_master *
 arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid)
 {
@@ -874,10 +873,51 @@ arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid)
        return NULL;
 }
 
+static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
+{
+       int ret;
+       struct arm_smmu_master *master;
+       u32 sid = FIELD_GET(EVTQ_0_SID, evt[0]);
+
+       switch (FIELD_GET(EVTQ_0_ID, evt[0])) {
+       case EVT_ID_TRANSLATION_FAULT:
+               break;
+       case EVT_ID_ADDR_SIZE_FAULT:
+               break;
+       case EVT_ID_ACCESS_FAULT:
+               break;
+       case EVT_ID_PERMISSION_FAULT:
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       /* Stage-2 event */
+       if (evt[1] & EVTQ_1_S2)
+               return -EFAULT;
+
+       mutex_lock(&smmu->streams_mutex);
+       master = arm_smmu_find_master(smmu, sid);
+       if (!master) {
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
+       ret = arm_vsmmu_handle_evt(master->domain->d, smmu->dev, evt);
+       if (ret) {
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
+out_unlock:
+       mutex_unlock(&smmu->streams_mutex);
+       return ret;
+}
+
 /* IRQ and event handlers */
 static void arm_smmu_evtq_tasklet(void *dev)
 {
-       int i;
+       int i, ret;
        struct arm_smmu_device *smmu = dev;
        struct arm_smmu_queue *q = &smmu->evtq.q;
        struct arm_smmu_ll_queue *llq = &q->llq;
@@ -887,6 +927,10 @@ static void arm_smmu_evtq_tasklet(void *dev)
                while (!queue_remove_raw(q, evt)) {
                        u8 id = FIELD_GET(EVTQ_0_ID, evt[0]);
 
+                       ret = arm_smmu_handle_evt(smmu, evt);
+                       if (!ret)
+                               continue;
+
                        dev_info(smmu->dev, "event 0x%02x received:\n", id);
                        for (i = 0; i < ARRAY_SIZE(evt); ++i)
                                dev_info(smmu->dev, "\t0x%016llx\n",
diff --git a/xen/drivers/passthrough/arm/vsmmu-v3.c b/xen/drivers/passthrough/arm/vsmmu-v3.c
index a5b9700369..5d0dabd2b2 100644
--- a/xen/drivers/passthrough/arm/vsmmu-v3.c
+++ b/xen/drivers/passthrough/arm/vsmmu-v3.c
@@ -103,6 +103,7 @@ struct arm_vsmmu_queue {
 struct virt_smmu {
     struct      domain *d;
     struct      list_head viommu_list;
+    paddr_t     addr;
     uint8_t     sid_split;
     uint32_t    features;
     uint32_t    cr[3];
@@ -237,6 +238,49 @@ void arm_vsmmu_send_event(struct virt_smmu *smmu,
     return;
 }
 
+static struct virt_smmu *vsmmuv3_find_by_addr(struct domain *d, paddr_t paddr)
+{
+    struct virt_smmu *smmu;
+
+    list_for_each_entry( smmu, &d->arch.viommu_list, viommu_list )
+    {
+        if ( smmu->addr == paddr )
+            return smmu;
+    }
+
+    return NULL;
+}
+
+int arm_vsmmu_handle_evt(struct domain *d, struct device *dev, uint64_t *evt)
+{
+    int ret;
+    struct virt_smmu *smmu;
+
+    if ( is_hardware_domain(d) )
+    {
+        paddr_t paddr;
+        /* Base address */
+        ret = dt_device_get_address(dev_to_dt(dev), 0, &paddr, NULL);
+        if ( ret )
+            return -EINVAL;
+
+        smmu = vsmmuv3_find_by_addr(d, paddr);
+        if ( !smmu )
+            return -ENODEV;
+    }
+    else
+    {
+        smmu = list_entry(d->arch.viommu_list.next,
+                          struct virt_smmu, viommu_list);
+    }
+
+    ret = arm_vsmmu_write_evtq(smmu, evt);
+    if ( ret )
+        arm_vsmmu_inject_irq(smmu, true, GERROR_EVTQ_ABT_ERR);
+
+    return 0;
+}
+
 static int arm_vsmmu_find_ste(struct virt_smmu *smmu, uint32_t sid,
                               uint64_t *ste)
 {
@@ -742,6 +786,7 @@ static int vsmmuv3_init_single(struct domain *d, paddr_t addr,
 
     smmu->d = d;
     smmu->virq = virq;
+    smmu->addr = addr;
     smmu->cmdq.q_base = FIELD_PREP(Q_BASE_LOG2SIZE, SMMU_CMDQS);
     smmu->cmdq.ent_size = CMDQ_ENT_DWORDS * DWORDS_BYTES;
     smmu->evtq.q_base = FIELD_PREP(Q_BASE_LOG2SIZE, SMMU_EVTQS);
diff --git a/xen/drivers/passthrough/arm/vsmmu-v3.h b/xen/drivers/passthrough/arm/vsmmu-v3.h
index e11f85b431..c7bfd3fb59 100644
--- a/xen/drivers/passthrough/arm/vsmmu-v3.h
+++ b/xen/drivers/passthrough/arm/vsmmu-v3.h
@@ -8,6 +8,12 @@
 
 void vsmmuv3_set_type(void);
 
+static inline int arm_vsmmu_handle_evt(struct domain *d,
+                                       struct device *dev, uint64_t *evt)
+{
+    return -EINVAL;
+}
+
 #else
 
 static inline void vsmmuv3_set_type(void)
@@ -15,6 +21,12 @@ static inline void vsmmuv3_set_type(void)
     return;
 }
 
+static inline int arm_vsmmu_handle_evt(struct domain *d,
+                                       struct device *dev, uint64_t *evt)
+{
+    return -EINVAL;
+}
+
 #endif /* CONFIG_VIRTUAL_ARM_SMMU_V3 */
 
 #endif /* __ARCH_ARM_VSMMU_V3_H__ */
-- 
2.43.0



 

