[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH 11/20] xen/arm: vsmmuv3: Attach Stage-1 configuration to SMMUv3 hardware


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Milan Djokic <milan_djokic@xxxxxxxx>
  • Date: Thu, 7 Aug 2025 16:59:26 +0000
  • Accept-language: en-US
  • Arc-authentication-results: i=1; mx.microsoft.com 1; spf=pass smtp.mailfrom=epam.com; dmarc=pass action=none header.from=epam.com; dkim=pass header.d=epam.com; arc=none
  • Arc-message-signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com; s=arcselector10001; h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1; bh=KEniSrR57nVltdVydlhahhmJzE8muwus3yOrLndbPSk=; b=LXmHZb+EuI22YeJzJB/JXW1YvLapkgOzfsjkUJdY0fJ91K684dxmxXEZudD+zSY2F5veJ3Yf1VFf2WbWwHWtV/YlO90GihThSGa+oOJC1jbjsfNCFIavqhYIIYyHJvefH6PLqhRmfB7n35i5rWVpj9ILeO3HmfBclu0CHkcL3ZGTTtDZgj0jmEpwa3wsXHbb3ZA5kHTg/UDpumomzItoqbG3zdmmAADcOerOU6ZmaiRsk1r10xkFyu683oTy9k+vZ85nvL3+IZCxaMQHwnXXqPKppQa62stWkgPB6MqxuegktjnJC0QAeZTeST7NWonx1rYVxviGyJaZu3/gTGZx/Q==
  • Arc-seal: i=1; a=rsa-sha256; s=arcselector10001; d=microsoft.com; cv=none; b=C4tYEYPxtx/fZPdXSMimyN5V0vlsMpgzYOAFPRmqKWCm0+Q1Z8TGw8u2a1QWGGLvMkQyAZ+OORje9TISg/Z+7tJHsxIyturjRxBfb39gN7xObRylTUtnyR2XAgEmISR3YTKvtuLOBx4zAtrScibqBtDeE2Dd9KYqL/yjpuplLzR9ALhvKopQPQufHfV0dOSsg5LFzg12h3uXmv6yHpGkMymifdJeiL8fYqP6nlod2yDpFKH80QFaqaouX7cQ6FnTivia3565SJPQlG7UvyRqv2NenULO94vV+VgIrs1mVfViDjmRUBSqsramhNRbGN7J4UNvvZ2xvDJK8gUc7pqQ2g==
  • Authentication-results: dkim=none (message not signed) header.d=none;dmarc=none action=none header.from=epam.com;
  • Cc: Rahul Singh <rahul.singh@xxxxxxx>, Bertrand Marquis <bertrand.marquis@xxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>, Julien Grall <julien@xxxxxxx>, Michal Orzel <michal.orzel@xxxxxxx>, Volodymyr Babchuk <Volodymyr_Babchuk@xxxxxxxx>, Jan Beulich <jbeulich@xxxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>, Milan Djokic <milan_djokic@xxxxxxxx>
  • Delivery-date: Thu, 07 Aug 2025 18:31:50 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
  • Thread-index: AQHcB7yha1klxs18OESsoLM4R9ywNQ==
  • Thread-topic: [PATCH 11/20] xen/arm: vsmmuv3: Attach Stage-1 configuration to SMMUv3 hardware

From: Rahul Singh <rahul.singh@xxxxxxx>

Attach the Stage-1 configuration to device STE to support nested
translation for the guests.

Signed-off-by: Rahul Singh <rahul.singh@xxxxxxx>
Signed-off-by: Milan Djokic <milan_djokic@xxxxxxxx>
---
 xen/drivers/passthrough/arm/smmu-v3.c  | 79 ++++++++++++++++++++++++++
 xen/drivers/passthrough/arm/smmu-v3.h  |  1 +
 xen/drivers/passthrough/arm/vsmmu-v3.c | 18 ++++++
 xen/include/xen/iommu.h                | 14 +++++
 4 files changed, 112 insertions(+)

diff --git a/xen/drivers/passthrough/arm/smmu-v3.c b/xen/drivers/passthrough/arm/smmu-v3.c
index 193c892fcd..91bf72d420 100644
--- a/xen/drivers/passthrough/arm/smmu-v3.c
+++ b/xen/drivers/passthrough/arm/smmu-v3.c
@@ -2791,6 +2791,37 @@ static struct arm_smmu_device *arm_smmu_get_by_dev(const struct device *dev)
        return NULL;
 }
 
+static struct iommu_domain *arm_smmu_get_domain_by_sid(struct domain *d,
+                               u32 sid)
+{
+       int i;
+       unsigned long flags;
+       struct iommu_domain *io_domain;
+       struct arm_smmu_domain *smmu_domain;
+       struct arm_smmu_master *master;
+       struct arm_smmu_xen_domain *xen_domain = dom_iommu(d)->arch.priv;
+
+       /*
+        * Loop through the &xen_domain->contexts to locate a context
+        * assigned to this SMMU
+        */
+       list_for_each_entry(io_domain, &xen_domain->contexts, list) {
+               smmu_domain = to_smmu_domain(io_domain);
+
+               spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+               list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+                       for (i = 0; i < master->num_streams; i++) {
+                               if (sid != master->streams[i].id)
+                                       continue;
+                               spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+                               return io_domain;
+                       }
+               }
+               spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+       }
+       return NULL;
+}
+
 static struct iommu_domain *arm_smmu_get_domain(struct domain *d,
                                struct device *dev)
 {
@@ -3003,6 +3034,53 @@ static void arm_smmu_iommu_xen_domain_teardown(struct domain *d)
        xfree(xen_domain);
 }
 
+static int arm_smmu_attach_guest_config(struct domain *d, u32 sid,
+               struct iommu_guest_config *cfg)
+{
+       int ret = -EINVAL;
+       unsigned long flags;
+       struct arm_smmu_master *master;
+       struct arm_smmu_domain *smmu_domain;
+       struct arm_smmu_xen_domain *xen_domain = dom_iommu(d)->arch.priv;
+       struct iommu_domain *io_domain = arm_smmu_get_domain_by_sid(d, sid);
+
+       if (!io_domain)
+               return -ENODEV;
+
+       smmu_domain = to_smmu_domain(io_domain);
+
+       spin_lock(&xen_domain->lock);
+
+       switch (cfg->config) {
+       case ARM_SMMU_DOMAIN_ABORT:
+               smmu_domain->abort = true;
+               break;
+       case ARM_SMMU_DOMAIN_BYPASS:
+               smmu_domain->abort = false;
+               break;
+       case ARM_SMMU_DOMAIN_NESTED:
+               /* Enable Nested stage translation. */
+               smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
+               smmu_domain->s1_cfg.s1ctxptr = cfg->s1ctxptr;
+               smmu_domain->s1_cfg.s1fmt = cfg->s1fmt;
+               smmu_domain->s1_cfg.s1cdmax = cfg->s1cdmax;
+               smmu_domain->abort = false;
+               break;
+       default:
+               goto out;
+       }
+
+       spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+       list_for_each_entry(master, &smmu_domain->devices, domain_head)
+               arm_smmu_install_ste_for_dev(master);
+       spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+
+       ret = 0;
+out:
+       spin_unlock(&xen_domain->lock);
+       return ret;
+}
+
 static const struct iommu_ops arm_smmu_iommu_ops = {
        .page_sizes             = PAGE_SIZE_4K,
        .init                   = arm_smmu_iommu_xen_domain_init,
@@ -3015,6 +3093,7 @@ static const struct iommu_ops arm_smmu_iommu_ops = {
        .unmap_page             = arm_iommu_unmap_page,
        .dt_xlate               = arm_smmu_dt_xlate,
        .add_device             = arm_smmu_add_device,
+       .attach_guest_config = arm_smmu_attach_guest_config
 };
 
 static __init int arm_smmu_dt_init(struct dt_device_node *dev,
diff --git a/xen/drivers/passthrough/arm/smmu-v3.h b/xen/drivers/passthrough/arm/smmu-v3.h
index d54f0a79f2..3e3a6cd080 100644
--- a/xen/drivers/passthrough/arm/smmu-v3.h
+++ b/xen/drivers/passthrough/arm/smmu-v3.h
@@ -398,6 +398,7 @@ enum arm_smmu_domain_stage {
        ARM_SMMU_DOMAIN_S2,
        ARM_SMMU_DOMAIN_NESTED,
        ARM_SMMU_DOMAIN_BYPASS,
+       ARM_SMMU_DOMAIN_ABORT,
 };
 
 /* Xen specific code. */
diff --git a/xen/drivers/passthrough/arm/vsmmu-v3.c b/xen/drivers/passthrough/arm/vsmmu-v3.c
index 3ecbe4861b..3b073b9dac 100644
--- a/xen/drivers/passthrough/arm/vsmmu-v3.c
+++ b/xen/drivers/passthrough/arm/vsmmu-v3.c
@@ -224,8 +224,11 @@ static int arm_vsmmu_handle_cfgi_ste(struct virt_smmu *smmu, uint64_t *cmdptr)
 {
     int ret;
     uint64_t ste[STRTAB_STE_DWORDS];
+    struct domain *d = smmu->d;
+    struct domain_iommu *hd = dom_iommu(d);
     struct arm_vsmmu_s1_trans_cfg s1_cfg = {0};
     uint32_t sid = smmu_cmd_get_sid(cmdptr[0]);
+    struct iommu_guest_config guest_cfg = {0};
 
     ret = arm_vsmmu_find_ste(smmu, sid, ste);
     if ( ret )
@@ -235,6 +238,21 @@ static int arm_vsmmu_handle_cfgi_ste(struct virt_smmu *smmu, uint64_t *cmdptr)
     if ( ret )
         return (ret == -EAGAIN ) ? 0 : ret;
 
+    guest_cfg.s1ctxptr = s1_cfg.s1ctxptr;
+    guest_cfg.s1fmt = s1_cfg.s1fmt;
+    guest_cfg.s1cdmax = s1_cfg.s1cdmax;
+
+    if ( s1_cfg.bypassed )
+        guest_cfg.config = ARM_SMMU_DOMAIN_BYPASS;
+    else if ( s1_cfg.aborted )
+        guest_cfg.config = ARM_SMMU_DOMAIN_ABORT;
+    else
+        guest_cfg.config = ARM_SMMU_DOMAIN_NESTED;
+
+    ret = hd->platform_ops->attach_guest_config(d, sid, &guest_cfg);
+    if ( ret )
+        return ret;
+
     return 0;
 }
 
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index 37c4a1dc82..21f905d44f 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -311,6 +311,15 @@ static inline int iommu_add_dt_pci_sideband_ids(struct pci_dev *pdev)
 
 #endif /* HAS_DEVICE_TREE_DISCOVERY */
 
+#ifdef CONFIG_ARM
+struct iommu_guest_config {
+    paddr_t     s1ctxptr;
+    uint8_t     config;
+    uint8_t     s1fmt;
+    uint8_t     s1cdmax;
+};
+#endif /* CONFIG_ARM */
+
 struct page_info;
 
 /*
@@ -387,6 +396,11 @@ struct iommu_ops {
 #endif
     /* Inhibit all interrupt generation, to be used at shutdown. */
     void (*quiesce)(void);
+
+#ifdef CONFIG_ARM
+    int (*attach_guest_config)(struct domain *d, u32 sid,
+                               struct iommu_guest_config *cfg);
+#endif
 };
 
 /*
-- 
2.43.0



 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.