
[XEN PATCH 1/1] xen/arm: introduce dummy iommu node for dom0



Currently no IOMMU properties are exposed to dom0, so the kernel assumes
no protection by default and enables swiotlb-xen, which leads to costly
and unnecessary bounce buffering.

To let the kernel know which devices are behind an IOMMU, and hence need
no swiotlb services, introduce a dummy xen-iommu node in the FDT and link
the protected device nodes to it using the generic device tree IOMMU
bindings.
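
For illustration, dom0's FDT then contains a node like the one below,
and every protected device node carries an "iommus" link to it (the
ethernet node and its unit address are made up for this example;
0xfde9 is GUEST_PHANDLE_IOMMU, i.e. 65001):

    xen-iommu {
        compatible = "xen,iommu-el2-v1";
        #iommu-cells = <0x0>;
        phandle = <0xfde9>;
    };

    ethernet@ff0e0000 {
        ...
        iommus = <0xfde9>;
    };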

Signed-off-by: Sergiy Kibrik <Sergiy_Kibrik@xxxxxxxx>
---
 xen/arch/arm/domain_build.c           | 44 +++++++++++++++++++++++++++
 xen/include/asm-arm/kernel.h          |  3 ++
 xen/include/public/device_tree_defs.h |  1 +
 3 files changed, 48 insertions(+)

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index 6cfc772e66..951ca0a0cb 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -623,6 +623,12 @@ static int __init write_properties(struct domain *d, struct kernel_info *kinfo,
         }
     }
 
+    if ( iommu_node && kinfo->phandle_iommu && dt_device_is_protected(node) )
+    {
+        res = fdt_property_cell(kinfo->fdt, "iommus", kinfo->phandle_iommu);
+        if ( res )
+            return res;
+    }
     return 0;
 }
 
@@ -948,6 +954,38 @@ static int __init make_cpus_node(const struct domain *d, void *fdt)
     return res;
 }
 
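+/* Dummy node to be the target of protected devices' "iommus" links. */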
+static int __init make_iommu_node(const struct domain *d,
+                                  const struct kernel_info *kinfo)
+{
+    const char compat[] = "xen,iommu-el2-v1";
+    int res;
+
+    if ( !kinfo->phandle_iommu )
+        return 0;
+
+    dt_dprintk("Create iommu node\n");
+
+    res = fdt_begin_node(kinfo->fdt, "xen-iommu");
+    if ( res )
+        return res;
+
+    res = fdt_property(kinfo->fdt, "compatible", compat, sizeof(compat));
+    if ( res )
+        return res;
+
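+    /* No specifier cells: an "iommus" entry is just this node's phandle. */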
+    res = fdt_property_cell(kinfo->fdt, "#iommu-cells", 0);
+    if ( res )
+        return res;
+
+    res = fdt_property_cell(kinfo->fdt, "phandle", kinfo->phandle_iommu);
+    if ( res )
+        return res;
+
+    return fdt_end_node(kinfo->fdt);
+}
+
 static int __init make_gic_node(const struct domain *d, void *fdt,
                                 const struct dt_device_node *node)
 {
@@ -1584,6 +1622,10 @@ static int __init handle_node(struct domain *d, struct kernel_info *kinfo,
         if ( res )
             return res;
 
+        res = make_iommu_node(d, kinfo);
+        if ( res )
+            return res;
+
         res = make_memory_node(d, kinfo->fdt, addrcells, sizecells, &kinfo->mem);
         if ( res )
             return res;
@@ -2177,6 +2219,8 @@ static int __init prepare_dtb_hwdom(struct domain *d, struct kernel_info *kinfo)
     ASSERT(dt_host && (dt_host->sibling == NULL));
 
     kinfo->phandle_gic = dt_interrupt_controller->phandle;
+    if ( is_iommu_enabled(d) )
+        kinfo->phandle_iommu = GUEST_PHANDLE_IOMMU;
     fdt = device_tree_flattened;
 
     new_size = fdt_totalsize(fdt) + DOM0_FDT_EXTRA_SIZE;
diff --git a/xen/include/asm-arm/kernel.h b/xen/include/asm-arm/kernel.h
index 874aa108a7..efe09cd1e0 100644
--- a/xen/include/asm-arm/kernel.h
+++ b/xen/include/asm-arm/kernel.h
@@ -39,6 +39,9 @@ struct kernel_info {
     /* GIC phandle */
     uint32_t phandle_gic;
 
+    /* Dummy IOMMU phandle */
+    uint32_t phandle_iommu;
+
     /* loader to use for this kernel */
     void (*load)(struct kernel_info *info);
     /* loader specific state */
diff --git a/xen/include/public/device_tree_defs.h b/xen/include/public/device_tree_defs.h
index 209d43de3f..df58944bd0 100644
--- a/xen/include/public/device_tree_defs.h
+++ b/xen/include/public/device_tree_defs.h
@@ -7,6 +7,7 @@
  * onwards. Reserve a high value for the GIC phandle.
  */
 #define GUEST_PHANDLE_GIC (65000)
+#define GUEST_PHANDLE_IOMMU (GUEST_PHANDLE_GIC + 1)
 
 #define GUEST_ROOT_ADDRESS_CELLS 2
 #define GUEST_ROOT_SIZE_CELLS 2
-- 
2.25.1
