
Re: [PATCH v3 06/10] xen/arm: if direct-map domain use native addresses for GICv2



Hi,

On 16/11/2021 06:31, Penny Zheng wrote:
From: Stefano Stabellini <stefano.stabellini@xxxxxxxxxx>

Today we use native addresses to map the GICv2 for Dom0 and fixed
addresses for DomUs.

This patch changes the behavior so that native addresses are used for
all domains that are direct-mapped.

The NEW VGIC uses a different naming scheme, e.g. it refers to the distributor
base address as vgic_dist_base rather than dbase. So this patch also introduces
the vgic_dist_base/vgic_cpu_base accessors to retrieve the correct distributor
base address/CPU interface base address with either implementation.

Signed-off-by: Penny Zheng <penny.zheng@xxxxxxx>
Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxx>

I think it is more common to have the Signed-off-by of the author first. Assuming the main author is Stefano, the two tags should be switched around.

---
v2 changes
- combine all changes in patch 4-6 here
---
v3 changes
- refine comment message
- add a comment explaining how the 38 was found of "char buf[38]"
- simply map the CPU interface at the GPA vgic_v2_hw.cbase
- remove a spurious change
---
  xen/arch/arm/domain_build.c    | 11 ++++++++---
  xen/arch/arm/vgic-v2.c         | 31 ++++++++++++++++++++++---------
  xen/arch/arm/vgic/vgic-v2.c    | 31 ++++++++++++++++++++++---------
  xen/include/asm-arm/new_vgic.h | 10 ++++++++++
  xen/include/asm-arm/vgic.h     | 11 +++++++++++
  5 files changed, 73 insertions(+), 21 deletions(-)

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index b6fde74d74..c419a4b2cc 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -2230,8 +2230,13 @@ static int __init make_gicv2_domU_node(struct kernel_info *kinfo)
      int res = 0;
      __be32 reg[(GUEST_ROOT_ADDRESS_CELLS + GUEST_ROOT_SIZE_CELLS) * 2];
      __be32 *cells;
+    const struct domain *d = kinfo->d;
+    /* Placeholder for interrupt-controller@ + a 64-bit number + \0 */
+    char buf[38];
-    res = fdt_begin_node(fdt, "interrupt-controller@"__stringify(GUEST_GICD_BASE));
+    snprintf(buf, sizeof(buf), "interrupt-controller@%"PRIx64,
+             vgic_dist_base(&d->arch.vgic));
+    res = fdt_begin_node(fdt, buf);
      if ( res )
          return res;
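
(For anyone else wondering where the 38 comes from: strlen("interrupt-controller@") is 21 characters, a 64-bit address printed with %"PRIx64 is at most 16 hex digits, and one more byte is needed for the NUL terminator, so 21 + 16 + 1 = 38.)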
@@ -2253,9 +2258,9 @@ static int __init make_gicv2_domU_node(struct kernel_info *kinfo)
      cells = &reg[0];
      dt_child_set_range(&cells, GUEST_ROOT_ADDRESS_CELLS, GUEST_ROOT_SIZE_CELLS,
-                       GUEST_GICD_BASE, GUEST_GICD_SIZE);
+                       vgic_dist_base(&d->arch.vgic), GUEST_GICD_SIZE);
      dt_child_set_range(&cells, GUEST_ROOT_ADDRESS_CELLS, GUEST_ROOT_SIZE_CELLS,
-                       GUEST_GICC_BASE, GUEST_GICC_SIZE);
+                       vgic_cpu_base(&d->arch.vgic), GUEST_GICC_SIZE);
res = fdt_property(fdt, "reg", reg, sizeof(reg));
      if (res)
diff --git a/xen/arch/arm/vgic-v2.c b/xen/arch/arm/vgic-v2.c
index 589c033eda..6f5492e30e 100644
--- a/xen/arch/arm/vgic-v2.c
+++ b/xen/arch/arm/vgic-v2.c
@@ -654,13 +654,10 @@ static int vgic_v2_vcpu_init(struct vcpu *v)
  static int vgic_v2_domain_init(struct domain *d)
  {
      int ret;
-    paddr_t cbase, csize;
+    paddr_t csize;
      paddr_t vbase;
-    /*
-     * The hardware domain gets the hardware address.
-     * Guests get the virtual platform layout.
-     */
+    /* The hardware domain gets the hardware address. */

I would prefer if this comment were a summary of the if/else if/else. That would make it easier to understand why we need a different path for the hwdom
and direct-mapped domains.

"
The hardware domain and direct-mapped domains get the hardware address. We have to handle them separately because the hwdom is re-using the same Device-Tree as the host (see more details below).

Other domains get the virtual platform layout.
"

      if ( is_hardware_domain(d) )
      {
          d->arch.vgic.dbase = vgic_v2_hw.dbase;
@@ -671,10 +668,26 @@ static int vgic_v2_domain_init(struct domain *d)
           * Note that we assume the size of the CPU interface is always
           * aligned to PAGE_SIZE.
           */
-        cbase = vgic_v2_hw.cbase;
+        d->arch.vgic.cbase = vgic_v2_hw.cbase;
          csize = vgic_v2_hw.csize;
          vbase = vgic_v2_hw.vbase;
      }
+    else if ( is_domain_direct_mapped(d) )
+    {
+        /*
+         * For all the direct-mapped domains other than the hardware domain,
+         * we only map an 8kB CPU interface but we make sure it is at a
+         * location occupied by the physical GIC in the host device tree.
+         *
+         * We need to add an offset to the virtual CPU interface base
+         * address when the GIC is aliased to get an 8kB contiguous
+         * region.
+         */
+        d->arch.vgic.dbase = vgic_v2_hw.dbase;
+        d->arch.vgic.cbase = vgic_v2_hw.cbase;
+        csize = GUEST_GICC_SIZE;
+        vbase = vgic_v2_hw.vbase + vgic_v2_hw.aliased_offset;
+    }
      else
      {
          d->arch.vgic.dbase = GUEST_GICD_BASE;
@@ -685,7 +698,7 @@ static int vgic_v2_domain_init(struct domain *d)
           * region.
           */
          BUILD_BUG_ON(GUEST_GICC_SIZE != SZ_8K);
-        cbase = GUEST_GICC_BASE;
+        d->arch.vgic.cbase = GUEST_GICC_BASE;
          csize = GUEST_GICC_SIZE;
          vbase = vgic_v2_hw.vbase + vgic_v2_hw.aliased_offset;
      }
@@ -694,8 +707,8 @@ static int vgic_v2_domain_init(struct domain *d)
       * Map the gic virtual cpu interface in the gic cpu interface
       * region of the guest.
       */
-    ret = map_mmio_regions(d, gaddr_to_gfn(cbase), csize / PAGE_SIZE,
-                           maddr_to_mfn(vbase));
+    ret = map_mmio_regions(d, gaddr_to_gfn(d->arch.vgic.cbase),
+                           csize / PAGE_SIZE, maddr_to_mfn(vbase));
      if ( ret )
          return ret;
diff --git a/xen/arch/arm/vgic/vgic-v2.c b/xen/arch/arm/vgic/vgic-v2.c
index b5ba4ace87..63d0f03688 100644
--- a/xen/arch/arm/vgic/vgic-v2.c
+++ b/xen/arch/arm/vgic/vgic-v2.c
@@ -258,14 +258,11 @@ void vgic_v2_enable(struct vcpu *vcpu)
  int vgic_v2_map_resources(struct domain *d)
  {
      struct vgic_dist *dist = &d->arch.vgic;
-    paddr_t cbase, csize;
+    paddr_t csize;
      paddr_t vbase;
      int ret;
-    /*
-     * The hardware domain gets the hardware address.
-     * Guests get the virtual platform layout.
-     */
+    /* The hardware domain gets the hardware address. */

Same as above. You could simply copy/paste the comment.

      if ( is_hardware_domain(d) )
      {
          d->arch.vgic.vgic_dist_base = gic_v2_hw_data.dbase;
@@ -276,10 +273,26 @@ int vgic_v2_map_resources(struct domain *d)
           * Note that we assume the size of the CPU interface is always
           * aligned to PAGE_SIZE.
           */
-        cbase = gic_v2_hw_data.cbase;
+        d->arch.vgic.vgic_cpu_base = gic_v2_hw_data.cbase;
          csize = gic_v2_hw_data.csize;
          vbase = gic_v2_hw_data.vbase;
      }
+    else if ( is_domain_direct_mapped(d) )
+    {
+        d->arch.vgic.vgic_dist_base = gic_v2_hw_data.dbase;
+        /*
+         * For all the direct-mapped domains other than the hardware domain,
+         * we only map an 8kB CPU interface but we make sure it is at a location
+         * occupied by the physical GIC in the host device tree.
+         *
+         * We need to add an offset to the virtual CPU interface base
+         * address when the GIC is aliased to get an 8kB contiguous
+         * region.
+         */
+        d->arch.vgic.vgic_cpu_base = gic_v2_hw_data.cbase;
+        csize = GUEST_GICC_SIZE;
+        vbase = gic_v2_hw_data.vbase + gic_v2_hw_data.aliased_offset;
+    }
      else
      {
          d->arch.vgic.vgic_dist_base = GUEST_GICD_BASE;
@@ -290,7 +303,7 @@ int vgic_v2_map_resources(struct domain *d)
           * region.
           */
          BUILD_BUG_ON(GUEST_GICC_SIZE != SZ_8K);
-        cbase = GUEST_GICC_BASE;
+        d->arch.vgic.vgic_cpu_base = GUEST_GICC_BASE;
          csize = GUEST_GICC_SIZE;
          vbase = gic_v2_hw_data.vbase + gic_v2_hw_data.aliased_offset;
      }
@@ -308,8 +321,8 @@ int vgic_v2_map_resources(struct domain *d)
       * Map the gic virtual cpu interface in the gic cpu interface
       * region of the guest.
       */
-    ret = map_mmio_regions(d, gaddr_to_gfn(cbase), csize / PAGE_SIZE,
-                           maddr_to_mfn(vbase));
+    ret = map_mmio_regions(d, gaddr_to_gfn(d->arch.vgic.vgic_cpu_base),
+                           csize / PAGE_SIZE, maddr_to_mfn(vbase));
      if ( ret )
      {
          gdprintk(XENLOG_ERR, "Unable to remap VGIC CPU to VCPU\n");
diff --git a/xen/include/asm-arm/new_vgic.h b/xen/include/asm-arm/new_vgic.h
index 97d622bff6..ab57fcd91d 100644
--- a/xen/include/asm-arm/new_vgic.h
+++ b/xen/include/asm-arm/new_vgic.h
@@ -186,6 +186,16 @@ struct vgic_cpu {
      uint32_t num_id_bits;
  };
+static inline paddr_t vgic_cpu_base(const struct vgic_dist *vgic)
+{
+    return vgic->vgic_cpu_base;
+}
+
+static inline paddr_t vgic_dist_base(const struct vgic_dist *vgic)
+{
+    return vgic->vgic_dist_base;
+}
+
  #endif /* __ASM_ARM_NEW_VGIC_H */
/*
diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
index e69a59063a..a81a06c711 100644
--- a/xen/include/asm-arm/vgic.h
+++ b/xen/include/asm-arm/vgic.h
@@ -152,6 +152,7 @@ struct vgic_dist {
      struct pending_irq *pending_irqs;
      /* Base address for guest GIC */
      paddr_t dbase; /* Distributor base address */
+    paddr_t cbase; /* CPU interface base address */
  #ifdef CONFIG_GICV3
      /* GIC V3 addressing */
      /* List of contiguous occupied by the redistributors */
@@ -271,6 +272,16 @@ static inline int REG_RANK_NR(int b, uint32_t n)
  enum gic_sgi_mode;

+static inline paddr_t vgic_cpu_base(const struct vgic_dist *vgic)
+{
+    return vgic->cbase;
+}
+
+static inline paddr_t vgic_dist_base(const struct vgic_dist *vgic)
+{
+    return vgic->dbase;
+}
+
  /*
   * Offset of GICD_<FOO><n> with its rank, for GICD_<FOO> size <s> with
   * <b>-bits-per-interrupt.
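
As a side note, since both vgic.h and new_vgic.h now expose the same accessors, common code can stay agnostic of which vGIC implementation is built in. A minimal illustration (not part of the patch, just to show the intent, assuming a function that already has the domain d in scope):

    const struct vgic_dist *vgic = &d->arch.vgic;

    /* Same call regardless of CONFIG_NEW_VGIC */
    printk("GICD at %"PRIpaddr", GICC at %"PRIpaddr"\n",
           vgic_dist_base(vgic), vgic_cpu_base(vgic));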


Cheers,

--
Julien Grall