
[Xen-devel] [PATCH v5 11/12] arm/mem_access: Add short-descriptor based gpt



This commit adds functionality to walk the guest's page tables using the
short-descriptor translation table format for both ARMv7 and ARMv8. The
implementation is based on ARM DDI 0487B.a J1-6002 and ARM DDI 0406C.b
B3-1506.
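
For readers unfamiliar with the format, the short-descriptor scheme splits a
32-bit VA into an L1 table index, an L2 table index, and a page offset. A
minimal sketch of that split (hypothetical helpers, not part of this patch;
assuming 4 KiB small pages and TTBCR.N == 0):

    /* Hypothetical illustration only -- not part of this patch. */
    #include <stdint.h>

    static inline uint32_t sd_l1_index(uint32_t va)
    {
        return (va >> 20) & 0xfff;    /* VA[31:20]: up to 4096 L1 entries */
    }

    static inline uint32_t sd_l2_index(uint32_t va)
    {
        return (va >> 12) & 0xff;     /* VA[19:12]: 256 L2 entries */
    }

    static inline uint32_t sd_small_page_offset(uint32_t va)
    {
        return va & 0xfff;            /* VA[11:0]: offset into a 4 KiB page */
    }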

Signed-off-by: Sergej Proskurin <proskurin@xxxxxxxxxxxxx>
---
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Julien Grall <julien.grall@xxxxxxx>
---
v3: Move the implementation to ./xen/arch/arm/guest_copy.c.

    Use defines instead of hardcoded values.

    Cosmetic fixes & added more comments.

v4: Adjusted the names of short-descriptor data-types.

    Adapt the function to the new parameter of type "struct vcpu *".

    Cosmetic fixes.

v5: Make use of the function vgic_access_guest_memory to read page table
    entries in guest memory. At the same time, eliminate the offsets
    array, as there is no need for it; instead, we apply the associated
    masks to compute the GVA offsets directly in the code.

    Use GENMASK to compute complex masks to ease code readability.

    Use the type uint32_t for the TTBR register.

    Make use of L2DESC_{SMALL|LARGE}_PAGE_SHIFT instead of
    PAGE_SHIFT_{4K|64K} macros.

    Remove {L1|L2}DESC_* defines from this commit.

    Add comments and cosmetic fixes.
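
    As a brief illustration of the GENMASK-based L1 descriptor address
    computation mentioned above (a hypothetical, self-contained sketch,
    not part of the patch; SD_GENMASK mimics Xen's GENMASK(h, l), i.e.
    bits h..l set):

        /* Hypothetical illustration only -- not part of this patch. */
        #include <stdint.h>

        /* Mimics Xen's GENMASK(h, l): 32-bit mask with bits h..l set. */
        #define SD_GENMASK(h, l) ((~0U << (l)) & (~0U >> (31 - (h))))

        /* Address of the L1 descriptor for the initial lookup, with
         * n = TTBCR.N: [ttbr<31:14-n> : gva<31-n:20> : 00]. */
        static inline uint64_t
        sd_l1_desc_addr(uint32_t ttbr, uint32_t gva, unsigned int n)
        {
            uint64_t paddr = ttbr & SD_GENMASK(31, 14 - n);

            /* gva<31-n:20> selects the L1 entry; each descriptor is 4
             * bytes wide, so shift right by 20 for the index and left
             * by 2 for the size, i.e. right by 18 overall. */
            paddr |= (gva & SD_GENMASK(31 - n, 20)) >> 18;

            return paddr;
        }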
---
 xen/arch/arm/guest_walk.c | 143 +++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 141 insertions(+), 2 deletions(-)

diff --git a/xen/arch/arm/guest_walk.c b/xen/arch/arm/guest_walk.c
index 1f41fcefe9..d2bbc28da4 100644
--- a/xen/arch/arm/guest_walk.c
+++ b/xen/arch/arm/guest_walk.c
@@ -18,6 +18,7 @@
 #include <xen/domain_page.h>
 #include <xen/sched.h>
 #include <asm/guest_walk.h>
+#include <asm/short-desc.h>
 
 /*
  * The function guest_walk_sd translates a given GVA into an IPA using the
@@ -30,8 +31,146 @@ static int guest_walk_sd(const struct vcpu *v,
                          vaddr_t gva, paddr_t *ipa,
                          unsigned int *perms)
 {
-    /* Not implemented yet. */
-    return -EFAULT;
+    int ret;
+    bool disabled = true;
+    uint32_t ttbr;
+    paddr_t mask, paddr;
+    short_desc_t pte;
+    register_t ttbcr = READ_SYSREG(TCR_EL1);
+    unsigned int level = 0, n = ttbcr & TTBCR_N_MASK;
+    struct domain *d = v->domain;
+
+    mask = GENMASK_ULL(31, (32 - n));
+
+    if ( n == 0 || !(gva & mask) )
+    {
+        /*
+         * Use TTBR0 for GVA to IPA translation.
+         *
+         * Note that on AArch32, the TTBR0_EL1 register is 32-bit wide.
+         * Nevertheless, we have to use the READ_SYSREG64 macro, as it is
+         * required for reading TTBR0_EL1.
+         */
+        ttbr = READ_SYSREG64(TTBR0_EL1);
+
+        /* If TTBCR.PD0 is set, translations using TTBR0 are disabled. */
+        disabled = ttbcr & TTBCR_PD0;
+    }
+    else
+    {
+        /*
+         * Use TTBR1 for GVA to IPA translation.
+         *
+         * Note that on AArch32, the TTBR1_EL1 register is 32-bit wide.
+         * Nevertheless, we have to use the READ_SYSREG64 macro, as it is
+         * required for reading TTBR1_EL1.
+         */
+        ttbr = READ_SYSREG64(TTBR1_EL1);
+
+        /* If TTBCR.PD1 is set, translations using TTBR1 are disabled. */
+        disabled = ttbcr & TTBCR_PD1;
+
+        /*
+         * TTBR1 translation always works like n==0 TTBR0 translation (ARM DDI
+         * 0487B.a J1-6003).
+         */
+        n = 0;
+    }
+
+    if ( disabled )
+        return -EFAULT;
+
+    /*
+     * The address of the L1 descriptor for the initial lookup has the
+     * following format: [ttbr<31:14-n>:gva<31-n:20>:00] (ARM DDI 0487B.a
+     * J1-6003). Note that the following GPA computation already considers that
+     * the first level address translation might comprise up to four
+     * consecutive pages and does not need to be page-aligned if n > 2.
+     */
+    mask = GENMASK(31, (14 - n));
+    paddr = (ttbr & mask);
+
+    mask = GENMASK((31 - n), 20);
+    paddr |= (gva & mask) >> 18;
+
+    /* Access the guest's memory to read only one PTE. */
+    ret = vgic_access_guest_memory(d, paddr, &pte, sizeof(short_desc_t), false);
+    if ( ret )
+        return -EINVAL;
+
+    switch ( pte.walk.dt )
+    {
+    case L1DESC_INVALID:
+        return -EFAULT;
+
+    case L1DESC_PAGE_TABLE:
+        level++;
+
+        /*
+         * The address of the L2 descriptor has the following format:
+         * [l1desc<31:10>:gva<19:12>:00] (ARM DDI 0487B.a J1-6004). Note that
+         * the following address computation already considers that the second
+         * level translation table does not need to be page aligned.
+         */
+        mask = GENMASK(19, 12);
+        paddr = (pte.walk.base << 10) | ((gva & mask) >> 10);
+
+        /* Access the guest's memory to read only one PTE. */
+        ret = vgic_access_guest_memory(d, paddr, &pte, sizeof(short_desc_t), false);
+        if ( ret )
+            return -EINVAL;
+
+        if ( pte.walk.dt == L2DESC_INVALID )
+            return -EFAULT;
+
+        if ( pte.pg.page ) /* Small page. */
+        {
+            mask = (1ULL << L2DESC_SMALL_PAGE_SHIFT) - 1;
+            *ipa = (pte.pg.base << L2DESC_SMALL_PAGE_SHIFT) | (gva & mask);
+
+            /* Set execute permissions associated with the small page. */
+            if ( !pte.pg.xn )
+                *perms |= GV2M_EXEC;
+        }
+        else /* Large page. */
+        {
+            mask = (1ULL << L2DESC_LARGE_PAGE_SHIFT) - 1;
+            *ipa = (pte.lpg.base << L2DESC_LARGE_PAGE_SHIFT) | (gva & mask);
+
+            /* Set execute permissions associated with the large page. */
+            if ( !pte.lpg.xn )
+                *perms |= GV2M_EXEC;
+        }
+
+        /* Set permissions so that the caller can check the flags by herself. */
+        if ( !pte.pg.ro )
+            *perms |= GV2M_WRITE;
+
+        break;
+
+    case L1DESC_SECTION:
+    case L1DESC_SECTION_PXN:
+        if ( !pte.sec.supersec ) /* Section */
+        {
+            mask = (1ULL << L1DESC_SECTION_SHIFT) - 1;
+            *ipa = (pte.sec.base << L1DESC_SECTION_SHIFT) | (gva & mask);
+        }
+        else /* Supersection */
+        {
+            mask = (1ULL << L1DESC_SUPERSECTION_SHIFT) - 1;
+            *ipa = gva & mask;
+            *ipa |= (paddr_t)(pte.supersec.base) << L1DESC_SUPERSECTION_SHIFT;
+            *ipa |= (paddr_t)(pte.supersec.extbase1) << L1DESC_SUPERSECTION_EXT_BASE1_SHIFT;
+            *ipa |= (paddr_t)(pte.supersec.extbase2) << L1DESC_SUPERSECTION_EXT_BASE2_SHIFT;
+        }
+
+        /* Set permissions so that the caller can check the flags by herself. */
+        if ( !pte.sec.ro )
+            *perms |= GV2M_WRITE;
+        if ( !pte.sec.xn )
+            *perms |= GV2M_EXEC;
+    }
+
+    return 0;
 }
 
 /*
-- 
2.13.1

