
[Xen-devel] [PATCH V15 2/9] xen/arm: Allow hypervisor access to mem_access protected pages



The hypervisor may use the MMU to verify that the given guest has read/write
access to a given page during hypercalls. As we may have custom mem_access
permissions set on these pages, we do a software-based type check in case
the MMU-based approach failed, but only if mem_access_enabled is set.

These memory accesses are not forwarded to the mem_event listener. Accesses
performed by the hypervisor are currently not part of the mem_access scheme.
This behaviour is consistent with the x86 side.
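
For context, the fallback is reached from the hypercall copy helpers. Below is
a minimal sketch of such a caller, modelled on xen/arch/arm/guestcopy.c; the
function name and body are illustrative only, not part of this patch:

    static unsigned long copy_to_guest_sketch(vaddr_t gva, void *buf,
                                              unsigned int len)
    {
        /*
         * get_page_from_gva() tries the MMU-based lookup first; with this
         * patch it falls back to the software-based mem_access check
         * before giving up.
         */
        struct page_info *page = get_page_from_gva(current->domain, gva,
                                                   GV2M_WRITE);

        if ( page == NULL )
            return len; /* nothing copied */

        /* ... map the page, copy buf into it, unmap ... */

        put_page(page);
        return 0;
    }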

Signed-off-by: Tamas K Lengyel <tklengyel@xxxxxxxxxxxxx>
Reviewed-by: Julien Grall <julien.grall@xxxxxxxxxx>
---
v15: - Make p2m_lookup a locking-wrapper around non-locking __p2m_lookup
v14: - Move software-based lookup into p2m, add comments and clean it up a bit
     - Only page type allowed is rw
     - Extend gva_to_ipa to take flags input for lookup validation
v12: - Check for mfn_valid as well.
---
 xen/arch/arm/p2m.c               | 118 +++++++++++++++++++++++++++++++++++++--
 xen/arch/arm/traps.c             |   2 +-
 xen/include/asm-arm/arm32/page.h |   7 ++-
 xen/include/asm-arm/arm64/page.h |   7 ++-
 xen/include/asm-arm/page.h       |   4 +-
 5 files changed, 127 insertions(+), 11 deletions(-)
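
For quick reference, the stage-1 translation operation that gva_to_ipa_par()
now issues for each lookup flag (as implemented in the page.h hunks below):

    flags with GV2M_WRITE set:   arm32 ATS1CPW  /  arm64 "at s1e1w"
    otherwise (GV2M_READ):       arm32 ATS1CPR  /  arm64 "at s1e1r"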

diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 137e5a0..f62d283 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -139,7 +139,7 @@ void flush_tlb_domain(struct domain *d)
  * There are no processor functions to do a stage 2 only lookup therefore we
  * do a software walk.
  */
-paddr_t p2m_lookup(struct domain *d, paddr_t paddr, p2m_type_t *t)
+static paddr_t __p2m_lookup(struct domain *d, paddr_t paddr, p2m_type_t *t)
 {
     struct p2m_domain *p2m = &d->arch.p2m;
     const unsigned int offsets[4] = {
@@ -179,8 +179,6 @@ paddr_t p2m_lookup(struct domain *d, paddr_t paddr, p2m_type_t *t)
     else
         root_table = 0;
 
-    spin_lock(&p2m->lock);
-
     map = __map_domain_page(p2m->root + root_table);
 
     ASSERT(P2M_ROOT_LEVEL < 4);
@@ -215,11 +213,22 @@ paddr_t p2m_lookup(struct domain *d, paddr_t paddr, p2m_type_t *t)
         *t = pte.p2m.type;
     }
 
-    spin_unlock(&p2m->lock);
 err:
     return maddr;
 }
 
+paddr_t p2m_lookup(struct domain *d, paddr_t paddr, p2m_type_t *t)
+{
+    paddr_t ret;
+    struct p2m_domain *p2m = &d->arch.p2m;
+
+    spin_lock(&p2m->lock);
+    ret = __p2m_lookup(d, paddr, t);
+    spin_unlock(&p2m->lock);
+
+    return ret;
+}
+
 int guest_physmap_mark_populate_on_demand(struct domain *d,
                                           unsigned long gfn,
                                           unsigned int order)
@@ -1168,6 +1177,103 @@ unsigned long gmfn_to_mfn(struct domain *d, unsigned long gpfn)
     return p >> PAGE_SHIFT;
 }
 
+/*
+ * If mem_access is in use it might have been the reason why get_page_from_gva
+ * failed to fetch the page, as it uses the MMU for permission checking.
+ * Only in these cases do we perform a software-based type check and fetch
+ * the page if we indeed found a conflicting mem_access setting.
+ */
+static struct page_info*
+p2m_mem_access_check_and_get_page(vaddr_t gva, unsigned long flag)
+{
+    long rc;
+    paddr_t ipa;
+    unsigned long maddr;
+    unsigned long mfn;
+    xenmem_access_t xma;
+    p2m_type_t t;
+    struct page_info *page = NULL;
+
+    rc = gva_to_ipa(gva, &ipa, flag);
+    if ( rc < 0 )
+        goto err;
+
+    /*
+     * We do this check first as it is faster in the default case, where no
+     * special permission is set on the page.
+     */
+    rc = p2m_get_mem_access(current->domain, paddr_to_pfn(ipa), &xma);
+    if ( rc < 0 )
+        goto err;
+
+    /* Let's check if mem_access limited the access. */
+    switch ( xma )
+    {
+    default:
+    case XENMEM_access_rwx:
+    case XENMEM_access_rw:
+        /*
+         * If mem_access contains no rw perm restrictions at all then the original
+         * fault was correct.
+         */
+        goto err;
+    case XENMEM_access_n2rwx:
+    case XENMEM_access_n:
+    case XENMEM_access_x:
+        /*
+         * If no r/w is permitted by mem_access, this was a fault caused by mem_access.
+         */
+        break;
+    case XENMEM_access_wx:
+    case XENMEM_access_w:
+        /*
+         * If this was a read then it was because of mem_access, but if it was
+         * a write then the original get_page_from_gva fault was correct.
+         */
+        if ( flag == GV2M_READ )
+            break;
+        else
+            goto err;
+    case XENMEM_access_rx2rw:
+    case XENMEM_access_rx:
+    case XENMEM_access_r:
+        /*
+         * If this was a write then it was because of mem_access, but if it was
+         * a read then the original get_page_from_gva fault was correct.
+         */
+        if ( flag == GV2M_WRITE )
+            break;
+        else
+            goto err;
+    }
+
+    /*
+     * We had a mem_access permission limiting the access, but the page type
+     * could also be limiting, so we need to check that as well.
+     */
+    maddr = __p2m_lookup(current->domain, ipa, &t);
+    if ( maddr == INVALID_PADDR )
+        goto err;
+
+    mfn = maddr >> PAGE_SHIFT;
+    if ( !mfn_valid(mfn) )
+        goto err;
+
+    /*
+     * Base type doesn't allow r/w
+     */
+    if ( t != p2m_ram_rw )
+        goto err;
+
+    page = mfn_to_page(mfn);
+
+    if ( unlikely(!get_page(page, current->domain)) )
+        page = NULL;
+
+err:
+    return page;
+}
+
 struct page_info *get_page_from_gva(struct domain *d, vaddr_t va,
                                     unsigned long flags)
 {
@@ -1208,7 +1314,11 @@ struct page_info *get_page_from_gva(struct domain *d, vaddr_t va,
         page = NULL;
 
 err:
+    if ( !page && p2m->mem_access_enabled )
+        page = p2m_mem_access_check_and_get_page(va, flags);
+
     spin_unlock(&p2m->lock);
+
     return page;
 }
 
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index aaa9d93..e5d762b 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -2026,7 +2026,7 @@ static void do_trap_data_abort_guest(struct cpu_user_regs *regs,
     if (dabt.s1ptw)
         goto bad_data_abort;
 
-    rc = gva_to_ipa(info.gva, &info.gpa);
+    rc = gva_to_ipa(info.gva, &info.gpa, GV2M_READ);
     if ( rc == -EFAULT )
         goto bad_data_abort;
 
diff --git a/xen/include/asm-arm/arm32/page.h b/xen/include/asm-arm/arm32/page.h
index a07e217..bccdbfc 100644
--- a/xen/include/asm-arm/arm32/page.h
+++ b/xen/include/asm-arm/arm32/page.h
@@ -103,11 +103,14 @@ static inline uint64_t gva_to_ma_par(vaddr_t va, unsigned int flags)
     WRITE_CP64(tmp, PAR);
     return par;
 }
-static inline uint64_t gva_to_ipa_par(vaddr_t va)
+static inline uint64_t gva_to_ipa_par(vaddr_t va, unsigned int flags)
 {
     uint64_t par, tmp;
     tmp = READ_CP64(PAR);
-    WRITE_CP32(va, ATS1CPR);
+    if ( (flags & GV2M_WRITE) == GV2M_WRITE )
+        WRITE_CP32(va, ATS1CPW);
+    else
+        WRITE_CP32(va, ATS1CPR);
     isb(); /* Ensure result is available. */
     par = READ_CP64(PAR);
     WRITE_CP64(tmp, PAR);
diff --git a/xen/include/asm-arm/arm64/page.h b/xen/include/asm-arm/arm64/page.h
index e7a761d..29a32cf 100644
--- a/xen/include/asm-arm/arm64/page.h
+++ b/xen/include/asm-arm/arm64/page.h
@@ -98,11 +98,14 @@ static inline uint64_t gva_to_ma_par(vaddr_t va, unsigned int flags)
     return par;
 }
 
-static inline uint64_t gva_to_ipa_par(vaddr_t va)
+static inline uint64_t gva_to_ipa_par(vaddr_t va, unsigned int flags)
 {
     uint64_t par, tmp = READ_SYSREG64(PAR_EL1);
 
-    asm volatile ("at s1e1r, %0;" : : "r" (va));
+    if ( (flags & GV2M_WRITE) == GV2M_WRITE )
+        asm volatile ("at s1e1w, %0;" : : "r" (va));
+    else
+        asm volatile ("at s1e1r, %0;" : : "r" (va));
     isb();
     par = READ_SYSREG64(PAR_EL1);
     WRITE_SYSREG64(tmp, PAR_EL1);
diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
index 8de5e26..3d89494 100644
--- a/xen/include/asm-arm/page.h
+++ b/xen/include/asm-arm/page.h
@@ -422,9 +422,9 @@ static inline uint64_t va_to_par(vaddr_t va)
     return par;
 }
 
-static inline int gva_to_ipa(vaddr_t va, paddr_t *paddr)
+static inline int gva_to_ipa(vaddr_t va, paddr_t *paddr, unsigned int flags)
 {
-    uint64_t par = gva_to_ipa_par(va);
+    uint64_t par = gva_to_ipa_par(va, flags);
     if ( par & PAR_F )
         return -EFAULT;
     *paddr = (par & PADDR_MASK & PAGE_MASK) | ((unsigned long) va & ~PAGE_MASK);
-- 
2.1.4

