[Xen-devel] [PATCH v4 34/39] arm/p2m: Add HVMOP_altp2m_change_gfn

This commit adds the functionality to change the mfn mappings of specified
gfns in altp2m views. This mechanism can be used in the context of VMI
(virtual machine introspection), e.g., to establish stealthy debugging.
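
For illustration, a minimal toolstack-side sketch of how this operation
might be driven through libxc (assuming the xc_altp2m_* wrappers behave as
in the existing x86 altp2m support; domid and all gfn values below are
hypothetical):

    #include <xenctrl.h>

    /* Remap old_gfn to a shadow frame in a new altp2m view, switch to
     * that view, and finally reset the entry again. */
    static int remap_example(xc_interface *xch, uint32_t domid)
    {
        uint16_t view_id;
        xen_pfn_t old_gfn = 0x80345;    /* hypothetical guest frame */
        xen_pfn_t shadow_gfn = 0x90000; /* hypothetical shadow frame */
        int rc;

        rc = xc_altp2m_set_domain_state(xch, domid, true);
        if ( rc )
            return rc;

        rc = xc_altp2m_create_view(xch, domid, XENMEM_access_rwx, &view_id);
        if ( rc )
            return rc;

        /* Change the mapping in the altp2m view only; the host p2m
         * (and all other views) remain untouched. */
        rc = xc_altp2m_change_gfn(xch, domid, view_id, old_gfn, shadow_gfn);
        if ( rc )
            return rc;

        rc = xc_altp2m_switch_to_view(xch, domid, view_id);
        if ( rc )
            return rc;

        /* new_gfn == (xen_pfn_t)-1 (INVALID_GFN) resets the entry. */
        return xc_altp2m_change_gfn(xch, domid, view_id, old_gfn,
                                    (xen_pfn_t)-1);
    }

Passing INVALID_GFN as new_gfn is exactly the reset path handled at the top
of altp2m_change_gfn below.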

Signed-off-by: Sergej Proskurin <proskurin@xxxxxxxxxxxxx>
---
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Julien Grall <julien.grall@xxxxxxx>
---
v3: Moved the altp2m_lock to guard access to d->arch.altp2m_vttbr[idx]
    in altp2m_change_gfn.

    Locked hp2m to prevent hp2m entries from being modified while the
    function "altp2m_change_gfn" is active.

    Removed setting ap2m->mem_access_enabled in "altp2m_change_gfn", as
    we do not need to explicitly split pages at this point.

    Extended the checks so that gfns can be changed in p2m_ram_(rw|ro)
    memory only.

    Moved the function "remove_altp2m_entry" out of this commit.

v4: Cosmetic fixes.

    Moved the initialization of the ap2m pointer after having checked
    that the altp2m index and the associated altp2m view are valid.

    Use the functions "p2m_(set|get)_entry" instead of the helpers
    "p2m_lookup_attr", "remove_altp2m_entry", and "modify_altp2m_entry".

    Removed the call to altp2m_lock in "altp2m_change_gfn" as it is
    sufficient to read lock the host's p2m and write lock the indexed
    altp2m.

    We make sure that we do not remove a superpage by mistake if the
    user requests a specific gfn (see the alignment sketch after these
    notes).

    Removed the memaccess-related comment, as (i) memaccess is handled
    by "p2m_set_entry" and (ii) we always map only one page, and
    "p2m_set_entry" can split superpages if required.
---
 xen/arch/arm/altp2m.c        | 81 ++++++++++++++++++++++++++++++++++++++++++++
 xen/arch/arm/hvm.c           |  7 +++-
 xen/include/asm-arm/altp2m.h |  6 ++++
 3 files changed, 93 insertions(+), 1 deletion(-)

diff --git a/xen/arch/arm/altp2m.c b/xen/arch/arm/altp2m.c
index fd455bdbfc..37820e7b2a 100644
--- a/xen/arch/arm/altp2m.c
+++ b/xen/arch/arm/altp2m.c
@@ -305,6 +305,87 @@ out:
     return rc;
 }
 
+int altp2m_change_gfn(struct domain *d,
+                      unsigned int idx,
+                      gfn_t old_gfn,
+                      gfn_t new_gfn)
+{
+    struct p2m_domain *hp2m, *ap2m;
+    mfn_t mfn;
+    p2m_access_t p2ma;
+    p2m_type_t p2mt;
+    unsigned int page_order;
+    int rc = -EINVAL;
+
+    hp2m = p2m_get_hostp2m(d);
+
+    if ( idx >= MAX_ALTP2M || d->arch.altp2m_p2m[idx] == NULL )
+        return rc;
+
+    ap2m = d->arch.altp2m_p2m[idx];
+
+    p2m_read_lock(hp2m);
+    p2m_write_lock(ap2m);
+
+    mfn = p2m_get_entry(ap2m, old_gfn, &p2mt, NULL, NULL);
+
+    /* Check whether the page needs to be reset. */
+    if ( gfn_eq(new_gfn, INVALID_GFN) )
+    {
+        /* If mfn is mapped by old_gfn, remove old_gfn from the altp2m table. */
+        if ( !mfn_eq(mfn, INVALID_MFN) )
+            rc = p2m_set_entry(ap2m, old_gfn, (1UL << THIRD_ORDER), INVALID_MFN,
+                               p2m_invalid, p2m_access_rwx);
+
+        goto out;
+    }
+
+    /* Check hostp2m if no valid entry in altp2m present. */
+    if ( mfn_eq(mfn, INVALID_MFN) )
+    {
+        mfn = p2m_get_entry(hp2m, old_gfn, &p2mt, &p2ma, &page_order);
+
+        if ( mfn_eq(mfn, INVALID_MFN) ||
+             /* Allow changing gfns in p2m_ram_(rw|ro) memory only. */
+             ((p2mt != p2m_ram_rw) && (p2mt != p2m_ram_ro)) )
+            goto out;
+
+        /* If this is a superpage, copy that first. */
+        if ( page_order != THIRD_ORDER )
+        {
+            /* Align the old_gfn and mfn to the given page order. */
+            old_gfn = _gfn(gfn_x(old_gfn) & ~((1UL << page_order) - 1));
+            mfn = _mfn(mfn_x(mfn) & ~((1UL << page_order) - 1));
+
+            if ( p2m_set_entry(ap2m, old_gfn, (1UL << page_order), mfn, p2mt, p2ma) )
+                goto out;
+        }
+    }
+
+    mfn = p2m_get_entry(ap2m, new_gfn, &p2mt, &p2ma, NULL);
+
+    /* If new_gfn is not part of altp2m, get the mapping information from hp2m */
+    if ( mfn_eq(mfn, INVALID_MFN) )
+        mfn = p2m_get_entry(hp2m, new_gfn, &p2mt, &p2ma, NULL);
+
+    if ( mfn_eq(mfn, INVALID_MFN) ||
+         /* Allow changing gfns in p2m_ram_(rw|ro) memory only. */
+         ((p2mt != p2m_ram_rw) && (p2mt != p2m_ram_ro)) )
+        goto out;
+
+    if ( p2m_set_entry(ap2m, old_gfn, (1UL << THIRD_ORDER), mfn, p2mt, p2ma) )
+        goto out;
+
+    rc = 0;
+
+out:
+    p2m_write_unlock(ap2m);
+    p2m_read_unlock(hp2m);
+
+    return rc;
+}
+
+
 static void altp2m_vcpu_reset(struct vcpu *v)
 {
     v->arch.ap2m_idx = INVALID_ALTP2M;
diff --git a/xen/arch/arm/hvm.c b/xen/arch/arm/hvm.c
index 7e91f2436d..8cf6db24a6 100644
--- a/xen/arch/arm/hvm.c
+++ b/xen/arch/arm/hvm.c
@@ -148,7 +148,12 @@ static int do_altp2m_op(XEN_GUEST_HANDLE_PARAM(void) arg)
         break;
 
     case HVMOP_altp2m_change_gfn:
-        rc = -EOPNOTSUPP;
+        if ( a.u.change_gfn.pad1 || a.u.change_gfn.pad2 )
+            rc = -EINVAL;
+        else
+            rc = altp2m_change_gfn(d, a.u.change_gfn.view,
+                                   _gfn(a.u.change_gfn.old_gfn),
+                                   _gfn(a.u.change_gfn.new_gfn));
         break;
     }
 
diff --git a/xen/include/asm-arm/altp2m.h b/xen/include/asm-arm/altp2m.h
index 4cdca63f01..f5cf560371 100644
--- a/xen/include/asm-arm/altp2m.h
+++ b/xen/include/asm-arm/altp2m.h
@@ -93,4 +93,10 @@ int altp2m_propagate_change(struct domain *d,
                             p2m_type_t p2mt,
                             p2m_access_t p2ma);
 
+/* Change a gfn->mfn mapping */
+int altp2m_change_gfn(struct domain *d,
+                      unsigned int idx,
+                      gfn_t old_gfn,
+                      gfn_t new_gfn);
+
 #endif /* __ASM_ARM_ALTP2M_H */
-- 
2.13.3
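
For completeness, a sketch of how the hypercall argument validated by the
hvm.c hunk above might be filled in directly (field names follow the public
xen_hvm_altp2m_op layout already used by the x86 implementation; the exact
header guards and the width of the domain field should be treated as
assumptions):

    #include <stdint.h>
    #include <string.h>
    #include <xen/hvm/hvm_op.h>  /* public Xen header */

    /* do_altp2m_op() rejects HVMOP_altp2m_change_gfn requests whose
     * pad1/pad2 fields are non-zero, so zero the whole struct first. */
    static void fill_change_gfn(struct xen_hvm_altp2m_op *op, uint32_t domid,
                                uint16_t view, uint64_t old_gfn,
                                uint64_t new_gfn)
    {
        memset(op, 0, sizeof(*op));            /* keeps pad1/pad2 zero */
        op->version = HVMOP_ALTP2M_INTERFACE_VERSION;
        op->cmd = HVMOP_altp2m_change_gfn;
        op->domain = domid;
        op->u.change_gfn.view = view;
        op->u.change_gfn.old_gfn = old_gfn;
        op->u.change_gfn.new_gfn = new_gfn;    /* ~0 (INVALID_GFN) resets */
    }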

