[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v3 34/38] arm/p2m: Add HVMOP_altp2m_change_gfn



Hello Sergej,

On 16/08/16 23:17, Sergej Proskurin wrote:
This commit adds the functionality to change mfn mappings for specified
gfn's in altp2m views. This mechanism can be used within the context of
VMI, e.g., to establish stealthy debugging.

Signed-off-by: Sergej Proskurin <proskurin@xxxxxxxxxxxxx>
---
Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Cc: Julien Grall <julien.grall@xxxxxxx>
---
v3: Moved the altp2m_lock to guard access to d->arch.altp2m_vttbr[idx]
    in altp2m_change_gfn.

    Locked hp2m to prevent hp2m entries from being modified while the
    function "altp2m_change_gfn" is active.

    Removed setting ap2m->mem_access_enabled in "altp2m_change_gfn", as
    we do not need explicitly splitting pages at this point.

    Extended checks allowing to change gfn's in p2m_ram_(rw|ro) memory
    only.

    Moved the function "remove_altp2m_entry" out of this commit.
---
 xen/arch/arm/altp2m.c        | 98 ++++++++++++++++++++++++++++++++++++++++++++
 xen/arch/arm/hvm.c           |  7 +++-
 xen/include/asm-arm/altp2m.h |  6 +++
 3 files changed, 110 insertions(+), 1 deletion(-)

diff --git a/xen/arch/arm/altp2m.c b/xen/arch/arm/altp2m.c
index 2009bad..fa8d526 100644
--- a/xen/arch/arm/altp2m.c
+++ b/xen/arch/arm/altp2m.c
@@ -301,6 +301,104 @@ out:
     return rc;
 }

+int altp2m_change_gfn(struct domain *d,
+                      unsigned int idx,
+                      gfn_t old_gfn,
+                      gfn_t new_gfn)
+{
+    struct p2m_domain *hp2m, *ap2m;
+    mfn_t mfn;
+    p2m_access_t p2ma;
+    p2m_type_t p2mt;
+    unsigned int page_order;
+    int rc = -EINVAL;
+
+    hp2m = p2m_get_hostp2m(d);
+    ap2m = d->arch.altp2m_p2m[idx];
+
+    altp2m_lock(d);

Similar comment as on a previous patch: I am not sure I understand why you lock using altp2m_lock and not p2m_write_lock(ap2m).

The latter would make more sense as it will limit the number of TLB flushes, because all the changes will be done in a batch.

+    p2m_read_lock(hp2m);
+
+    if ( idx >= MAX_ALTP2M || d->arch.altp2m_p2m[idx] == NULL )
+        goto out;
+
+    mfn = p2m_lookup_attr(ap2m, old_gfn, &p2mt, NULL, &page_order);
+
+    /* Check whether the page needs to be reset. */
+    if ( gfn_eq(new_gfn, INVALID_GFN) )
+    {
+        /* If mfn is mapped by old_gfn, remove old_gfn from the altp2m table. 
*/
+        if ( !mfn_eq(mfn, INVALID_MFN) )
+        {
+            rc = remove_altp2m_entry(ap2m, old_gfn, mfn, page_order);

From my understanding, altp2m_change_gfn is working on page granularity. So here, you would remove a superpage even if the user requested to remove a specific gfn.

+            if ( rc )
+            {
+                rc = -EINVAL;
+                goto out;
+            }
+        }
+
+        rc = 0;
+        goto out;
+    }
+
+    /* Check hostp2m if no valid entry in altp2m present. */
+    if ( mfn_eq(mfn, INVALID_MFN) )
+    {
+        mfn = p2m_lookup_attr(hp2m, old_gfn, &p2mt, &p2ma, &page_order);
+        if ( mfn_eq(mfn, INVALID_MFN) ||
+             /* Allow changing gfn's in p2m_ram_(rw|ro) memory only. */
+             ((p2mt != p2m_ram_rw) && (p2mt != p2m_ram_ro)) )
+        {
+            rc = -EINVAL;
+            goto out;
+        }
+
+        /* If this is a superpage, copy that first. */
+        if ( page_order != THIRD_ORDER )
+        {
+            rc = modify_altp2m_entry(ap2m, old_gfn, mfn, p2mt, p2ma, 
page_order);
+            if ( rc )
+            {
+                rc = -EINVAL;
+                goto out;
+            }
+        }
+    }
+
+    mfn = p2m_lookup_attr(ap2m, new_gfn, &p2mt, &p2ma, &page_order);
+
+    /* If new_gfn is not part of altp2m, get the mapping information from hp2m 
*/
+    if ( mfn_eq(mfn, INVALID_MFN) )
+        mfn = p2m_lookup_attr(hp2m, new_gfn, &p2mt, &p2ma, &page_order);
+
+    if ( mfn_eq(mfn, INVALID_MFN) ||
+         /* Allow changing gfn's in p2m_ram_(rw|ro) memory only. */
+         ((p2mt != p2m_ram_rw) && (p2mt != p2m_ram_ro)) )
+    {
+        rc = -EINVAL;
+        goto out;
+    }
+
+    /* Set mem access attributes - currently supporting only one (4K) page. */

I don't understand why memaccess matters here. p2m_set_entry handles the memaccess case for you.

+    page_order = THIRD_ORDER;
+    rc = modify_altp2m_entry(ap2m, old_gfn, mfn, p2mt, p2ma, page_order);
+    if ( rc )
+    {
+        rc = -EINVAL;
+        goto out;
+    }
+
+    rc = 0;
+
+out:
+    p2m_read_unlock(hp2m);
+    altp2m_unlock(d);
+
+    return rc;
+}
+
+
 static void altp2m_vcpu_reset(struct vcpu *v)
 {
     struct altp2mvcpu *av = &altp2m_vcpu(v);
diff --git a/xen/arch/arm/hvm.c b/xen/arch/arm/hvm.c
index df78893..c754ad1 100644
--- a/xen/arch/arm/hvm.c
+++ b/xen/arch/arm/hvm.c
@@ -145,7 +145,12 @@ static int do_altp2m_op(XEN_GUEST_HANDLE_PARAM(void) arg)
         break;

     case HVMOP_altp2m_change_gfn:
-        rc = -EOPNOTSUPP;
+        if ( a.u.change_gfn.pad1 || a.u.change_gfn.pad2 )
+            rc = -EINVAL;
+        else
+            rc = altp2m_change_gfn(d, a.u.change_gfn.view,
+                                   _gfn(a.u.change_gfn.old_gfn),
+                                   _gfn(a.u.change_gfn.new_gfn));
         break;
     }

diff --git a/xen/include/asm-arm/altp2m.h b/xen/include/asm-arm/altp2m.h
index 8e40c45..8b459bf 100644
--- a/xen/include/asm-arm/altp2m.h
+++ b/xen/include/asm-arm/altp2m.h
@@ -97,4 +97,10 @@ int altp2m_propagate_change(struct domain *d,
                             p2m_type_t p2mt,
                             p2m_access_t p2ma);

+/* Change a gfn->mfn mapping */
+int altp2m_change_gfn(struct domain *d,
+                      unsigned int idx,
+                      gfn_t old_gfn,
+                      gfn_t new_gfn);
+
 #endif /* __ASM_ARM_ALTP2M_H */


Regards,

--
Julien Grall

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.