[Xen-devel] [PATCH v2 09/12] x86/altp2m: add remaining support routines.

Add the remaining routines required to support enabling the alternate
p2m functionality: the HAP nested-page-fault handling that lazily
populates an altp2m from the host p2m, the routines to create,
destroy, switch and modify alternate p2m's, and the propagation of
host p2m changes into every active altp2m.
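
For illustration, a hypothetical caller inside the hypervisor could
drive the new management routines in a sequence like the one below.
This is a sketch only -- "d" and "pfn" are assumed to be in scope and
error handling is omitted; it is not part of the patch:

    uint16_t idx;

    if ( p2m_init_next_altp2m(d, &idx) )    /* claim a free view */
    {
        /* Tighten access rights on one page in the new view. */
        p2m_set_altp2m_mem_access(d, idx, pfn, XENMEM_access_r);

        /* Move every vcpu in the domain onto the new view. */
        p2m_switch_domain_altp2m_by_id(d, idx);

        /* ... later: switch back to view 0, then tear the view down
         * (p2m_destroy_altp2m_by_id() refuses a view still in use). */
        p2m_switch_domain_altp2m_by_id(d, 0);
        p2m_destroy_altp2m_by_id(d, idx);
    }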

Signed-off-by: Ed White <edmund.h.white@xxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c              |  60 +++++-
 xen/arch/x86/mm/hap/Makefile        |   1 +
 xen/arch/x86/mm/hap/altp2m_hap.c    | 103 +++++++++
 xen/arch/x86/mm/p2m-ept.c           |   3 +
 xen/arch/x86/mm/p2m.c               | 405 ++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/hvm/altp2mhvm.h |   4 +
 xen/include/asm-x86/p2m.h           |  33 +++
 7 files changed, 601 insertions(+), 8 deletions(-)
 create mode 100644 xen/arch/x86/mm/hap/altp2m_hap.c

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index d75c12d..b758ee1 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2786,10 +2786,11 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
     p2m_access_t p2ma;
     mfn_t mfn;
     struct vcpu *v = current;
-    struct p2m_domain *p2m;
+    struct p2m_domain *p2m, *hostp2m;
     int rc, fall_through = 0, paged = 0;
     int sharing_enomem = 0;
     vm_event_request_t *req_ptr = NULL;
+    int altp2m_active = 0;
 
     /* On Nested Virtualization, walk the guest page table.
      * If this succeeds, all is fine.
@@ -2845,15 +2846,33 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
     {
         if ( !handle_mmio_with_translation(gla, gpa >> PAGE_SHIFT, npfec) )
             hvm_inject_hw_exception(TRAP_gp_fault, 0);
-        rc = 1;
-        goto out;
+        return 1;
     }
 
-    p2m = p2m_get_hostp2m(v->domain);
-    mfn = get_gfn_type_access(p2m, gfn, &p2mt, &p2ma, 
+    altp2m_active = altp2mhvm_active(v->domain);
+
+    /* Take a lock on the host p2m speculatively, to avoid potential
+     * locking order problems later and to handle unshare etc.
+     */
+    hostp2m = p2m_get_hostp2m(v->domain);
+    mfn = get_gfn_type_access(hostp2m, gfn, &p2mt, &p2ma,
                              P2M_ALLOC | (npfec.write_access ? P2M_UNSHARE : 0),
                               NULL);
 
+    if ( altp2m_active )
+    {
+        if ( altp2mhvm_hap_nested_page_fault(v, gpa, gla, npfec, &p2m) == 1 )
+        {
+            /* entry was lazily copied from host -- retry */
+            __put_gfn(hostp2m, gfn);
+            return 1;
+        }
+
+        mfn = get_gfn_type_access(p2m, gfn, &p2mt, &p2ma, 0, NULL);
+    }
+    else
+        p2m = hostp2m;
+
     /* Check access permissions first, then handle faults */
     if ( mfn_x(mfn) != INVALID_MFN )
     {
@@ -2893,6 +2912,20 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
 
         if ( violation )
         {
+            /* Should #VE be emulated for this fault? */
+            if ( p2m_is_altp2m(p2m) && !cpu_has_vmx_virt_exceptions )
+            {
+                unsigned int sve;
+
+                p2m->get_entry_full(p2m, gfn, &p2mt, &p2ma, 0, NULL, &sve);
+
+                if ( !sve && ahvm_vcpu_emulate_ve(v) )
+                {
+                    rc = 1;
+                    goto out_put_gfn;
+                }
+            }
+
             if ( p2m_mem_access_check(gpa, gla, npfec, &req_ptr) )
             {
                 fall_through = 1;
@@ -2912,7 +2945,9 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
          (npfec.write_access &&
           (p2m_is_discard_write(p2mt) || (p2mt == p2m_mmio_write_dm))) )
     {
-        put_gfn(p2m->domain, gfn);
+        __put_gfn(p2m, gfn);
+        if ( altp2m_active )
+            __put_gfn(hostp2m, gfn);
 
         rc = 0;
         if ( unlikely(is_pvh_vcpu(v)) )
@@ -2941,6 +2976,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
     /* Spurious fault? PoD and log-dirty also take this path. */
     if ( p2m_is_ram(p2mt) )
     {
+        rc = 1;
         /*
          * Page log dirty is always done with order 0. If this mfn resides in
          * a large page, we do not change other pages type within that large
@@ -2949,9 +2985,15 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
         if ( npfec.write_access )
         {
             paging_mark_dirty(v->domain, mfn_x(mfn));
+            /* If p2m is really an altp2m, unlock here to avoid lock ordering
+             * violation when the change below is propagated from host p2m */
+            if ( altp2m_active )
+                __put_gfn(p2m, gfn);
             p2m_change_type_one(v->domain, gfn, p2m_ram_logdirty, p2m_ram_rw);
+            __put_gfn(altp2m_active ? hostp2m : p2m, gfn);
+
+            goto out;
         }
-        rc = 1;
         goto out_put_gfn;
     }
 
@@ -2961,7 +3003,9 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
     rc = fall_through;
 
 out_put_gfn:
-    put_gfn(p2m->domain, gfn);
+    __put_gfn(p2m, gfn);
+    if ( altp2m_active )
+        __put_gfn(hostp2m, gfn);
 out:
     /* All of these are delayed until we exit, since we might 
      * sleep on event ring wait queues, and we must not hold
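
Note on the hvm.c changes above: with an altp2m active the handler
holds two gfn references, one on the host p2m and one on the active
altp2m, and every exit path must drop both. A minimal sketch of the
recurring release pattern, using a hypothetical helper that is not
part of the patch:

    /* Illustrative only: the paired release used on the exit paths. */
    static void put_all_gfns(struct p2m_domain *p2m,
                             struct p2m_domain *hostp2m,
                             int altp2m_active, unsigned long gfn)
    {
        __put_gfn(p2m, gfn);
        if ( altp2m_active )
            __put_gfn(hostp2m, gfn);
    }
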
diff --git a/xen/arch/x86/mm/hap/Makefile b/xen/arch/x86/mm/hap/Makefile
index 68f2bb5..216cd90 100644
--- a/xen/arch/x86/mm/hap/Makefile
+++ b/xen/arch/x86/mm/hap/Makefile
@@ -4,6 +4,7 @@ obj-y += guest_walk_3level.o
 obj-$(x86_64) += guest_walk_4level.o
 obj-y += nested_hap.o
 obj-y += nested_ept.o
+obj-y += altp2m_hap.o
 
 guest_walk_%level.o: guest_walk.c Makefile
        $(CC) $(CFLAGS) -DGUEST_PAGING_LEVELS=$* -c $< -o $@
diff --git a/xen/arch/x86/mm/hap/altp2m_hap.c b/xen/arch/x86/mm/hap/altp2m_hap.c
new file mode 100644
index 0000000..899b636
--- /dev/null
+++ b/xen/arch/x86/mm/hap/altp2m_hap.c
@@ -0,0 +1,103 @@
+/******************************************************************************
+ * arch/x86/mm/hap/altp2m_hap.c
+ *
+ * Copyright (c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <asm/domain.h>
+#include <asm/page.h>
+#include <asm/paging.h>
+#include <asm/p2m.h>
+#include <asm/hap.h>
+#include <asm/hvm/altp2mhvm.h>
+
+#include "private.h"
+
+/* Override macros from asm/page.h to make them work with mfn_t */
+#undef mfn_valid
+#define mfn_valid(_mfn) __mfn_valid(mfn_x(_mfn))
+#undef page_to_mfn
+#define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
+
+/*
+ * If the fault is for a not present entry:
+ *     if the entry in the host p2m has a valid mfn, copy it and retry
+ *     else indicate that outer handler should handle fault
+ *
+ * If the fault is for a present entry:
+ *     indicate that outer handler should handle fault
+ */
+
+int
+altp2mhvm_hap_nested_page_fault(struct vcpu *v, paddr_t gpa,
+                                unsigned long gla, struct npfec npfec,
+                                struct p2m_domain **ap2m)
+{
+    struct p2m_domain *hp2m = p2m_get_hostp2m(v->domain);
+    p2m_type_t p2mt;
+    p2m_access_t p2ma;
+    unsigned int page_order;
+    unsigned long gfn, mask;
+    mfn_t mfn;
+    int rv;
+
+    *ap2m = p2m_get_altp2m(v);
+
+    mfn = get_gfn_type_access(*ap2m, gpa >> PAGE_SHIFT, &p2mt, &p2ma,
+                              0, &page_order);
+    __put_gfn(*ap2m, gpa >> PAGE_SHIFT);
+
+    if ( mfn_x(mfn) != INVALID_MFN )
+        return 0;
+
+    mfn = get_gfn_type_access(hp2m, gpa >> PAGE_SHIFT, &p2mt, &p2ma,
+                              0, &page_order);
+    put_gfn(hp2m->domain, gpa >> PAGE_SHIFT);
+
+    if ( mfn_x(mfn) == INVALID_MFN )
+        return 0;
+
+    p2m_lock(*ap2m);
+
+    /* If this is a superpage mapping, round down both frame numbers
+     * to the start of the superpage. */
+    mask = ~((1UL << page_order) - 1);
+    gfn = (gpa >> PAGE_SHIFT) & mask;
+    mfn = _mfn(mfn_x(mfn) & mask);
+
+    rv = p2m_set_entry(*ap2m, gfn, mfn, page_order, p2mt, p2ma);
+    p2m_unlock(*ap2m);
+
+    if ( rv )
+    {
+        gdprintk(XENLOG_ERR,
+                 "failed to set entry for %#"PRIx64" -> %#"PRIx64"\n",
+                 gpa, mfn_x(mfn));
+        domain_crash(hp2m->domain);
+    }
+
+    return 1;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
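
To make the superpage rounding above concrete: for a 2MB EPT mapping
page_order is 9, so with illustrative values (not from the patch):

    unsigned long page_order = 9;                     /* 2MB superpage */
    unsigned long mask = ~((1UL << page_order) - 1);  /* == ~0x1ffUL */
    unsigned long gfn  = 0x12345UL & mask;            /* -> 0x12200 */

Both the guest and machine frame numbers are rounded to the same
boundary, so the entry copied into the altp2m maps the whole
superpage at the correct offset.
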
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index e7719cf..4411b36 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -849,6 +849,9 @@ out:
     if ( is_epte_present(&old_entry) )
         ept_free_entry(p2m, &old_entry, target);
 
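+    /* Reflect a successful change to the host p2m in every active altp2m. */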
+    if ( rc == 0 && p2m_is_hostp2m(p2m) )
+        p2m_altp2m_propagate_change(d, gfn, mfn, order, p2mt, p2ma);
+
     return rc;
 }
 
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 389360a..588acd5 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -2041,6 +2041,411 @@ bool_t p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, uint16_t idx)
     return rc;
 }
 
+void p2m_flush_altp2m(struct domain *d)
+{
+    uint16_t i;
+
+    altp2m_lock(d);
+
+    for ( i = 0; i < MAX_ALTP2M; i++ )
+    {
+        p2m_flush_table(d->arch.altp2m_p2m[i]);
+        /* Uninit and reinit ept to force TLB shootdown */
+        ept_p2m_uninit(d->arch.altp2m_p2m[i]);
+        ept_p2m_init(d->arch.altp2m_p2m[i]);
+        d->arch.altp2m_eptp[i] = ~0ul;
+    }
+
+    altp2m_unlock(d);
+}
+
+bool_t p2m_init_altp2m_by_id(struct domain *d, uint16_t idx)
+{
+    struct p2m_domain *p2m;
+    struct ept_data *ept;
+    bool_t rc = 0;
+
+    if ( idx >= MAX_ALTP2M )
+        return rc;
+
+    altp2m_lock(d);
+
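+    /* An eptp value of ~0ul marks an unused altp2m slot. */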
+    if ( d->arch.altp2m_eptp[idx] == ~0ul )
+    {
+        p2m = d->arch.altp2m_p2m[idx];
+        p2m->min_remapped_pfn = ~0ul;
+        p2m->max_remapped_pfn = ~0ul;
+        ept = &p2m->ept;
+        ept->asr = pagetable_get_pfn(p2m_get_pagetable(p2m));
+        d->arch.altp2m_eptp[idx] = ept_get_eptp(ept);
+        rc = 1;
+    }
+
+    altp2m_unlock(d);
+    return rc;
+}
+
+bool_t p2m_init_next_altp2m(struct domain *d, uint16_t *idx)
+{
+    struct p2m_domain *p2m;
+    struct ept_data *ept;
+    bool_t rc = 0;
+    uint16_t i;
+
+    altp2m_lock(d);
+
+    for ( i = 0; i < MAX_ALTP2M; i++ )
+    {
+        if ( d->arch.altp2m_eptp[i] != ~0ul )
+            continue;
+
+        p2m = d->arch.altp2m_p2m[i];
+        p2m->min_remapped_pfn = ~0ul;
+        p2m->max_remapped_pfn = ~0ul;
+        ept = &p2m->ept;
+        ept->asr = pagetable_get_pfn(p2m_get_pagetable(p2m));
+        d->arch.altp2m_eptp[i] = ept_get_eptp(ept);
+        *idx = i;
+        rc = 1;
+
+        break;
+    }
+
+    altp2m_unlock(d);
+    return rc;
+}
+
+bool_t p2m_destroy_altp2m_by_id(struct domain *d, uint16_t idx)
+{
+    struct p2m_domain *p2m;
+    struct vcpu *curr = current;
+    struct vcpu *v;
+    bool_t rc = 0;
+
+    if ( !idx || idx >= MAX_ALTP2M )
+        return rc;
+
+    if ( curr->domain != d )
+        domain_pause(d);
+    else
+        for_each_vcpu( d, v )
+            if ( curr != v )
+                vcpu_pause(v);
+
+    altp2m_lock(d);
+
+    if ( d->arch.altp2m_eptp[idx] != ~0ul )
+    {
+        p2m = d->arch.altp2m_p2m[idx];
+
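+        /* Only destroy an altp2m that no vcpu is currently running on. */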
+        if ( !_atomic_read(p2m->active_vcpus) )
+        {
+            p2m_flush_table(d->arch.altp2m_p2m[idx]);
+            /* Uninit and reinit ept to force TLB shootdown */
+            ept_p2m_uninit(d->arch.altp2m_p2m[idx]);
+            ept_p2m_init(d->arch.altp2m_p2m[idx]);
+            d->arch.altp2m_eptp[idx] = ~0ul;
+            rc = 1;
+        }
+    }
+
+    altp2m_unlock(d);
+
+    if ( curr->domain != d )
+        domain_unpause(d);
+    else
+        for_each_vcpu( d, v )
+            if ( curr != v )
+                vcpu_unpause(v);
+
+    return rc;
+}
+
+bool_t p2m_switch_domain_altp2m_by_id(struct domain *d, uint16_t idx)
+{
+    struct vcpu *curr = current;
+    struct vcpu *v;
+    bool_t rc = 0;
+
+    if ( idx >= MAX_ALTP2M )
+        return rc;
+
+    if ( curr->domain != d )
+        domain_pause(d);
+    else
+        for_each_vcpu( d, v )
+            if ( curr != v )
+                vcpu_pause(v);
+
+    altp2m_lock(d);
+
+    if ( d->arch.altp2m_eptp[idx] != ~0ul )
+    {
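+        /* Move every vcpu onto the new view, keeping each p2m's
+         * active_vcpus count in step and reloading the vcpu's EPTP. */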
+        for_each_vcpu( d, v )
+            if ( idx != vcpu_altp2mhvm(v).p2midx )
+            {
+                atomic_dec(&p2m_get_altp2m(v)->active_vcpus);
+                vcpu_altp2mhvm(v).p2midx = idx;
+                atomic_inc(&p2m_get_altp2m(v)->active_vcpus);
+                ahvm_vcpu_update_eptp(v);
+            }
+
+        rc = 1;
+    }
+
+    altp2m_unlock(d);
+
+    if ( curr->domain != d )
+        domain_unpause(d);
+    else
+        for_each_vcpu( d, v )
+            if ( curr != v )
+                vcpu_unpause(v);
+
+    return rc;
+}
+
+bool_t p2m_set_altp2m_mem_access(struct domain *d, uint16_t idx,
+                                 unsigned long pfn, xenmem_access_t access)
+{
+    struct p2m_domain *hp2m, *ap2m;
+    p2m_access_t a, _a;
+    p2m_type_t t;
+    mfn_t mfn;
+    unsigned int page_order;
+    bool_t rc = 0;
+
+    static const p2m_access_t memaccess[] = {
+#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
+        ACCESS(n),
+        ACCESS(r),
+        ACCESS(w),
+        ACCESS(rw),
+        ACCESS(x),
+        ACCESS(rx),
+        ACCESS(wx),
+        ACCESS(rwx),
+#undef ACCESS
+    };
+
+    if ( idx >= MAX_ALTP2M || d->arch.altp2m_eptp[idx] == ~0ul )
+        return 0;
+
+    ap2m = d->arch.altp2m_p2m[idx];
+
+    switch ( access )
+    {
+    case 0 ... ARRAY_SIZE(memaccess) - 1:
+        a = memaccess[access];
+        break;
+    case XENMEM_access_default:
+        a = ap2m->default_access;
+        break;
+    default:
+        return 0;
+    }
+
+    /* If request to set default access */
+    if ( pfn == ~0ul )
+    {
+        ap2m->default_access = a;
+        return 1;
+    }
+
+    hp2m = p2m_get_hostp2m(d);
+
+    p2m_lock(ap2m);
+
+    mfn = ap2m->get_entry(ap2m, pfn, &t, &_a, 0, NULL);
+
+    /* Check host p2m if no valid entry in alternate */
+    if ( !mfn_valid(mfn) )
+    {
+        mfn = hp2m->get_entry(hp2m, pfn, &t, &_a, 0, &page_order);
+
+        if ( !mfn_valid(mfn) || t != p2m_ram_rw )
+            goto out;
+
+        /* If this is a superpage, copy that first */
+        if ( page_order != PAGE_ORDER_4K )
+        {
+            unsigned long gfn, mask;
+            mfn_t mfn2;
+
+            mask = ~((1UL << page_order) - 1);
+            gfn = pfn & mask;
+            mfn2 = _mfn(mfn_x(mfn) & mask);
+
+            if ( ap2m->set_entry(ap2m, gfn, mfn2, page_order, t, _a) )
+                goto out;
+        }
+    }
+
+    if ( !ap2m->set_entry_full(ap2m, pfn, mfn, PAGE_ORDER_4K, t, a,
+                               (current->domain != d)) )
+        rc = 1;
+
+out:
+    p2m_unlock(ap2m);
+    return rc;
+}
+
+bool_t p2m_change_altp2m_pfn(struct domain *d, uint16_t idx,
+                             unsigned long old_pfn, unsigned long new_pfn)
+{
+    struct p2m_domain *hp2m, *ap2m;
+    p2m_access_t a;
+    p2m_type_t t;
+    mfn_t mfn;
+    unsigned int page_order;
+    bool_t rc = 0;
+
+    if ( idx >= MAX_ALTP2M || d->arch.altp2m_eptp[idx] == ~0ul )
+        return 0;
+
+    hp2m = p2m_get_hostp2m(d);
+    ap2m = d->arch.altp2m_p2m[idx];
+
+    p2m_lock(ap2m);
+
+    mfn = ap2m->get_entry(ap2m, old_pfn, &t, &a, 0, NULL);
+
+    if ( new_pfn == ~0ul )
+    {
+        if ( mfn_valid(mfn) )
+            p2m_remove_page(ap2m, old_pfn, mfn_x(mfn), PAGE_ORDER_4K);
+        rc = 1;
+        goto out;
+    }
+
+    /* Check host p2m if no valid entry in alternate */
+    if ( !mfn_valid(mfn) )
+    {
+        mfn = hp2m->get_entry(hp2m, old_pfn, &t, &a, 0, &page_order);
+
+        if ( !mfn_valid(mfn) || t != p2m_ram_rw )
+            goto out;
+
+        /* If this is a superpage, copy that first */
+        if ( page_order != PAGE_ORDER_4K )
+        {
+            unsigned long gfn, mask;
+
+            mask = ~((1UL << page_order) - 1);
+            gfn = old_pfn & mask;
+            mfn = _mfn(mfn_x(mfn) & mask);
+
+            if ( ap2m->set_entry(ap2m, gfn, mfn, page_order, t, a) )
+                goto out;
+        }
+    }
+
+    mfn = ap2m->get_entry(ap2m, new_pfn, &t, &a, 0, NULL);
+
+    if ( !mfn_valid(mfn) )
+        mfn = hp2m->get_entry(hp2m, new_pfn, &t, &a, 0, NULL);
+
+    if ( !mfn_valid(mfn) || (t != p2m_ram_rw) )
+        goto out;
+
+    if ( !ap2m->set_entry_full(ap2m, old_pfn, mfn, PAGE_ORDER_4K, t, a,
+                               (current->domain != d)) )
+    {
+        rc = 1;
+
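+        /* Record the remapped range: p2m_altp2m_propagate_change() resets
+         * this altp2m wholesale if a page in the range is dropped from
+         * the host p2m. */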
+        if ( ap2m->min_remapped_pfn == ~0ul ||
+             new_pfn < ap2m->min_remapped_pfn )
+            ap2m->min_remapped_pfn = new_pfn;
+        if ( ap2m->max_remapped_pfn == ~0ul ||
+             new_pfn > ap2m->max_remapped_pfn )
+            ap2m->max_remapped_pfn = new_pfn;
+    }
+
+out:
+    p2m_unlock(ap2m);
+    return rc;
+}
+
+static inline void p2m_reset_altp2m(struct p2m_domain *p2m)
+{
+    p2m_flush_table(p2m);
+    /* Uninit and reinit ept to force TLB shootdown */
+    ept_p2m_uninit(p2m);
+    ept_p2m_init(p2m);
+    p2m->min_remapped_pfn = ~0ul;
+    p2m->max_remapped_pfn = ~0ul;
+}
+
+void p2m_altp2m_propagate_change(struct domain *d, unsigned long gfn,
+                                 mfn_t mfn, unsigned int page_order,
+                                 p2m_type_t p2mt, p2m_access_t p2ma)
+{
+    struct p2m_domain *p2m;
+    p2m_access_t a;
+    p2m_type_t t;
+    mfn_t m;
+    uint16_t i;
+    bool_t reset_p2m;
+    unsigned int reset_count = 0;
+    uint16_t last_reset_idx = ~0;
+
+    if ( !altp2mhvm_active(d) )
+        return;
+
+    altp2m_lock(d);
+
+    for ( i = 0; i < MAX_ALTP2M; i++ )
+    {
+        if ( d->arch.altp2m_eptp[i] == ~0ul )
+            continue;
+
+        p2m = d->arch.altp2m_p2m[i];
+        m = get_gfn_type_access(p2m, gfn, &t, &a, 0, NULL);
+
+        reset_p2m = 0;
+
+        /* Check for a dropped page that may impact this altp2m */
+        if ( mfn_x(mfn) == INVALID_MFN &&
+             gfn >= p2m->min_remapped_pfn && gfn <= p2m->max_remapped_pfn )
+            reset_p2m = 1;
+
+        if ( reset_p2m )
+        {
+            if ( !reset_count++ )
+            {
+                p2m_reset_altp2m(p2m);
+                last_reset_idx = i;
+            }
+            else
+            {
+                /* At least 2 altp2m's impacted, so reset everything */
+                __put_gfn(p2m, gfn);
+
+                for ( i = 0; i < MAX_ALTP2M; i++ )
+                {
+                    if ( i == last_reset_idx ||
+                         d->arch.altp2m_eptp[i] == ~0ul )
+                        continue;
+
+                    p2m = d->arch.altp2m_p2m[i];
+                    p2m_lock(p2m);
+                    p2m_reset_altp2m(p2m);
+                    p2m_unlock(p2m);
+                }
+
+                goto out;
+            }
+        }
+        else if ( mfn_x(m) != INVALID_MFN )
+            p2m_set_entry(p2m, gfn, mfn, page_order, p2mt, p2ma);
+
+        __put_gfn(p2m, gfn);
+    }
+
+out:
+    altp2m_unlock(d);
+}
+
 /*** Audit ***/
 
 #if P2M_AUDIT
diff --git a/xen/include/asm-x86/hvm/altp2mhvm.h b/xen/include/asm-x86/hvm/altp2mhvm.h
index a4b8e24..08ff79b 100644
--- a/xen/include/asm-x86/hvm/altp2mhvm.h
+++ b/xen/include/asm-x86/hvm/altp2mhvm.h
@@ -34,5 +34,9 @@ int altp2mhvm_vcpu_initialise(struct vcpu *v);
 void altp2mhvm_vcpu_destroy(struct vcpu *v);
 void altp2mhvm_vcpu_reset(struct vcpu *v);
 
+/* Alternate p2m paging */
+int altp2mhvm_hap_nested_page_fault(struct vcpu *v, paddr_t gpa,
+    unsigned long gla, struct npfec npfec, struct p2m_domain **ap2m);
+
 #endif /* _HVM_ALTP2M_H */
 
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index d84da33..3f17211 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -279,6 +279,11 @@ struct p2m_domain {
     /* Highest guest frame that's ever been mapped in the p2m */
     unsigned long max_mapped_pfn;
 
+    /* Alternate p2m's only: range of pfn's for which underlying
+     * mfn may have duplicate mappings */
+    unsigned long min_remapped_pfn;
+    unsigned long max_remapped_pfn;
+
     /* When releasing shared gfn's in a preemptible manner, recall where
      * to resume the search */
     unsigned long next_shared_gfn_to_relinquish;
@@ -766,6 +771,34 @@ bool_t p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, uint16_t idx);
 void p2m_mem_access_altp2m_check(struct vcpu *v,
                                  const vm_event_response_t *rsp);
 
+/* Flush all the alternate p2m's for a domain */
+void p2m_flush_altp2m(struct domain *d);
+
+/* Make a specific alternate p2m valid */
+bool_t p2m_init_altp2m_by_id(struct domain *d, uint16_t idx);
+
+/* Find an available alternate p2m and make it valid */
+bool_t p2m_init_next_altp2m(struct domain *d, uint16_t *idx);
+
+/* Make a specific alternate p2m invalid */
+bool_t p2m_destroy_altp2m_by_id(struct domain *d, uint16_t idx);
+
+/* Switch alternate p2m for entire domain */
+bool_t p2m_switch_domain_altp2m_by_id(struct domain *d, uint16_t idx);
+
+/* Set access type for a pfn */
+bool_t p2m_set_altp2m_mem_access(struct domain *d, uint16_t idx,
+                                 unsigned long pfn, xenmem_access_t access);
+
+/* Replace a pfn with a different pfn */
+bool_t p2m_change_altp2m_pfn(struct domain *d, uint16_t idx,
+                             unsigned long old_pfn, unsigned long new_pfn);
+
+/* Propagate a host p2m change to all alternate p2m's */
+void p2m_altp2m_propagate_change(struct domain *d, unsigned long gfn,
+                                 mfn_t mfn, unsigned int page_order,
+                                 p2m_type_t p2mt, p2m_access_t p2ma);
+
 /*
  * p2m type to IOMMU flags
  */
-- 
1.9.1