Re: [Xen-devel] [PATCH v2 04/12] x86/altp2m: basic data structures and support routines.
On 22/06/15 19:56, Ed White wrote:
> Add the basic data structures needed to support alternate p2m's and
> the functions to initialise them and tear them down.
>
> Although Intel hardware can handle 512 EPTP's per hardware thread
> concurrently, only 10 per domain are supported in this patch for
> performance reasons.
>
> The iterator in hap_enable() does need to handle 512, so that is now
> uint16_t.
>
> This change also splits the p2m lock into one lock type for altp2m's
> and another type for all other p2m's. The purpose of this is to place
> the altp2m list lock between the types, so the list lock can be
> acquired whilst holding the host p2m lock.
>
> Signed-off-by: Ed White <edmund.h.white@xxxxxxxxx>
> ---
> xen/arch/x86/hvm/Makefile | 2 +
> xen/arch/x86/hvm/altp2mhvm.c | 82 ++++++++++++++++++++++++++++
> xen/arch/x86/hvm/hvm.c | 21 ++++++++
> xen/arch/x86/mm/hap/hap.c | 31 ++++++++++-
> xen/arch/x86/mm/mm-locks.h | 33 +++++++++++-
> xen/arch/x86/mm/p2m.c | 103 ++++++++++++++++++++++++++++++++++++
> xen/include/asm-x86/domain.h | 10 ++++
> xen/include/asm-x86/hvm/altp2mhvm.h | 38 +++++++++++++
> xen/include/asm-x86/hvm/hvm.h | 17 ++++++
> xen/include/asm-x86/hvm/vcpu.h | 9 ++++
> xen/include/asm-x86/p2m.h | 30 ++++++++++-
> 11 files changed, 372 insertions(+), 4 deletions(-)
> create mode 100644 xen/arch/x86/hvm/altp2mhvm.c
> create mode 100644 xen/include/asm-x86/hvm/altp2mhvm.h
>
> diff --git a/xen/arch/x86/hvm/Makefile b/xen/arch/x86/hvm/Makefile
> index 69af47f..da4475d 100644
> --- a/xen/arch/x86/hvm/Makefile
> +++ b/xen/arch/x86/hvm/Makefile
> @@ -1,6 +1,7 @@
> subdir-y += svm
> subdir-y += vmx
>
> +obj-y += altp2mhvm.o
This file is already in a directory named hvm. I would name it simply
altp2m.c
Similarly, altp2mhvm in function names seems redundant, where altp2m
would suffice. We will never be in a position of offering altp2m to PV
domains.
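i.e. (illustrative only):

    obj-y += altp2m.o

with function names such as altp2m_vcpu_initialise() and
altp2m_vcpu_destroy() to match.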
> obj-y += asid.o
> obj-y += emulate.o
> obj-y += event.o
> @@ -24,3 +25,4 @@ obj-y += vmsi.o
> obj-y += vpic.o
> obj-y += vpt.o
> obj-y += vpmu.o
> +
Spurious whitespace change.
> diff --git a/xen/arch/x86/hvm/altp2mhvm.c b/xen/arch/x86/hvm/altp2mhvm.c
> new file mode 100644
> index 0000000..802fe5b
> --- /dev/null
> +++ b/xen/arch/x86/hvm/altp2mhvm.c
> @@ -0,0 +1,82 @@
> +/*
> + * Alternate p2m HVM
> + * Copyright (c) 2014, Intel Corporation.
> + *
> + * This program is free software; you can redistribute it and/or modify it
> + * under the terms and conditions of the GNU General Public License,
> + * version 2, as published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope it will be useful, but WITHOUT
> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
> + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
> + * more details.
> + *
> + * You should have received a copy of the GNU General Public License along with
> + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
> + * Place - Suite 330, Boston, MA 02111-1307 USA.
> + */
> +
> +#include <asm/hvm/support.h>
> +#include <asm/hvm/hvm.h>
> +#include <asm/p2m.h>
> +#include <asm/hvm/altp2mhvm.h>
> +
> +void
> +altp2mhvm_vcpu_reset(struct vcpu *v)
> +{
> + struct altp2mvcpu *av = &vcpu_altp2mhvm(v);
> +
> + av->p2midx = INVALID_ALTP2M;
> + av->veinfo_gfn = 0;
> +
> + if ( hvm_funcs.ahvm_vcpu_reset )
> + hvm_funcs.ahvm_vcpu_reset(v);
> +}
> +
> +int
> +altp2mhvm_vcpu_initialise(struct vcpu *v)
> +{
> + int rc = -EOPNOTSUPP;
> +
> + if ( v != current )
> + vcpu_pause(v);
> +
> + if ( !hvm_funcs.ahvm_vcpu_initialise ||
> + (hvm_funcs.ahvm_vcpu_initialise(v) == 0) )
> + {
> + rc = 0;
> + altp2mhvm_vcpu_reset(v);
> + vcpu_altp2mhvm(v).p2midx = 0;
> + atomic_inc(&p2m_get_altp2m(v)->active_vcpus);
> +
> + ahvm_vcpu_update_eptp(v);
> + }
> +
> + if ( v != current )
> + vcpu_unpause(v);
> +
> + return rc;
> +}
> +
> +void
> +altp2mhvm_vcpu_destroy(struct vcpu *v)
> +{
> + struct p2m_domain *p2m;
> +
> + if ( v != current )
> + vcpu_pause(v);
> +
> + if ( hvm_funcs.ahvm_vcpu_destroy )
> + hvm_funcs.ahvm_vcpu_destroy(v);
> +
> + if ( (p2m = p2m_get_altp2m(v)) )
> + atomic_dec(&p2m->active_vcpus);
> +
> + altp2mhvm_vcpu_reset(v);
> +
> + ahvm_vcpu_update_eptp(v);
> + ahvm_vcpu_update_vmfunc_ve(v);
> +
> + if ( v != current )
> + vcpu_unpause(v);
> +}
Please put an emacs local variables block at the bottom of the file:
/*
* Local variables:
* mode: C
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
* End:
*/
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index af68d44..d75c12d 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -58,6 +58,7 @@
> #include <asm/hvm/cacheattr.h>
> #include <asm/hvm/trace.h>
> #include <asm/hvm/nestedhvm.h>
> +#include <asm/hvm/altp2mhvm.h>
> #include <asm/hvm/event.h>
> #include <asm/mtrr.h>
> #include <asm/apic.h>
> @@ -2373,6 +2374,7 @@ void hvm_vcpu_destroy(struct vcpu *v)
> {
> hvm_all_ioreq_servers_remove_vcpu(v->domain, v);
>
> + altp2mhvm_vcpu_destroy(v);
> nestedhvm_vcpu_destroy(v);
>
> free_compat_arg_xlat(v);
> @@ -6486,6 +6488,25 @@ bool_t hvm_altp2m_supported()
> return hvm_funcs.altp2m_supported;
> }
>
> +void ahvm_vcpu_update_eptp(struct vcpu *v)
> +{
> + if (hvm_funcs.ahvm_vcpu_update_eptp)
> + hvm_funcs.ahvm_vcpu_update_eptp(v);
> +}
> +
> +void ahvm_vcpu_update_vmfunc_ve(struct vcpu *v)
> +{
> + if (hvm_funcs.ahvm_vcpu_update_vmfunc_ve)
> + hvm_funcs.ahvm_vcpu_update_vmfunc_ve(v);
> +}
> +
> +bool_t ahvm_vcpu_emulate_ve(struct vcpu *v)
> +{
> + if (hvm_funcs.ahvm_vcpu_emulate_ve)
> + return hvm_funcs.ahvm_vcpu_emulate_ve(v);
> + return 0;
> +}
> +
> /*
> * Local variables:
> * mode: C
> diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
> index d0d3f1e..202aa42 100644
> --- a/xen/arch/x86/mm/hap/hap.c
> +++ b/xen/arch/x86/mm/hap/hap.c
> @@ -459,7 +459,7 @@ void hap_domain_init(struct domain *d)
> int hap_enable(struct domain *d, u32 mode)
> {
> unsigned int old_pages;
> - uint8_t i;
> + uint16_t i;
> int rv = 0;
>
> domain_pause(d);
> @@ -498,6 +498,24 @@ int hap_enable(struct domain *d, u32 mode)
> goto out;
> }
>
> + /* Init alternate p2m data */
> + if ( (d->arch.altp2m_eptp = alloc_xenheap_page()) == NULL )
Please use alloc_domheap_page() and map_domain_page_global() so the
allocation is accounted against the domain.
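For example, something like this (an untested sketch; MEMF_no_owner is
my assumption for the accounting flag):

    struct page_info *pg = alloc_domheap_page(d, MEMF_no_owner);

    if ( pg == NULL )
    {
        rv = -ENOMEM;
        goto out;
    }
    d->arch.altp2m_eptp = __map_domain_page_global(pg);

The teardown path in hap_final_teardown() then needs the matching
unmap_domain_page_global()/free_domheap_page() rather than
free_xenheap_page().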
> + {
> + rv = -ENOMEM;
> + goto out;
> + }
> +
> + for (i = 0; i < MAX_EPTP; i++)
> + d->arch.altp2m_eptp[i] = ~0ul;
> +
> + for (i = 0; i < MAX_ALTP2M; i++) {
> + rv = p2m_alloc_table(d->arch.altp2m_p2m[i]);
> + if ( rv != 0 )
> + goto out;
> + }
> +
> + d->arch.altp2m_active = 0;
> +
> /* Now let other users see the new mode */
> d->arch.paging.mode = mode | PG_HAP_enable;
>
> @@ -510,6 +528,17 @@ void hap_final_teardown(struct domain *d)
> {
> uint8_t i;
>
> + d->arch.altp2m_active = 0;
> +
> + if ( d->arch.altp2m_eptp ) {
> + free_xenheap_page(d->arch.altp2m_eptp);
> + d->arch.altp2m_eptp = NULL;
> + }
> +
> + for (i = 0; i < MAX_ALTP2M; i++) {
> + p2m_teardown(d->arch.altp2m_p2m[i]);
> + }
> +
> /* Destroy nestedp2m's first */
> for (i = 0; i < MAX_NESTEDP2M; i++) {
> p2m_teardown(d->arch.nested_p2m[i]);
> diff --git a/xen/arch/x86/mm/mm-locks.h b/xen/arch/x86/mm/mm-locks.h
> index b4f035e..954b345 100644
> --- a/xen/arch/x86/mm/mm-locks.h
> +++ b/xen/arch/x86/mm/mm-locks.h
> @@ -217,7 +217,7 @@ declare_mm_lock(nestedp2m)
> #define nestedp2m_lock(d) mm_lock(nestedp2m, &(d)->arch.nested_p2m_lock)
> #define nestedp2m_unlock(d) mm_unlock(&(d)->arch.nested_p2m_lock)
>
> -/* P2M lock (per-p2m-table)
> +/* P2M lock (per-non-alt-p2m-table)
> *
> * This protects all queries and updates to the p2m table.
> * Queries may be made under the read lock but all modifications
> @@ -228,7 +228,36 @@ declare_mm_lock(nestedp2m)
> */
>
> declare_mm_rwlock(p2m);
Normally, we expect the declarations to be in nesting order, and so
far this is the case. Please leave a comment here explaining why the
p2m lock declaration is now separated from the rest of its
implementation.
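Something along these lines would do (wording is only a suggestion,
based on your commit message):

    /* Note: the p2m lock is declared here, away from the p2m_lock()
     * implementation below, so that the altp2m list lock can be
     * ordered in between: the list lock must be acquirable whilst
     * holding the host p2m lock. */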
> -#define p2m_lock(p) mm_write_lock(p2m, &(p)->lock);
> +
> +/* Alternate P2M list lock (per-domain)
> + *
> + * A per-domain lock that protects the list of alternate p2m's.
> + * Any operation that walks the list needs to acquire this lock.
> + * Additionally, before destroying an alternate p2m all VCPU's
> + * in the target domain must be paused. */
> +
> +declare_mm_lock(altp2mlist)
> +#define altp2m_lock(d) mm_lock(altp2mlist, &(d)->arch.altp2m_lock)
> +#define altp2m_unlock(d) mm_unlock(&(d)->arch.altp2m_lock)
> +
> +/* P2M lock (per-altp2m-table)
> + *
> + * This protects all queries and updates to the p2m table.
> + * Queries may be made under the read lock but all modifications
> + * need the main (write) lock.
> + *
> + * The write lock is recursive as it is common for a code path to look
> + * up a gfn and later mutate it.
> + */
> +
> +declare_mm_rwlock(altp2m);
> +#define p2m_lock(p) \
> +{ \
> + if ( p2m_is_altp2m(p) ) \
> + mm_write_lock(altp2m, &(p)->lock); \
> + else \
> + mm_write_lock(p2m, &(p)->lock); \
> +}
> #define p2m_unlock(p) mm_write_unlock(&(p)->lock);
> #define gfn_lock(p,g,o) p2m_lock(p)
> #define gfn_unlock(p,g,o) p2m_unlock(p)
> diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
> index 1fd1194..87b4b75 100644
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -35,6 +35,7 @@
> #include <asm/hvm/vmx/vmx.h> /* ept_p2m_init() */
> #include <asm/mem_sharing.h>
> #include <asm/hvm/nestedhvm.h>
> +#include <asm/hvm/altp2mhvm.h>
> #include <asm/hvm/svm/amd-iommu-proto.h>
> #include <xsm/xsm.h>
>
> @@ -183,6 +184,45 @@ static void p2m_teardown_nestedp2m(struct domain *d)
> }
> }
>
> +static void p2m_teardown_altp2m(struct domain *d);
You can avoid this forward declaration by moving the implementation of
p2m_teardown_altp2m() up here.
> +
> +static int p2m_init_altp2m(struct domain *d)
> +{
> + uint8_t i;
> + struct p2m_domain *p2m;
> +
> + mm_lock_init(&d->arch.altp2m_lock);
> + for (i = 0; i < MAX_ALTP2M; i++)
> + {
> + d->arch.altp2m_p2m[i] = p2m = p2m_init_one(d);
> + if ( p2m == NULL )
> + {
> + p2m_teardown_altp2m(d);
> + return -ENOMEM;
> + }
> + p2m->p2m_class = p2m_alternate;
> + p2m->access_required = 1;
> + _atomic_set(&p2m->active_vcpus, 0);
> + }
> +
> + return 0;
> +}
> +
> +static void p2m_teardown_altp2m(struct domain *d)
> +{
> + uint8_t i;
> + struct p2m_domain *p2m;
> +
> + for (i = 0; i < MAX_ALTP2M; i++)
> + {
> + if ( !d->arch.altp2m_p2m[i] )
> + continue;
> + p2m = d->arch.altp2m_p2m[i];
> + p2m_free_one(p2m);
> + d->arch.altp2m_p2m[i] = NULL;
> + }
> +}
> +
> int p2m_init(struct domain *d)
> {
> int rc;
> @@ -196,7 +236,14 @@ int p2m_init(struct domain *d)
> * (p2m_init runs too early for HVM_PARAM_* options) */
> rc = p2m_init_nestedp2m(d);
> if ( rc )
> + {
> p2m_teardown_hostp2m(d);
> + return rc;
> + }
> +
> + rc = p2m_init_altp2m(d);
> + if ( rc )
> + p2m_teardown_altp2m(d);
>
> return rc;
> }
> @@ -1920,6 +1967,62 @@ int unmap_mmio_regions(struct domain *d,
> return err;
> }
>
> +bool_t p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp, unsigned long *idx)
Wouldn't it be better to return the index directly, using
INVALID_ALTP2M as a sentinel, rather than returning idx via a pointer?
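i.e. something like this (an untested sketch, assuming the body is
just a scan of d->arch.altp2m_eptp under the altp2m list lock):

    unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp)
    {
        unsigned int i;

        altp2m_lock(d);

        for ( i = 0; i < MAX_ALTP2M; i++ )
            if ( d->arch.altp2m_eptp[i] == eptp )
                break;

        altp2m_unlock(d);

        return i < MAX_ALTP2M ? i : INVALID_ALTP2M;
    }

Callers then check for INVALID_ALTP2M instead of passing a pointer
around.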
~Andrew
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel