[Xen-devel] [PATCH v2 Altp2m cleanup v3 3/3] Making altp2m struct dynamically allocated.
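In brief, this patch moves the four altp2m fields of struct arch_domain -- altp2m_active, altp2m_p2m[MAX_ALTP2M], altp2m_list_lock and altp2m_eptp -- into a new struct altp2m_domain, which p2m_init_altp2m() now allocates with xzalloc() and p2m_teardown_altp2m() frees with xfree(). All users dereference d->arch.altp2m, and direct reads/writes of altp2m_active go through the altp2m_active()/set_altp2m_active() helpers. The standalone sketch below models the resulting ownership pattern; it is not Xen code: calloc()/free() stand in for xzalloc()/xfree(), mm_lock_t and struct p2m_domain are stubs, the init_altp2m()/teardown_altp2m() names are illustrative, and error handling is simplified.

/* Standalone sketch only -- not Xen code.  calloc()/free() stand in for
 * Xen's xzalloc()/xfree(); mm_lock_t and struct p2m_domain are stubs. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ALTP2M 10                 /* Xen's limit on altp2m tables */

typedef int mm_lock_t;                /* stub */
struct p2m_domain { int unused; };    /* stub */

/* The fields previously embedded directly in struct arch_domain. */
struct altp2m_domain {
    int altp2m_active;                          /* bool_t in Xen */
    struct p2m_domain *altp2m_p2m[MAX_ALTP2M];
    mm_lock_t altp2m_list_lock;
    uint64_t *altp2m_eptp;
};

/* struct arch_domain now carries only a pointer to the container. */
struct arch_domain {
    struct altp2m_domain *altp2m;
};

/* Mirrors p2m_teardown_altp2m(): free each p2m, then the container. */
static void teardown_altp2m(struct arch_domain *arch)
{
    unsigned int i;

    for ( i = 0; i < MAX_ALTP2M; i++ )
    {
        free(arch->altp2m->altp2m_p2m[i]);        /* p2m_free_one() */
        arch->altp2m->altp2m_p2m[i] = NULL;
    }

    free(arch->altp2m);                           /* xfree() */
    arch->altp2m = NULL;
}

/* Mirrors p2m_init_altp2m(): allocate the zeroed container first,
 * then populate the per-index p2m pointers. */
static int init_altp2m(struct arch_domain *arch)
{
    unsigned int i;

    arch->altp2m = calloc(1, sizeof(*arch->altp2m));  /* xzalloc() */
    if ( arch->altp2m == NULL )
        return -1;                                    /* -ENOMEM */

    for ( i = 0; i < MAX_ALTP2M; i++ )
    {
        arch->altp2m->altp2m_p2m[i] = calloc(1, sizeof(struct p2m_domain));
        if ( arch->altp2m->altp2m_p2m[i] == NULL )
        {
            /* The patch likewise tears down on p2m_init_one() failure. */
            teardown_altp2m(arch);
            return -1;
        }
    }

    return 0;
}

int main(void)
{
    struct arch_domain arch = { NULL };

    if ( init_altp2m(&arch) != 0 )
        return 1;
    printf("altp2m container allocated, active=%d\n",
           arch.altp2m->altp2m_active);
    teardown_altp2m(&arch);
    return 0;
}

As in the patch proper, the container is allocated before any per-index p2m, and a mid-loop allocation failure tears everything down, including the container itself.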
Ravi Sahita's dynamically allocated altp2m structs

Signed-off-by: Paul Lai <paul.c.lai@xxxxxxxxx>
Reviewed-by: Ravi Sahita <ravi.sahita@xxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c       |  8 +++---
 xen/arch/x86/hvm/vmx/vmx.c   |  2 +-
 xen/arch/x86/mm/altp2m.c     | 18 ++++++-------
 xen/arch/x86/mm/mm-locks.h   |  4 +--
 xen/arch/x86/mm/p2m-ept.c    | 10 ++++----
 xen/arch/x86/mm/p2m.c        | 61 ++++++++++++++++++++++++--------------------
 xen/common/monitor.c         |  1 +
 xen/include/asm-x86/altp2m.h |  7 ++++-
 xen/include/asm-x86/domain.h |  6 ++---
 xen/include/asm-x86/p2m.h    |  9 ++++++-
 10 files changed, 72 insertions(+), 54 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 1995fa4..115d8e7 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -5245,7 +5245,7 @@ static int do_altp2m_op(
 
     if ( (a.cmd != HVMOP_altp2m_get_domain_state) &&
          (a.cmd != HVMOP_altp2m_set_domain_state) &&
-         !d->arch.altp2m_active )
+         !altp2m_active(d) )
     {
         rc = -EOPNOTSUPP;
         goto out;
@@ -5279,11 +5279,11 @@ static int do_altp2m_op(
             break;
         }
 
-        ostate = d->arch.altp2m_active;
-        d->arch.altp2m_active = !!a.u.domain_state.state;
+        ostate = altp2m_active(d);
+        set_altp2m_active(d, !!a.u.domain_state.state);
 
         /* If the alternate p2m state has changed, handle appropriately */
-        if ( d->arch.altp2m_active != ostate &&
+        if ( altp2m_active(d) != ostate &&
              (ostate || !(rc = p2m_init_altp2m_by_id(d, 0))) )
         {
             for_each_vcpu( d, v )
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 3d330b6..e7c8d04 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2018,7 +2018,7 @@ static void vmx_vcpu_update_vmfunc_ve(struct vcpu *v)
     {
         v->arch.hvm_vmx.secondary_exec_control |= mask;
         __vmwrite(VM_FUNCTION_CONTROL, VMX_VMFUNC_EPTP_SWITCHING);
-        __vmwrite(EPTP_LIST_ADDR, virt_to_maddr(d->arch.altp2m_eptp));
+        __vmwrite(EPTP_LIST_ADDR, virt_to_maddr(d->arch.altp2m->altp2m_eptp));
 
         if ( cpu_has_vmx_virt_exceptions )
         {
diff --git a/xen/arch/x86/mm/altp2m.c b/xen/arch/x86/mm/altp2m.c
index cc9b0fc..eec9ffc 100644
--- a/xen/arch/x86/mm/altp2m.c
+++ b/xen/arch/x86/mm/altp2m.c
@@ -73,23 +73,23 @@ hvm_altp2m_init( struct domain *d)
     unsigned int i = 0;
 
     /* Init alternate p2m data. */
-    if ( (d->arch.altp2m_eptp = alloc_xenheap_page()) == NULL )
+    if ( (d->arch.altp2m->altp2m_eptp = alloc_xenheap_page()) == NULL )
     {
         rc = -ENOMEM;
         goto out;
     }
 
     for ( i = 0; i < MAX_EPTP; i++ )
-        d->arch.altp2m_eptp[i] = mfn_x(INVALID_MFN);
+        d->arch.altp2m->altp2m_eptp[i] = mfn_x(INVALID_MFN);
 
     for ( i = 0; i < MAX_ALTP2M; i++ )
     {
-        rc = p2m_alloc_table(d->arch.altp2m_p2m[i]);
+        rc = p2m_alloc_table(d->arch.altp2m->altp2m_p2m[i]);
         if ( rc != 0 )
             goto out;
     }
 
-    d->arch.altp2m_active = 0;
+    set_altp2m_active(d, 0);
 
  out:
     return rc;
 }
@@ -98,16 +98,16 @@
 void hvm_altp2m_teardown( struct domain *d)
 {
     unsigned int i = 0;
 
-    d->arch.altp2m_active = 0;
+    set_altp2m_active(d, 0);
 
-    if ( d->arch.altp2m_eptp )
+    if ( d->arch.altp2m->altp2m_eptp )
     {
-        free_xenheap_page(d->arch.altp2m_eptp);
-        d->arch.altp2m_eptp = NULL;
+        free_xenheap_page(d->arch.altp2m->altp2m_eptp);
+        d->arch.altp2m->altp2m_eptp = NULL;
     }
 
     for ( i = 0; i < MAX_ALTP2M; i++ )
-        p2m_teardown(d->arch.altp2m_p2m[i]);
+        p2m_teardown(d->arch.altp2m->altp2m_p2m[i]);
 }
 
 /*
diff --git a/xen/arch/x86/mm/mm-locks.h b/xen/arch/x86/mm/mm-locks.h
index 086c8bb..4d17b0a 100644
--- a/xen/arch/x86/mm/mm-locks.h
+++ b/xen/arch/x86/mm/mm-locks.h
@@ -251,8 +251,8 @@ declare_mm_rwlock(p2m);
  */
 
 declare_mm_lock(altp2mlist)
-#define altp2m_list_lock(d)   mm_lock(altp2mlist, &(d)->arch.altp2m_list_lock)
-#define altp2m_list_unlock(d) mm_unlock(&(d)->arch.altp2m_list_lock)
+#define altp2m_list_lock(d)   mm_lock(altp2mlist, &(d)->arch.altp2m->altp2m_list_lock)
+#define altp2m_list_unlock(d) mm_unlock(&(d)->arch.altp2m->altp2m_list_lock)
 
 /* P2M lock (per-altp2m-table)
  *
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 8247a02..74b2754 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -1331,14 +1331,14 @@ void setup_ept_dump(void)
 
 void p2m_init_altp2m_ept_helper( struct domain *d, unsigned int i)
 {
-    struct p2m_domain *p2m = d->arch.altp2m_p2m[i];
+    struct p2m_domain *p2m = d->arch.altp2m->altp2m_p2m[i];
     struct ept_data *ept;
 
     p2m->min_remapped_gfn = gfn_x(INVALID_GFN);
-    p2m->max_remapped_gfn = 0;
+    p2m->max_remapped_gfn = gfn_x(_gfn(0UL));
     ept = &p2m->ept;
     ept->asr = pagetable_get_pfn(p2m_get_pagetable(p2m));
-    d->arch.altp2m_eptp[i] = ept_get_eptp(ept);
+    d->arch.altp2m->altp2m_eptp[i] = ept_get_eptp(ept);
 }
 
 unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp)
@@ -1351,10 +1351,10 @@ unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp)
 
     for ( i = 0; i < MAX_ALTP2M; i++ )
     {
-        if ( d->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )
+        if ( d->arch.altp2m->altp2m_eptp[i] == mfn_x(INVALID_MFN) )
            continue;
 
-        p2m = d->arch.altp2m_p2m[i];
+        p2m = d->arch.altp2m->altp2m_p2m[i];
         ept = &p2m->ept;
 
         if ( eptp == ept_get_eptp(ept) )
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 7673663..956ce3a 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -193,12 +193,15 @@ static void p2m_teardown_altp2m(struct domain *d)
 
     for ( i = 0; i < MAX_ALTP2M; i++ )
     {
-        if ( !d->arch.altp2m_p2m[i] )
+        if ( !d->arch.altp2m->altp2m_p2m[i] )
             continue;
-        p2m = d->arch.altp2m_p2m[i];
+        p2m = d->arch.altp2m->altp2m_p2m[i];
         p2m_free_one(p2m);
-        d->arch.altp2m_p2m[i] = NULL;
+        d->arch.altp2m->altp2m_p2m[i] = NULL;
     }
+
+    if ( d->arch.altp2m )
+        xfree(d->arch.altp2m);
 }
 
 static int p2m_init_altp2m(struct domain *d)
@@ -206,10 +209,14 @@ static int p2m_init_altp2m(struct domain *d)
     unsigned int i;
     struct p2m_domain *p2m;
 
-    mm_lock_init(&d->arch.altp2m_list_lock);
+    d->arch.altp2m = xzalloc(struct altp2m_domain);
+    if ( d->arch.altp2m == NULL )
+        return -ENOMEM;
+
+    mm_lock_init(&d->arch.altp2m->altp2m_list_lock);
     for ( i = 0; i < MAX_ALTP2M; i++ )
     {
-        d->arch.altp2m_p2m[i] = p2m = p2m_init_one(d);
+        d->arch.altp2m->altp2m_p2m[i] = p2m = p2m_init_one(d);
         if ( p2m == NULL )
         {
             p2m_teardown_altp2m(d);
@@ -1844,10 +1851,10 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
     if ( altp2m_idx )
     {
         if ( altp2m_idx >= MAX_ALTP2M ||
-             d->arch.altp2m_eptp[altp2m_idx] == mfn_x(INVALID_MFN) )
+             d->arch.altp2m->altp2m_eptp[altp2m_idx] == mfn_x(INVALID_MFN) )
             return -EINVAL;
 
-        ap2m = d->arch.altp2m_p2m[altp2m_idx];
+        ap2m = d->arch.altp2m->altp2m_p2m[altp2m_idx];
     }
 
     switch ( access )
@@ -2288,7 +2295,7 @@ bool_t p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, unsigned int idx)
 
     altp2m_list_lock(d);
 
-    if ( d->arch.altp2m_eptp[idx] != mfn_x(INVALID_MFN) )
+    if ( d->arch.altp2m->altp2m_eptp[idx] != mfn_x(INVALID_MFN) )
     {
         if ( idx != vcpu_altp2m(v).p2midx )
         {
@@ -2373,11 +2380,11 @@ void p2m_flush_altp2m(struct domain *d)
 
     for ( i = 0; i < MAX_ALTP2M; i++ )
     {
-        p2m_flush_table(d->arch.altp2m_p2m[i]);
+        p2m_flush_table(d->arch.altp2m->altp2m_p2m[i]);
         /* Uninit and reinit ept to force TLB shootdown */
-        ept_p2m_uninit(d->arch.altp2m_p2m[i]);
-        ept_p2m_init(d->arch.altp2m_p2m[i]);
-        d->arch.altp2m_eptp[i] = mfn_x(INVALID_MFN);
+        ept_p2m_uninit(d->arch.altp2m->altp2m_p2m[i]);
+        ept_p2m_init(d->arch.altp2m->altp2m_p2m[i]);
+        d->arch.altp2m->altp2m_eptp[i] = mfn_x(INVALID_MFN);
     }
 
     altp2m_list_unlock(d);
@@ -2392,7 +2399,7 @@ int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx)
 
     altp2m_list_lock(d);
 
-    if ( d->arch.altp2m_eptp[idx] == mfn_x(INVALID_MFN) )
+    if ( d->arch.altp2m->altp2m_eptp[idx] == mfn_x(INVALID_MFN) )
     {
         p2m_init_altp2m_ept_helper(d, idx);
         rc = 0;
@@ -2411,7 +2418,7 @@ int p2m_init_next_altp2m(struct domain *d, uint16_t *idx)
 
     for ( i = 0; i < MAX_ALTP2M; i++ )
     {
-        if ( d->arch.altp2m_eptp[i] != mfn_x(INVALID_MFN) )
+        if ( d->arch.altp2m->altp2m_eptp[i] != mfn_x(INVALID_MFN) )
             continue;
 
         p2m_init_altp2m_ept_helper(d, i);
@@ -2437,17 +2444,17 @@ int p2m_destroy_altp2m_by_id(struct domain *d, unsigned int idx)
 
     altp2m_list_lock(d);
 
-    if ( d->arch.altp2m_eptp[idx] != mfn_x(INVALID_MFN) )
+    if ( d->arch.altp2m->altp2m_eptp[idx] != mfn_x(INVALID_MFN) )
     {
-        p2m = d->arch.altp2m_p2m[idx];
+        p2m = d->arch.altp2m->altp2m_p2m[idx];
 
         if ( !_atomic_read(p2m->active_vcpus) )
         {
-            p2m_flush_table(d->arch.altp2m_p2m[idx]);
+            p2m_flush_table(d->arch.altp2m->altp2m_p2m[idx]);
             /* Uninit and reinit ept to force TLB shootdown */
-            ept_p2m_uninit(d->arch.altp2m_p2m[idx]);
-            ept_p2m_init(d->arch.altp2m_p2m[idx]);
-            d->arch.altp2m_eptp[idx] = mfn_x(INVALID_MFN);
+            ept_p2m_uninit(d->arch.altp2m->altp2m_p2m[idx]);
+            ept_p2m_init(d->arch.altp2m->altp2m_p2m[idx]);
+            d->arch.altp2m->altp2m_eptp[idx] = mfn_x(INVALID_MFN);
             rc = 0;
         }
     }
@@ -2471,7 +2478,7 @@ int p2m_switch_domain_altp2m_by_id(struct domain *d, unsigned int idx)
 
     altp2m_list_lock(d);
 
-    if ( d->arch.altp2m_eptp[idx] != mfn_x(INVALID_MFN) )
+    if ( d->arch.altp2m->altp2m_eptp[idx] != mfn_x(INVALID_MFN) )
     {
         for_each_vcpu( d, v )
             if ( idx != vcpu_altp2m(v).p2midx )
@@ -2502,11 +2509,11 @@ int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx,
     unsigned int page_order;
     int rc = -EINVAL;
 
-    if ( idx >= MAX_ALTP2M || d->arch.altp2m_eptp[idx] == mfn_x(INVALID_MFN) )
+    if ( idx >= MAX_ALTP2M || d->arch.altp2m->altp2m_eptp[idx] == mfn_x(INVALID_MFN) )
         return rc;
 
     hp2m = p2m_get_hostp2m(d);
-    ap2m = d->arch.altp2m_p2m[idx];
+    ap2m = d->arch.altp2m->altp2m_p2m[idx];
 
     p2m_lock(ap2m);
 
@@ -2597,10 +2604,10 @@ void p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
 
     for ( i = 0; i < MAX_ALTP2M; i++ )
    {
-        if ( d->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )
+        if ( d->arch.altp2m->altp2m_eptp[i] == mfn_x(INVALID_MFN) )
             continue;
 
-        p2m = d->arch.altp2m_p2m[i];
+        p2m = d->arch.altp2m->altp2m_p2m[i];
         m = get_gfn_type_access(p2m, gfn_x(gfn), &t, &a, 0, NULL);
 
         /* Check for a dropped page that may impact this altp2m */
@@ -2621,10 +2628,10 @@ void p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
             for ( i = 0; i < MAX_ALTP2M; i++ )
             {
                 if ( i == last_reset_idx ||
-                     d->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )
+                     d->arch.altp2m->altp2m_eptp[i] == mfn_x(INVALID_MFN) )
                     continue;
 
-                p2m = d->arch.altp2m_p2m[i];
+                p2m = d->arch.altp2m->altp2m_p2m[i];
                 p2m_lock(p2m);
                 p2m_reset_altp2m(p2m);
                 p2m_unlock(p2m);
diff --git a/xen/common/monitor.c b/xen/common/monitor.c
index c73d1d5..d6b3f90 100644
--- a/xen/common/monitor.c
+++ b/xen/common/monitor.c
@@ -24,6 +24,7 @@
 #include <xen/sched.h>
 #include <xen/vm_event.h>
 #include <xsm/xsm.h>
+#include <asm/p2m.h>
 #include <asm/altp2m.h>
 #include <asm/monitor.h>
 #include <asm/vm_event.h>
diff --git a/xen/include/asm-x86/altp2m.h b/xen/include/asm-x86/altp2m.h
index a236ed7..a2e6740 100644
--- a/xen/include/asm-x86/altp2m.h
+++ b/xen/include/asm-x86/altp2m.h
@@ -24,7 +24,12 @@
 /* Alternate p2m HVM on/off per domain */
 static inline bool_t altp2m_active(const struct domain *d)
 {
-    return d->arch.altp2m_active;
+    return d->arch.altp2m->altp2m_active;
+}
+
+static inline void set_altp2m_active(const struct domain *d, bool_t v)
+{
+    d->arch.altp2m->altp2m_active = v;
 }
 
 /* Alternate p2m VCPU */
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 5807a1f..3b9fcc1 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -242,6 +242,7 @@ typedef xen_domctl_cpuid_t cpuid_input_t;
 #define INVALID_ALTP2M  0xffff
 #define MAX_EPTP        (PAGE_SIZE / sizeof(uint64_t))
 struct p2m_domain;
+struct altp2m_domain;
 struct time_scale {
     int shift;
     u32 mul_frac;
@@ -320,10 +321,7 @@ struct arch_domain
     mm_lock_t nested_p2m_lock;
 
     /* altp2m: allow multiple copies of host p2m */
-    bool_t altp2m_active;
-    struct p2m_domain *altp2m_p2m[MAX_ALTP2M];
-    mm_lock_t altp2m_list_lock;
-    uint64_t *altp2m_eptp;
+    struct altp2m_domain *altp2m;
 
     /* NB. protected by d->event_lock and by irq_desc[irq].lock */
     struct radix_tree_root irq_pirq;
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 0c5b391..d8c40a6 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -338,6 +338,13 @@ struct p2m_domain {
     };
 };
 
+struct altp2m_domain {
+    bool_t altp2m_active;
+    struct p2m_domain *altp2m_p2m[MAX_ALTP2M];
+    mm_lock_t altp2m_list_lock;
+    uint64_t *altp2m_eptp;
+};
+
 /* get host p2m table */
 #define p2m_get_hostp2m(d)      ((d)->arch.p2m)
 
@@ -778,7 +785,7 @@ static inline struct p2m_domain *p2m_get_altp2m(struct vcpu *v)
 
     BUG_ON(index >= MAX_ALTP2M);
 
-    return v->domain->arch.altp2m_p2m[index];
+    return v->domain->arch.altp2m->altp2m_p2m[index];
 }
 
 /* Switch alternate p2m for a single vcpu */
-- 
2.7.4