[Xen-devel] [PATCH v2 Altp2m cleanup 1/3] altp2m cleanup work
Indent goto labels by one space
Inline (header) altp2m functions
Define default behavior in switch
Define max and min for range of altp2m macroed values

Signed-off-by: Paul Lai <paul.c.lai@xxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c        | 46 +++++++++++++++++++++-------------------------
 xen/include/asm-x86/hvm/hvm.h | 19 ++++++++++++++++---
 2 files changed, 37 insertions(+), 28 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 22f045e..69daa29 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1926,11 +1926,11 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
      * Otherwise, this is an error condition. */
     rc = fall_through;
 
-out_put_gfn:
+ out_put_gfn:
     __put_gfn(p2m, gfn);
     if ( ap2m_active )
         __put_gfn(hostp2m, gfn);
-out:
+ out:
     /* All of these are delayed until we exit, since we might
      * sleep on event ring wait queues, and we must not hold
      * locks in such circumstance */
@@ -5207,12 +5207,25 @@ static int do_altp2m_op(
         return -EFAULT;
 
     if ( a.pad1 || a.pad2 ||
-         (a.version != HVMOP_ALTP2M_INTERFACE_VERSION) ||
-         (a.cmd < HVMOP_altp2m_get_domain_state) ||
-         (a.cmd > HVMOP_altp2m_change_gfn) )
+         (a.version != HVMOP_ALTP2M_INTERFACE_VERSION) )
         return -EINVAL;
 
-    d = (a.cmd != HVMOP_altp2m_vcpu_enable_notify) ?
+    switch( a.cmd )
+    {
+    case HVMOP_altp2m_get_domain_state:
+    case HVMOP_altp2m_set_domain_state:
+    case HVMOP_altp2m_vcpu_enable_notify:
+    case HVMOP_altp2m_create_p2m:
+    case HVMOP_altp2m_destroy_p2m:
+    case HVMOP_altp2m_switch_p2m:
+    case HVMOP_altp2m_set_mem_access:
+    case HVMOP_altp2m_change_gfn:
+        break;
+    default:
+        return -ENOSYS;
+    }
+
+    d = ( a.cmd != HVMOP_altp2m_vcpu_enable_notify ) ?
         rcu_lock_domain_by_any_id(a.domain) : rcu_lock_current_domain();
 
     if ( d == NULL )
@@ -5329,6 +5342,8 @@ static int do_altp2m_op(
         rc = p2m_change_altp2m_gfn(d, a.u.change_gfn.view,
                  _gfn(a.u.change_gfn.old_gfn),
                  _gfn(a.u.change_gfn.new_gfn));
+    default:
+        return -EINVAL;
     }
 
  out:
@@ -5816,25 +5831,6 @@ void hvm_toggle_singlestep(struct vcpu *v)
     v->arch.hvm_vcpu.single_step = !v->arch.hvm_vcpu.single_step;
 }
 
-void altp2m_vcpu_update_p2m(struct vcpu *v)
-{
-    if ( hvm_funcs.altp2m_vcpu_update_p2m )
-        hvm_funcs.altp2m_vcpu_update_p2m(v);
-}
-
-void altp2m_vcpu_update_vmfunc_ve(struct vcpu *v)
-{
-    if ( hvm_funcs.altp2m_vcpu_update_vmfunc_ve )
-        hvm_funcs.altp2m_vcpu_update_vmfunc_ve(v);
-}
-
-bool_t altp2m_vcpu_emulate_ve(struct vcpu *v)
-{
-    if ( hvm_funcs.altp2m_vcpu_emulate_ve )
-        return hvm_funcs.altp2m_vcpu_emulate_ve(v);
-    return 0;
-}
-
 int hvm_set_mode(struct vcpu *v, int mode)
 {
 
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index f486ee9..231c921 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -589,13 +589,26 @@ static inline bool_t hvm_altp2m_supported(void)
 }
 
 /* updates the current hardware p2m */
-void altp2m_vcpu_update_p2m(struct vcpu *v);
+static inline void altp2m_vcpu_update_p2m(struct vcpu *v)
+{
+    if ( hvm_funcs.altp2m_vcpu_update_p2m )
+        hvm_funcs.altp2m_vcpu_update_p2m(v);
+}
 
 /* updates VMCS fields related to VMFUNC and #VE */
-void altp2m_vcpu_update_vmfunc_ve(struct vcpu *v);
+static inline void altp2m_vcpu_update_vmfunc_ve(struct vcpu *v)
+{
+    if ( hvm_funcs.altp2m_vcpu_update_vmfunc_ve )
+        hvm_funcs.altp2m_vcpu_update_vmfunc_ve(v);
+}
 
 /* emulates #VE */
-bool_t altp2m_vcpu_emulate_ve(struct vcpu *v);
+static inline bool_t altp2m_vcpu_emulate_ve(struct vcpu *v)
+{
+    if ( hvm_funcs.altp2m_vcpu_emulate_ve )
+        return hvm_funcs.altp2m_vcpu_emulate_ve(v);
+    return 0;
+}
 
 /* Check CR4/EFER values */
 const char *hvm_efer_valid(const struct vcpu *v, uint64_t value,
-- 
2.7.4

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel
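
For readers following along outside the Xen tree, a minimal standalone sketch of the two patterns this patch applies -- rejecting unknown commands with a switch whose default returns an error, and wrapping optional hook pointers in guarded static inlines -- might look like the following. All names here (demo_hooks, demo_funcs, demo_do_op, the DEMO_OP_* values) are hypothetical illustrations, not Xen code or its HVMOP_* interface.

/*
 * Standalone sketch (hypothetical names) of:
 *  1. validating a command via a switch with an explicit default, and
 *  2. guarded static-inline wrappers around optional function-pointer hooks.
 */
#include <stdio.h>

#define DEMO_OP_GET_STATE  0
#define DEMO_OP_SET_STATE  1
#define DEMO_OP_CHANGE_GFN 2

/* Table of optional hooks; any entry may be NULL if unimplemented. */
struct demo_hooks {
    void (*update_p2m)(int vcpu);
    int  (*emulate_ve)(int vcpu);
};

static struct demo_hooks demo_funcs = { .update_p2m = NULL, .emulate_ve = NULL };

/* Guarded inline wrappers: always safe to call, even with no hook installed. */
static inline void demo_update_p2m(int vcpu)
{
    if ( demo_funcs.update_p2m )
        demo_funcs.update_p2m(vcpu);
}

static inline int demo_emulate_ve(int vcpu)
{
    if ( demo_funcs.emulate_ve )
        return demo_funcs.emulate_ve(vcpu);
    return 0;
}

/* Switch-based validation: every known op is listed, anything else rejected. */
static int demo_do_op(unsigned int cmd, int vcpu)
{
    switch ( cmd )
    {
    case DEMO_OP_GET_STATE:
    case DEMO_OP_SET_STATE:
    case DEMO_OP_CHANGE_GFN:
        break;
    default:
        return -1;            /* unknown command */
    }

    demo_update_p2m(vcpu);    /* no-op here because no hook is installed */
    return demo_emulate_ve(vcpu);
}

int main(void)
{
    printf("known op -> %d, unknown op -> %d\n",
           demo_do_op(DEMO_OP_GET_STATE, 0), demo_do_op(42, 0));
    return 0;
}

Listing every accepted command in the switch means a newly added opcode is rejected until someone consciously handles it, and the static-inline wrappers let callers ignore whether a hook is implemented, which is the same effect the header changes above aim for.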