Re: [Xen-devel] [PATCH 1/2] x86/domctl: cleanup
On Wed, Apr 01, 2015 at 04:31:02PM +0100, Andrew Cooper wrote:
> * latch curr/currd once at start
> * drop redundant "ret = 0" and braces
> * use "copyback = 1" when appropriate
> * move break statements inside case-specific braced scopes
> * don't bother checking for NULL before calling xfree()
> * eliminate trailing whitespace
> * Xen style corrections
>
> Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> CC: Keir Fraser <keir@xxxxxxx>
> CC: Jan Beulich <JBeulich@xxxxxxxx>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
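
For anyone unfamiliar with the two idioms being applied here: xfree(),
like free(), is defined to accept NULL, which is why the "if ( p != NULL )"
guards can simply go, and latching current/current->domain once at the top
avoids re-evaluating the per-CPU "current" lookup on every use. A minimal
standalone sketch of both idioms (plain C, not Xen code -- the structs and
the "current" macro below are simplified stand-ins):

#include <stdio.h>
#include <stdlib.h>

struct domain { int domain_id; };
struct vcpu   { struct domain *domain; };

/* Simplified stand-in for Xen's per-CPU "current" macro. */
static struct domain dom0  = { .domain_id = 0 };
static struct vcpu   vcpu0 = { .domain = &dom0 };
#define current (&vcpu0)

static void do_op(void)
{
    /* Latch curr/currd once instead of re-reading current->domain. */
    struct vcpu *curr = current;
    struct domain *currd = curr->domain;

    char *buf = malloc(32);   /* may legitimately be NULL */

    printf("running in dom%d\n", currd->domain_id);

    /*
     * free() (like Xen's xfree()) is a no-op on NULL, so the
     * "if ( buf != NULL )" guard the patch removes is redundant.
     */
    free(buf);
}

int main(void)
{
    do_op();
    return 0;
}
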
> ---
> xen/arch/x86/domctl.c | 276 ++++++++++++++++++++-----------------------------
> 1 file changed, 113 insertions(+), 163 deletions(-)
>
> diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
> index 32d3fcd..bcbdf95 100644
> --- a/xen/arch/x86/domctl.c
> +++ b/xen/arch/x86/domctl.c
> @@ -1,6 +1,6 @@
>
> /******************************************************************************
> * Arch-specific domctl.c
> - *
> + *
> * Copyright (c) 2002-2006, K A Fraser
> */
>
> @@ -39,7 +39,7 @@
>
> static int gdbsx_guest_mem_io(
> domid_t domid, struct xen_domctl_gdbsx_memio *iop)
> -{
> +{
> ulong l_uva = (ulong)iop->uva;
> iop->remain = dbg_rw_mem(
> (dbgva_t)iop->gva, (dbgbyte_t *)l_uva, iop->len, domid,
> @@ -53,6 +53,8 @@ long arch_do_domctl(
> struct xen_domctl *domctl, struct domain *d,
> XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
> {
> + struct vcpu *curr = current;
> + struct domain *currd = curr->domain;
> long ret = 0;
> bool_t copyback = 0;
> unsigned long i;
> @@ -61,15 +63,13 @@ long arch_do_domctl(
> {
>
> case XEN_DOMCTL_shadow_op:
> - {
> ret = paging_domctl(d, &domctl->u.shadow_op,
> guest_handle_cast(u_domctl, void), 0);
> if ( ret == -ERESTART )
> return hypercall_create_continuation(__HYPERVISOR_arch_1,
> "h", u_domctl);
> copyback = 1;
> - }
> - break;
> + break;
>
> case XEN_DOMCTL_ioport_permission:
> {
> @@ -79,8 +79,7 @@ long arch_do_domctl(
>
> if ( (fp + np) <= fp || (fp + np) > MAX_IOPORTS )
> ret = -EINVAL;
> - else if ( !ioports_access_permitted(current->domain,
> - fp, fp + np - 1) ||
> + else if ( !ioports_access_permitted(currd, fp, fp + np - 1) ||
> xsm_ioport_permission(XSM_HOOK, d, fp, fp + np - 1, allow) )
> ret = -EPERM;
> else if ( allow )
> @@ -89,8 +88,8 @@ long arch_do_domctl(
> ret = ioports_deny_access(d, fp, fp + np - 1);
> if ( !ret )
> memory_type_changed(d);
> + break;
> }
> - break;
>
> case XEN_DOMCTL_getpageframeinfo:
> {
> @@ -127,16 +126,16 @@ long arch_do_domctl(
> break;
> }
> }
> -
> +
> put_page(page);
> }
>
> copyback = 1;
> + break;
> }
> - break;
>
> case XEN_DOMCTL_getpageframeinfo3:
> - if (!has_32bit_shinfo(current->domain))
> + if ( !has_32bit_shinfo(currd) )
> {
> unsigned int n, j;
> unsigned int num = domctl->u.getpageframeinfo3.num;
> @@ -150,7 +149,7 @@ long arch_do_domctl(
> break;
> }
>
> - page = alloc_domheap_page(current->domain, MEMF_no_owner);
> + page = alloc_domheap_page(currd, MEMF_no_owner);
> if ( !page )
> {
> ret = -ENOMEM;
> @@ -251,7 +250,7 @@ long arch_do_domctl(
> ret = -ENOMEM;
> break;
> }
> -
> +
> ret = 0;
> for ( n = 0; n < num; )
> {
> @@ -266,9 +265,9 @@ long arch_do_domctl(
> ret = -EFAULT;
> break;
> }
> -
> +
> for ( j = 0; j < k; j++ )
> - {
> + {
> struct page_info *page;
> unsigned long gfn = arr32[j];
>
> @@ -320,8 +319,8 @@ long arch_do_domctl(
> }
>
> free_xenheap_page(arr32);
> + break;
> }
> - break;
>
> case XEN_DOMCTL_getmemlist:
> {
> @@ -329,7 +328,8 @@ long arch_do_domctl(
> uint64_t mfn;
> struct page_info *page;
>
> - if ( unlikely(d->is_dying) ) {
> + if ( unlikely(d->is_dying) )
> + {
> ret = -EINVAL;
> break;
> }
> @@ -346,7 +346,7 @@ long arch_do_domctl(
> * rather than trying to fix it we restrict it for the time being.
> */
> if ( /* No nested locks inside copy_to_guest_offset(). */
> - paging_mode_external(current->domain) ||
> + paging_mode_external(currd) ||
> /* Arbitrary limit capping processing time. */
> max_pfns > GB(4) / PAGE_SIZE )
> {
> @@ -375,8 +375,8 @@ long arch_do_domctl(
>
> domctl->u.getmemlist.num_pfns = i;
> copyback = 1;
> + break;
> }
> - break;
>
> case XEN_DOMCTL_hypercall_init:
> {
> @@ -403,15 +403,15 @@ long arch_do_domctl(
> unmap_domain_page(hypercall_page);
>
> put_page_and_type(page);
> + break;
> }
> - break;
>
> case XEN_DOMCTL_sethvmcontext:
> - {
> + {
> struct hvm_domain_context c = { .size = domctl->u.hvmcontext.size };
>
> ret = -EINVAL;
> - if ( !is_hvm_domain(d) )
> + if ( !is_hvm_domain(d) )
> goto sethvmcontext_out;
>
> ret = -ENOMEM;
> @@ -419,7 +419,7 @@ long arch_do_domctl(
> goto sethvmcontext_out;
>
> ret = -EFAULT;
> - if ( copy_from_guest(c.data, domctl->u.hvmcontext.buffer, c.size) != 0)
> + if ( copy_from_guest(c.data, domctl->u.hvmcontext.buffer, c.size) != 0 )
> goto sethvmcontext_out;
>
> domain_pause(d);
> @@ -427,17 +427,16 @@ long arch_do_domctl(
> domain_unpause(d);
>
> sethvmcontext_out:
> - if ( c.data != NULL )
> - xfree(c.data);
> + xfree(c.data);
> + break;
> }
> - break;
>
> case XEN_DOMCTL_gethvmcontext:
> - {
> + {
> struct hvm_domain_context c = { 0 };
>
> ret = -EINVAL;
> - if ( !is_hvm_domain(d) )
> + if ( !is_hvm_domain(d) )
> goto gethvmcontext_out;
>
> c.size = hvm_save_size(d);
> @@ -447,12 +446,12 @@ long arch_do_domctl(
> /* Client is querying for the correct buffer size */
> domctl->u.hvmcontext.size = c.size;
> ret = 0;
> - goto gethvmcontext_out;
> + goto gethvmcontext_out;
> }
>
> /* Check that the client has a big enough buffer */
> ret = -ENOSPC;
> - if ( domctl->u.hvmcontext.size < c.size )
> + if ( domctl->u.hvmcontext.size < c.size )
> goto gethvmcontext_out;
>
> /* Allocate our own marshalling buffer */
> @@ -470,16 +469,13 @@ long arch_do_domctl(
>
> gethvmcontext_out:
> copyback = 1;
> -
> - if ( c.data != NULL )
> - xfree(c.data);
> + xfree(c.data);
> + break;
> }
> - break;
>
> case XEN_DOMCTL_gethvmcontext_partial:
> - {
> ret = -EINVAL;
> - if ( !is_hvm_domain(d) )
> + if ( !is_hvm_domain(d) )
> break;
>
> domain_pause(d);
> @@ -487,12 +483,9 @@ long arch_do_domctl(
> domctl->u.hvmcontext_partial.instance,
> domctl->u.hvmcontext_partial.buffer);
> domain_unpause(d);
> - }
> - break;
> -
> + break;
>
> case XEN_DOMCTL_set_address_size:
> - {
> switch ( domctl->u.address_size.size )
> {
> case 32:
> @@ -505,39 +498,25 @@ long arch_do_domctl(
> ret = (domctl->u.address_size.size == BITS_PER_LONG) ? 0 : -EINVAL;
> break;
> }
> - }
> - break;
> + break;
>
> case XEN_DOMCTL_get_address_size:
> - {
> domctl->u.address_size.size =
> is_pv_32on64_domain(d) ? 32 : BITS_PER_LONG;
> -
> - ret = 0;
> copyback = 1;
> - }
> - break;
> + break;
>
> case XEN_DOMCTL_set_machine_address_size:
> - {
> - ret = -EBUSY;
> if ( d->tot_pages > 0 )
> - break;
> -
> - d->arch.physaddr_bitsize = domctl->u.address_size.size;
> -
> - ret = 0;
> - }
> - break;
> + ret = -EBUSY;
> + else
> + d->arch.physaddr_bitsize = domctl->u.address_size.size;
> + break;
>
> case XEN_DOMCTL_get_machine_address_size:
> - {
> domctl->u.address_size.size = d->arch.physaddr_bitsize;
> -
> - ret = 0;
> copyback = 1;
> - }
> - break;
> + break;
>
> case XEN_DOMCTL_sendtrigger:
> {
> @@ -551,40 +530,34 @@ long arch_do_domctl(
> switch ( domctl->u.sendtrigger.trigger )
> {
> case XEN_DOMCTL_SENDTRIGGER_NMI:
> - {
> ret = 0;
> if ( !test_and_set_bool(v->nmi_pending) )
> vcpu_kick(v);
> - }
> - break;
> + break;
>
> case XEN_DOMCTL_SENDTRIGGER_POWER:
> - {
> ret = -EINVAL;
> - if ( is_hvm_domain(d) )
> + if ( is_hvm_domain(d) )
> {
> ret = 0;
> hvm_acpi_power_button(d);
> }
> - }
> - break;
> + break;
>
> case XEN_DOMCTL_SENDTRIGGER_SLEEP:
> - {
> ret = -EINVAL;
> - if ( is_hvm_domain(d) )
> + if ( is_hvm_domain(d) )
> {
> ret = 0;
> hvm_acpi_sleep_button(d);
> }
> - }
> - break;
> + break;
>
> default:
> ret = -ENOSYS;
> }
> + break;
> }
> - break;
>
> case XEN_DOMCTL_bind_pt_irq:
> {
> @@ -601,7 +574,7 @@ long arch_do_domctl(
>
> irq = domain_pirq_to_irq(d, bind->machine_irq);
> ret = -EPERM;
> - if ( irq <= 0 || !irq_access_permitted(current->domain, irq) )
> + if ( irq <= 0 || !irq_access_permitted(currd, irq) )
> break;
>
> ret = -ESRCH;
> @@ -614,8 +587,8 @@ long arch_do_domctl(
> if ( ret < 0 )
> printk(XENLOG_G_ERR "pt_irq_create_bind failed (%ld) for
> dom%d\n",
> ret, d->domain_id);
> + break;
> }
> - break;
>
> case XEN_DOMCTL_unbind_pt_irq:
> {
> @@ -623,7 +596,7 @@ long arch_do_domctl(
> int irq = domain_pirq_to_irq(d, bind->machine_irq);
>
> ret = -EPERM;
> - if ( irq <= 0 || !irq_access_permitted(current->domain, irq) )
> + if ( irq <= 0 || !irq_access_permitted(currd, irq) )
> break;
>
> ret = xsm_unbind_pt_irq(XSM_HOOK, d, bind);
> @@ -639,8 +612,8 @@ long arch_do_domctl(
> if ( ret < 0 )
> printk(XENLOG_G_ERR "pt_irq_destroy_bind failed (%ld) for
> dom%d\n",
> ret, d->domain_id);
> + break;
> }
> - break;
>
> case XEN_DOMCTL_ioport_mapping:
> {
> @@ -663,7 +636,7 @@ long arch_do_domctl(
> }
>
> ret = -EPERM;
> - if ( !ioports_access_permitted(current->domain, fmp, fmp + np - 1) )
> + if ( !ioports_access_permitted(currd, fmp, fmp + np - 1) )
> break;
>
> ret = xsm_ioport_mapping(XSM_HOOK, d, fmp, fmp + np - 1, add);
> @@ -719,33 +692,29 @@ long arch_do_domctl(
> break;
> }
> ret = ioports_deny_access(d, fmp, fmp + np - 1);
> - if ( ret && is_hardware_domain(current->domain) )
> + if ( ret && is_hardware_domain(currd) )
> printk(XENLOG_ERR
> "ioport_map: error %ld denying dom%d access to
> [%x,%x]\n",
> ret, d->domain_id, fmp, fmp + np - 1);
> }
> if ( !ret )
> memory_type_changed(d);
> + break;
> }
> - break;
>
> case XEN_DOMCTL_pin_mem_cacheattr:
> - {
> ret = hvm_set_mem_pinned_cacheattr(
> d, domctl->u.pin_mem_cacheattr.start,
> domctl->u.pin_mem_cacheattr.end,
> domctl->u.pin_mem_cacheattr.type);
> - }
> - break;
> + break;
>
> case XEN_DOMCTL_set_ext_vcpucontext:
> case XEN_DOMCTL_get_ext_vcpucontext:
> {
> - struct xen_domctl_ext_vcpucontext *evc;
> + struct xen_domctl_ext_vcpucontext *evc = &domctl->u.ext_vcpucontext;
> struct vcpu *v;
>
> - evc = &domctl->u.ext_vcpucontext;
> -
> ret = -ESRCH;
> if ( (evc->vcpu >= d->max_vcpus) ||
> ((v = d->vcpu[evc->vcpu]) == NULL) )
> @@ -753,7 +722,7 @@ long arch_do_domctl(
>
> if ( domctl->cmd == XEN_DOMCTL_get_ext_vcpucontext )
> {
> - if ( v == current ) /* no vcpu_pause() */
> + if ( v == curr ) /* no vcpu_pause() */
> break;
>
> evc->size = sizeof(*evc);
> @@ -794,7 +763,7 @@ long arch_do_domctl(
> }
> else
> {
> - if ( d == current->domain ) /* no domain_pause() */
> + if ( d == currd ) /* no domain_pause() */
> break;
> ret = -EINVAL;
> if ( evc->size < offsetof(typeof(*evc), vmce) )
> @@ -848,8 +817,8 @@ long arch_do_domctl(
>
> domain_unpause(d);
> }
> + break;
> }
> - break;
>
> case XEN_DOMCTL_set_cpuid:
> {
> @@ -872,60 +841,50 @@ long arch_do_domctl(
> (cpuid->input[1] == ctl->input[1])) )
> break;
> }
> -
> +
> if ( i < MAX_CPUID_INPUT )
> *cpuid = *ctl;
> else if ( unused )
> *unused = *ctl;
> else
> ret = -ENOENT;
> + break;
> }
> - break;
>
> case XEN_DOMCTL_gettscinfo:
> - {
> - xen_guest_tsc_info_t info;
> -
> - ret = -EINVAL;
> - if ( d == current->domain ) /* no domain_pause() */
> - break;
> -
> - domain_pause(d);
> - tsc_get_info(d, &info.tsc_mode,
> - &info.elapsed_nsec,
> - &info.gtsc_khz,
> - &info.incarnation);
> - if ( copy_to_guest(domctl->u.tsc_info.out_info, &info, 1) )
> - ret = -EFAULT;
> + if ( d == currd ) /* no domain_pause() */
> + ret = -EINVAL;
> else
> - ret = 0;
> - domain_unpause(d);
> - }
> - break;
> + {
> + xen_guest_tsc_info_t info;
> +
> + domain_pause(d);
> + tsc_get_info(d, &info.tsc_mode,
> + &info.elapsed_nsec,
> + &info.gtsc_khz,
> + &info.incarnation);
> + domain_unpause(d);
> + copyback = 1;
> + }
> + break;
>
> case XEN_DOMCTL_settscinfo:
> - {
> - ret = -EINVAL;
> - if ( d == current->domain ) /* no domain_pause() */
> - break;
> -
> - domain_pause(d);
> - tsc_set_info(d, domctl->u.tsc_info.info.tsc_mode,
> - domctl->u.tsc_info.info.elapsed_nsec,
> - domctl->u.tsc_info.info.gtsc_khz,
> - domctl->u.tsc_info.info.incarnation);
> - domain_unpause(d);
> -
> - ret = 0;
> - }
> - break;
> + if ( d == currd ) /* no domain_pause() */
> + ret = -EINVAL;
> + else
> + {
> + domain_pause(d);
> + tsc_set_info(d, domctl->u.tsc_info.info.tsc_mode,
> + domctl->u.tsc_info.info.elapsed_nsec,
> + domctl->u.tsc_info.info.gtsc_khz,
> + domctl->u.tsc_info.info.incarnation);
> + domain_unpause(d);
> + }
> + break;
>
> case XEN_DOMCTL_suppress_spurious_page_faults:
> - {
> d->arch.suppress_spurious_page_faults = 1;
> - ret = 0;
> - }
> - break;
> + break;
>
> case XEN_DOMCTL_debug_op:
> {
> @@ -941,19 +900,15 @@ long arch_do_domctl(
> break;
>
> ret = hvm_debug_op(v, domctl->u.debug_op.op);
> + break;
> }
> - break;
>
> case XEN_DOMCTL_gdbsx_guestmemio:
> - {
> - domctl->u.gdbsx_guest_memio.remain =
> - domctl->u.gdbsx_guest_memio.len;
> -
> + domctl->u.gdbsx_guest_memio.remain = domctl->u.gdbsx_guest_memio.len;
> ret = gdbsx_guest_mem_io(domctl->domain,
> &domctl->u.gdbsx_guest_memio);
> if ( !ret )
> copyback = 1;
> - }
> - break;
> + break;
>
> case XEN_DOMCTL_gdbsx_pausevcpu:
> {
> @@ -967,8 +922,8 @@ long arch_do_domctl(
> (v = d->vcpu[domctl->u.gdbsx_pauseunp_vcpu.vcpu]) == NULL )
> break;
> ret = vcpu_pause_by_systemcontroller(v);
> + break;
> }
> - break;
>
> case XEN_DOMCTL_gdbsx_unpausevcpu:
> {
> @@ -985,9 +940,9 @@ long arch_do_domctl(
> if ( ret == -EINVAL )
> printk(XENLOG_G_WARNING
> "WARN: d%d attempting to unpause %pv which is not
> paused\n",
> - current->domain->domain_id, v);
> + currd->domain_id, v);
> + break;
> }
> - break;
>
> case XEN_DOMCTL_gdbsx_domstatus:
> {
> @@ -1009,29 +964,26 @@ long arch_do_domctl(
> }
> }
> }
> - ret = 0;
> copyback = 1;
> + break;
> }
> - break;
>
> case XEN_DOMCTL_setvcpuextstate:
> case XEN_DOMCTL_getvcpuextstate:
> {
> - struct xen_domctl_vcpuextstate *evc;
> + struct xen_domctl_vcpuextstate *evc = &domctl->u.vcpuextstate;
> struct vcpu *v;
> uint32_t offset = 0;
>
> #define PV_XSAVE_SIZE(xcr0) (2 * sizeof(uint64_t) + xstate_ctxt_size(xcr0))
>
> - evc = &domctl->u.vcpuextstate;
> -
> ret = -ESRCH;
> if ( (evc->vcpu >= d->max_vcpus) ||
> ((v = d->vcpu[evc->vcpu]) == NULL) )
> goto vcpuextstate_out;
>
> ret = -EINVAL;
> - if ( v == current ) /* no vcpu_pause() */
> + if ( v == curr ) /* no vcpu_pause() */
> goto vcpuextstate_out;
>
> if ( domctl->cmd == XEN_DOMCTL_getvcpuextstate )
> @@ -1137,31 +1089,26 @@ long arch_do_domctl(
> vcpuextstate_out:
> if ( domctl->cmd == XEN_DOMCTL_getvcpuextstate )
> copyback = 1;
> + break;
> }
> - break;
>
> case XEN_DOMCTL_mem_sharing_op:
> - {
> ret = mem_sharing_domctl(d, &domctl->u.mem_sharing_op);
> - }
> - break;
> + break;
>
> #if P2M_AUDIT
> case XEN_DOMCTL_audit_p2m:
> - {
> - if ( d == current->domain )
> - {
> + if ( d == currd )
> ret = -EPERM;
> - break;
> + else
> + {
> + audit_p2m(d,
> + &domctl->u.audit_p2m.orphans,
> + &domctl->u.audit_p2m.m2p_bad,
> + &domctl->u.audit_p2m.p2m_bad);
> + copyback = 1;
> }
> -
> - audit_p2m(d,
> - &domctl->u.audit_p2m.orphans,
> - &domctl->u.audit_p2m.m2p_bad,
> - &domctl->u.audit_p2m.p2m_bad);
> - copyback = 1;
> - }
> - break;
> + break;
> #endif /* P2M_AUDIT */
>
> case XEN_DOMCTL_set_broken_page_p2m:
> @@ -1176,8 +1123,8 @@ long arch_do_domctl(
> ret = p2m_change_type_one(d, pfn, pt, p2m_ram_broken);
>
> put_gfn(d, pfn);
> + break;
> }
> - break;
>
> case XEN_DOMCTL_get_vcpu_msrs:
> case XEN_DOMCTL_set_vcpu_msrs:
> @@ -1193,7 +1140,7 @@ long arch_do_domctl(
> break;
>
> ret = -EINVAL;
> - if ( (v == current) || /* no vcpu_pause() */
> + if ( (v == curr) || /* no vcpu_pause() */
> !is_pv_domain(d) )
> break;
>
> @@ -1303,8 +1250,8 @@ long arch_do_domctl(
> copyback = 1;
> }
> }
> + break;
> }
> - break;
>
> case XEN_DOMCTL_psr_cmt_op:
> if ( !psr_cmt_enabled() )
> @@ -1318,16 +1265,19 @@ long arch_do_domctl(
> case XEN_DOMCTL_PSR_CMT_OP_ATTACH:
> ret = psr_alloc_rmid(d);
> break;
> +
> case XEN_DOMCTL_PSR_CMT_OP_DETACH:
> if ( d->arch.psr_rmid > 0 )
> psr_free_rmid(d);
> else
> ret = -ENOENT;
> break;
> +
> case XEN_DOMCTL_PSR_CMT_OP_QUERY_RMID:
> domctl->u.psr_cmt_op.data = d->arch.psr_rmid;
> copyback = 1;
> break;
> +
> default:
> ret = -ENOSYS;
> break;
> --
> 1.7.10.4
>
>
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel