
Re: [Xen-devel] [PATCH 4/5] x86/hvm: Misc non-functional cleanup to the HVM_PARAM infrastructure



> -----Original Message-----
> From: Andrew Cooper [mailto:andrew.cooper3@xxxxxxxxxx]
> Sent: 05 September 2018 19:12
> To: Xen-devel <xen-devel@xxxxxxxxxxxxx>
> Cc: Andrew Cooper <Andrew.Cooper3@xxxxxxxxxx>; Jan Beulich
> <JBeulich@xxxxxxxx>; Wei Liu <wei.liu2@xxxxxxxxxx>; Roger Pau Monne
> <roger.pau@xxxxxxxxxx>; Paul Durrant <Paul.Durrant@xxxxxxxxxx>; Stefano
> Stabellini <sstabellini@xxxxxxxxxx>; Julien Grall <julien.grall@xxxxxxx>
> Subject: [PATCH 4/5] x86/hvm: Misc non-functional cleanup to the
> HVM_PARAM infrastructure
> 
> The parameter marshalling and xsm checks are common to both the set and get
> paths.  Lift all of the common code out into do_hvm_op() and let
> hvmop_{get,set}_param() operate on struct xen_hvm_param directly.
> 
> This is somewhat noisy inside the two functions, as each a. reference has
> to become a-> instead.
> 
> In addition, drop an empty default statement, insert newlines as
> appropriate between cases, and drop the explicit IDENT_PT update on the
> fastpath, as the common path after the switch will make the update anyway.
> 
> No functional change, but a net shrink of 328 to do_hvm_op().
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>

Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
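
As an aside for anyone skimming the refactoring: the pattern is simply to
marshal the argument once in the dispatcher and hand the two handlers a
pointer to that copy. A standalone C sketch of the shape (illustrative
only, not the Xen code; all names here are made up):

    #include <stdint.h>
    #include <string.h>

    struct param { uint32_t index; uint64_t value; };

    /* Handlers now take a pointer, hence a->field rather than a.field. */
    static int set_param(struct param *a)
    {
        /* ... validate and store a->value for a->index ... */
        return a->index < 64 ? 0 : -1;
    }

    static int get_param(struct param *a)
    {
        a->value = 0;      /* ... fetch the value into *a ... */
        return 0;
    }

    static int do_op(unsigned int op, void *guest_arg)
    {
        struct param a;
        int rc;

        memcpy(&a, guest_arg, sizeof(a));   /* marshal exactly once */
        /* ... domain lookup and the XSM check also live here now ... */

        if ( op == 0 /* set */ )
            rc = set_param(&a);
        else
        {
            rc = get_param(&a);
            if ( !rc )
                memcpy(guest_arg, &a, sizeof(a));  /* copy back once */
        }

        return rc;
    }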

> ---
> CC: Jan Beulich <JBeulich@xxxxxxxx>
> CC: Wei Liu <wei.liu2@xxxxxxxxxx>
> CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
> CC: Paul Durrant <paul.durrant@xxxxxxxxxx>
> CC: Stefano Stabellini <sstabellini@xxxxxxxxxx>
> CC: Julien Grall <julien.grall@xxxxxxx>
> ---
>  xen/arch/x86/hvm/hvm.c | 223 +++++++++++++++++++++++--------------------------
>  1 file changed, 104 insertions(+), 119 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index 408e695..c891269 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -4065,11 +4065,7 @@ static int hvm_allow_set_param(struct domain *d,
>                                 const struct xen_hvm_param *a)
>  {
>      uint64_t value = d->arch.hvm.params[a->index];
> -    int rc;
> -
> -    rc = xsm_hvm_param(XSM_TARGET, d, HVMOP_set_param);
> -    if ( rc )
> -        return rc;
> +    int rc = 0;
> 
>      switch ( a->index )
>      {
> @@ -4133,62 +4129,46 @@ static int hvm_allow_set_param(struct domain *d,
>          if ( value != 0 && a->value != value )
>              rc = -EEXIST;
>          break;
> -    default:
> -        break;
>      }
> 
>      return rc;
>  }
> 
> -static int hvmop_set_param(
> -    XEN_GUEST_HANDLE_PARAM(xen_hvm_param_t) arg)
> +static int hvmop_set_param(struct domain *d, struct xen_hvm_param *a)
>  {
>      struct domain *curr_d = current->domain;
> -    struct xen_hvm_param a;
> -    struct domain *d;
>      struct vcpu *v;
>      int rc;
> 
> -    if ( copy_from_guest(&a, arg, 1) )
> -        return -EFAULT;
> -
> -    d = rcu_lock_domain_by_any_id(a.domid);
> -    if ( d == NULL )
> -        return -ESRCH;
> -
> -    rc = -EINVAL;
> -    if ( !is_hvm_domain(d) )
> -        goto out;
> -
> -    rc = hvm_allow_set_param(d, &a);
> +    rc = hvm_allow_set_param(d, a);
>      if ( rc )
>          goto out;
> 
> -    switch ( a.index )
> +    switch ( a->index )
>      {
>      case HVM_PARAM_CALLBACK_IRQ:
> -        hvm_set_callback_via(d, a.value);
> +        hvm_set_callback_via(d, a->value);
>          hvm_latch_shinfo_size(d);
>          break;
> +
>      case HVM_PARAM_TIMER_MODE:
> -        if ( a.value > HVMPTM_one_missed_tick_pending )
> +        if ( a->value > HVMPTM_one_missed_tick_pending )
>              rc = -EINVAL;
>          break;
> +
>      case HVM_PARAM_VIRIDIAN:
> -        if ( (a.value & ~HVMPV_feature_mask) ||
> -             !(a.value & HVMPV_base_freq) )
> +        if ( (a->value & ~HVMPV_feature_mask) ||
> +             !(a->value & HVMPV_base_freq) )
>              rc = -EINVAL;
>          break;
> +
>      case HVM_PARAM_IDENT_PT:
>          /*
>           * Only actually required for VT-x lacking unrestricted_guest
>           * capabilities.  Short circuit the pause if possible.
>           */
>          if ( !paging_mode_hap(d) || !cpu_has_vmx )
> -        {
> -            d->arch.hvm.params[a.index] = a.value;
>              break;
> -        }
> 
>          /*
>           * Update GUEST_CR3 in each VMCS to point at identity map.
> @@ -4201,117 +4181,123 @@ static int hvmop_set_param(
> 
>          rc = 0;
>          domain_pause(d);
> -        d->arch.hvm.params[a.index] = a.value;
> +        d->arch.hvm.params[a->index] = a->value;
>          for_each_vcpu ( d, v )
>              paging_update_cr3(v, false);
>          domain_unpause(d);
> 
>          domctl_lock_release();
>          break;
> +
>      case HVM_PARAM_DM_DOMAIN:
>          /* The only value this should ever be set to is DOMID_SELF */
> -        if ( a.value != DOMID_SELF )
> +        if ( a->value != DOMID_SELF )
>              rc = -EINVAL;
> 
> -        a.value = curr_d->domain_id;
> +        a->value = curr_d->domain_id;
>          break;
> +
>      case HVM_PARAM_ACPI_S_STATE:
>          rc = 0;
> -        if ( a.value == 3 )
> +        if ( a->value == 3 )
>              hvm_s3_suspend(d);
> -        else if ( a.value == 0 )
> +        else if ( a->value == 0 )
>              hvm_s3_resume(d);
>          else
>              rc = -EINVAL;
> -
>          break;
> +
>      case HVM_PARAM_ACPI_IOPORTS_LOCATION:
> -        rc = pmtimer_change_ioport(d, a.value);
> +        rc = pmtimer_change_ioport(d, a->value);
>          break;
> 
>      case HVM_PARAM_NESTEDHVM:
>          rc = xsm_hvm_param_nested(XSM_PRIV, d);
>          if ( rc )
>              break;
> -        if ( a.value > 1 )
> +        if ( a->value > 1 )
>              rc = -EINVAL;
>          /*
>           * Remove the check below once we have
>           * shadow-on-shadow.
>           */
> -        if ( !paging_mode_hap(d) && a.value )
> +        if ( !paging_mode_hap(d) && a->value )
>              rc = -EINVAL;
> -        if ( a.value &&
> +        if ( a->value &&
>               d->arch.hvm.params[HVM_PARAM_ALTP2M] )
>              rc = -EINVAL;
>          /* Set up NHVM state for any vcpus that are already up. */
> -        if ( a.value &&
> +        if ( a->value &&
>               !d->arch.hvm.params[HVM_PARAM_NESTEDHVM] )
>              for_each_vcpu(d, v)
>                  if ( rc == 0 )
>                      rc = nestedhvm_vcpu_initialise(v);
> -        if ( !a.value || rc )
> +        if ( !a->value || rc )
>              for_each_vcpu(d, v)
>                  nestedhvm_vcpu_destroy(v);
>          break;
> +
>      case HVM_PARAM_ALTP2M:
>          rc = xsm_hvm_param_altp2mhvm(XSM_PRIV, d);
>          if ( rc )
>              break;
> -        if ( a.value > XEN_ALTP2M_limited )
> +        if ( a->value > XEN_ALTP2M_limited )
>              rc = -EINVAL;
> -        if ( a.value &&
> +        if ( a->value &&
>               d->arch.hvm.params[HVM_PARAM_NESTEDHVM] )
>              rc = -EINVAL;
>          break;
> 
>      case HVM_PARAM_TRIPLE_FAULT_REASON:
> -        if ( a.value > SHUTDOWN_MAX )
> +        if ( a->value > SHUTDOWN_MAX )
>              rc = -EINVAL;
>          break;
> +
>      case HVM_PARAM_IOREQ_SERVER_PFN:
> -        d->arch.hvm.ioreq_gfn.base = a.value;
> +        d->arch.hvm.ioreq_gfn.base = a->value;
>          break;
> +
>      case HVM_PARAM_NR_IOREQ_SERVER_PAGES:
>      {
>          unsigned int i;
> 
> -        if ( a.value == 0 ||
> -             a.value > sizeof(d->arch.hvm.ioreq_gfn.mask) * 8 )
> +        if ( a->value == 0 ||
> +             a->value > sizeof(d->arch.hvm.ioreq_gfn.mask) * 8 )
>          {
>              rc = -EINVAL;
>              break;
>          }
> -        for ( i = 0; i < a.value; i++ )
> +        for ( i = 0; i < a->value; i++ )
>              set_bit(i, &d->arch.hvm.ioreq_gfn.mask);
> 
>          break;
>      }
> +
>      case HVM_PARAM_X87_FIP_WIDTH:
> -        if ( a.value != 0 && a.value != 4 && a.value != 8 )
> +        if ( a->value != 0 && a->value != 4 && a->value != 8 )
>          {
>              rc = -EINVAL;
>              break;
>          }
> -        d->arch.x87_fip_width = a.value;
> +        d->arch.x87_fip_width = a->value;
>          break;
> 
>      case HVM_PARAM_VM86_TSS:
>          /* Hardware would silently truncate high bits. */
> -        if ( a.value != (uint32_t)a.value )
> +        if ( a->value != (uint32_t)a->value )
>          {
>              if ( d == curr_d )
>                  domain_crash(d);
>              rc = -EINVAL;
>          }
>          /* Old hvmloader binaries hardcode the size to 128 bytes. */
> -        if ( a.value )
> -            a.value |= (128ULL << 32) | VM86_TSS_UPDATED;
> -        a.index = HVM_PARAM_VM86_TSS_SIZED;
> +        if ( a->value )
> +            a->value |= (128ULL << 32) | VM86_TSS_UPDATED;
> +        a->index = HVM_PARAM_VM86_TSS_SIZED;
>          break;
> 
>      case HVM_PARAM_VM86_TSS_SIZED:
> -        if ( (a.value >> 32) < sizeof(struct tss32) )
> +        if ( (a->value >> 32) < sizeof(struct tss32) )
>          {
>              if ( d == curr_d )
>                  domain_crash(d);
> @@ -4322,41 +4308,34 @@ static int hvmop_set_param(
>           * 256 bits interrupt redirection bitmap + 64k bits I/O bitmap
>           * plus one padding byte).
>           */
> -        if ( (a.value >> 32) > sizeof(struct tss32) +
> -                               (0x100 / 8) + (0x10000 / 8) + 1 )
> -            a.value = (uint32_t)a.value |
> -                      ((sizeof(struct tss32) + (0x100 / 8) +
> -                                               (0x10000 / 8) + 1) << 32);
> -        a.value |= VM86_TSS_UPDATED;
> +        if ( (a->value >> 32) > sizeof(struct tss32) +
> +                                (0x100 / 8) + (0x10000 / 8) + 1 )
> +            a->value = (uint32_t)a->value |
> +                       ((sizeof(struct tss32) + (0x100 / 8) +
> +                                                (0x10000 / 8) + 1) << 32);
> +        a->value |= VM86_TSS_UPDATED;
>          break;
> 
>      case HVM_PARAM_MCA_CAP:
> -        rc = vmce_enable_mca_cap(d, a.value);
> +        rc = vmce_enable_mca_cap(d, a->value);
>          break;
>      }
> 
>      if ( rc != 0 )
>          goto out;
> 
> -    d->arch.hvm.params[a.index] = a.value;
> +    d->arch.hvm.params[a->index] = a->value;
> 
>      HVM_DBG_LOG(DBG_LEVEL_HCALL, "set param %u = %"PRIx64,
> -                a.index, a.value);
> +                a->index, a->value);
> 
>   out:
> -    rcu_unlock_domain(d);
>      return rc;
>  }
> 
>  static int hvm_allow_get_param(struct domain *d,
>                                 const struct xen_hvm_param *a)
>  {
> -    int rc;
> -
> -    rc = xsm_hvm_param(XSM_TARGET, d, HVMOP_get_param);
> -    if ( rc )
> -        return rc;
> -
>      switch ( a->index )
>      {
>      /* The following parameters can be read by the guest and toolstack. */
> @@ -4371,7 +4350,7 @@ static int hvm_allow_get_param(struct domain *d,
>      case HVM_PARAM_CONSOLE_EVTCHN:
>      case HVM_PARAM_ALTP2M:
>      case HVM_PARAM_X87_FIP_WIDTH:
> -        break;
> +        return 0;
> 
>          /*
>           * The following parameters are intended for toolstack usage only.
> @@ -4397,59 +4376,41 @@ static int hvm_allow_get_param(struct domain *d,
>      case HVM_PARAM_IOREQ_SERVER_PFN:
>      case HVM_PARAM_NR_IOREQ_SERVER_PAGES:
>      case HVM_PARAM_MCA_CAP:
> -        if ( d == current->domain )
> -            rc = -EPERM;
> -        break;
> +        return d == current->domain ? -EPERM : 0;
> 
>          /* Hole, deprecated, or out-of-range. */
>      default:
> -        rc = -EINVAL;
> -        break;
> +        return -EINVAL;
>      }
> -
> -    return rc;
>  }
> 
> -static int hvmop_get_param(
> -    XEN_GUEST_HANDLE_PARAM(xen_hvm_param_t) arg)
> +static int hvmop_get_param(struct domain *d, struct xen_hvm_param *a)
>  {
> -    struct xen_hvm_param a;
> -    struct domain *d;
>      int rc;
> 
> -    if ( copy_from_guest(&a, arg, 1) )
> -        return -EFAULT;
> -
> -    d = rcu_lock_domain_by_any_id(a.domid);
> -    if ( d == NULL )
> -        return -ESRCH;
> -
> -    rc = -EINVAL;
> -    if ( !is_hvm_domain(d) )
> -        goto out;
> -
> -    rc = hvm_allow_get_param(d, &a);
> +    rc = hvm_allow_get_param(d, a);
>      if ( rc )
> -        goto out;
> +        return rc;
> 
> -    switch ( a.index )
> +    switch ( a->index )
>      {
>      case HVM_PARAM_ACPI_S_STATE:
> -        a.value = d->arch.hvm.is_s3_suspended ? 3 : 0;
> +        a->value = d->arch.hvm.is_s3_suspended ? 3 : 0;
>          break;
> 
>      case HVM_PARAM_VM86_TSS:
> -        a.value = (uint32_t)d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED];
> +        a->value = (uint32_t)d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED];
>          break;
> 
>      case HVM_PARAM_VM86_TSS_SIZED:
> -        a.value = d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED] &
> -                  ~VM86_TSS_UPDATED;
> +        a->value = d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED] &
> +                   ~VM86_TSS_UPDATED;
>          break;
> 
>      case HVM_PARAM_X87_FIP_WIDTH:
> -        a.value = d->arch.x87_fip_width;
> +        a->value = d->arch.x87_fip_width;
>          break;
> +
>      case HVM_PARAM_IOREQ_PFN:
>      case HVM_PARAM_BUFIOREQ_PFN:
>      case HVM_PARAM_BUFIOREQ_EVTCHN:
> @@ -4465,23 +4426,19 @@ static int hvmop_get_param(
>              rc = hvm_create_ioreq_server(d, true,
>                                           HVM_IOREQSRV_BUFIOREQ_LEGACY, NULL);
>              if ( rc != 0 && rc != -EEXIST )
> -                goto out;
> +                return rc;
>          }
> 
> -    /*FALLTHRU*/
> +        /* Fallthrough */
>      default:
> -        a.value = d->arch.hvm.params[a.index];
> +        a->value = d->arch.hvm.params[a->index];
>          break;
>      }
> 
> -    rc = __copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
> -
>      HVM_DBG_LOG(DBG_LEVEL_HCALL, "get param %u = %"PRIx64,
> -                a.index, a.value);
> +                a->index, a->value);
> 
> - out:
> -    rcu_unlock_domain(d);
> -    return rc;
> +    return 0;
>  }
> 
>  /*
> @@ -4896,14 +4853,42 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
>          break;
> 
>      case HVMOP_set_param:
> -        rc = hvmop_set_param(
> -            guest_handle_cast(arg, xen_hvm_param_t));
> -        break;
> -
>      case HVMOP_get_param:
> -        rc = hvmop_get_param(
> -            guest_handle_cast(arg, xen_hvm_param_t));
> +    {
> +        struct xen_hvm_param a;
> +        struct domain *d;
> +
> +        rc = -EFAULT;
> +        if ( copy_from_guest(&a, arg, 1) )
> +            break;
> +
> +        rc = -ESRCH;
> +        d = rcu_lock_domain_by_any_id(a.domid);
> +        if ( d == NULL )
> +            break;
> +
> +        rc = -EINVAL;
> +        if ( !is_hvm_domain(d) )
> +            goto param_out;
> +
> +        rc = xsm_hvm_param(XSM_TARGET, d, op);
> +        if ( rc )
> +            goto param_out;
> +
> +        if ( op == HVMOP_set_param )
> +            rc = hvmop_set_param(d, &a);
> +        else
> +        {
> +            rc = hvmop_get_param(d, &a);
> +
> +            if ( !rc && __copy_to_guest(arg, &a, 1) )
> +                rc = -EFAULT;
> +        }
> +
> +    param_out:
> +        rcu_unlock_domain(d);
>          break;
> +    }
> 
>      case HVMOP_flush_tlbs:
>          rc = guest_handle_is_null(arg) ? hvmop_flush_tlb_all() : -EINVAL;
> --
> 2.1.4
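
For context, the interface being reshuffled above is the one the toolstack
drives through libxenctrl. A minimal sketch of a caller, assuming the usual
xc_hvm_param_get() wrapper, sufficient privilege, and a hypothetical domid
of 1 (nothing in this patch changes the external behaviour):

    #include <stdio.h>
    #include <xenctrl.h>
    #include <xen/hvm/params.h>

    int main(void)
    {
        uint64_t width;
        xc_interface *xch = xc_interface_open(NULL, NULL, 0);

        if ( !xch )
            return 1;

        /* Read one guest-visible HVM param; domid 1 is hypothetical. */
        if ( xc_hvm_param_get(xch, 1, HVM_PARAM_X87_FIP_WIDTH, &width) == 0 )
            printf("x87 FIP width: %llu\n", (unsigned long long)width);

        xc_interface_close(xch);
        return 0;
    }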

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel
