
Re: [Xen-devel] [PATCH v14 05/11] x86/hvm: Introduce hvm_save_cpu_msrs_one func



>>> On 25.07.18 at 14:14, <aisaila@xxxxxxxxxxxxxxx> wrote:
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -1366,69 +1366,80 @@ static const uint32_t msrs_to_send[] = {
>  };
>  static unsigned int __read_mostly msr_count_max = ARRAY_SIZE(msrs_to_send);
>  
> -static int hvm_save_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
> +static int hvm_save_cpu_msrs_one(struct vcpu *v, hvm_domain_context_t *h)
>  {
> -    struct vcpu *v;
> +    struct hvm_save_descriptor *desc = _p(&h->data[h->cur]);
> +    struct hvm_msr *ctxt;
> +    unsigned int i;
> +    int err = 0;

Pointless initializer again.
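
That is, just (a sketch only, re-using the declarations and the call
from the patch):

    struct hvm_msr *ctxt;
    unsigned int i;
    int err;

    err = _hvm_init_entry(h, CPU_MSR_CODE, v->vcpu_id,
                          HVM_CPU_MSR_SIZE(msr_count_max));
    if ( err )
        return err;

err gets assigned before its first use anyway, so the "= 0" serves no
purpose.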

> -    for_each_vcpu ( d, v )
> +    err = _hvm_init_entry(h, CPU_MSR_CODE, v->vcpu_id,
> +                          HVM_CPU_MSR_SIZE(msr_count_max));
> +    if ( err )
> +        return err;
> +    ctxt = (struct hvm_msr *)&h->data[h->cur];
> +    ctxt->count = 0;
> +
> +    for ( i = 0; i < ARRAY_SIZE(msrs_to_send); ++i )
>      {
> -        struct hvm_save_descriptor *desc = _p(&h->data[h->cur]);
> -        struct hvm_msr *ctxt;
> -        unsigned int i;
> +        uint64_t val;
> +        int rc = guest_rdmsr(v, msrs_to_send[i], &val);
>  
> -        if ( _hvm_init_entry(h, CPU_MSR_CODE, v->vcpu_id,
> -                             HVM_CPU_MSR_SIZE(msr_count_max)) )
> -            return 1;
> -        ctxt = (struct hvm_msr *)&h->data[h->cur];
> -        ctxt->count = 0;
> +        /*
> +         * It is the programmer's responsibility to ensure that
> +         * msrs_to_send[] contains generally-read/write MSRs.
> +         * X86EMUL_EXCEPTION here implies a missing feature, and that the
> +         * guest doesn't have access to the MSR.
> +         */
> +        if ( rc == X86EMUL_EXCEPTION )
> +            continue;
>  
> -        for ( i = 0; i < ARRAY_SIZE(msrs_to_send); ++i )
> +        if ( rc != X86EMUL_OKAY )
>          {
> -            uint64_t val;
> -            int rc = guest_rdmsr(v, msrs_to_send[i], &val);
> +            ASSERT_UNREACHABLE();
> +            return -ENXIO;
> +        }
>  
> -            /*
> -             * It is the programmers responsibility to ensure that
> -             * msrs_to_send[] contain generally-read/write MSRs.
> -             * X86EMUL_EXCEPTION here implies a missing feature, and that the
> -             * guest doesn't have access to the MSR.
> -             */
> -            if ( rc == X86EMUL_EXCEPTION )
> -                continue;
> +        if ( !val )
> +            continue; /* Skip empty MSRs. */
>  
> -            if ( rc != X86EMUL_OKAY )
> -            {
> -                ASSERT_UNREACHABLE();
> -                return -ENXIO;
> -            }
> +        ctxt->msr[ctxt->count].index = msrs_to_send[i];
> +        ctxt->msr[ctxt->count++].val = val;
> +    }
>  
> -            if ( !val )
> -                continue; /* Skip empty MSRs. */
> +    if ( hvm_funcs.save_msr )
> +        hvm_funcs.save_msr(v, ctxt);
>  
> -            ctxt->msr[ctxt->count].index = msrs_to_send[i];
> -            ctxt->msr[ctxt->count++].val = val;
> -        }
> +    ASSERT(ctxt->count <= msr_count_max);
>  
> -        if ( hvm_funcs.save_msr )
> -            hvm_funcs.save_msr(v, ctxt);
> +    for ( i = 0; i < ctxt->count; ++i )
> +        ctxt->msr[i]._rsvd = 0;
>  
> -        ASSERT(ctxt->count <= msr_count_max);
> +    if ( ctxt->count )
> +    {
> +        /* Rewrite length to indicate how much space we actually used. */
> +        desc->length = HVM_CPU_MSR_SIZE(ctxt->count);
> +        h->cur += HVM_CPU_MSR_SIZE(ctxt->count);
> +    }
> +    else
> +        /* or rewind and remove the descriptor from the stream. */
> +        h->cur -= sizeof(struct hvm_save_descriptor);
> +    return 0;
> +}

And a blank line ahead of the final return again, please. I'm not going
to repeat these (or other sufficiently generic remarks given earlier),
and I'll assume you'll apply them to the entire series.
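
Not a further issue, just for cross-checking (a sketch only; the exact
shape is of course dictated by the rest of the series): I'd expect the
remaining per-domain wrapper to then reduce to something like

static int hvm_save_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
{
    struct vcpu *v;
    int err = 0;

    for_each_vcpu ( d, v )
    {
        err = hvm_save_cpu_msrs_one(v, h);
        if ( err )
            break;
    }

    return err;
}

(note the blank line ahead of the final return, matching the request
above).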

Jan


