[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [PATCH v8 6/8] common/domain: add a domain context record for shared_info...



On 15.09.2020 18:17, Paul Durrant wrote:
> +static int load_shared_info(struct domain *d, struct domain_context *c)
> +{
> +    struct domain_shared_info_context ctxt;
> +    size_t hdr_size = offsetof(typeof(ctxt), buffer);
> +    unsigned int i;
> +    int rc;
> +
> +    rc = DOMAIN_LOAD_BEGIN(SHARED_INFO, c, &i);
> +    if ( rc )
> +        return rc;
> +
> +    if ( i ) /* expect only a single instance */
> +        return -ENXIO;
> +
> +    rc = domain_load_data(c, &ctxt, hdr_size);
> +    if ( rc )
> +        return rc;
> +
> +    if ( ctxt.buffer_size > sizeof(shared_info_t) ||
> +         (ctxt.flags & ~DOMAIN_SAVE_32BIT_SHINFO) )
> +        return -EINVAL;
> +
> +    if ( ctxt.flags & DOMAIN_SAVE_32BIT_SHINFO )
> +    {
> +#ifdef CONFIG_COMPAT
> +        has_32bit_shinfo(d) = true;
> +#else
> +        return -EINVAL;
> +#endif
> +    }
> +
> +    if ( is_pv_domain(d) )
> +    {
> +        shared_info_t *shinfo = xmalloc(shared_info_t);
> +
> +        rc = domain_load_data(c, shinfo, sizeof(*shinfo));

You need to check the allocation's success first. But of course the
question is why you don't read directly into d->shared_info. The
domain is paused at this point, isn't it?

> +        if ( rc )
> +        {
> +            xfree(shinfo);
> +            return rc;
> +        }
> +
> +#ifdef CONFIG_COMPAT
> +        if ( has_32bit_shinfo(d) )
> +        {
> +            memcpy(&d->shared_info->compat.vcpu_info,
> +                   &shinfo->compat.vcpu_info,
> +                   sizeof(d->shared_info->compat.vcpu_info));
> +            memcpy(&d->shared_info->compat.arch,
> +                   &shinfo->compat.arch,
> +                   sizeof(d->shared_info->compat.vcpu_info));
> +            memset(&d->shared_info->compat.evtchn_pending,
> +                   0,
> +                   sizeof(d->shared_info->compat.evtchn_pending));
> +            memset(&d->shared_info->compat.evtchn_mask,
> +                   0xff,
> +                   sizeof(d->shared_info->compat.evtchn_mask));
> +
> +            d->shared_info->compat.arch.pfn_to_mfn_frame_list_list = 0;
> +            for ( i = 0; i < XEN_LEGACY_MAX_VCPUS; i++ )
> +                d->shared_info->compat.vcpu_info[i].evtchn_pending_sel = 0;
> +        }
> +        else
> +        {
> +            memcpy(&d->shared_info->native.vcpu_info,
> +                   &shinfo->native.vcpu_info,
> +                   sizeof(d->shared_info->native.vcpu_info));
> +            memcpy(&d->shared_info->native.arch,
> +                   &shinfo->native.arch,
> +                   sizeof(d->shared_info->native.arch));
> +            memset(&d->shared_info->native.evtchn_pending,
> +                   0,
> +                   sizeof(d->shared_info->compat.evtchn_pending));
> +            memset(&d->shared_info->native.evtchn_mask,
> +                   0xff,
> +                   sizeof(d->shared_info->native.evtchn_mask));
> +
> +            d->shared_info->native.arch.pfn_to_mfn_frame_list_list = 0;
> +            for ( i = 0; i < XEN_LEGACY_MAX_VCPUS; i++ )
> +                d->shared_info->native.vcpu_info[i].evtchn_pending_sel = 0;
> +        }
> +#else
> +        memcpy(&d->shared_info->vcpu_info,
> +               &shinfo->vcpu_info,
> +               sizeof(d->shared_info->vcpu_info));
> +        memcpy(&d->shared_info->arch,
> +               &shinfo->arch,
> +               sizeof(d->shared_info->shared));
> +        memset(&d->shared_info->evtchn_pending,
> +               0,
> +               sizeof(d->shared_info->evtchn_pending));
> +        memset(&d->shared_info->evtchn_mask,
> +               0xff,
> +               sizeof(d->shared_info->evtchn_mask));
> +
> +        d->shared_info.arch.pfn_to_mfn_frame_list_list = 0;
> +        for ( i = 0; i < XEN_LEGACY_MAX_VCPUS; i++ )
> +            d->shared_info.vcpu_info[i].evtchn_pending_sel = 0;
> +#endif

A lot of redundancy; maybe it gets better if indeed you stop reading
into an intermediate buffer. The redundancy has in fact already bitten
you: in the compat branch the memcpy() of compat.arch uses
sizeof(...compat.vcpu_info) rather than sizeof(...compat.arch); in the
native branch the memset() of native.evtchn_pending is sized with the
*compat* evtchn_pending; and the !CONFIG_COMPAT variant both references
a non-existent ->shared member and uses '.' instead of '->' on
d->shared_info, so it can't even have been build-tested.

> +        xfree(shinfo);
> +
> +        rc = domain_load_end(c, false);
> +    }
> +    else
> +        rc = domain_load_end(c, true);

Perhaps at least a brief comment here wouldn't hurt, explaining what
(if anything) needs doing for HVM / Arm, and why the trailing data can
be ignored there?

Jan



 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.