
Re: [PATCH v13 1/3] xen/mem_sharing: VM forking



On Mon, Apr 6, 2020 at 4:52 AM Roger Pau Monné <roger.pau@xxxxxxxxxx> wrote:
>
> On Mon, Mar 30, 2020 at 08:02:08AM -0700, Tamas K Lengyel wrote:
> > VM forking is the process of creating a domain with an empty memory space and a
> > parent domain specified from which to populate the memory when necessary. For
> > the new domain to be functional the VM state is copied over as part of the fork
> > operation (HVM params, hap allocation, etc).
> >
> > Signed-off-by: Tamas K Lengyel <tamas.lengyel@xxxxxxxxx>
> > Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
> > +static int bring_up_vcpus(struct domain *cd, struct domain *d)
> > +{
> > +    unsigned int i;
> > +    int ret = -EINVAL;
> > +
> > +    if ( d->max_vcpus != cd->max_vcpus ||
> > +        (ret = cpupool_move_domain(cd, d->cpupool)) )
> > +        return ret;
> > +
> > +    for ( i = 0; i < cd->max_vcpus; i++ )
> > +    {
> > +        if ( !d->vcpu[i] || cd->vcpu[i] )
> > +            continue;
> > +
> > +        if ( !vcpu_create(cd, i) )
> > +            return -EINVAL;
> > +    }
> > +
> > +    domain_update_node_affinity(cd);
> > +    return 0;
> > +}
> > +
> > +static int copy_vcpu_settings(struct domain *cd, struct domain *d)
>
> Nit: AFAICT *d can be constified.

Sure.
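
I.e. for the next version the prototype would become (sketch only, assuming
none of the helpers called on d object to a const pointer):

    static int copy_vcpu_settings(struct domain *cd, const struct domain *d)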

>
> > +{
> > +    unsigned int i;
> > +    struct p2m_domain *p2m = p2m_get_hostp2m(cd);
> > +    int ret = -EINVAL;
> > +
> > +    for ( i = 0; i < cd->max_vcpus; i++ )
> > +    {
> > +        const struct vcpu *d_vcpu = d->vcpu[i];
> > +        struct vcpu *cd_vcpu = cd->vcpu[i];
> > +        struct vcpu_runstate_info runstate;
> > +        mfn_t vcpu_info_mfn;
> > +
> > +        if ( !d_vcpu || !cd_vcpu )
> > +            continue;
> > +
> > +        /* Copy & map in the vcpu_info page if the guest uses one */
> > +        vcpu_info_mfn = d_vcpu->vcpu_info_mfn;
> > +        if ( !mfn_eq(vcpu_info_mfn, INVALID_MFN) )
> > +        {
> > +            mfn_t new_vcpu_info_mfn = cd_vcpu->vcpu_info_mfn;
> > +
> > +            /* Allocate & map the page for it if it hasn't been already */
> > +            if ( mfn_eq(new_vcpu_info_mfn, INVALID_MFN) )
> > +            {
> > +                gfn_t gfn = mfn_to_gfn(d, vcpu_info_mfn);
> > +                unsigned long gfn_l = gfn_x(gfn);
> > +                struct page_info *page;
> > +
> > +                if ( !(page = alloc_domheap_page(cd, 0)) )
> > +                    return -ENOMEM;
> > +
> > +                new_vcpu_info_mfn = page_to_mfn(page);
> > +                set_gpfn_from_mfn(mfn_x(new_vcpu_info_mfn), gfn_l);
> > +
> > +                ret = p2m->set_entry(p2m, gfn, new_vcpu_info_mfn,
> > +                                     PAGE_ORDER_4K, p2m_ram_rw,
> > +                                     p2m->default_access, -1);
> > +                if ( ret )
> > +                    return ret;
> > +
> > +                ret = map_vcpu_info(cd_vcpu, gfn_l,
> > +                                    PAGE_OFFSET(d_vcpu->vcpu_info));
> > +                if ( ret )
> > +                    return ret;
> > +            }
> > +
> > +            copy_domain_page(new_vcpu_info_mfn, vcpu_info_mfn);
> > +        }
> > +
> > +        /* Setup the vCPU runstate area */
> > +        if ( !guest_handle_is_null(runstate_guest(d_vcpu)) )
> > +        {
> > +            runstate_guest(cd_vcpu) = runstate_guest(d_vcpu);
> > +            vcpu_runstate_get(cd_vcpu, &runstate);
> > +            __copy_to_guest(runstate_guest(cd_vcpu), &runstate, 1);
>
> I just realized there's no need to copy the runstate area contents
> here, since they will get copied anyway in schedule_tail before
> resuming execution of cd_vcpu as long as runstate_guest is set.
>
> Note that the vcpu_info needs to be copied since it contains event
> channel info which is not unconditionally updated on context switch
> IIRC.

OK
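
So for the next revision I'll drop the vcpu_runstate_get / __copy_to_guest
pair and only wire up the guest handle, something along these lines
(untested sketch):

    /* Set up the vCPU runstate area; schedule_tail will sync the contents. */
    if ( !guest_handle_is_null(runstate_guest(d_vcpu)) )
        runstate_guest(cd_vcpu) = runstate_guest(d_vcpu);

The vcpu_info copy above stays as-is since the event channel state in it
isn't refreshed on context switch.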

>
> > +        }
> > +
> > +        /*
> > +         * TODO: to support VMs with PV interfaces copy additional
> > +         * settings here, such as PV timers.
> > +         */
> > +    }
> > +
> > +    return 0;
> > +}
> > +
> > +static int fork_hap_allocation(struct domain *cd, struct domain *d)
> > +{
> > +    int rc;
> > +    bool preempted;
> > +    unsigned long mb = hap_get_allocation(d);
> > +
> > +    if ( mb == hap_get_allocation(cd) )
> > +        return 0;
> > +
> > +    paging_lock(cd);
> > +    rc = hap_set_allocation(cd, mb << (20 - PAGE_SHIFT), &preempted);
> > +    paging_unlock(cd);
> > +
> > +    return preempted ? -ERESTART : rc;
> > +}
> > +
> > +static void copy_tsc(struct domain *cd, struct domain *d)
> > +{
> > +    uint32_t tsc_mode;
> > +    uint32_t gtsc_khz;
> > +    uint32_t incarnation;
> > +    uint64_t elapsed_nsec;
> > +
> > +    tsc_get_info(d, &tsc_mode, &elapsed_nsec, &gtsc_khz, &incarnation);
> > +    /* Don't bump incarnation on set */
> > +    tsc_set_info(cd, tsc_mode, elapsed_nsec, gtsc_khz, incarnation - 1);
> > +}
> > +
> > +static int copy_special_pages(struct domain *cd, struct domain *d)
> > +{
> > +    mfn_t new_mfn, old_mfn;
> > +    struct p2m_domain *p2m = p2m_get_hostp2m(cd);
> > +    static const unsigned int params[] =
> > +    {
> > +        HVM_PARAM_STORE_PFN,
> > +        HVM_PARAM_IOREQ_PFN,
> > +        HVM_PARAM_BUFIOREQ_PFN,
> > +        HVM_PARAM_CONSOLE_PFN
> > +    };
> > +    unsigned int i;
> > +    int rc;
> > +
> > +    for ( i = 0; i < ARRAY_SIZE(params); i++ )
> > +    {
> > +        p2m_type_t t;
> > +        uint64_t value = 0;
> > +        struct page_info *page;
> > +
> > +        if ( hvm_get_param(cd, params[i], &value) || !value )
>
> Don't you need to use d here instead of cd? You want to check whether
> the parent has this parameter set in order to copy it to the child I
> think.

Indeed, I probably made this error in one of the revisions when I
renamed the variable.
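
So the check should be against the parent, i.e. something like this in the
next version (sketch, assuming unset params are simply skipped):

    /* Look up the parent's (d) HVM param, not the child's (cd). */
    if ( hvm_get_param(d, params[i], &value) || !value )
        continue;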

>
> With that:
>
> Reviewed-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>

Thanks,
Tamas
