
Re: [Xen-devel] [PATCH v5 RFC 14/14] tools/libxc: noarch restore code



On 19/06/14 07:16, Hongyang Yang wrote:
> On 06/12/2014 02:14 AM, Andrew Cooper wrote:
>> Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
>> Signed-off-by: Frediano Ziglio <frediano.ziglio@xxxxxxxxxx>
>> Signed-off-by: David Vrabel <david.vrabel@xxxxxxxxxx>
>> ---
>>   tools/libxc/saverestore/common.h  |    6 +
>>   tools/libxc/saverestore/restore.c |  556 ++++++++++++++++++++++++++++++++++++-
>>   2 files changed, 561 insertions(+), 1 deletion(-)
>>
>> diff --git a/tools/libxc/saverestore/common.h b/tools/libxc/saverestore/common.h
>> index e16e0de..2d44961 100644
>> --- a/tools/libxc/saverestore/common.h
>> +++ b/tools/libxc/saverestore/common.h
>> @@ -292,6 +292,12 @@ static inline int write_record(struct context *ctx, struct record *rec)
>>       return write_split_record(ctx, rec, NULL, 0);
>>   }
>>
> ...snip...
>> +/*
>> + * Given a list of pfns, their types, and a block of page data from the
>> + * stream, populate and record their types, map the relevant subset and copy
>> + * the data into the guest.
>> + */
>> +static int process_page_data(struct context *ctx, unsigned count,
>> +                             xen_pfn_t *pfns, uint32_t *types, void *page_data)
>> +{
>> +    xc_interface *xch = ctx->xch;
>> +    xen_pfn_t *mfns = malloc(count * sizeof(*mfns));
>> +    int *map_errs = malloc(count * sizeof(*map_errs));
>> +    int rc = -1;
>> +    void *mapping = NULL, *guest_page = NULL;
>> +    unsigned i,    /* i indexes the pfns from the record. */
>> +        j,         /* j indexes the subset of pfns we decide to map. */
>> +        nr_pages;
>> +
>> +    if ( !mfns || !map_errs )
>> +    {
>> +        ERROR("Failed to allocate %zu bytes to process page data",
>> +              count * (sizeof(*mfns) + sizeof(*map_errs)));
>> +        goto err;
>> +    }
>> +
>> +    rc = populate_pfns(ctx, count, pfns, types);
>> +    if ( rc )
>> +    {
>> +        ERROR("Failed to populate pfns for batch of %u pages", count);
>> +        goto err;
>> +    }
>> +    rc = -1;
>> +
>> +    for ( i = 0, nr_pages = 0; i < count; ++i )
>> +    {
>> +        ctx->ops.set_page_type(ctx, pfns[i], types[i]);
>> +
>> +        switch ( types[i] )
>> +        {
>> +        case XEN_DOMCTL_PFINFO_NOTAB:
>> +
>> +        case XEN_DOMCTL_PFINFO_L1TAB:
>> +        case XEN_DOMCTL_PFINFO_L1TAB | XEN_DOMCTL_PFINFO_LPINTAB:
>> +
>> +        case XEN_DOMCTL_PFINFO_L2TAB:
>> +        case XEN_DOMCTL_PFINFO_L2TAB | XEN_DOMCTL_PFINFO_LPINTAB:
>> +
>> +        case XEN_DOMCTL_PFINFO_L3TAB:
>> +        case XEN_DOMCTL_PFINFO_L3TAB | XEN_DOMCTL_PFINFO_LPINTAB:
>> +
>> +        case XEN_DOMCTL_PFINFO_L4TAB:
>> +        case XEN_DOMCTL_PFINFO_L4TAB | XEN_DOMCTL_PFINFO_LPINTAB:
>> +
>> +            mfns[nr_pages++] = ctx->ops.pfn_to_gfn(ctx, pfns[i]);
>> +            break;
>> +        }
>> +
>> +    }
>> +
>> +    if ( nr_pages > 0 )
>> +    {
>> +        mapping = guest_page = xc_map_foreign_bulk(
>> +            xch, ctx->domid, PROT_READ | PROT_WRITE,
>> +            mfns, map_errs, nr_pages);
>> +        if ( !mapping )
>> +        {
>> +            PERROR("Unable to map %u mfns for %u pages of data",
>> +                   nr_pages, count);
>> +            goto err;
>> +        }
>> +    }
>> +
>> +    for ( i = 0, j = 0; i < count; ++i )
>> +    {
>> +        switch ( types[i] )
>> +        {
>> +        case XEN_DOMCTL_PFINFO_XTAB:
>> +        case XEN_DOMCTL_PFINFO_BROKEN:
>> +        case XEN_DOMCTL_PFINFO_XALLOC:
>> +            /* No page data to deal with. */
>> +            continue;
>> +        }
>> +
>> +        if ( map_errs[j] )
>> +        {
>> +            ERROR("Mapping pfn %lx (mfn %lx, type %#"PRIx32") failed with %d",
>> +                  pfns[i], mfns[j], types[i], map_errs[j]);
>
> A "rc = -1" is missing here; rc could be 0 at this point because of the
> following call:
>     rc = ctx->restore.ops.localise_page(ctx, types[i], guest_page);

So it can - well spotted.
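
The error path there wants to end up looking something like this (a
sketch against the quoted hunk, not the final patch; the err label and
the other identifiers are taken from the code above):

    if ( map_errs[j] )
    {
        /* Reset rc to the error value: it may still hold 0 from a
         * successful localise_page() call on an earlier iteration. */
        rc = -1;
        ERROR("Mapping pfn %lx (mfn %lx, type %#"PRIx32") failed with %d",
              pfns[i], mfns[j], types[i], map_errs[j]);
        goto err;
    }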

~Andrew



 

