[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH RFC v1 56/74] xen/pvshim: add grant table operations



>>> On 04.01.18 at 14:06, <wei.liu2@xxxxxxxxxx> wrote:
> @@ -30,11 +31,17 @@
>  #include <asm/guest.h>
>  #include <asm/pv/mm.h>
>  
> +#include <compat/grant_table.h>

Interesting: The event channel patch gave me the impression that
it is not intended to deal with 32-bit guests.

> @@ -360,6 +367,173 @@ void pv_shim_inject_evtchn(unsigned int port)
>      }
>  }
>  
> +long pv_shim_grant_table_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) 
> uop,
> +                            unsigned int count, bool compat)
> +{
> +    struct domain *d = current->domain;
> +    long rc = 0;
> +
> +    if ( count != 1 )
> +        return -EINVAL;
> +
> +    switch ( cmd )
> +    {
> +    case GNTTABOP_setup_table:
> +    {
> +        struct gnttab_setup_table nat;
> +        struct compat_gnttab_setup_table cmp;
> +        unsigned int i;
> +
> +        if ( unlikely(compat ? copy_from_guest(&cmp, uop, 1)
> +                             : copy_from_guest(&nat, uop, 1)) ||
> +             unlikely(compat ? !compat_handle_okay(cmp.frame_list,
> +                                                   cmp.nr_frames)
> +                             : !guest_handle_okay(nat.frame_list,
> +                                                  nat.nr_frames)) )
> +        {
> +            rc = -EFAULT;
> +            break;
> +        }
> +        if ( compat )
> +#define XLAT_gnttab_setup_table_HNDL_frame_list(d, s)
> +                XLAT_gnttab_setup_table(&nat, &cmp);
> +#undef XLAT_gnttab_setup_table_HNDL_frame_list
> +
> +        nat.status = GNTST_okay;
> +
> +        spin_lock(&grant_lock);
> +        if ( !nr_grant_list )
> +        {
> +            struct gnttab_query_size query_size = {
> +                .dom = DOMID_SELF,
> +            };
> +
> +            rc = xen_hypercall_grant_table_op(GNTTABOP_query_size,
> +                                              &query_size, 1);
> +            if ( rc )
> +            {
> +                spin_unlock(&grant_lock);
> +                break;
> +            }
> +
> +            ASSERT(!grant_frames);
> +            grant_frames = xzalloc_array(unsigned long,
> +                                         query_size.max_nr_frames);

Hmm, such runtime allocations (especially when the amount can
be large) are a fundamental problem. I think this needs setting
up before the guest is started.

> +            if ( !grant_frames )
> +            {
> +                spin_unlock(&grant_lock);
> +                rc = -ENOMEM;
> +                break;
> +            }
> +
> +            nr_grant_list = query_size.max_nr_frames;
> +        }
> +
> +        if ( nat.nr_frames > nr_grant_list )
> +        {
> +            spin_unlock(&grant_lock);
> +            rc = -EINVAL;
> +            break;
> +        }
> +
> +        for ( i = 0; i < nat.nr_frames; i++ )
> +        {
> +            if ( !grant_frames[i] )
> +            {
> +                struct xen_add_to_physmap xatp = {
> +                    .domid = DOMID_SELF,
> +                    .idx = i,
> +                    .space = XENMAPSPACE_grant_table,
> +                };
> +                mfn_t mfn;
> +
> +                rc = hypervisor_alloc_unused_page(&mfn);
> +                if ( rc )
> +                {
> +                    gprintk(XENLOG_ERR,
> +                            "unable to get memory for grant table\n");
> +                    break;
> +                }
> +
> +                xatp.gpfn = mfn_x(mfn);
> +                rc = xen_hypercall_memory_op(XENMEM_add_to_physmap, &xatp);
> +                if ( rc )
> +                {
> +                    hypervisor_free_unused_page(mfn);
> +                    break;
> +                }
> +
> +                BUG_ON(iomem_permit_access(d, mfn_x(mfn), mfn_x(mfn)));
> +                grant_frames[i] = mfn_x(mfn);
> +            }
> +
> +            ASSERT(grant_frames[i]);
> +            if ( compat )
> +            {
> +                compat_pfn_t pfn = grant_frames[i];
> +
> +                if ( __copy_to_compat_offset(cmp.frame_list, i, &pfn, 1) )
> +                {
> +                    nat.status = GNTST_bad_virt_addr;
> +                    rc = -EFAULT;
> +                    break;
> +                }
> +            }
> +            else if ( __copy_to_guest_offset(nat.frame_list, i,
> +                                             &grant_frames[i], 1) )
> +            {
> +                nat.status = GNTST_bad_virt_addr;
> +                rc = -EFAULT;
> +                break;
> +            }
> +        }
> +        spin_unlock(&grant_lock);
> +
> +        if ( compat )
> +#define XLAT_gnttab_setup_table_HNDL_frame_list(d, s)
> +                XLAT_gnttab_setup_table(&cmp, &nat);
> +#undef XLAT_gnttab_setup_table_HNDL_frame_list
> +
> +        if ( unlikely(compat ? copy_to_guest(uop, &cmp, 1)
> +                             : copy_to_guest(uop, &nat, 1)) )

__copy_to_guest()

> +        {
> +            rc = -EFAULT;
> +            break;
> +        }
> +
> +        break;
> +    }
> +    case GNTTABOP_query_size:

Blank line above such "case" please.

> +    {
> +        struct gnttab_query_size op;
> +        int rc;
> +
> +        if ( unlikely(copy_from_guest(&op, uop, 1)) )
> +        {
> +            rc = -EFAULT;
> +            break;
> +        }
> +
> +        rc = xen_hypercall_grant_table_op(GNTTABOP_query_size, &op, count);
> +        if ( rc )
> +            break;
> +
> +        if ( copy_to_guest(uop, &op, 1) )

__copy_to_guest() (assuming this copying in and out is necessary
in the first place).

> +        {
> +            rc = -EFAULT;
> +            break;
> +        }
> +
> +        break;
> +    }
> +    default:
> +        rc = -ENOSYS;

-EOPNOTSUPP again please. Plus - what about other sub-ops?

> @@ -3324,6 +3328,12 @@ do_grant_table_op(
>      if ( (cmd &= GNTTABOP_CMD_MASK) != GNTTABOP_cache_flush && opaque_in )
>          return -EINVAL;
>  
> +#ifdef CONFIG_X86
> +    if ( pv_shim )
> +        /* NB: no continuation support for pv-shim ops. */
> +        return pv_shim_grant_table_op(cmd, uop, count, false);
> +#endif

As for event channels - patch it right into the hypercall table?

Jan

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.