
Re: [Xen-devel] [PATCH v3 09/15] argo: implement the sendv op; evtchn: expose send_guest_global_virq



On Mon, Jan 7, 2019 at 2:43 AM Christopher Clark
<christopher.w.clark@xxxxxxxxx> wrote:

<snip>

> @@ -342,6 +357,413 @@ update_tx_ptr(struct argo_ring_info *ring_info, uint32_t tx_ptr)
>      smp_wmb();
>  }
>
> +static int
> +memcpy_to_guest_ring(struct argo_ring_info *ring_info, uint32_t offset,
> +                     const void *src, XEN_GUEST_HANDLE(uint8_t) src_hnd,
> +                     uint32_t len)
> +{
> +    unsigned int mfns_index = offset >> PAGE_SHIFT;
> +    void *dst;
> +    int ret;
> +    unsigned int src_offset = 0;
> +
> +    ASSERT(spin_is_locked(&ring_info->lock));
> +
> +    offset &= ~PAGE_MASK;
> +
> +    if ( (len > XEN_ARGO_MAX_RING_SIZE) || (offset > XEN_ARGO_MAX_RING_SIZE) )
> +        return -EFAULT;
> +
> +    while ( (offset + len) > PAGE_SIZE )
> +    {
> +        unsigned int head_len = PAGE_SIZE - offset;

I think this while loop could be re-written as

    while ( len )
    {
        unsigned int head_len = (len > PAGE_SIZE - offset) ?
                                    PAGE_SIZE - offset : len;

and then the extra copying below, outside the loop, could be dropped.
(The comparison needs to be against the space remaining in the current
page rather than PAGE_SIZE itself, so that a short copy near the end of
a page doesn't run past the page boundary.)

The first iteration does a partial copy starting at offset and then
sets offset to 0. The next N iterations copy exactly PAGE_SIZE bytes
each. The final iteration copies the remaining len bytes.
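For concreteness, here's a rough, untested sketch of the whole function
with that change applied (same helpers, locking and bounds checks as in
the patch; the dprintk on map failure is omitted for brevity):

    static int
    memcpy_to_guest_ring(struct argo_ring_info *ring_info, uint32_t offset,
                         const void *src, XEN_GUEST_HANDLE(uint8_t) src_hnd,
                         uint32_t len)
    {
        unsigned int mfns_index = offset >> PAGE_SHIFT;
        void *dst;
        int ret;
        unsigned int src_offset = 0;

        ASSERT(spin_is_locked(&ring_info->lock));

        offset &= ~PAGE_MASK;

        if ( (len > XEN_ARGO_MAX_RING_SIZE) || (offset > XEN_ARGO_MAX_RING_SIZE) )
            return -EFAULT;

        while ( len )
        {
            /* bound each copy to the end of the current page */
            unsigned int head_len = (len > PAGE_SIZE - offset) ?
                                        PAGE_SIZE - offset : len;

            ret = ring_map_page(ring_info, mfns_index, &dst);
            if ( ret )
                return ret;

            if ( src )
            {
                memcpy(dst + offset, src + src_offset, head_len);
                src_offset += head_len;
            }
            else
            {
                if ( copy_from_guest(dst + offset, src_hnd, head_len) )
                    return -EFAULT;

                guest_handle_add_offset(src_hnd, head_len);
            }

            mfns_index++;
            len -= head_len;
            /* only the first page is copied at a non-zero offset */
            offset = 0;
        }

        return 0;
    }

One behavioural difference: with len == 0 this version returns without
mapping any page, so if any caller relies on that final ring_map_page()
call it would need to be preserved.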

> +
> +        ret = ring_map_page(ring_info, mfns_index, &dst);
> +        if ( ret )
> +            return ret;
> +
> +        if ( src )
> +        {
> +            memcpy(dst + offset, src + src_offset, head_len);
> +            src_offset += head_len;
> +        }
> +        else
> +        {
> +            ret = copy_from_guest(dst + offset, src_hnd, head_len) ?
> +                    -EFAULT : 0;
> +            if ( ret )
> +                return ret;
> +
> +            guest_handle_add_offset(src_hnd, head_len);
> +        }
> +
> +        mfns_index++;
> +        len -= head_len;
> +        offset = 0;
> +    }
> +
> +    ret = ring_map_page(ring_info, mfns_index, &dst);
> +    if ( ret )
> +    {
> +        argo_dprintk("argo: ring (vm%u:%x vm%d) %p attempted to map page"
> +                     " %d of %d\n", ring_info->id.domain_id, ring_info->id.port,
> +                     ring_info->id.partner_id, ring_info, mfns_index,
> +                     ring_info->nmfns);
> +        return ret;
> +    }
> +
> +    if ( src )
> +        memcpy(dst + offset, src + src_offset, len);
> +    else
> +        ret = copy_from_guest(dst + offset, src_hnd, len) ? -EFAULT : 0;
> +
> +    return ret;
> +}

<snip>

> +
> +/*
> + * iov_count returns its count on success via an out variable to avoid
> + * potential for a negative return value to be used incorrectly
> + * (eg. coerced into an unsigned variable resulting in a large incorrect value)
> + */
> +static int
> +iov_count(const xen_argo_iov_t *piov, unsigned long niov, uint32_t *count)
> +{
> +    uint32_t sum_iov_lens = 0;
> +
> +    if ( niov > XEN_ARGO_MAXIOV )
> +        return -EINVAL;
> +
> +    while ( niov-- )
> +    {
> +        /* valid iovs must have the padding field set to zero */
> +        if ( piov->pad )
> +        {
> +            argo_dprintk("invalid iov: padding is not zero\n");
> +            return -EINVAL;
> +        }
> +
> +        /* check each to protect sum against integer overflow */
> +        if ( piov->iov_len > XEN_ARGO_MAX_RING_SIZE )

Should this be MAX_ARGO_MESSAGE_SIZE?  MAX_ARGO_MESSAGE_SIZE is less
than XEN_ARGO_MAX_RING_SIZE, so an iov_len between the two limits can
pass this check and then just fail the sum check below.
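
i.e. something like (untested, and the dprintk format specifier may
need adjusting to match how MAX_ARGO_MESSAGE_SIZE is defined):

    /* check each to protect sum against integer overflow */
    if ( piov->iov_len > MAX_ARGO_MESSAGE_SIZE )
    {
        argo_dprintk("invalid iov_len: too big (%u)>%llu\n",
                     piov->iov_len,
                     (unsigned long long)MAX_ARGO_MESSAGE_SIZE);
        return -EINVAL;
    }

That way an oversized individual iov_len fails here directly instead
of passing this check and only tripping the sum check below.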

> +        {
> +            argo_dprintk("invalid iov_len: too big (%u)>%llu\n",
> +                         piov->iov_len, XEN_ARGO_MAX_RING_SIZE);
> +            return -EINVAL;
> +        }
> +
> +        sum_iov_lens += piov->iov_len;
> +
> +        /*
> +         * Again protect sum from integer overflow
> +         * and ensure total msg size will be within bounds.
> +         */
> +        if ( sum_iov_lens > MAX_ARGO_MESSAGE_SIZE )
> +        {
> +            argo_dprintk("invalid iov series: total message too big\n");
> +            return -EMSGSIZE;
> +        }
> +
> +        piov++;
> +    }
> +
> +    *count = sum_iov_lens;
> +
> +    return 0;
> +}
> +

<snip>

> @@ -1073,6 +1683,49 @@ do_argo_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) arg1,
>          break;
>      }
>
> +    case XEN_ARGO_OP_sendv:
> +    {
> +        xen_argo_send_addr_t send_addr;
> +
> +        XEN_GUEST_HANDLE_PARAM(xen_argo_send_addr_t) send_addr_hnd =
> +            guest_handle_cast(arg1, xen_argo_send_addr_t);
> +        XEN_GUEST_HANDLE_PARAM(xen_argo_iov_t) iovs_hnd =
> +            guest_handle_cast(arg2, xen_argo_iov_t);
> +        /* arg3 is niov */
> +        /* arg4 is message_type. Must be a 32-bit value. */
> +
> +        rc = copy_from_guest(&send_addr, send_addr_hnd, 1) ? -EFAULT : 0;
> +        if ( rc )
> +            break;
> +
> +        if ( send_addr.src.domain_id == XEN_ARGO_DOMID_ANY )
> +            send_addr.src.domain_id = currd->domain_id;
> +
> +        /* No domain is currently authorized to send on behalf of another */
> +        if ( unlikely(send_addr.src.domain_id != currd->domain_id) )
> +        {
> +            rc = -EPERM;
> +            break;
> +        }
> +
> +        /* Reject niov or message_type values that are outside 32 bit range. */
> +        if ( unlikely((arg3 > XEN_ARGO_MAXIOV) || (arg4 & ~0xffffffffUL)) )
> +        {
> +            rc = -EINVAL;
> +            break;
> +        }

This needs to check that send_addr.src.pad and send_addr.dst.pad are
zero.  sendv() does not check the padding either.
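
e.g. something like this, right after the copy_from_guest() of
send_addr (untested):

    /* valid send addresses must have their padding fields set to zero */
    if ( unlikely(send_addr.src.pad || send_addr.dst.pad) )
    {
        rc = -EINVAL;
        break;
    }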

Regards,
Jason

> +
> +        /*
> +         * Check access to the whole array here so we can use the faster __copy
> +         * operations to read each element later.
> +         */
> +        if ( unlikely(!guest_handle_okay(iovs_hnd, arg3)) )
> +            break;
> +
> +        rc = sendv(currd, &send_addr.src, &send_addr.dst, iovs_hnd, arg3, arg4);
> +        break;
> +    }
> +
>      default:
>          rc = -EOPNOTSUPP;
>          break;
