[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v8 6/8] xen/arm: introduce GNTTABOP_cache_flush



>>> On 20.10.14 at 11:48, <stefano.stabellini@xxxxxxxxxxxxx> wrote:
> --- a/xen/common/grant_table.c
> +++ b/xen/common/grant_table.c
> @@ -488,6 +488,43 @@ static int _set_status(unsigned gt_version,
>          return _set_status_v2(domid, readonly, mapflag, shah, act, status);
>  }
>  
> +#define MAX_GRANT_ENTRIES_ITER_SHIFT 12
> +static int grant_map_exists(const struct domain *ld,
> +                            struct grant_table *rgt,
> +                            unsigned long mfn,
> +                            grant_ref_t *ref_count)
> +{
> +    const struct active_grant_entry *act;
> +    grant_ref_t ref;
> +
> +    ASSERT(spin_is_locked(&rgt->lock));
> +
> +    for ( ref = *ref_count; ref < nr_grant_entries(rgt); ref++ )
> +    {
> +        if ( (ref % (1 << MAX_GRANT_ENTRIES_ITER_SHIFT)) == 0 &&

Wouldn't it be cheaper (avoiding a calculation on each
iteration) to determine the loop's upper bound up front as
min(*ref_count + (1 << MAX_GRANT_ENTRIES_ITER_SHIFT),
nr_grant_entries(rgt))?

>  long
>  do_grant_table_op(
>      unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) uop, unsigned int count)
>  {
>      long rc;
> +    unsigned int opaque_in = 0, opaque_out = 0;
>      
>      if ( (int)count < 0 )
>          return -EINVAL;
>      
>      rc = -EFAULT;
> +
> +    opaque_in = cmd >> GNTTABOP_CONTINUATION_ARG_SHIFT;

This and MAX_GRANT_ENTRIES_ITER_SHIFT need to be identical
(and you really don't need that second manifest constant). With
what you do right now, you shift out 20 bits here, but handle only
12 low bits per iteration (i.e. a total of 24 instead of 32). And
hence you don't need to shift here (only mask), ...

> @@ -2615,17 +2755,37 @@ do_grant_table_op(
>          }
>          break;
>      }
> +    case GNTTABOP_cache_flush:
> +    {
> +        grant_ref_t ref_count = opaque_in << MAX_GRANT_ENTRIES_ITER_SHIFT;

... and neither here ...

> +        XEN_GUEST_HANDLE_PARAM(gnttab_cache_flush_t) cflush =
> +            guest_handle_cast(uop, gnttab_cache_flush_t);
> +        if ( unlikely(!guest_handle_okay(cflush, count)) )
> +            goto out;
> +        rc = gnttab_cache_flush(cflush, &ref_count, count);
> +        if ( rc > 0 )
> +        {
> +            guest_handle_add_offset(cflush, rc);
> +            uop = guest_handle_cast(cflush, void);
> +        }
> +        opaque_out = ref_count >> MAX_GRANT_ENTRIES_ITER_SHIFT;

... nor here ...

> +        break;
> +    }
>      default:
>          rc = -ENOSYS;
>          break;
>      }
>      
>    out:
> -    if ( rc > 0 )
> +    if ( rc > 0 || opaque_out != 0 )
>      {
>          ASSERT(rc < count);
> -        rc = hypercall_create_continuation(__HYPERVISOR_grant_table_op,
> -                                           "ihi", cmd, uop, count - rc);
> +        ASSERT(opaque_out <
> +               (1 << (sizeof(cmd)*8 - GNTTABOP_CONTINUATION_ARG_SHIFT)));
> +        rc = hypercall_create_continuation(__HYPERVISOR_grant_table_op, 
> "ihi",
> +                                           (opaque_out <<
> +                                            GNTTABOP_CONTINUATION_ARG_SHIFT) 
> | cmd,

... nor here (but asserting that the low bits are clear is still desirable).

> --- a/xen/include/xen/grant_table.h
> +++ b/xen/include/xen/grant_table.h
> @@ -127,4 +127,7 @@ static inline unsigned int grant_to_status_frames(int 
> grant_frames)
>          GRANT_STATUS_PER_PAGE;
>  }
>  
> +#define GNTTABOP_CONTINUATION_ARG_SHIFT 20
> +#define GNTTABOP_CMD_MASK               
> ((1<<GNTTABOP_CONTINUATION_ARG_SHIFT)-1)

Such internal items should be given as little exposure as possible.
In the case here they can be confined to grant_table.c (since
the compat wrapper code gets included in that file). With "this
needs to be moved" (as said in response to v7) I didn't mean
moving it to a header; I meant moving it only within the file (to
its top), since it is not specific to any particular function.

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.