
Re: [Xen-devel] [PATCH RFC 2/7] xen/vnuma: domctl subop for vnuma setup.



>>> On 27.08.13 at 09:54, Elena Ufimtseva <ufimtseva@xxxxxxxxx> wrote:
> --- a/xen/common/domain.c
> +++ b/xen/common/domain.c
> @@ -227,6 +227,11 @@ struct domain *domain_create(
>      spin_lock_init(&d->node_affinity_lock);
>      d->node_affinity = NODE_MASK_ALL;
>      d->auto_node_affinity = 1;
> +    d->vnuma.vnuma_memblks = NULL;
> +    d->vnuma.vnode_to_pnode = NULL;
> +    d->vnuma.vcpu_to_vnode = NULL;
> +    d->vnuma.vdistance = NULL;
> +    d->vnuma.nr_vnodes = 0;

Pretty pointless considering that struct domain starts out from a
zeroed page.
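I.e. the whole hunk can simply be dropped. Purely as an illustration
(standalone C, not Xen code), zeroed storage already yields exactly what
these assignments do:

    #include <assert.h>
    #include <stdlib.h>

    /* Stand-in for the vnuma members of struct domain. */
    struct vnuma_info {
        unsigned int nr_vnodes;
        unsigned int *vdistance;
    };

    int main(void)
    {
        /* calloc() hands back zeroed memory, just like the page
         * struct domain is allocated from. */
        struct vnuma_info *v = calloc(1, sizeof(*v));

        /* All-bits-zero reads back as 0 / NULL on the architectures
         * Xen supports, so the explicit assignments add nothing. */
        assert(v && v->nr_vnodes == 0 && v->vdistance == NULL);
        free(v);
        return 0;
    }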

> @@ -532,6 +537,7 @@ int domain_kill(struct domain *d)
>          tmem_destroy(d->tmem);
>          domain_set_outstanding_pages(d, 0);
>          d->tmem = NULL;
> +        /* TODO: vnuma_destroy(d->vnuma); */

That's intended to go away by the time the RFC tag gets dropped?
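If instead a real implementation is meant to appear here, then presumably
something along these lines (untested sketch, assuming the struct layout
used elsewhere in this series; also note the TODO passes d->vnuma by
value, while a pointer seems more likely to be wanted):

    static void vnuma_destroy(struct vnuma_info *vnuma)
    {
        /* xfree() tolerates NULL, so this is safe even after a
         * partially failed setup. */
        xfree(vnuma->vnuma_memblks);
        xfree(vnuma->vnode_to_pnode);
        xfree(vnuma->vcpu_to_vnode);
        xfree(vnuma->vdistance);
        vnuma->vnuma_memblks = NULL;
        vnuma->vnode_to_pnode = NULL;
        vnuma->vcpu_to_vnode = NULL;
        vnuma->vdistance = NULL;
        vnuma->nr_vnodes = 0;
    }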

> @@ -862,7 +863,76 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
>          ret = set_global_virq_handler(d, virq);
>      }
>      break;
> -
> +    case XEN_DOMCTL_setvnumainfo:
> +    {
> +        int i, j;
> +        int dist_size;
> +        int dist, vmap, vntop;

unsigned, unsigned, unsigned.
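I.e.:

    unsigned int i, j;
    unsigned int dist_size;
    unsigned int dist, vmap, vntop;

None of these can meaningfully go negative.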

> +        vnuma_memblk_t vmemblk;
> +        
> +        ret = -EFAULT;
> +        dist = i = j = 0;
> +        if (op->u.vnuma.nr_vnodes <= 0 || op->u.vnuma.nr_vnodes > NR_CPUS)
> +            break;

-EFAULT seems inappropriate here.
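-EINVAL is the conventional error for an out-of-range input; -EFAULT
should be reserved for the actual guest copy failures further down.
E.g. (sketch):

    ret = -EINVAL;
    /* nr_vnodes is a uint16_t, so the <= 0 part of the check also
     * reduces to == 0. */
    if ( op->u.vnuma.nr_vnodes == 0 || op->u.vnuma.nr_vnodes > NR_CPUS )
        break;
    ret = -EFAULT;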

> +        d->vnuma.nr_vnodes = op->u.vnuma.nr_vnodes;
> +        dist_size = d->vnuma.nr_vnodes * d->vnuma.nr_vnodes;
> +        if ( (d->vnuma.vdistance = xmalloc_bytes(sizeof(*d->vnuma.vdistance) * dist_size) ) == NULL)
> +           break;
> +        for ( i = 0; i < d->vnuma.nr_vnodes; i++ )
> +            for ( j = 0; j < d->vnuma.nr_vnodes; j++ )
> +            {
> +                if ( unlikely(__copy_from_guest_offset(&dist, op->u.vnuma.vdistance, __vnode_distance_offset(d, i, j), 1)) )

Long line.
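Wrapping at the argument boundaries keeps it within coding style limits,
e.g.:

    if ( unlikely(__copy_from_guest_offset(&dist, op->u.vnuma.vdistance,
                                           __vnode_distance_offset(d, i, j),
                                           1)) )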

> +                {
> +                    gdprintk(XENLOG_INFO, "vNUMA: Copy distance table error\n");
> +                    goto err_dom;
> +                }
> +                __vnode_distance_set(d, i, j, dist);
> +            }
> +        if ( (d->vnuma.vnuma_memblks = xmalloc_bytes(sizeof(*d->vnuma.vnuma_memblks) * d->vnuma.nr_vnodes)) == NULL )

Again a long line.

> +            goto err_dom;
> +        for ( i = 0; i < d->vnuma.nr_vnodes; i++ )
> +        {
> +            if ( unlikely(__copy_from_guest_offset(&vmemblk, op->u.vnuma.vnuma_memblks, i, 1)) )
> +            {
> +                gdprintk(XENLOG_INFO, "vNUMA: memory size error\n");

Just like for the earlier patch - the many formal problems make it quite
hard to review the actual code.

> @@ -852,6 +853,17 @@ struct xen_domctl_set_broken_page_p2m {
>  typedef struct xen_domctl_set_broken_page_p2m xen_domctl_set_broken_page_p2m_t;
>  DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_broken_page_p2m_t);
>  
> +struct xen_domctl_vnuma {
> +    uint16_t nr_vnodes;
> +    XEN_GUEST_HANDLE_64(int) vdistance;
> +    XEN_GUEST_HANDLE_64(vnuma_memblk_t) vnuma_memblks;
> +    XEN_GUEST_HANDLE_64(int) vcpu_to_vnode;
> +    XEN_GUEST_HANDLE_64(int) vnode_to_pnode;

uint, uint, uint.
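I.e. (sketch):

    struct xen_domctl_vnuma {
        uint16_t nr_vnodes;
        XEN_GUEST_HANDLE_64(uint) vdistance;
        XEN_GUEST_HANDLE_64(vnuma_memblk_t) vnuma_memblks;
        XEN_GUEST_HANDLE_64(uint) vcpu_to_vnode;
        XEN_GUEST_HANDLE_64(uint) vnode_to_pnode;
    };

Distances, vCPU indexes, and node numbers are all inherently unsigned.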

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel