Re: [Xen-devel] [PATCH] vNUMA: rename interface structures
On Tue, Nov 25, 2014 at 02:11:39PM +0000, Jan Beulich wrote:
> >>> On 25.11.14 at 13:36, <JBeulich@xxxxxxxx> wrote:
> > No-one (including me) paid attention during review that these
> > structures don't adhere to the naming requirements of the public
> > interface: Consistently use xen_ prefixes at least for all new
> > additions.
> >
> > Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
>
> Sorry, again forgot to Cc you (for 4.5): No functional change, but
> avoiding a later (incompatible) interface change.
Release-Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
>
> Jan
>
> > --- a/tools/libxc/include/xenctrl.h
> > +++ b/tools/libxc/include/xenctrl.h
> > @@ -1264,7 +1264,7 @@ int xc_domain_setvnuma(xc_interface *xch
> > uint32_t nr_vnodes,
> > uint32_t nr_regions,
> > uint32_t nr_vcpus,
> > - vmemrange_t *vmemrange,
> > + xen_vmemrange_t *vmemrange,
> > unsigned int *vdistance,
> > unsigned int *vcpu_to_vnode,
> > unsigned int *vnode_to_pnode);
> > --- a/tools/libxc/xc_domain.c
> > +++ b/tools/libxc/xc_domain.c
> > @@ -2171,7 +2171,7 @@ int xc_domain_setvnuma(xc_interface *xch
> > uint32_t nr_vnodes,
> > uint32_t nr_vmemranges,
> > uint32_t nr_vcpus,
> > - vmemrange_t *vmemrange,
> > + xen_vmemrange_t *vmemrange,
> > unsigned int *vdistance,
> > unsigned int *vcpu_to_vnode,
> > unsigned int *vnode_to_pnode)
> > --- a/xen/common/domctl.c
> > +++ b/xen/common/domctl.c
> > @@ -345,7 +345,7 @@ static struct vnuma_info *vnuma_alloc(un
> > vnuma->vdistance = xmalloc_array(unsigned int, nr_vnodes * nr_vnodes);
> > vnuma->vcpu_to_vnode = xmalloc_array(unsigned int, nr_vcpus);
> > vnuma->vnode_to_pnode = xmalloc_array(unsigned int, nr_vnodes);
> > - vnuma->vmemrange = xmalloc_array(vmemrange_t, nr_ranges);
> > + vnuma->vmemrange = xmalloc_array(xen_vmemrange_t, nr_ranges);
> >
> > if ( vnuma->vdistance == NULL || vnuma->vmemrange == NULL ||
> > vnuma->vcpu_to_vnode == NULL || vnuma->vnode_to_pnode == NULL )
> > --- a/xen/common/memory.c
> > +++ b/xen/common/memory.c
> > @@ -972,7 +972,7 @@ long do_memory_op(unsigned long cmd, XEN
> >
> > case XENMEM_get_vnumainfo:
> > {
> > - struct vnuma_topology_info topology;
> > + struct xen_vnuma_topology_info topology;
> > struct domain *d;
> > unsigned int dom_vnodes, dom_vranges, dom_vcpus;
> > struct vnuma_info tmp;
> > @@ -1033,7 +1033,7 @@ long do_memory_op(unsigned long cmd, XEN
> > read_unlock(&d->vnuma_rwlock);
> >
> > tmp.vdistance = xmalloc_array(unsigned int, dom_vnodes * dom_vnodes);
> > - tmp.vmemrange = xmalloc_array(vmemrange_t, dom_vranges);
> > + tmp.vmemrange = xmalloc_array(xen_vmemrange_t, dom_vranges);
> > tmp.vcpu_to_vnode = xmalloc_array(unsigned int, dom_vcpus);
> >
> > if ( tmp.vdistance == NULL ||
> > --- a/xen/include/public/domctl.h
> > +++ b/xen/include/public/domctl.h
> > @@ -980,7 +980,7 @@ struct xen_domctl_vnuma {
> > /*
> > * memory rages for each vNUMA node
> > */
> > - XEN_GUEST_HANDLE_64(vmemrange_t) vmemrange;
> > + XEN_GUEST_HANDLE_64(xen_vmemrange_t) vmemrange;
> > };
> > typedef struct xen_domctl_vnuma xen_domctl_vnuma_t;
> > DEFINE_XEN_GUEST_HANDLE(xen_domctl_vnuma_t);
> > --- a/xen/include/public/memory.h
> > +++ b/xen/include/public/memory.h
> > @@ -530,14 +530,13 @@ DEFINE_XEN_GUEST_HANDLE(xen_mem_sharing_
> > #define XENMEM_get_vnumainfo 26
> >
> > /* vNUMA node memory ranges */
> > -struct vmemrange {
> > +struct xen_vmemrange {
> > uint64_t start, end;
> > unsigned int flags;
> > unsigned int nid;
> > };
> > -
> > -typedef struct vmemrange vmemrange_t;
> > -DEFINE_XEN_GUEST_HANDLE(vmemrange_t);
> > +typedef struct xen_vmemrange xen_vmemrange_t;
> > +DEFINE_XEN_GUEST_HANDLE(xen_vmemrange_t);
> >
> > /*
> > * vNUMA topology specifies vNUMA node number, distance table,
> > @@ -548,7 +547,7 @@ DEFINE_XEN_GUEST_HANDLE(vmemrange_t);
> > * copied back to guest. Domain returns expected values of nr_vnodes,
> > * nr_vmemranges and nr_vcpus to guest if the values where incorrect.
> > */
> > -struct vnuma_topology_info {
> > +struct xen_vnuma_topology_info {
> > /* IN */
> > domid_t domid;
> > uint16_t pad;
> > @@ -566,12 +565,12 @@ struct vnuma_topology_info {
> > uint64_t pad;
> > } vcpu_to_vnode;
> > union {
> > - XEN_GUEST_HANDLE(vmemrange_t) h;
> > + XEN_GUEST_HANDLE(xen_vmemrange_t) h;
> > uint64_t pad;
> > } vmemrange;
> > };
> > -typedef struct vnuma_topology_info vnuma_topology_info_t;
> > -DEFINE_XEN_GUEST_HANDLE(vnuma_topology_info_t);
> > +typedef struct xen_vnuma_topology_info xen_vnuma_topology_info_t;
> > +DEFINE_XEN_GUEST_HANDLE(xen_vnuma_topology_info_t);
> >
> > /* Next available subop number is 27 */
> >
> > --- a/xen/include/xen/domain.h
> > +++ b/xen/include/xen/domain.h
> > @@ -100,7 +100,7 @@ struct vnuma_info {
> > unsigned int *vdistance;
> > unsigned int *vcpu_to_vnode;
> > unsigned int *vnode_to_pnode;
> > - struct vmemrange *vmemrange;
> > + struct xen_vmemrange *vmemrange;
> > };
> >
> > void vnuma_destroy(struct vnuma_info *vnuma);
>
>
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
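
[Editor's note] As a quick illustration of what the rename means for toolstack callers, here is a minimal, hypothetical sketch (not part of the patch) that sets up a single-node vNUMA layout using the renamed xen_vmemrange_t. The struct fields (start, end, flags, nid) are those declared in the hunk above; the leading domid argument of xc_domain_setvnuma() is assumed, since the hunk header truncates the start of that declaration.

/* Hypothetical helper, not from the patch: give a guest one vNUMA node
 * covering [0, mem_bytes) and map all of its vCPUs onto that node. */
#include <stdlib.h>
#include <xenctrl.h>

static int set_single_vnode(xc_interface *xch, uint32_t domid,
                            uint64_t mem_bytes, uint32_t nr_vcpus)
{
    xen_vmemrange_t vmemrange = {
        .start = 0,                  /* fields as in struct xen_vmemrange */
        .end   = mem_bytes,
        .flags = 0,
        .nid   = 0,                  /* the only vNUMA node */
    };
    unsigned int vdistance = 10;     /* 1x1 distance matrix: local distance */
    unsigned int vnode_to_pnode = 0; /* place vnode 0 on pnode 0 */
    unsigned int *vcpu_to_vnode = calloc(nr_vcpus, sizeof(*vcpu_to_vnode));
    int rc;

    if ( !vcpu_to_vnode )
        return -1;
    /* calloc() zeroed the array, i.e. every vCPU maps to vnode 0 */

    rc = xc_domain_setvnuma(xch, domid, 1 /* nr_vnodes */,
                            1 /* nr_vmemranges */, nr_vcpus,
                            &vmemrange, &vdistance,
                            vcpu_to_vnode, &vnode_to_pnode);
    free(vcpu_to_vnode);
    return rc;
}

The distance value of 10 follows the usual ACPI SLIT convention for local distance. Nothing else changes for callers: the patch only renames the public types, so only the spelling of xen_vmemrange_t (and xen_vnuma_topology_info) differs from before.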