Re: [Xen-devel] [PATCH v2] domctl: relax getdomaininfo permissions
>>> On 05.08.16 at 13:20, <JBeulich@xxxxxxxx> wrote:

Daniel,

I've only now realized that I forgot to Cc you on this v2.

Jan

> Qemu needs access to this for the domain it controls, both due to it
> being used by xc_domain_memory_mapping() (which qemu calls) and the
> explicit use in hw/xenpv/xen_domainbuild.c:xen_domain_poll(). Extend
> permissions to that of any "ordinary" domctl: A domain controlling the
> targeted domain can invoke this operation for that target domain (which
> is being achieved by no longer passing NULL to xsm_domctl()).
>
> This at once avoids a for_each_domain() loop when the ID of an
> existing domain gets passed in.
>
> Reported-by: Marek Marczykowski-Górecki <marmarek@xxxxxxxxxxxxxxxxxxxxxx>
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
> ---
> v2: Add a comment. Clarify description as to what additional permission
> is being granted.
> ---
> I know there had been an alternative patch suggestion, but that one
> doesn't seem to have seen a formal submission so far, so here is my
> original proposal.
>
> I wonder what good the duplication of the returned domain ID does: I'm
> tempted to remove the one in the command-specific structure. Does
> anyone have insight into why it was done that way?
>
> I further wonder why we have XSM_OTHER: The respective conversion into
> other XSM_* values in xsm/dummy.h could as well move into the callers,
> making intentions more obvious when looking at the actual code.
>
> --- a/tools/flask/policy/modules/xen.if
> +++ b/tools/flask/policy/modules/xen.if
> @@ -149,7 +149,7 @@ define(`device_model', `
>  	create_channel($2, $1, $2_channel)
>  	allow $1 $2_channel:event create;
>
> -	allow $1 $2_target:domain shutdown;
> +	allow $1 $2_target:domain { getdomaininfo shutdown };
>  	allow $1 $2_target:mmu { map_read map_write adjust physmap target_hack };
>  	allow $1 $2_target:hvm { getparam setparam trackdirtyvram hvmctl irqlevel pciroute pcilevel cacheattr send_irq };
>  ')
> --- a/xen/common/domctl.c
> +++ b/xen/common/domctl.c
> @@ -396,14 +396,13 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
>      switch ( op->cmd )
>      {
>      case XEN_DOMCTL_createdomain:
> -    case XEN_DOMCTL_getdomaininfo:
>      case XEN_DOMCTL_test_assign_device:
>      case XEN_DOMCTL_gdbsx_guestmemio:
>          d = NULL;
>          break;
>      default:
>          d = rcu_lock_domain_by_id(op->domain);
> -        if ( d == NULL )
> +        if ( !d && op->cmd != XEN_DOMCTL_getdomaininfo )
>              return -ESRCH;
>      }
>
> @@ -817,14 +816,22 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
>
>      case XEN_DOMCTL_getdomaininfo:
>      {
> -        domid_t dom = op->domain;
> -
> -        rcu_read_lock(&domlist_read_lock);
> +        domid_t dom = DOMID_INVALID;
>
> -        for_each_domain ( d )
> -            if ( d->domain_id >= dom )
> +        if ( !d )
> +        {
> +            ret = -EINVAL;
> +            if ( op->domain >= DOMID_FIRST_RESERVED )
>                  break;
>
> +            rcu_read_lock(&domlist_read_lock);
> +
> +            dom = op->domain;
> +            for_each_domain ( d )
> +                if ( d->domain_id >= dom )
> +                    break;
> +        }
> +
>          ret = -ESRCH;
>          if ( d == NULL )
>              goto getdomaininfo_out;
> @@ -839,6 +846,10 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
>          copyback = 1;
>
>      getdomaininfo_out:
> +        /* When d was non-NULL upon entry, no cleanup is needed. */
> +        if ( dom == DOMID_INVALID )
> +            break;
> +
>          rcu_read_unlock(&domlist_read_lock);
>          d = NULL;
>          break;
> --- a/xen/include/xsm/dummy.h
> +++ b/xen/include/xsm/dummy.h
> @@ -61,7 +61,12 @@ static always_inline int xsm_default_act
>          return 0;
>      case XSM_TARGET:
>          if ( src == target )
> +        {
>              return 0;
> +    case XSM_XS_PRIV:
> +            if ( src->is_xenstore )
> +                return 0;
> +        }
>          /* fall through */
>      case XSM_DM_PRIV:
>          if ( target && src->target == target )
> @@ -71,10 +76,6 @@ static always_inline int xsm_default_act
>          if ( src->is_privileged )
>              return 0;
>          return -EPERM;
> -    case XSM_XS_PRIV:
> -        if ( src->is_xenstore || src->is_privileged )
> -            return 0;
> -        return -EPERM;
>      default:
>          LINKER_BUG_ON(1);
>          return -EPERM;
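The nested case label in the xsm/dummy.h hunk above is easy to misread, so here is a minimal, self-contained sketch of the control flow it creates. It uses simplified stand-in types (a small struct dom and an action enum) rather than Xen's real struct domain and xsm_default_t, and error returns are plain -EPERM; it is only meant to show which checks each XSM_* class reaches after the change.

/* Sketch only: simplified model of xsm_default_action() after this patch. */
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum action { XSM_TARGET, XSM_XS_PRIV, XSM_DM_PRIV, XSM_PRIV };

struct dom {
    bool is_xenstore;
    bool is_privileged;
    const struct dom *target;          /* domain this one is the device model for */
};

static int default_action(enum action a, const struct dom *src,
                          const struct dom *target)
{
    switch ( a )
    {
    case XSM_TARGET:
        if ( src == target )
        {
            return 0;
    case XSM_XS_PRIV:                  /* entered directly for XS_PRIV checks */
            if ( src->is_xenstore )
                return 0;
        }
        /* fall through */
    case XSM_DM_PRIV:
        if ( target && src->target == target )
            return 0;
        /* fall through */
    case XSM_PRIV:
        if ( src->is_privileged )
            return 0;
        return -EPERM;
    }
    return -EPERM;
}

int main(void)
{
    struct dom dom0  = { .is_privileged = true };
    struct dom guest = { 0 };
    struct dom qemu  = { .target = &guest };   /* device model for "guest" */

    /* With the patch, an XS_PRIV check (getdomaininfo) now also passes for
     * the domain controlling the target, not only for xenstore or dom0. */
    printf("qemu  -> guest: %d\n", default_action(XSM_XS_PRIV, &qemu,  &guest));
    printf("guest -> guest: %d\n", default_action(XSM_XS_PRIV, &guest, &guest));
    printf("dom0  -> guest: %d\n", default_action(XSM_XS_PRIV, &dom0,  &guest));
    return 0;
}

An XSM_XS_PRIV check therefore allows xenstore, the controlling (device model) domain, or the privileged domain, while an XSM_TARGET check with src != target skips the nested block entirely and behaves as before.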
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel
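For context on the toolstack side, the kind of call the commit message has in mind looks roughly like the sketch below. It assumes the classic libxc interface (xc_interface_open(), xc_domain_getinfo(), xc_dominfo_t), which hw/xenpv/xen_domainbuild.c:xen_domain_poll() polls with; the domain ID is a placeholder and error handling is trimmed. xc_domain_getinfo() issues XEN_DOMCTL_getdomaininfo, the operation whose permissions this patch relaxes.

/* Sketch only: a device model style poll of a single guest via libxc. */
#include <stdio.h>
#include <xenctrl.h>

int main(void)
{
    uint32_t domid = 5;                  /* placeholder: the targeted domain */
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    xc_dominfo_t info;
    int rc;

    if ( !xch )
        return 1;

    /* Asks for the first domain with ID >= domid (XEN_DOMCTL_getdomaininfo). */
    rc = xc_domain_getinfo(xch, domid, 1, &info);
    if ( rc == 1 && info.domid == domid )
        printf("domain %u is %s\n", (unsigned)info.domid,
               info.dying ? "dying" : "alive");
    else
        printf("domain %u is gone\n", (unsigned)domid);

    xc_interface_close(xch);
    return 0;
}

xc_domain_getinfo() returns information on the first domain with an ID greater than or equal to the one asked for, which is why callers re-check info.domid; the domctl.c change keeps that wildcard lookup for non-existent IDs below DOMID_FIRST_RESERVED while taking the rcu_lock_domain_by_id() path when the exact domain exists.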