xense-devel
Re: [Xen-devel][Xense-devel][PATCH][1/4] Xen Security Modules: XSM
Thanks, I looked at that quickly; the only comment I had there was that it could be:
struct xsm_ops {
    /* generic ones... */
    struct xsm_arch_ops arch_ops;
};
I guess I really don't see how this makes it any better, since there
will likely always be a mix of XSM "supported" ops and module-specific
ops. If you are trying to tighten down the interface, any command,
supported or module-specific, could be abused. Please remind me what
advantage you are thinking of here.
To avoid a bunch of #ifdefs there. Some of them looked arch-neutral
(like add_to_physmap), but I can see they aren't exactly.
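Concretely, something along the lines of the sketch below is what I was
picturing; the hook names and signatures here are just placeholders, not
the actual interface:

    /* Sketch only: group the arch-specific hooks in their own table so
     * common code never needs arch #ifdefs around the ops structure.
     * add_to_physmap is just an example hook name here. */
    struct xsm_arch_ops {
        int (*add_to_physmap)(struct domain *d1, struct domain *d2);
    };

    struct xsm_ops {
        /* arch-neutral hooks ... */
        int (*evtchn_unbound)(struct domain *d, struct evtchn *chn,
                              domid_t id);
        /* ... */
        struct xsm_arch_ops arch_ops;  /* arch-specific hooks live here */
    };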
It would still be nice to make the do_xsm_op hypercall more than a
direct pass-through to the module.
Other than that, a quick review got me looking at evtchn calls.
> diff -r e370c94bd6fd -r 62b752969edf xen/common/event_channel.c
> --- a/xen/common/event_channel.c Sat May 05 13:48:05 2007 +0100
> +++ b/xen/common/event_channel.c Mon May 07 14:50:16 2007 -0400
> @@ -30,6 +30,7 @@
> #include <public/xen.h>
> #include <public/event_channel.h>
> #include <acm/acm_hooks.h>
> +#include <xsm/xsm.h>
>
> #define bucket_from_port(d,p) \
> ((d)->evtchn[(p)/EVTCHNS_PER_BUCKET])
> @@ -89,8 +90,15 @@ static int get_free_port(struct domain *
> chn = xmalloc_array(struct evtchn, EVTCHNS_PER_BUCKET);
> if ( unlikely(chn == NULL) )
> return -ENOMEM;
> +
> memset(chn, 0, EVTCHNS_PER_BUCKET * sizeof(*chn));
> bucket_from_port(d, port) = chn;
> +
> + if ( xsm_alloc_security_evtchn(chn) )
> + {
> + xfree(chn);
> + return -ENOMEM;
> + }
Oops, this is a domain-triggerable use-after-free bug in waiting: the
bucket pointer has already been published via bucket_from_port(), so it
looks valid, but the memory it points to has just been freed.
In fact, this is not the right interface. You are only allocating an
opaque security blob per bucket, not per channel. In effect it's like:
struct evtchn chn[EVTCHNS_PER_BUCKET];
xsm_alloc_security(&chn[0]);
when I believe you want something effectively like:
struct evtchn chn[EVTCHNS_PER_BUCKET];
for ( i = 0; i < EVTCHNS_PER_BUCKET; i++ )
    xsm_alloc_security(&chn[i]);
> return port;
> }
This is a mistake in the framework because the behavior between alloc
and free is inconsistent. The Flask module does this correctly, but
this is a gotcha for a new module. I'll fix this and repost tomorrow.
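Roughly what I have in mind for the tail of get_free_port() -- an
untested sketch, keeping the existing xsm_alloc_security_evtchn()/
xsm_free_security_evtchn() names but calling them per channel (and
adding a new local int i):

    /* Sketch only: allocate a blob for every channel in the new bucket,
     * roll back fully on failure, and only publish the bucket once it
     * is completely initialised. */
    chn = xmalloc_array(struct evtchn, EVTCHNS_PER_BUCKET);
    if ( unlikely(chn == NULL) )
        return -ENOMEM;
    memset(chn, 0, EVTCHNS_PER_BUCKET * sizeof(*chn));

    for ( i = 0; i < EVTCHNS_PER_BUCKET; i++ )
    {
        if ( xsm_alloc_security_evtchn(&chn[i]) )
        {
            while ( i-- > 0 )
                xsm_free_security_evtchn(&chn[i]);
            xfree(chn);
            return -ENOMEM;
        }
    }

    bucket_from_port(d, port) = chn;

    return port;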
> @@ -120,6 +128,10 @@ static long evtchn_alloc_unbound(evtchn_
> if ( (port = get_free_port(d)) < 0 )
> ERROR_EXIT(port);
> chn = evtchn_from_port(d, port);
> +
> + rc = xsm_evtchn_unbound(d, chn, alloc->remote_dom);
> + if ( rc )
> + goto out;
>
> chn->state = ECS_UNBOUND;
> if ( (chn->u.unbound.remote_domid = alloc->remote_dom) == DOMID_SELF )
> @@ -176,6 +188,10 @@ static long evtchn_bind_interdomain(evtc
> if ( (rchn->state != ECS_UNBOUND) ||
> (rchn->u.unbound.remote_domid != ld->domain_id) )
> ERROR_EXIT(-EINVAL);
> +
> + rc = xsm_evtchn_interdomain(ld, lchn, rd, rchn);
> + if ( rc )
> + goto out;
>
> lchn->u.interdomain.remote_dom = rd;
> lchn->u.interdomain.remote_port = (u16)rport;
> @@ -231,6 +247,7 @@ static long evtchn_bind_virq(evtchn_bind
> ERROR_EXIT(port);
>
> chn = evtchn_from_port(d, port);
> +
> chn->state = ECS_VIRQ;
> chn->notify_vcpu_id = vcpu;
> chn->u.virq = virq;
> @@ -261,14 +278,15 @@ static long evtchn_bind_ipi(evtchn_bind_
> ERROR_EXIT(port);
>
> chn = evtchn_from_port(d, port);
> +
> chn->state = ECS_IPI;
> chn->notify_vcpu_id = vcpu;
>
> bind->port = port;
>
> + spin_unlock(&d->evtchn_lock);
> +
> out:
> - spin_unlock(&d->evtchn_lock);
> -
This is incorrect: it leaves the domain's evtchn_lock held on the error
path (yes, ERROR_EXIT is mean macro abuse!).
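In other words, the unlock has to stay on the common exit path that
ERROR_EXIT jumps to, i.e. keep the original shape:

    bind->port = port;

 out:
    /* ERROR_EXIT() jumps here, so both the success and the error path
     * release the lock exactly once. */
    spin_unlock(&d->evtchn_lock);

    return rc;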
Absolutely, this is a problem; when did ERROR_EXIT come about?
> return rc;
> }
>
> @@ -427,6 +445,8 @@ static long __evtchn_close(struct domain
> chn1->state = ECS_FREE;
> chn1->notify_vcpu_id = 0;
>
> + xsm_evtchn_close_post(chn1);
> +
> out:
> if ( d2 != NULL )
> {
> @@ -470,6 +490,10 @@ long evtchn_send(unsigned int lport)
> spin_unlock(&ld->evtchn_lock);
> return -EINVAL;
> }
> +
> + ret = xsm_evtchn_send(ld, lchn);
> + if ( ret )
> + goto out;
>
> switch ( lchn->state )
> {
> @@ -500,6 +524,7 @@ long evtchn_send(unsigned int lport)
> ret = -EINVAL;
> }
>
> +out:
> spin_unlock(&ld->evtchn_lock);
>
> return ret;
> @@ -618,6 +643,11 @@ static long evtchn_status(evtchn_status_
> }
>
> chn = evtchn_from_port(d, port);
> +
> + rc = xsm_evtchn_status(d, chn);
> + if ( rc )
> + goto out;
> +
> switch ( chn->state )
> {
> case ECS_FREE:
> @@ -714,6 +744,8 @@ static long evtchn_unmask(evtchn_unmask_
> shared_info_t *s = d->shared_info;
> int port = unmask->port;
> struct vcpu *v;
> + int ret = 0;
> + struct evtchn *chn;
>
> spin_lock(&d->evtchn_lock);
>
> @@ -723,7 +755,8 @@ static long evtchn_unmask(evtchn_unmask_
> return -EINVAL;
> }
>
> - v = d->vcpu[evtchn_from_port(d, port)->notify_vcpu_id];
> + chn = evtchn_from_port(d, port);
> + v = d->vcpu[chn->notify_vcpu_id];
>
> /*
> * These operations must happen in strict order. Based on
> @@ -739,7 +772,7 @@ static long evtchn_unmask(evtchn_unmask_
>
> spin_unlock(&d->evtchn_lock);
>
> - return 0;
> + return ret;
> }
>
>
> @@ -748,6 +781,7 @@ static long evtchn_reset(evtchn_reset_t
> domid_t dom = r->dom;
> struct domain *d;
> int i;
> + int rc;
>
> if ( dom == DOMID_SELF )
> dom = current->domain->domain_id;
> @@ -757,6 +791,13 @@ static long evtchn_reset(evtchn_reset_t
> if ( (d = rcu_lock_domain_by_id(dom)) == NULL )
> return -ESRCH;
>
> + rc = xsm_evtchn_reset(current->domain, d);
> + if ( rc )
> + {
> + rcu_unlock_domain(d);
> + return rc;
> + }
> +
> for ( i = 0; port_is_valid(d, i); i++ )
> (void)__evtchn_close(d, i);
>
> @@ -948,10 +989,14 @@ void notify_via_xen_event_channel(int lp
>
> int evtchn_init(struct domain *d)
> {
> + struct evtchn *chn;
> +
> spin_lock_init(&d->evtchn_lock);
> if ( get_free_port(d) != 0 )
> return -EINVAL;
> - evtchn_from_port(d, 0)->state = ECS_RESERVED;
> + chn = evtchn_from_port(d, 0);
> + chn->state = ECS_RESERVED;
> +
> return 0;
> }
>
> @@ -967,7 +1012,10 @@ void evtchn_destroy(struct domain *d)
> }
>
> for ( i = 0; i < NR_EVTCHN_BUCKETS; i++ )
> + {
> + xsm_free_security_evtchn(d->evtchn[i]);
Yeah, like this. Got it right on destroy.
> xfree(d->evtchn[i]);
> + }
> }
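Of course, if the allocation side does go per-channel as above, the
destroy path will want the matching per-channel loop. Something like
this, as a sketch only, with the bucket layout and hook name taken from
the patch:

    /* Sketch only: per-channel free to mirror a per-channel alloc. */
    for ( i = 0; i < NR_EVTCHN_BUCKETS; i++ )
    {
        struct evtchn *bucket = d->evtchn[i];
        int j;

        if ( bucket == NULL )
            continue;

        for ( j = 0; j < EVTCHNS_PER_BUCKET; j++ )
            xsm_free_security_evtchn(&bucket[j]);

        xfree(bucket);
    }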
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel