
[Xen-devel] [RFC 8/9] x86/nested, xsm: add nested_event_channel_op hypercall



Provides proxying to the host hypervisor for these event channel ops:
 * EVTCHNOP_alloc_unbound
 * EVTCHNOP_bind_vcpu
 * EVTCHNOP_close
 * EVTCHNOP_send
 * EVTCHNOP_unmask

Introduces a new XSM access vector class, nested_event, for policy control
applied to this operation.
A new class is required because the existing 'event' access vector is
unsuitable for the nested case: it operates on per-channel security
identifiers generated from a combination of the security identifiers of the
two communicating endpoints, and that data is not available for the remote
endpoint in the nested case.

Signed-off-by: Christopher Clark <christopher.clark@xxxxxxxxxx>
---
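Not part of the commit message: for illustration only, a minimal sketch of how
a privileged guest of the nested (L1) hypervisor might invoke the new
hypercall to signal an event channel whose remote end is handled by the host
(L0) Xen. HYPERVISOR_nested_event_channel_op() is an assumed guest-side
wrapper for hypercall number __HYPERVISOR_nested_event_channel_op, taking the
usual (cmd, arg) pair; the include paths are likewise environment-dependent.

    /* Illustrative sketch; not part of this patch. */
    #include <xen/xen.h>             /* __HYPERVISOR_nested_event_channel_op */
    #include <xen/event_channel.h>   /* struct evtchn_send, EVTCHNOP_send */

    /* Assumed wrapper around the guest's two-argument hypercall mechanism. */
    extern long HYPERVISOR_nested_event_channel_op(int cmd, void *arg);

    /* Signal an event channel whose remote end is managed by the host Xen. */
    static long nested_evtchn_send(evtchn_port_t port)
    {
        struct evtchn_send send = { .port = port };

        /*
         * The L1 hypervisor's do_nested_event_channel_op() applies the
         * nested_event:send XSM check and then proxies EVTCHNOP_send to
         * the host hypervisor.
         */
        return HYPERVISOR_nested_event_channel_op(EVTCHNOP_send, &send);
    }

EVTCHNOP_send is the simplest case, with no data copied back to the caller;
EVTCHNOP_alloc_unbound additionally returns the allocated port and, as the
handler in xen-nested.c below shows, must close it in the host if the copy
back to the caller fails.
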
 tools/flask/policy/modules/dom0.te    |  3 +
 xen/arch/x86/guest/hypercall_page.S   |  1 +
 xen/arch/x86/guest/xen-nested.c       | 84 +++++++++++++++++++++++++++
 xen/arch/x86/hypercall.c              |  1 +
 xen/arch/x86/pv/hypercall.c           |  1 +
 xen/include/public/xen.h              |  1 +
 xen/include/xen/hypercall.h           |  4 ++
 xen/include/xsm/dummy.h               |  8 +++
 xen/include/xsm/xsm.h                 |  8 +++
 xen/xsm/dummy.c                       |  1 +
 xen/xsm/flask/hooks.c                 | 35 +++++++++++
 xen/xsm/flask/policy/access_vectors   | 20 +++++++
 xen/xsm/flask/policy/security_classes |  1 +
 13 files changed, 168 insertions(+)

diff --git a/tools/flask/policy/modules/dom0.te b/tools/flask/policy/modules/dom0.te
index 03c93a3093..ba3c5ad63d 100644
--- a/tools/flask/policy/modules/dom0.te
+++ b/tools/flask/policy/modules/dom0.te
@@ -48,6 +48,9 @@ allow dom0_t nestedxen_t:version { xen_version xen_get_features };
 allow dom0_t nestedxen_t:mmu physmap;
 allow dom0_t nestedxen_t:hvm { setparam getparam };
 allow dom0_t nestedxen_t:grant query;
+allow dom0_t nestedxen_t:nested_event {
+    alloc_unbound bind_vcpu close send unmask
+};
 
 # These permissions allow using the FLASK security server to compute access
 # checks locally, which could be used by a domain or service (such as xenstore)
diff --git a/xen/arch/x86/guest/hypercall_page.S b/xen/arch/x86/guest/hypercall_page.S
index 33403714ce..64f1885629 100644
--- a/xen/arch/x86/guest/hypercall_page.S
+++ b/xen/arch/x86/guest/hypercall_page.S
@@ -64,6 +64,7 @@ DECLARE_HYPERCALL(nested_xen_version)
 DECLARE_HYPERCALL(nested_memory_op)
 DECLARE_HYPERCALL(nested_hvm_op)
 DECLARE_HYPERCALL(nested_grant_table_op)
+DECLARE_HYPERCALL(nested_event_channel_op)
 
 DECLARE_HYPERCALL(arch_0)
 DECLARE_HYPERCALL(arch_1)
diff --git a/xen/arch/x86/guest/xen-nested.c b/xen/arch/x86/guest/xen-nested.c
index a4049e366f..babf4bf783 100644
--- a/xen/arch/x86/guest/xen-nested.c
+++ b/xen/arch/x86/guest/xen-nested.c
@@ -22,6 +22,7 @@
 #include <xen/lib.h>
 #include <xen/sched.h>
 
+#include <public/event_channel.h>
 #include <public/grant_table.h>
 #include <public/hvm/hvm_op.h>
 #include <public/memory.h>
@@ -239,3 +240,86 @@ long do_nested_grant_table_op(unsigned int cmd,
 
     return ret;
 }
+
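+/* Proxy a permitted subset of event channel ops to the host hypervisor. */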
+long do_nested_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
+{
+    long ret;
+
+    if ( !xen_nested )
+        return -ENOSYS;
+
+    ret = xsm_nested_event_channel_op(XSM_PRIV, current->domain, cmd);
+    if ( ret )
+        return ret;
+
+    switch ( cmd )
+    {
+    case EVTCHNOP_alloc_unbound:
+    {
+        struct evtchn_alloc_unbound alloc_unbound;
+
+        if ( copy_from_guest(&alloc_unbound, arg, 1) )
+            return -EFAULT;
+
+        ret = xen_hypercall_event_channel_op(cmd, &alloc_unbound);
+        if ( !ret && __copy_to_guest(arg, &alloc_unbound, 1) )
+        {
+            struct evtchn_close close;
+
+            ret = -EFAULT;
+            close.port = alloc_unbound.port;
+
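+            /*
+             * The port was allocated in the host hypervisor but could not be
+             * reported back to the caller, so close it to avoid leaking it.
+             */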
+            if ( xen_hypercall_event_channel_op(EVTCHNOP_close, &close) )
+                gprintk(XENLOG_ERR, "Nested event alloc_unbound failed to close"
+                                    " port %u on EFAULT\n", alloc_unbound.port);
+        }
+        break;
+    }
+
+    case EVTCHNOP_bind_vcpu:
+    {
+        struct evtchn_bind_vcpu bind_vcpu;
+
+        if ( copy_from_guest(&bind_vcpu, arg, 1) )
+            return -EFAULT;
+
+        return xen_hypercall_event_channel_op(cmd, &bind_vcpu);
+    }
+
+    case EVTCHNOP_close:
+    {
+        struct evtchn_close close;
+
+        if ( copy_from_guest(&close, arg, 1) )
+            return -EFAULT;
+
+        return xen_hypercall_event_channel_op(cmd, &close);
+    }
+
+    case EVTCHNOP_send:
+    {
+        struct evtchn_send send;
+
+        if ( copy_from_guest(&send, arg, 1) )
+            return -EFAULT;
+
+        return xen_hypercall_event_channel_op(cmd, &send);
+    }
+
+    case EVTCHNOP_unmask:
+    {
+        struct evtchn_unmask unmask;
+
+        if ( copy_from_guest(&unmask, arg, 1) )
+            return -EFAULT;
+
+        return xen_hypercall_event_channel_op(cmd, &unmask);
+    }
+
+    default:
+        gprintk(XENLOG_ERR, "Nested: event hypercall %d not supported.\n", cmd);
+        return -EOPNOTSUPP;
+    }
+
+    return ret;
+}
diff --git a/xen/arch/x86/hypercall.c b/xen/arch/x86/hypercall.c
index 1b9f4c6050..752955ac81 100644
--- a/xen/arch/x86/hypercall.c
+++ b/xen/arch/x86/hypercall.c
@@ -78,6 +78,7 @@ const hypercall_args_t hypercall_args_table[NR_hypercalls] =
     COMP(nested_memory_op, 2, 2),
     ARGS(nested_hvm_op, 2),
     ARGS(nested_grant_table_op, 3),
+    ARGS(nested_event_channel_op, 2),
 #endif
     ARGS(mca, 1),
     ARGS(arch_1, 1),
diff --git a/xen/arch/x86/pv/hypercall.c b/xen/arch/x86/pv/hypercall.c
index efa1bd0830..6b1ae74d64 100644
--- a/xen/arch/x86/pv/hypercall.c
+++ b/xen/arch/x86/pv/hypercall.c
@@ -89,6 +89,7 @@ const hypercall_table_t pv_hypercall_table[] = {
     COMPAT_CALL(nested_memory_op),
     HYPERCALL(nested_hvm_op),
     HYPERCALL(nested_grant_table_op),
+    HYPERCALL(nested_event_channel_op),
 #endif
     HYPERCALL(mca),
     HYPERCALL(arch_1),
diff --git a/xen/include/public/xen.h b/xen/include/public/xen.h
index 000b7fc9d0..5fb322e882 100644
--- a/xen/include/public/xen.h
+++ b/xen/include/public/xen.h
@@ -125,6 +125,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_ulong_t);
 #define __HYPERVISOR_nested_memory_op     43
 #define __HYPERVISOR_nested_hvm_op        44
 #define __HYPERVISOR_nested_grant_table_op 45
+#define __HYPERVISOR_nested_event_channel_op 46
 
 /* Architecture-specific hypercall definitions. */
 #define __HYPERVISOR_arch_0               48
diff --git a/xen/include/xen/hypercall.h b/xen/include/xen/hypercall.h
index 102b20fd5f..bd739c2dc7 100644
--- a/xen/include/xen/hypercall.h
+++ b/xen/include/xen/hypercall.h
@@ -167,6 +167,10 @@ extern long do_nested_grant_table_op(
     unsigned int cmd,
     XEN_GUEST_HANDLE_PARAM(void) uop,
     unsigned int count);
+
+extern long do_nested_event_channel_op(
+    int cmd,
+    XEN_GUEST_HANDLE_PARAM(void) arg);
 #endif
 
 #ifdef CONFIG_COMPAT
diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index f5871ef05a..f8162f3308 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -768,6 +768,14 @@ static XSM_INLINE int xsm_nested_grant_query_size(XSM_DEFAULT_ARG
     XSM_ASSERT_ACTION(XSM_PRIV);
     return xsm_default_action(action, d, NULL);
 }
+
+static XSM_INLINE int xsm_nested_event_channel_op(XSM_DEFAULT_ARG
+                                                  const struct domain *d,
+                                                  unsigned int cmd)
+{
+    XSM_ASSERT_ACTION(XSM_PRIV);
+    return xsm_default_action(action, d, NULL);
+}
 #endif
 
 #include <public/version.h>
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index e12001c401..81cb67b89b 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -192,6 +192,7 @@ struct xsm_operations {
     int (*nested_add_to_physmap) (const struct domain *d);
     int (*nested_hvm_op) (const struct domain *d, unsigned int cmd);
     int (*nested_grant_query_size) (const struct domain *d);
+    int (*nested_event_channel_op) (const struct domain *d, unsigned int cmd);
 #endif
 };
 
@@ -755,6 +756,13 @@ static inline int xsm_nested_grant_query_size(xsm_default_t def,
     return xsm_ops->nested_grant_query_size(d);
 }
 
+static inline int xsm_nested_event_channel_op(xsm_default_t def,
+                                              const struct domain *d,
+                                              unsigned int cmd)
+{
+    return xsm_ops->nested_event_channel_op(d, cmd);
+}
+
 #endif /* CONFIG_XEN_NESTED */
 
 #endif /* XSM_NO_WRAPPERS */
diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
index 8c213c258f..91db264ddc 100644
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -162,5 +162,6 @@ void __init xsm_fixup_ops (struct xsm_operations *ops)
     set_to_dummy_if_null(ops, nested_add_to_physmap);
     set_to_dummy_if_null(ops, nested_hvm_op);
     set_to_dummy_if_null(ops, nested_grant_query_size);
+    set_to_dummy_if_null(ops, nested_event_channel_op);
 #endif
 }
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index 2988df2cd1..27bfa01559 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1794,6 +1794,40 @@ static int flask_nested_grant_query_size(const struct domain *d)
     return domain_has_nested_perm(d, SECCLASS_GRANT, GRANT__QUERY);
 }
 
+static int flask_nested_event_channel_op(const struct domain *d,
+                                         unsigned int op)
+{
+    u32 perm;
+
+    switch ( op )
+    {
+    case EVTCHNOP_alloc_unbound:
+        perm = NESTED_EVENT__ALLOC_UNBOUND;
+        break;
+
+    case EVTCHNOP_bind_vcpu:
+        perm = NESTED_EVENT__BIND_VCPU;
+        break;
+
+    case EVTCHNOP_close:
+        perm = NESTED_EVENT__CLOSE;
+        break;
+
+    case EVTCHNOP_send:
+        perm = NESTED_EVENT__SEND;
+        break;
+
+    case EVTCHNOP_unmask:
+        perm = NESTED_EVENT__UNMASK;
+        break;
+
+    default:
+        return avc_unknown_permission("nested event channel op", op);
+    }
+
+    return domain_has_nested_perm(d, SECCLASS_NESTED_EVENT, perm);
+}
+
 #endif
 
 long do_flask_op(XEN_GUEST_HANDLE_PARAM(xsm_op_t) u_flask_op);
@@ -1940,6 +1974,7 @@ static struct xsm_operations flask_ops = {
     .nested_add_to_physmap = flask_nested_add_to_physmap,
     .nested_hvm_op = flask_nested_hvm_op,
     .nested_grant_query_size = flask_nested_grant_query_size,
+    .nested_event_channel_op = flask_nested_event_channel_op,
 #endif
 };
 
diff --git a/xen/xsm/flask/policy/access_vectors b/xen/xsm/flask/policy/access_vectors
index 7e0d5aa7bf..87caa36391 100644
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -316,6 +316,26 @@ class event
     reset
 }
 
+# Class nested_event describes event channels to the host hypervisor in a
+# nested Xen-on-Xen system. Policy controls for these differ from those for
+# interdomain event channels between guest VMs: the guest hypervisor does
+# not maintain security identifier information about the remote endpoint
+# managed by the host hypervisor, so nested_event channels do not have
+# their own security label derived from a type transition.
+class nested_event
+{
+    # nested_event_channel_op: EVTCHNOP_alloc_unbound
+    alloc_unbound
+    # nested_event_channel_op: EVTCHNOP_bind_vcpu
+    bind_vcpu
+    # nested_event_channel_op: EVTCHNOP_close
+    close
+    # nested_event_channel_op: EVTCHNOP_send
+    send
+    # nested_event_channel_op: EVTCHNOP_unmask
+    unmask
+}
+
 # Class grant describes pages shared by grant mappings.  Pages use the security
 # label of their owning domain.
 class grant
diff --git a/xen/xsm/flask/policy/security_classes b/xen/xsm/flask/policy/security_classes
index 50ecbabc5c..ce5d00df23 100644
--- a/xen/xsm/flask/policy/security_classes
+++ b/xen/xsm/flask/policy/security_classes
@@ -20,5 +20,6 @@ class grant
 class security
 class version
 class argo
+class nested_event
 
 # FLASK
-- 
2.17.1

