[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v7 05/10] xsm: add XEN_DOMCTL_soft_reset support



Dummy policy just checks that the current domain is privileged. In the FLASK
policy two new vectors are added to the DOMAIN2 class: soft_reset and
reset_transfer. The first is used to check that the domain making the
hypercall is allowed to do it; the second is used to check that it is possible
to transfer memory from the source domain to the destination domain. The default policy
requires their contexts to match.

Signed-off-by: Vitaly Kuznetsov <vkuznets@xxxxxxxxxx>
---
Changes in v7:
- Add reset_transfer vector to FLASK [Daniel De Graaf]
- XENMEM_soft_reset -> XEN_DOMCTL_soft_reset
- Add a comment on why we don't do two XSM_TARGET checks in dummy.h [Jan 
Beulich]
---
 tools/flask/policy/policy/modules/xen/xen.if |  3 ++-
 xen/common/domctl.c                          |  7 +++++++
 xen/include/xsm/dummy.h                      | 12 ++++++++++++
 xen/include/xsm/xsm.h                        |  8 ++++++++
 xen/xsm/dummy.c                              |  2 ++
 xen/xsm/flask/hooks.c                        | 18 ++++++++++++++++++
 xen/xsm/flask/policy/access_vectors          |  7 +++++++
 7 files changed, 56 insertions(+), 1 deletion(-)

diff --git a/tools/flask/policy/policy/modules/xen/xen.if 
b/tools/flask/policy/policy/modules/xen/xen.if
index 620d151..0f6ed8a 100644
--- a/tools/flask/policy/policy/modules/xen/xen.if
+++ b/tools/flask/policy/policy/modules/xen/xen.if
@@ -51,13 +51,14 @@ define(`create_domain_common', `
                        getaffinity setaffinity setvcpuextstate };
        allow $1 $2:domain2 { set_cpuid settsc setscheduler setclaim
                        set_max_evtchn set_vnumainfo get_vnumainfo cacheflush
-                       psr_cmt_op };
+                       psr_cmt_op soft_reset };
        allow $1 $2:security check_context;
        allow $1 $2:shadow enable;
        allow $1 $2:mmu { map_read map_write adjust memorymap physmap pinpage 
mmuext_op updatemp };
        allow $1 $2:grant setup;
        allow $1 $2:hvm { cacheattr getparam hvmctl irqlevel pciroute sethvmc
                        setparam pcilevel trackdirtyvram nested };
+       allow $2 $2:domain2 reset_transfer;
 ')
 
 # create_domain(priv, target)
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index 2fd21cb..ea7994a 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -727,6 +727,13 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) 
u_domctl)
             break;
         }
 
+        ret = xsm_soft_reset(XSM_PRIV, d, dest_d);
+        if ( ret )
+        {
+            rcu_unlock_domain(dest_d);
+            break;
+        }
+
         /*
          * Mark the source domain as dying to prevent further changes of its
          * mappings. is_dying flag is protected by domctl_lock.
diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index f044c0f..ba44f87 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -193,6 +193,18 @@ static XSM_INLINE int xsm_memory_exchange(XSM_DEFAULT_ARG 
struct domain *d)
     return xsm_default_action(action, current->domain, d);
 }
 
+static XSM_INLINE int xsm_soft_reset(XSM_DEFAULT_ARG struct domain *d1,
+                                    struct domain *d2)
+{
+    /*
+     * As it is not possible for a domain to have more than one target at a
+     * time the result of doing XSM_TARGET check for both domains would be
+     * equivalent to the XSM_PRIV check below.
+     */
+    XSM_ASSERT_ACTION(XSM_PRIV);
+    return xsm_default_action(action, current->domain, NULL);
+}
+
 static XSM_INLINE int xsm_memory_adjust_reservation(XSM_DEFAULT_ARG struct 
domain *d1,
                                                             struct domain *d2)
 {
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index c872d44..6edddfa 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -102,6 +102,8 @@ struct xsm_operations {
     int (*kexec) (void);
     int (*schedop_shutdown) (struct domain *d1, struct domain *d2);
 
+    int (*soft_reset) (struct domain *d1, struct domain *d2);
+
     char *(*show_irq_sid) (int irq);
     int (*map_domain_pirq) (struct domain *d);
     int (*map_domain_irq) (struct domain *d, int irq, void *data);
@@ -351,6 +353,12 @@ static inline int xsm_memory_exchange (xsm_default_t def, 
struct domain *d)
     return xsm_ops->memory_exchange(d);
 }
 
+static inline int xsm_soft_reset (xsm_default_t def, struct domain *d1,
+                                 struct domain *d2)
+{
+    return xsm_ops->soft_reset(d1, d2);
+}
+
 static inline int xsm_memory_adjust_reservation (xsm_default_t def, struct 
domain *d1, struct
                                                                     domain *d2)
 {
diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
index e84b0e4..0f1158e 100644
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -76,6 +76,8 @@ void xsm_fixup_ops (struct xsm_operations *ops)
     set_to_dummy_if_null(ops, kexec);
     set_to_dummy_if_null(ops, schedop_shutdown);
 
+    set_to_dummy_if_null(ops, soft_reset);
+
     set_to_dummy_if_null(ops, show_irq_sid);
     set_to_dummy_if_null(ops, map_domain_pirq);
     set_to_dummy_if_null(ops, map_domain_irq);
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index 11b7453..7ade3eb 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -565,6 +565,21 @@ static int flask_set_target(struct domain *d, struct 
domain *t)
     return rc;
 }
 
+static int flask_soft_reset(struct domain *d1, struct domain *d2)
+{
+    int rc;
+
+    rc = current_has_perm(d1, SECCLASS_DOMAIN2, DOMAIN2__SOFT_RESET);
+    if (rc)
+        return rc;
+
+    rc = current_has_perm(d2, SECCLASS_DOMAIN2, DOMAIN2__SOFT_RESET);
+    if (rc)
+        return rc;
+
+    return domain_has_perm(d1, d2, SECCLASS_DOMAIN2, DOMAIN2__RESET_TRANSFER);
+}
+
 static int flask_domctl(struct domain *d, int cmd)
 {
     switch ( cmd )
@@ -578,6 +593,7 @@ static int flask_domctl(struct domain *d, int cmd)
     case XEN_DOMCTL_memory_mapping:
     case XEN_DOMCTL_set_target:
     case XEN_DOMCTL_vm_event_op:
+    case XEN_DOMCTL_soft_reset:
 
     /* These have individual XSM hooks (arch/../domctl.c) */
     case XEN_DOMCTL_bind_pt_irq:
@@ -1629,6 +1645,8 @@ static struct xsm_operations flask_ops = {
     .kexec = flask_kexec,
     .schedop_shutdown = flask_schedop_shutdown,
 
+    .soft_reset = flask_soft_reset,
+
     .show_irq_sid = flask_show_irq_sid,
 
     .map_domain_pirq = flask_map_domain_pirq,
diff --git a/xen/xsm/flask/policy/access_vectors 
b/xen/xsm/flask/policy/access_vectors
index ea556df..5611760 100644
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -224,6 +224,13 @@ class domain2
 # XEN_DOMCTL_monitor_op
 # XEN_DOMCTL_vm_event_op
     vm_event
+# XEN_DOMCTL_soft_reset:
+#  source = domain making the hypercall
+#  target = domain being reset (source or destination)
+    soft_reset
+#  source = source domain being reset
+#  target = destination domain being reset
+    reset_transfer
 # XENMEM_access_op
     mem_access
 # XENMEM_paging_op
-- 
1.9.3


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.