
[Xen-devel] [PATCH v4 14/16] xen/arm: Enable the compilation of mem_access and mem_event on ARM.



This patch sets up the infrastructure needed to support mem_access and mem_event
on ARM and turns on their compilation. The required XSM hooks are wired up as well.

Signed-off-by: Tamas K Lengyel <tklengyel@xxxxxxxxxxxxx>
Acked-by: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
---
v3: Wrap mem_event-related XSM functions in #ifdef HAS_MEM_ACCESS
       blocks.
    Update the XSM hooks in flask to properly wire them up on ARM.

v2: Add CONFIG_MEM_PAGING and CONFIG_MEM_SHARING definitions and
       use them instead of CONFIG_X86.
    Split domctl copy-back and p2m type definitions into separate
       patches and move this patch to the end of the series.
---
 xen/arch/arm/Rules.mk        |  1 +
 xen/common/mem_event.c       | 19 +++++++++++++++++++
 xen/include/asm-x86/config.h |  3 +++
 xen/include/xsm/dummy.h      | 26 ++++++++++++++------------
 xen/include/xsm/xsm.h        | 29 +++++++++++++++++------------
 xen/xsm/dummy.c              |  7 +++++--
 xen/xsm/flask/hooks.c        | 33 ++++++++++++++++++++-------------
 7 files changed, 79 insertions(+), 39 deletions(-)

diff --git a/xen/arch/arm/Rules.mk b/xen/arch/arm/Rules.mk
index 8658176..f6781b5 100644
--- a/xen/arch/arm/Rules.mk
+++ b/xen/arch/arm/Rules.mk
@@ -10,6 +10,7 @@ HAS_DEVICE_TREE := y
 HAS_VIDEO := y
 HAS_ARM_HDLCD := y
 HAS_PASSTHROUGH := y
+HAS_MEM_ACCESS := y
 
 CFLAGS += -I$(BASEDIR)/include
 
diff --git a/xen/common/mem_event.c b/xen/common/mem_event.c
index fbbed43..096cbff 100644
--- a/xen/common/mem_event.c
+++ b/xen/common/mem_event.c
@@ -27,8 +27,15 @@
 #include <asm/p2m.h>
 #include <xen/mem_event.h>
 #include <xen/mem_access.h>
+
+#ifdef CONFIG_MEM_PAGING
 #include <asm/mem_paging.h>
+#endif
+
+#ifdef CONFIG_MEM_SHARING
 #include <asm/mem_sharing.h>
+#endif
+
 #include <xsm/xsm.h>
 
 /* for public/io/ring.h macros */
@@ -424,12 +431,14 @@ int __mem_event_claim_slot(struct domain *d, struct mem_event_domain *med,
         return mem_event_grab_slot(med, (current->domain != d));
 }
 
+#ifdef CONFIG_MEM_PAGING
 /* Registered with Xen-bound event channel for incoming notifications. */
 static void mem_paging_notification(struct vcpu *v, unsigned int port)
 {
     if ( likely(v->domain->mem_event->paging.ring_page != NULL) )
         p2m_mem_paging_resume(v->domain);
 }
+#endif
 
 /* Registered with Xen-bound event channel for incoming notifications. */
 static void mem_access_notification(struct vcpu *v, unsigned int port)
@@ -438,15 +447,20 @@ static void mem_access_notification(struct vcpu *v, unsigned int port)
         mem_access_resume(v->domain);
 }
 
+#ifdef CONFIG_MEM_SHARING
 /* Registered with Xen-bound event channel for incoming notifications. */
 static void mem_sharing_notification(struct vcpu *v, unsigned int port)
 {
     if ( likely(v->domain->mem_event->share.ring_page != NULL) )
         mem_sharing_sharing_resume(v->domain);
 }
+#endif
 
 int do_mem_event_op(int op, uint32_t domain, void *arg)
 {
+#if !defined(CONFIG_MEM_PAGING) && !defined(CONFIG_MEM_SHARING)
+    return -ENOSYS;
+#else
     int ret;
     struct domain *d;
 
@@ -473,6 +487,7 @@ int do_mem_event_op(int op, uint32_t domain, void *arg)
  out:
     rcu_unlock_domain(d);
     return ret;
+#endif
 }
 
 /* Clean up on domain destruction */
@@ -533,6 +548,7 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
 
     switch ( mec->mode )
     {
+#ifdef CONFIG_MEM_PAGING
     case XEN_DOMCTL_MEM_EVENT_OP_PAGING:
     {
         struct mem_event_domain *med = &d->mem_event->paging;
@@ -583,6 +599,7 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
         }
     }
     break;
+#endif
 
     case XEN_DOMCTL_MEM_EVENT_OP_ACCESS:
     {
@@ -617,6 +634,7 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
     }
     break;
 
+#ifdef CONFIG_MEM_SHARING
     case XEN_DOMCTL_MEM_EVENT_OP_SHARING:
     {
         struct mem_event_domain *med = &d->mem_event->share;
@@ -655,6 +673,7 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
         }
     }
     break;
+#endif
 
     default:
         rc = -ENOSYS;
diff --git a/xen/include/asm-x86/config.h b/xen/include/asm-x86/config.h
index 8a864ce..f8ef043 100644
--- a/xen/include/asm-x86/config.h
+++ b/xen/include/asm-x86/config.h
@@ -57,6 +57,9 @@
 #define CONFIG_LATE_HWDOM 1
 #endif
 
+#define CONFIG_MEM_SHARING 1
+#define CONFIG_MEM_PAGING 1
+
 #define HZ 100
 
 #define OPT_CONSOLE_STR "vga"
diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index c5aa316..cea2e63 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -507,6 +507,20 @@ static XSM_INLINE int xsm_hvm_param_nested(XSM_DEFAULT_ARG struct domain *d)
     return xsm_default_action(action, current->domain, d);
 }
 
+#ifdef HAS_MEM_ACCESS
+static XSM_INLINE int xsm_mem_event_control(XSM_DEFAULT_ARG struct domain *d, int mode, int op)
+{
+    XSM_ASSERT_ACTION(XSM_PRIV);
+    return xsm_default_action(action, current->domain, d);
+}
+
+static XSM_INLINE int xsm_mem_event_op(XSM_DEFAULT_ARG struct domain *d, int op)
+{
+    XSM_ASSERT_ACTION(XSM_DM_PRIV);
+    return xsm_default_action(action, current->domain, d);
+}
+#endif
+
 #ifdef CONFIG_X86
 static XSM_INLINE int xsm_do_mca(XSM_DEFAULT_VOID)
 {
@@ -550,18 +564,6 @@ static XSM_INLINE int xsm_hvm_ioreq_server(XSM_DEFAULT_ARG struct domain *d, int
     return xsm_default_action(action, current->domain, d);
 }
 
-static XSM_INLINE int xsm_mem_event_control(XSM_DEFAULT_ARG struct domain *d, int mode, int op)
-{
-    XSM_ASSERT_ACTION(XSM_PRIV);
-    return xsm_default_action(action, current->domain, d);
-}
-
-static XSM_INLINE int xsm_mem_event_op(XSM_DEFAULT_ARG struct domain *d, int op)
-{
-    XSM_ASSERT_ACTION(XSM_DM_PRIV);
-    return xsm_default_action(action, current->domain, d);
-}
-
 static XSM_INLINE int xsm_mem_sharing_op(XSM_DEFAULT_ARG struct domain *d, struct domain *cd, int op)
 {
     XSM_ASSERT_ACTION(XSM_DM_PRIV);
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index a85045d..6c3032c 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -140,6 +140,11 @@ struct xsm_operations {
     int (*hvm_control) (struct domain *d, unsigned long op);
     int (*hvm_param_nested) (struct domain *d);
 
+#ifdef HAS_MEM_ACCESS
+    int (*mem_event_control) (struct domain *d, int mode, int op);
+    int (*mem_event_op) (struct domain *d, int op);
+#endif
+
 #ifdef CONFIG_X86
     int (*do_mca) (void);
     int (*shadow_control) (struct domain *d, uint32_t op);
@@ -148,8 +153,6 @@ struct xsm_operations {
     int (*hvm_set_pci_link_route) (struct domain *d);
     int (*hvm_inject_msi) (struct domain *d);
     int (*hvm_ioreq_server) (struct domain *d, int op);
-    int (*mem_event_control) (struct domain *d, int mode, int op);
-    int (*mem_event_op) (struct domain *d, int op);
     int (*mem_sharing_op) (struct domain *d, struct domain *cd, int op);
     int (*apic) (struct domain *d, int cmd);
     int (*memtype) (uint32_t access);
@@ -534,6 +537,18 @@ static inline int xsm_hvm_param_nested (xsm_default_t def, struct domain *d)
     return xsm_ops->hvm_param_nested(d);
 }
 
+#ifdef HAS_MEM_ACCESS
+static inline int xsm_mem_event_control (xsm_default_t def, struct domain *d, int mode, int op)
+{
+    return xsm_ops->mem_event_control(d, mode, op);
+}
+
+static inline int xsm_mem_event_op (xsm_default_t def, struct domain *d, int op)
+{
+    return xsm_ops->mem_event_op(d, op);
+}
+#endif
+
 #ifdef CONFIG_X86
 static inline int xsm_do_mca(xsm_default_t def)
 {
@@ -570,16 +585,6 @@ static inline int xsm_hvm_ioreq_server (xsm_default_t def, struct domain *d, int
     return xsm_ops->hvm_ioreq_server(d, op);
 }
 
-static inline int xsm_mem_event_control (xsm_default_t def, struct domain *d, int mode, int op)
-{
-    return xsm_ops->mem_event_control(d, mode, op);
-}
-
-static inline int xsm_mem_event_op (xsm_default_t def, struct domain *d, int op)
-{
-    return xsm_ops->mem_event_op(d, op);
-}
-
 static inline int xsm_mem_sharing_op (xsm_default_t def, struct domain *d, struct domain *cd, int op)
 {
     return xsm_ops->mem_sharing_op(d, cd, op);
diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
index c95c803..e9cdc01 100644
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -117,6 +117,11 @@ void xsm_fixup_ops (struct xsm_operations *ops)
     set_to_dummy_if_null(ops, remove_from_physmap);
     set_to_dummy_if_null(ops, map_gmfn_foreign);
 
+#ifdef HAS_MEM_ACCESS
+    set_to_dummy_if_null(ops, mem_event_control);
+    set_to_dummy_if_null(ops, mem_event_op);
+#endif
+
 #ifdef CONFIG_X86
     set_to_dummy_if_null(ops, do_mca);
     set_to_dummy_if_null(ops, shadow_control);
@@ -125,8 +130,6 @@ void xsm_fixup_ops (struct xsm_operations *ops)
     set_to_dummy_if_null(ops, hvm_set_pci_link_route);
     set_to_dummy_if_null(ops, hvm_inject_msi);
     set_to_dummy_if_null(ops, hvm_ioreq_server);
-    set_to_dummy_if_null(ops, mem_event_control);
-    set_to_dummy_if_null(ops, mem_event_op);
     set_to_dummy_if_null(ops, mem_sharing_op);
     set_to_dummy_if_null(ops, apic);
     set_to_dummy_if_null(ops, platform_op);
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index f2f59ea..96efd0b 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -571,6 +571,9 @@ static int flask_domctl(struct domain *d, int cmd)
     case XEN_DOMCTL_irq_permission:
     case XEN_DOMCTL_iomem_permission:
     case XEN_DOMCTL_set_target:
+#ifdef HAS_MEM_ACCESS
+    case XEN_DOMCTL_mem_event_op:
+#endif
 #ifdef CONFIG_X86
     /* These have individual XSM hooks (arch/x86/domctl.c) */
     case XEN_DOMCTL_shadow_op:
@@ -579,7 +582,6 @@ static int flask_domctl(struct domain *d, int cmd)
     case XEN_DOMCTL_unbind_pt_irq:
     case XEN_DOMCTL_memory_mapping:
     case XEN_DOMCTL_ioport_mapping:
-    case XEN_DOMCTL_mem_event_op:
     /* These have individual XSM hooks (drivers/passthrough/iommu.c) */
     case XEN_DOMCTL_get_device_group:
     case XEN_DOMCTL_test_assign_device:
@@ -1181,6 +1183,18 @@ static int flask_deassign_device(struct domain *d, uint32_t machine_bdf)
 }
 #endif /* HAS_PASSTHROUGH && HAS_PCI */
 
+#ifdef HAS_MEM_ACCESS
+static int flask_mem_event_control(struct domain *d, int mode, int op)
+{
+    return current_has_perm(d, SECCLASS_HVM, HVM__MEM_EVENT);
+}
+
+static int flask_mem_event_op(struct domain *d, int op)
+{
+    return current_has_perm(d, SECCLASS_HVM, HVM__MEM_EVENT);
+}
+#endif /* HAS_MEM_ACCESS */
+
 #ifdef CONFIG_X86
 static int flask_do_mca(void)
 {
@@ -1291,16 +1305,6 @@ static int flask_hvm_ioreq_server(struct domain *d, int op)
     return current_has_perm(d, SECCLASS_HVM, HVM__HVMCTL);
 }
 
-static int flask_mem_event_control(struct domain *d, int mode, int op)
-{
-    return current_has_perm(d, SECCLASS_HVM, HVM__MEM_EVENT);
-}
-
-static int flask_mem_event_op(struct domain *d, int op)
-{
-    return current_has_perm(d, SECCLASS_HVM, HVM__MEM_EVENT);
-}
-
 static int flask_mem_sharing_op(struct domain *d, struct domain *cd, int op)
 {
     int rc = current_has_perm(cd, SECCLASS_HVM, HVM__MEM_SHARING);
@@ -1567,6 +1571,11 @@ static struct xsm_operations flask_ops = {
     .deassign_device = flask_deassign_device,
 #endif
 
+#ifdef HAS_MEM_ACCESS
+    .mem_event_control = flask_mem_event_control,
+    .mem_event_op = flask_mem_event_op,
+#endif
+
 #ifdef CONFIG_X86
     .do_mca = flask_do_mca,
     .shadow_control = flask_shadow_control,
@@ -1575,8 +1584,6 @@ static struct xsm_operations flask_ops = {
     .hvm_set_pci_link_route = flask_hvm_set_pci_link_route,
     .hvm_inject_msi = flask_hvm_inject_msi,
     .hvm_ioreq_server = flask_hvm_ioreq_server,
-    .mem_event_control = flask_mem_event_control,
-    .mem_event_op = flask_mem_event_op,
     .mem_sharing_op = flask_mem_sharing_op,
     .apic = flask_apic,
     .platform_op = flask_platform_op,
-- 
2.1.0

