[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH RFC 3/7] xen/arm: Enable the compilation of mem_access and mem_event on ARM.



On Fri, Aug 22, 2014 at 2:30 AM, Tamas K Lengyel <tamas.lengyel@xxxxxxxxxxxx> wrote:
This patch sets up the infrastructure to support mem_access and mem_event
on ARM and turns on compilation. We define the required XSM functions,
handling of domctl copyback, and the required p2m types and stub-functions
in this patch.

Signed-off-by: Tamas K Lengyel <tamas.lengyel@xxxxxxxxxxxx>
Non-ARM bits LGTM. I see here the disablement of CONFIG_X86.

If Xen were to ever support another architecture (hello IA64), it might be more reasonable to keep an #if defined(CONFIG_X86) || defined(CONFIG_ARM) guard (note that #ifdef takes a single macro, and the condition would be an OR of the architectures that support this). I don't know how unlikely that future direction might be.

Andres
---
 xen/arch/arm/domctl.c        |  36 ++++++++++++++--
 xen/arch/arm/mm.c            |  18 ++++++--
 xen/arch/arm/p2m.c           |   5 +++
 xen/common/mem_access.c      |   6 +--
 xen/common/mem_event.c       |  15 +++++--
 xen/include/asm-arm/p2m.h    | 100 ++++++++++++++++++++++++++++++++++---------
 xen/include/xen/mem_access.h |  19 --------
 xen/include/xen/mem_event.h  |  53 +++--------------------
 xen/include/xen/sched.h      |   1 -
 xen/include/xsm/dummy.h      |  24 +++++------
 xen/include/xsm/xsm.h        |  25 +++++------
 xen/xsm/dummy.c              |   4 +-
 12 files changed, 178 insertions(+), 128 deletions(-)

diff --git a/xen/arch/arm/domctl.c b/xen/arch/arm/domctl.c
index 45974e7..bb0b8d3 100644
--- a/xen/arch/arm/domctl.c
+++ b/xen/arch/arm/domctl.c
@@ -11,10 +11,17 @@
 #include <xen/sched.h>
 #include <xen/hypercall.h>
 #include <public/domctl.h>
+#include <asm/guest_access.h>
+#include <xen/mem_event.h>
+#include <public/mem_event.h>
 
 long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
                     XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
 {
+
+    long ret;
+    bool_t copyback = 0;
+
     switch ( domctl->cmd )
     {
     case XEN_DOMCTL_cacheflush:
@@ -23,17 +30,38 @@ long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
         unsigned long e = s + domctl->u.cacheflush.nr_pfns;
 
         if ( domctl->u.cacheflush.nr_pfns > (1U<<MAX_ORDER) )
-            return -EINVAL;
+        {
+            ret = -EINVAL;
+            break;
+        }
 
         if ( e < s )
-            return -EINVAL;
+        {
+            ret = -EINVAL;
+            break;
+        }
 
-        return p2m_cache_flush(d, s, e);
+        ret = p2m_cache_flush(d, s, e);
     }
+    break;
+
+    case XEN_DOMCTL_mem_event_op:
+    {
+        ret = mem_event_domctl(d, &domctl->u.mem_event_op,
+                               guest_handle_cast(u_domctl, void));
+        copyback = 1;
+    }
+    break;
 
     default:
-        return subarch_do_domctl(domctl, d, u_domctl);
+        ret = subarch_do_domctl(domctl, d, u_domctl);
+        break;
     }
+
+    if ( copyback && __copy_to_guest(u_domctl, domctl, 1) )
+        ret = -EFAULT;
+
+    return ret;
 }
 
 void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 0a243b0..cd04dec 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -35,6 +35,9 @@
 #include <asm/current.h>
 #include <asm/flushtlb.h>
 #include <public/memory.h>
+#include <xen/mem_event.h>
+#include <xen/mem_access.h>
+#include <xen/hypercall.h>
 #include <xen/sched.h>
 #include <xen/vmap.h>
 #include <xsm/xsm.h>
@@ -1111,18 +1114,27 @@ int xenmem_add_to_physmap_one(
 
 long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
 {
-    switch ( op )
+
+    long rc;
+
+    switch ( op & MEMOP_CMD_MASK )
     {
     /* XXX: memsharing not working yet */
     case XENMEM_get_sharing_shared_pages:
     case XENMEM_get_sharing_freed_pages:
         return 0;
+    case XENMEM_access_op:
+    {
+        rc = mem_access_memop(op, guest_handle_cast(arg, xen_mem_access_op_t));
+        break;
+    }
 
     default:
-        return -ENOSYS;
+        rc = -ENOSYS;
+        break;
     }
 
-    return 0;
+    return rc;
 }
 
 struct domain *page_get_owner_and_reference(struct page_info *page)
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 143199b..0ca0d2f 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -10,6 +10,9 @@
 #include <asm/event.h>
 #include <asm/hardirq.h>
 #include <asm/page.h>
+#include <xen/mem_event.h>
+#include <public/mem_event.h>
+#include <xen/mem_access.h>
 
 /* First level P2M is 2 consecutive pages */
 #define P2M_FIRST_ORDER 1
@@ -999,6 +1002,8 @@ int p2m_init(struct domain *d)
     p2m->max_mapped_gfn = 0;
     p2m->lowest_mapped_gfn = ULONG_MAX;
 
+    p2m->default_access = p2m_access_rwx;
+
 err:
     spin_unlock(&p2m->lock);

diff --git a/xen/common/mem_access.c b/xen/common/mem_access.c
index 84acdf9..6bb9cf4 100644
--- a/xen/common/mem_access.c
+++ b/xen/common/mem_access.c
@@ -29,8 +29,6 @@
 #include <xen/mem_event.h>
 #include <xsm/xsm.h>
 
-#ifdef CONFIG_X86
-
 int mem_access_memop(unsigned long cmd,
                      XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg)
 {
@@ -45,9 +43,11 @@ int mem_access_memop(unsigned long cmd,
     if ( rc )
         return rc;
 
+#ifdef CONFIG_X86
     rc = -EINVAL;
     if ( !is_hvm_domain(d) )
         goto out;
+#endif
 
     rc = xsm_mem_event_op(XSM_DM_PRIV, d, XENMEM_access_op);
     if ( rc )
@@ -125,8 +125,6 @@ int mem_access_send_req(struct domain *d, mem_event_request_t *req)
     return 0;
 }
 
-#endif /* CONFIG_X86 */
-
 /*
  * Local variables:
  * mode: C
diff --git a/xen/common/mem_event.c b/xen/common/mem_event.c
index a94ddf6..2a91928 100644
--- a/xen/common/mem_event.c
+++ b/xen/common/mem_event.c
@@ -20,16 +20,19 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
-#ifdef CONFIG_X86
-
+#include <xen/sched.h>
 #include <asm/domain.h>
 #include <xen/event.h>
 #include <xen/wait.h>
 #include <asm/p2m.h>
 #include <xen/mem_event.h>
 #include <xen/mem_access.h>
+
+#ifdef CONFIG_X86
 #include <asm/mem_paging.h>
 #include <asm/mem_sharing.h>
+#endif
+
 #include <xsm/xsm.h>
 
 /* for public/io/ring.h macros */
@@ -427,6 +430,7 @@ static void mem_access_notification(struct vcpu *v, unsigned int port)
         p2m_mem_access_resume(v->domain);
 }
 
+#ifdef CONFIG_X86
 /* Registered with Xen-bound event channel for incoming notifications. */
 static void mem_paging_notification(struct vcpu *v, unsigned int port)
 {
@@ -470,6 +474,7 @@ int do_mem_event_op(int op, uint32_t domain, void *arg)
     rcu_unlock_domain(d);
     return ret;
 }
+#endif
 
 /* Clean up on domain destruction */
 void mem_event_cleanup(struct domain *d)
@@ -538,6 +543,8 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
         {
         case XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE:
        {
+
+#ifdef CONFIG_X86
            rc = -ENODEV;
            /* Only HAP is supported */
            if ( !hap_enabled(d) )
@@ -546,6 +553,7 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
            /* Currently only EPT is supported */
            if ( !cpu_has_vmx )
                break;
+#endif
 
            rc = mem_event_enable(d, mec, med, _VPF_mem_access,
                                  HVM_PARAM_ACCESS_RING_PFN,
@@ -567,6 +575,7 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
     }
     break;
 
+#ifdef CONFIG_X86
     case XEN_DOMCTL_MEM_EVENT_OP_PAGING:
     {
         struct mem_event_domain *med = &d->mem_event->paging;
@@ -656,6 +665,7 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
         }
     }
     break;
+#endif
 
     default:
         rc = -ENOSYS;
@@ -695,7 +705,6 @@
 
     vcpu_unpause(v);
 }
-#endif
 
 /*
  * Local variables:
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index 06c93a0..f3d1f33 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -2,9 +2,55 @@
 #define _XEN_P2M_H
 
 #include <xen/mm.h>
+#include <public/memory.h>
+#include <public/mem_event.h>
 
 struct domain;
 
+/* List of possible type for each page in the p2m entry.
+ * The number of available bit per page in the pte for this purpose is 4 bits.
+ * So it's possible to only have 16 fields. If we run out of value in the
+ * future, it's possible to use higher value for pseudo-type and don't store
+ * them in the p2m entry.
+ */
+typedef enum {
+    p2m_invalid = 0,    /* Nothing mapped here */
+    p2m_ram_rw,         /* Normal read/write guest RAM */
+    p2m_ram_ro,         /* Read-only; writes are silently dropped */
+    p2m_mmio_direct,    /* Read/write mapping of genuine MMIO area */
+    p2m_map_foreign,    /* Ram pages from foreign domain */
+    p2m_grant_map_rw,   /* Read/write grant mapping */
+    p2m_grant_map_ro,   /* Read-only grant mapping */
+    /* The types below are only used to decide the page attribute in the P2M */
+    p2m_iommu_map_rw,   /* Read/write iommu mapping */
+    p2m_iommu_map_ro,   /* Read-only iommu mapping */
+    p2m_max_real_type,  /* Types after this won't be store in the p2m */
+} p2m_type_t;
+
+/*
+ * Additional access types, which are used to further restrict
+ * the permissions given by the p2m_type_t memory type. Violations
+ * caused by p2m_access_t restrictions are sent to the mem_event
+ * interface.
+ *
+ * The access permissions are soft state: when any ambigious change of page
+ * type or use occurs, or when pages are flushed, swapped, or at any other
+ * convenient type, the access permissions can get reset to the p2m_domain
+ * default.
+ */
+typedef enum {
+    p2m_access_n    = 0, /* No access permissions allowed */
+    p2m_access_r    = 1,
+    p2m_access_w    = 2,
+    p2m_access_rw   = 3,
+    p2m_access_x    = 4,
+    p2m_access_rx   = 5,
+    p2m_access_wx   = 6,
+    p2m_access_rwx  = 7
+
+    /* NOTE: Assumed to be only 4 bits right now */
+} p2m_access_t;
+
 /* Per-p2m-table state */
 struct p2m_domain {
     /* Lock that protects updates to the p2m */
@@ -38,27 +84,17 @@ struct p2m_domain {
          * at each p2m tree level. */
         unsigned long shattered[4];
     } stats;
-};
 
-/* List of possible type for each page in the p2m entry.
- * The number of available bit per page in the pte for this purpose is 4 bits.
- * So it's possible to only have 16 fields. If we run out of value in the
- * future, it's possible to use higher value for pseudo-type and don't store
- * them in the p2m entry.
- */
-typedef enum {
-    p2m_invalid = 0,    /* Nothing mapped here */
-    p2m_ram_rw,         /* Normal read/write guest RAM */
-    p2m_ram_ro,         /* Read-only; writes are silently dropped */
-    p2m_mmio_direct,    /* Read/write mapping of genuine MMIO area */
-    p2m_map_foreign,    /* Ram pages from foreign domain */
-    p2m_grant_map_rw,   /* Read/write grant mapping */
-    p2m_grant_map_ro,   /* Read-only grant mapping */
-    /* The types below are only used to decide the page attribute in the P2M */
-    p2m_iommu_map_rw,   /* Read/write iommu mapping */
-    p2m_iommu_map_ro,   /* Read-only iommu mapping */
-    p2m_max_real_type,  /* Types after this won't be store in the p2m */
-} p2m_type_t;
+    /* Default P2M access type for each page in the the domain: new pages,
+     * swapped in pages, cleared pages, and pages that are ambiquously
+     * retyped get this access type. See definition of p2m_access_t. */
+    p2m_access_t default_access;
+
+    /* If true, and an access fault comes in and there is no mem_event listener,
+     * pause domain. Otherwise, remove access restrictions. */
+    bool_t access_required;
+
+};
 
 #define p2m_is_foreign(_t)  ((_t) == p2m_map_foreign)
 #define p2m_is_ram(_t)      ((_t) == p2m_ram_rw || (_t) == p2m_ram_ro)
@@ -195,6 +231,30 @@ static inline int get_page_and_type(struct page_info *page,
     return rc;
 }
 
+/* get host p2m table */
+#define p2m_get_hostp2m(d)      (&((d)->arch.p2m))
+
+/* Resumes the running of the VCPU, restarting the last instruction */
+static inline void p2m_mem_access_resume(struct domain *d) {}
+
+/* Set access type for a region of pfns.
+ * If start_pfn == -1ul, sets the default access type */
+static inline
+long p2m_set_mem_access(struct domain *d, unsigned long start_pfn, uint32_t nr,
+                        uint32_t start, uint32_t mask, xenmem_access_t access)
+{
+    return -ENOSYS;
+}
+
+/* Get access type for a pfn
+ * If pfn == -1ul, gets the default access type */
+static inline
+int p2m_get_mem_access(struct domain *d, unsigned long pfn,
+                       xenmem_access_t *access)
+{
+    return -ENOSYS;
+}
+
 #endif /* _XEN_P2M_H */
 
 /*
diff --git a/xen/include/xen/mem_access.h b/xen/include/xen/mem_access.h
index ded5441..5c7c5fd 100644
--- a/xen/include/xen/mem_access.h
+++ b/xen/include/xen/mem_access.h
@@ -23,29 +23,10 @@
 #ifndef _XEN_ASM_MEM_ACCESS_H
 #define _XEN_ASM_MEM_ACCESS_H
 
-#ifdef CONFIG_X86
-
 int mem_access_memop(unsigned long cmd,
                      XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg);
 int mem_access_send_req(struct domain *d, mem_event_request_t *req);
 
-#else
-
-static inline
-int mem_access_memop(unsigned long cmd,
-                     XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg)
-{
-    return -ENOSYS;
-}
-
-static inline
-int mem_access_send_req(struct domain *d, mem_event_request_t *req)
-{
-    return -ENOSYS;
-}
-
-#endif /* CONFIG_X86 */
-
 #endif /* _XEN_ASM_MEM_ACCESS_H */
 
 /*
diff --git a/xen/include/xen/mem_event.h b/xen/include/xen/mem_event.h
index a28d453..e2a9d4d 100644
--- a/xen/include/xen/mem_event.h
+++ b/xen/include/xen/mem_event.h
@@ -24,8 +24,6 @@
 #ifndef __MEM_EVENT_H__
 #define __MEM_EVENT_H__
 
-#ifdef CONFIG_X86
-
 /* Clean up on domain destruction */
 void mem_event_cleanup(struct domain *d);
 
@@ -67,66 +65,25 @@ void mem_event_put_request(struct domain *d, struct mem_event_domain *med,
 int mem_event_get_response(struct domain *d, struct mem_event_domain *med,
                            mem_event_response_t *rsp);
 
-int do_mem_event_op(int op, uint32_t domain, void *arg);
 int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
                      XEN_GUEST_HANDLE_PARAM(void) u_domctl);
 
 void mem_event_vcpu_pause(struct vcpu *v);
 void mem_event_vcpu_unpause(struct vcpu *v);
 
-#else
-
-static inline void mem_event_cleanup(struct domain *d) {}
-
-static inline bool_t mem_event_check_ring(struct mem_event_domain *med)
-{
-    return 0;
-}
-
-static inline int mem_event_claim_slot(struct domain *d,
-                                       struct mem_event_domain *med)
-{
-    return -ENOSYS;
-}
-
-static inline int mem_event_claim_slot_nosleep(struct domain *d,
-                                       struct mem_event_domain *med)
-{
-    return -ENOSYS;
-}
-
-static inline
-void mem_event_cancel_slot(struct domain *d, struct mem_event_domain *med)
-{}
-
-static inline
-void mem_event_put_request(struct domain *d, struct mem_event_domain *med,
-                           mem_event_request_t *req)
-{}
+#ifdef CONFIG_X86
 
-static inline
-int mem_event_get_response(struct domain *d, struct mem_event_domain *med,
-                           mem_event_response_t *rsp)
-{
-    return -ENOSYS;
-}
+int do_mem_event_op(int op, uint32_t domain, void *arg);
 
-static inline int do_mem_event_op(int op, uint32_t domain, void *arg)
-{
-    return -ENOSYS;
-}
+#else
 
 static inline
-int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
-                     XEN_GUEST_HANDLE_PARAM(void) u_domctl)
+int do_mem_event_op(int op, uint32_t domain, void *arg)
 {
     return -ENOSYS;
 }
 
-static inline void mem_event_vcpu_pause(struct vcpu *v) {}
-static inline void mem_event_vcpu_unpause(struct vcpu *v) {}
-
-#endif /* CONFIG_X86 */
+#endif
 
 #endif /* __MEM_EVENT_H__ */

diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 4575dda..2365fad 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -1,4 +1,3 @@
-
 #ifndef __SCHED_H__
 #define __SCHED_H__

diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index c5aa316..61677ea 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -507,6 +507,18 @@ static XSM_INLINE int xsm_hvm_param_nested(XSM_DEFAULT_ARG struct domain *d)
     return xsm_default_action(action, current->domain, d);
 }
 
+static XSM_INLINE int xsm_mem_event_control(XSM_DEFAULT_ARG struct domain *d, int mode, int op)
+{
+    XSM_ASSERT_ACTION(XSM_PRIV);
+    return xsm_default_action(action, current->domain, d);
+}
+
+static XSM_INLINE int xsm_mem_event_op(XSM_DEFAULT_ARG struct domain *d, int op)
+{
+    XSM_ASSERT_ACTION(XSM_DM_PRIV);
+    return xsm_default_action(action, current->domain, d);
+}
+
 #ifdef CONFIG_X86
 static XSM_INLINE int xsm_do_mca(XSM_DEFAULT_VOID)
 {
@@ -550,18 +562,6 @@ static XSM_INLINE int xsm_hvm_ioreq_server(XSM_DEFAULT_ARG struct domain *d, int
     return xsm_default_action(action, current->domain, d);
 }
 
-static XSM_INLINE int xsm_mem_event_control(XSM_DEFAULT_ARG struct domain *d, int mode, int op)
-{
-    XSM_ASSERT_ACTION(XSM_PRIV);
-    return xsm_default_action(action, current->domain, d);
-}
-
-static XSM_INLINE int xsm_mem_event_op(XSM_DEFAULT_ARG struct domain *d, int op)
-{
-    XSM_ASSERT_ACTION(XSM_DM_PRIV);
-    return xsm_default_action(action, current->domain, d);
-}
-
 static XSM_INLINE int xsm_mem_sharing_op(XSM_DEFAULT_ARG struct domain *d, struct domain *cd, int op)
 {
     XSM_ASSERT_ACTION(XSM_DM_PRIV);
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index a85045d..64289cd 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -140,6 +140,9 @@ struct xsm_operations {
     int (*hvm_control) (struct domain *d, unsigned long op);
     int (*hvm_param_nested) (struct domain *d);
 
+    int (*mem_event_control) (struct domain *d, int mode, int op);
+    int (*mem_event_op) (struct domain *d, int op);
+
 #ifdef CONFIG_X86
     int (*do_mca) (void);
     int (*shadow_control) (struct domain *d, uint32_t op);
@@ -148,8 +151,6 @@ struct xsm_operations {
     int (*hvm_set_pci_link_route) (struct domain *d);
     int (*hvm_inject_msi) (struct domain *d);
     int (*hvm_ioreq_server) (struct domain *d, int op);
-    int (*mem_event_control) (struct domain *d, int mode, int op);
-    int (*mem_event_op) (struct domain *d, int op);
     int (*mem_sharing_op) (struct domain *d, struct domain *cd, int op);
     int (*apic) (struct domain *d, int cmd);
     int (*memtype) (uint32_t access);
@@ -534,6 +535,16 @@ static inline int xsm_hvm_param_nested (xsm_default_t def, struct domain *d)
     return xsm_ops->hvm_param_nested(d);
 }
 
+static inline int xsm_mem_event_control (xsm_default_t def, struct domain *d, int mode, int op)
+{
+    return xsm_ops->mem_event_control(d, mode, op);
+}
+
+static inline int xsm_mem_event_op (xsm_default_t def, struct domain *d, int op)
+{
+    return xsm_ops->mem_event_op(d, op);
+}
+
 #ifdef CONFIG_X86
 static inline int xsm_do_mca(xsm_default_t def)
 {
@@ -570,16 +581,6 @@ static inline int xsm_hvm_ioreq_server (xsm_default_t def, struct domain *d, int
     return xsm_ops->hvm_ioreq_server(d, op);
 }
 
-static inline int xsm_mem_event_control (xsm_default_t def, struct domain *d, int mode, int op)
-{
-    return xsm_ops->mem_event_control(d, mode, op);
-}
-
-static inline int xsm_mem_event_op (xsm_default_t def, struct domain *d, int op)
-{
-    return xsm_ops->mem_event_op(d, op);
-}
-
 static inline int xsm_mem_sharing_op (xsm_default_t def, struct domain *d, struct domain *cd, int op)
 {
     return xsm_ops->mem_sharing_op(d, cd, op);
diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
index c95c803..9df9d81 100644
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -116,6 +116,8 @@ void xsm_fixup_ops (struct xsm_operations *ops)
     set_to_dummy_if_null(ops, add_to_physmap);
     set_to_dummy_if_null(ops, remove_from_physmap);
     set_to_dummy_if_null(ops, map_gmfn_foreign);
+    set_to_dummy_if_null(ops, mem_event_control);
+    set_to_dummy_if_null(ops, mem_event_op);
 
 #ifdef CONFIG_X86
     set_to_dummy_if_null(ops, do_mca);
@@ -125,8 +127,6 @@ void xsm_fixup_ops (struct xsm_operations *ops)
     set_to_dummy_if_null(ops, hvm_set_pci_link_route);
     set_to_dummy_if_null(ops, hvm_inject_msi);
     set_to_dummy_if_null(ops, hvm_ioreq_server);
-    set_to_dummy_if_null(ops, mem_event_control);
-    set_to_dummy_if_null(ops, mem_event_op);
     set_to_dummy_if_null(ops, mem_sharing_op);
     set_to_dummy_if_null(ops, apic);
     set_to_dummy_if_null(ops, platform_op);
--
2.0.1


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.