
[Xen-devel] [PATCH for-4.5 v8 04/19] xen: Relocate p2m_mem_access_resume to mem_access common



Relocate p2m_mem_access_resume to common and abstract the new
p2m_mem_event_emulate_check into the p2m layer as well.

Signed-off-by: Tamas K Lengyel <tklengyel@xxxxxxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
---
v8: Abstract p2m_mem_event_emulate_check.
v6: Keep the comment describing the function.
v5: Style fix.
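
Note for reviewers (not part of the commit message): below is a minimal,
stand-alone sketch of the permission check that the new
p2m_mem_event_emulate_check hook performs, i.e. whether the faulting
(r,w,x) access still violates the configured access type and therefore
whether the EMULATE flags from the response should be kept. The access_t
enum and is_violation helper are simplified stand-ins for illustration
only, not the hypervisor's types.

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-in for xenmem_access_t (illustration only). */
    typedef enum {
        access_n, access_r, access_w, access_x,
        access_rx, access_wx, access_rw, access_rwx,
        access_rx2rw, access_n2rwx
    } access_t;

    /* Does an (r,w,x) fault violate the configured access type? */
    static bool is_violation(access_t a, bool r, bool w, bool x)
    {
        switch ( a )
        {
        case access_r:      return w || x;
        case access_w:      return r || x;
        case access_x:      return r || w;
        case access_rx:
        case access_rx2rw:  return w;
        case access_wx:     return r;
        case access_rw:     return x;
        case access_rwx:    return false;
        case access_n:
        case access_n2rwx:
        default:            return r || w || x;
        }
    }

    int main(void)
    {
        /* A write fault on an rx page is still a violation, so the
         * EMULATE flags from the response would be kept; on an rwx
         * page they would be cleared. */
        printf("rx  + write: %d\n", is_violation(access_rx, false, true, false));
        printf("rwx + write: %d\n", is_violation(access_rwx, false, true, false));
        return 0;
    }

Only when the fault is still a violation under the current settings does
the vcpu keep rsp->flags for emulation; otherwise the flags are cleared.
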
---
 xen/arch/x86/mm/p2m.c        | 128 ++++++++++++++++++-------------------------
 xen/common/mem_access.c      |  28 +++++++++-
 xen/common/mem_event.c       |   2 +-
 xen/include/asm-arm/p2m.h    |   7 +++
 xen/include/asm-x86/p2m.h    |   7 ++-
 xen/include/xen/mem_access.h |   5 ++
 6 files changed, 99 insertions(+), 78 deletions(-)

diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 31d0d9e..92b20bc 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1382,6 +1382,60 @@ static void p2m_mem_event_fill_regs(mem_event_request_t *req)
     req->x86_regs.cs_arbytes = seg.attr.bytes;
 }
 
+void p2m_mem_event_emulate_check(struct domain *d, const mem_event_response_t *rsp)
+{
+    /* Mark vcpu for skipping one instruction upon rescheduling. */
+    if ( rsp->flags & MEM_EVENT_FLAG_EMULATE )
+    {
+        struct vcpu *v = current;
+        xenmem_access_t access;
+        bool_t violation = 1;
+
+        if ( p2m_get_mem_access(d, rsp->gfn, &access) == 0 )
+        {
+            switch ( access )
+            {
+            case XENMEM_access_n:
+            case XENMEM_access_n2rwx:
+            default:
+                violation = rsp->access_r || rsp->access_w || rsp->access_x;
+                break;
+
+            case XENMEM_access_r:
+                violation = rsp->access_w || rsp->access_x;
+                break;
+
+            case XENMEM_access_w:
+                violation = rsp->access_r || rsp->access_x;
+                break;
+
+            case XENMEM_access_x:
+                violation = rsp->access_r || rsp->access_w;
+                break;
+
+            case XENMEM_access_rx:
+            case XENMEM_access_rx2rw:
+                violation = rsp->access_w;
+                break;
+
+            case XENMEM_access_wx:
+                violation = rsp->access_r;
+                break;
+
+            case XENMEM_access_rw:
+                violation = rsp->access_x;
+                break;
+
+            case XENMEM_access_rwx:
+                violation = 0;
+                break;
+            }
+        }
+
+        v->arch.mem_event.emulate_flags = violation ? rsp->flags : 0;
+    }
+}
+
 bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
                             struct npfec npfec,
                             mem_event_request_t **req_ptr)
@@ -1509,80 +1563,6 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
     return (p2ma == p2m_access_n2rwx);
 }
 
-void p2m_mem_access_resume(struct domain *d)
-{
-    mem_event_response_t rsp;
-
-    /* Pull all responses off the ring */
-    while( mem_event_get_response(d, &d->mem_event->access, &rsp) )
-    {
-        struct vcpu *v;
-
-        if ( rsp.flags & MEM_EVENT_FLAG_DUMMY )
-            continue;
-
-        /* Validate the vcpu_id in the response. */
-        if ( (rsp.vcpu_id >= d->max_vcpus) || !d->vcpu[rsp.vcpu_id] )
-            continue;
-
-        v = d->vcpu[rsp.vcpu_id];
-
-        /* Mark vcpu for skipping one instruction upon rescheduling. */
-        if ( rsp.flags & MEM_EVENT_FLAG_EMULATE )
-        {
-            xenmem_access_t access;
-            bool_t violation = 1;
-
-            if ( p2m_get_mem_access(d, rsp.gfn, &access) == 0 )
-            {
-                switch ( access )
-                {
-                case XENMEM_access_n:
-                case XENMEM_access_n2rwx:
-                default:
-                    violation = rsp.access_r || rsp.access_w || rsp.access_x;
-                    break;
-
-                case XENMEM_access_r:
-                    violation = rsp.access_w || rsp.access_x;
-                    break;
-
-                case XENMEM_access_w:
-                    violation = rsp.access_r || rsp.access_x;
-                    break;
-
-                case XENMEM_access_x:
-                    violation = rsp.access_r || rsp.access_w;
-                    break;
-
-                case XENMEM_access_rx:
-                case XENMEM_access_rx2rw:
-                    violation = rsp.access_w;
-                    break;
-
-                case XENMEM_access_wx:
-                    violation = rsp.access_r;
-                    break;
-
-                case XENMEM_access_rw:
-                    violation = rsp.access_x;
-                    break;
-
-                case XENMEM_access_rwx:
-                    violation = 0;
-                    break;
-                }
-            }
-
-            v->arch.mem_event.emulate_flags = violation ? rsp.flags : 0;
-        }
-
-        /* Unpause domain */
-        if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
-            mem_event_vcpu_unpause(v);
-    }
-}
-
 /* Set access type for a region of pfns.
  * If start_pfn == -1ul, sets the default access type */
 long p2m_set_mem_access(struct domain *d, unsigned long pfn, uint32_t nr,
diff --git a/xen/common/mem_access.c b/xen/common/mem_access.c
index 9a8c1a9..42423c1 100644
--- a/xen/common/mem_access.c
+++ b/xen/common/mem_access.c
@@ -29,6 +29,32 @@
 #include <asm/p2m.h>
 #include <xsm/xsm.h>
 
+void mem_access_resume(struct domain *d)
+{
+    mem_event_response_t rsp;
+
+    /* Pull all responses off the ring. */
+    while ( mem_event_get_response(d, &d->mem_event->access, &rsp) )
+    {
+        struct vcpu *v;
+
+        if ( rsp.flags & MEM_EVENT_FLAG_DUMMY )
+            continue;
+
+        /* Validate the vcpu_id in the response. */
+        if ( (rsp.vcpu_id >= d->max_vcpus) || !d->vcpu[rsp.vcpu_id] )
+            continue;
+
+        v = d->vcpu[rsp.vcpu_id];
+
+        p2m_mem_event_emulate_check(d, &rsp);
+
+        /* Unpause domain. */
+        if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
+            mem_event_vcpu_unpause(v);
+    }
+}
+
 int mem_access_memop(unsigned long cmd,
                      XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg)
 {
@@ -58,7 +84,7 @@ int mem_access_memop(unsigned long cmd,
     switch ( mao.op )
     {
     case XENMEM_access_op_resume:
-        p2m_mem_access_resume(d);
+        mem_access_resume(d);
         rc = 0;
         break;
 
diff --git a/xen/common/mem_event.c b/xen/common/mem_event.c
index 7cc99b3..9f1a1b0 100644
--- a/xen/common/mem_event.c
+++ b/xen/common/mem_event.c
@@ -439,7 +439,7 @@ static void mem_paging_notification(struct vcpu *v, unsigned int port)
 static void mem_access_notification(struct vcpu *v, unsigned int port)
 {
     if ( likely(v->domain->mem_event->access.ring_page != NULL) )
-        p2m_mem_access_resume(v->domain);
+        mem_access_resume(v->domain);
 }
 
 #ifdef HAS_MEM_SHARING
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index faf14d3..b64dd9a 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -66,6 +66,13 @@ typedef enum {
     p2m_max_real_type,  /* Types after this won't be store in the p2m */
 } p2m_type_t;
 
+static inline
+void p2m_mem_event_emulate_check(struct domain *d,
+                                 const mem_event_response_t *rsp)
+{
+    /* Not supported on ARM. */
+}
+
 #define p2m_is_foreign(_t)  ((_t) == p2m_map_foreign)
 #define p2m_is_ram(_t)      ((_t) == p2m_ram_rw || (_t) == p2m_ram_ro)
 
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index a2a6289..2513c6f 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -573,8 +573,6 @@ void p2m_mem_paging_resume(struct domain *d);
 bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
                             struct npfec npfec,
                             mem_event_request_t **req_ptr);
-/* Resumes the running of the VCPU, restarting the last instruction */
-void p2m_mem_access_resume(struct domain *d);
 
 /* Set access type for a region of pfns.
  * If start_pfn == -1ul, sets the default access type */
@@ -586,6 +584,11 @@ long p2m_set_mem_access(struct domain *d, unsigned long start_pfn, uint32_t nr,
 int p2m_get_mem_access(struct domain *d, unsigned long pfn,
                        xenmem_access_t *access);
 
+/* Check for emulation and mark vcpu for skipping one instruction
+ * upon rescheduling if required. */
+void p2m_mem_event_emulate_check(struct domain *d,
+                                 const mem_event_response_t *rsp);
+
 /* 
  * Internal functions, only called by other p2m code
  */
diff --git a/xen/include/xen/mem_access.h b/xen/include/xen/mem_access.h
index 19d1a2d..6ceb2a4 100644
--- a/xen/include/xen/mem_access.h
+++ b/xen/include/xen/mem_access.h
@@ -31,6 +31,9 @@ int mem_access_memop(unsigned long cmd,
                      XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg);
 int mem_access_send_req(struct domain *d, mem_event_request_t *req);
 
+/* Resumes the running of the VCPU, restarting the last instruction */
+void mem_access_resume(struct domain *d);
+
 #else
 
 static inline
@@ -46,6 +49,8 @@ int mem_access_send_req(struct domain *d, mem_event_request_t *req)
     return -ENOSYS;
 }
 
+static inline void mem_access_resume(struct domain *d) {}
+
 #endif /* HAS_MEM_ACCESS */
 
 #endif /* _XEN_ASM_MEM_ACCESS_H */
-- 
2.1.0

