
To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] mem_event: pass mem_event_domain pointer to mem_event functions
From: Xen patchbot-unstable <patchbot@xxxxxxx>
Date: Fri, 16 Sep 2011 16:00:11 +0100
Delivery-date: Fri, 16 Sep 2011 08:12:50 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Olaf Hering <olaf@xxxxxxxxx>
# Date 1316171611 -3600
# Node ID ed7586b1d515611e713b6ace72103e08d10a3347
# Parent  4815be3af73cd22d8225754ef5fff0ca03759ad7
mem_event: pass mem_event_domain pointer to mem_event functions

Pass a struct mem_event_domain pointer to the various mem_event
functions.  This will be used in a subsequent patch which creates
different ring buffers for the memshare, xenpaging and memaccess
functionality.

Remove the struct domain argument from some functions.

Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
Committed-by: Tim Deegan <tim@xxxxxxx>
---
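
(Illustrative sketch only, not part of this changeset.) With the ring state reachable through a struct mem_event_domain pointer, the follow-up patch mentioned in the commit message could, for instance, keep one instance per consumer inside struct domain and let callers name the ring they target. The container type and field names below (mem_event_per_domain, share, paging, access) are hypothetical, as is the assumption that d->mem_event becomes such a container:

/* Hypothetical layout after the follow-up split; not introduced by this patch. */
struct mem_event_per_domain
{
    struct mem_event_domain share;    /* memshare ring   */
    struct mem_event_domain paging;   /* xenpaging ring  */
    struct mem_event_domain access;   /* memaccess ring  */
};

/* A caller would then pass the specific ring it means, e.g. the pager's: */
rc = mem_event_check_ring(d, &d->mem_event.paging);

The point of this patch is that the mem_event functions no longer assume which ring inside the domain they operate on; the caller decides by passing the pointer.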


diff -r 4815be3af73c -r ed7586b1d515 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Thu Sep 15 15:26:07 2011 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Fri Sep 16 12:13:31 2011 +0100
@@ -4025,7 +4025,7 @@
     if ( (p & HVMPME_onchangeonly) && (value == old) )
         return 1;
     
-    rc = mem_event_check_ring(d);
+    rc = mem_event_check_ring(d, &d->mem_event);
     if ( rc )
         return rc;
     
@@ -4048,7 +4048,7 @@
         req.gla_valid = 1;
     }
     
-    mem_event_put_request(d, &req);      
+    mem_event_put_request(d, &d->mem_event, &req);
     
     return 1;
 }
diff -r 4815be3af73c -r ed7586b1d515 xen/arch/x86/mm/mem_event.c
--- a/xen/arch/x86/mm/mem_event.c       Thu Sep 15 15:26:07 2011 +0100
+++ b/xen/arch/x86/mm/mem_event.c       Fri Sep 16 12:13:31 2011 +0100
@@ -33,21 +33,21 @@
 #define xen_rmb()  rmb()
 #define xen_wmb()  wmb()
 
-#define mem_event_ring_lock_init(_d)  spin_lock_init(&(_d)->mem_event.ring_lock)
-#define mem_event_ring_lock(_d)       spin_lock(&(_d)->mem_event.ring_lock)
-#define mem_event_ring_unlock(_d)     spin_unlock(&(_d)->mem_event.ring_lock)
+#define mem_event_ring_lock_init(_med)  spin_lock_init(&(_med)->ring_lock)
+#define mem_event_ring_lock(_med)       spin_lock(&(_med)->ring_lock)
+#define mem_event_ring_unlock(_med)     spin_unlock(&(_med)->ring_lock)
 
-static int mem_event_enable(struct domain *d, mfn_t ring_mfn, mfn_t shared_mfn)
+static int mem_event_enable(struct domain *d, struct mem_event_domain *med, mfn_t ring_mfn, mfn_t shared_mfn)
 {
     int rc;
 
     /* Map ring and shared pages */
-    d->mem_event.ring_page = map_domain_page(mfn_x(ring_mfn));
-    if ( d->mem_event.ring_page == NULL )
+    med->ring_page = map_domain_page(mfn_x(ring_mfn));
+    if ( med->ring_page == NULL )
         goto err;
 
-    d->mem_event.shared_page = map_domain_page(mfn_x(shared_mfn));
-    if ( d->mem_event.shared_page == NULL )
+    med->shared_page = map_domain_page(mfn_x(shared_mfn));
+    if ( med->shared_page == NULL )
         goto err_ring;
 
     /* Allocate event channel */
@@ -56,15 +56,15 @@
     if ( rc < 0 )
         goto err_shared;
 
-    ((mem_event_shared_page_t *)d->mem_event.shared_page)->port = rc;
-    d->mem_event.xen_port = rc;
+    ((mem_event_shared_page_t *)med->shared_page)->port = rc;
+    med->xen_port = rc;
 
     /* Prepare ring buffer */
-    FRONT_RING_INIT(&d->mem_event.front_ring,
-                    (mem_event_sring_t *)d->mem_event.ring_page,
+    FRONT_RING_INIT(&med->front_ring,
+                    (mem_event_sring_t *)med->ring_page,
                     PAGE_SIZE);
 
-    mem_event_ring_lock_init(d);
+    mem_event_ring_lock_init(med);
 
     /* Wake any VCPUs paused for memory events */
     mem_event_unpause_vcpus(d);
@@ -72,34 +72,34 @@
     return 0;
 
  err_shared:
-    unmap_domain_page(d->mem_event.shared_page);
-    d->mem_event.shared_page = NULL;
+    unmap_domain_page(med->shared_page);
+    med->shared_page = NULL;
  err_ring:
-    unmap_domain_page(d->mem_event.ring_page);
-    d->mem_event.ring_page = NULL;
+    unmap_domain_page(med->ring_page);
+    med->ring_page = NULL;
  err:
     return 1;
 }
 
-static int mem_event_disable(struct domain *d)
+static int mem_event_disable(struct mem_event_domain *med)
 {
-    unmap_domain_page(d->mem_event.ring_page);
-    d->mem_event.ring_page = NULL;
+    unmap_domain_page(med->ring_page);
+    med->ring_page = NULL;
 
-    unmap_domain_page(d->mem_event.shared_page);
-    d->mem_event.shared_page = NULL;
+    unmap_domain_page(med->shared_page);
+    med->shared_page = NULL;
 
     return 0;
 }
 
-void mem_event_put_request(struct domain *d, mem_event_request_t *req)
+void mem_event_put_request(struct domain *d, struct mem_event_domain *med, mem_event_request_t *req)
 {
     mem_event_front_ring_t *front_ring;
     RING_IDX req_prod;
 
-    mem_event_ring_lock(d);
+    mem_event_ring_lock(med);
 
-    front_ring = &d->mem_event.front_ring;
+    front_ring = &med->front_ring;
     req_prod = front_ring->req_prod_pvt;
 
     /* Copy request */
@@ -107,23 +107,23 @@
     req_prod++;
 
     /* Update ring */
-    d->mem_event.req_producers--;
+    med->req_producers--;
     front_ring->req_prod_pvt = req_prod;
     RING_PUSH_REQUESTS(front_ring);
 
-    mem_event_ring_unlock(d);
+    mem_event_ring_unlock(med);
 
-    notify_via_xen_event_channel(d, d->mem_event.xen_port);
+    notify_via_xen_event_channel(d, med->xen_port);
 }
 
-void mem_event_get_response(struct domain *d, mem_event_response_t *rsp)
+void mem_event_get_response(struct mem_event_domain *med, mem_event_response_t *rsp)
 {
     mem_event_front_ring_t *front_ring;
     RING_IDX rsp_cons;
 
-    mem_event_ring_lock(d);
+    mem_event_ring_lock(med);
 
-    front_ring = &d->mem_event.front_ring;
+    front_ring = &med->front_ring;
     rsp_cons = front_ring->rsp_cons;
 
     /* Copy response */
@@ -134,7 +134,7 @@
     front_ring->rsp_cons = rsp_cons;
     front_ring->sring->rsp_event = rsp_cons + 1;
 
-    mem_event_ring_unlock(d);
+    mem_event_ring_unlock(med);
 }
 
 void mem_event_unpause_vcpus(struct domain *d)
@@ -152,35 +152,35 @@
     vcpu_sleep_nosync(v);
 }
 
-void mem_event_put_req_producers(struct domain *d)
+void mem_event_put_req_producers(struct mem_event_domain *med)
 {
-    mem_event_ring_lock(d);
-    d->mem_event.req_producers--;
-    mem_event_ring_unlock(d);
+    mem_event_ring_lock(med);
+    med->req_producers--;
+    mem_event_ring_unlock(med);
 }
 
-int mem_event_check_ring(struct domain *d)
+int mem_event_check_ring(struct domain *d, struct mem_event_domain *med)
 {
     struct vcpu *curr = current;
     int free_requests;
     int ring_full = 1;
 
-    if ( !d->mem_event.ring_page )
+    if ( !med->ring_page )
         return -1;
 
-    mem_event_ring_lock(d);
+    mem_event_ring_lock(med);
 
-    free_requests = RING_FREE_REQUESTS(&d->mem_event.front_ring);
-    if ( d->mem_event.req_producers < free_requests )
+    free_requests = RING_FREE_REQUESTS(&med->front_ring);
+    if ( med->req_producers < free_requests )
     {
-        d->mem_event.req_producers++;
+        med->req_producers++;
         ring_full = 0;
     }
 
-    if ( (curr->domain->domain_id == d->domain_id) && ring_full )
+    if ( ring_full && (curr->domain == d) )
         mem_event_mark_and_pause(curr);
 
-    mem_event_ring_unlock(d);
+    mem_event_ring_unlock(med);
 
     return ring_full;
 }
@@ -230,6 +230,7 @@
         {
             struct domain *dom_mem_event = current->domain;
             struct vcpu *v = current;
+            struct mem_event_domain *med = &d->mem_event;
             unsigned long ring_addr = mec->ring_addr;
             unsigned long shared_addr = mec->shared_addr;
             l1_pgentry_t l1e;
@@ -242,7 +243,7 @@
              * the cache is in an undefined state and so is the guest
              */
             rc = -EBUSY;
-            if ( d->mem_event.ring_page )
+            if ( med->ring_page )
                 break;
 
             /* Currently only EPT is supported */
@@ -270,7 +271,7 @@
                 break;
 
             rc = -EINVAL;
-            if ( mem_event_enable(d, ring_mfn, shared_mfn) != 0 )
+            if ( mem_event_enable(d, med, ring_mfn, shared_mfn) != 0 )
                 break;
 
             rc = 0;
@@ -279,7 +280,7 @@
 
         case XEN_DOMCTL_MEM_EVENT_OP_DISABLE:
         {
-            rc = mem_event_disable(d);
+            rc = mem_event_disable(&d->mem_event);
         }
         break;
 
diff -r 4815be3af73c -r ed7586b1d515 xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c     Thu Sep 15 15:26:07 2011 +0100
+++ b/xen/arch/x86/mm/mem_sharing.c     Fri Sep 16 12:13:31 2011 +0100
@@ -281,12 +281,12 @@
     vcpu_pause_nosync(v);
     req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
 
-    if(mem_event_check_ring(d)) return page;
+    if(mem_event_check_ring(d, &d->mem_event)) return page;
 
     req.gfn = gfn;
     req.p2mt = p2m_ram_shared;
     req.vcpu_id = v->vcpu_id;
-    mem_event_put_request(d, &req);
+    mem_event_put_request(d, &d->mem_event, &req);
 
     return page;
 }
@@ -301,7 +301,7 @@
     mem_event_response_t rsp;
 
     /* Get request off the ring */
-    mem_event_get_response(d, &rsp);
+    mem_event_get_response(&d->mem_event, &rsp);
 
     /* Unpause domain/vcpu */
     if( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
diff -r 4815be3af73c -r ed7586b1d515 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Thu Sep 15 15:26:07 2011 +0100
+++ b/xen/arch/x86/mm/p2m.c     Fri Sep 16 12:13:31 2011 +0100
@@ -755,7 +755,7 @@
     mem_event_request_t req;
 
     /* Check that there's space on the ring for this request */
-    if ( mem_event_check_ring(d) == 0)
+    if ( mem_event_check_ring(d, &d->mem_event) == 0)
     {
         /* Send release notification to pager */
         memset(&req, 0, sizeof(req));
@@ -763,7 +763,7 @@
         req.gfn = gfn;
         req.vcpu_id = v->vcpu_id;
 
-        mem_event_put_request(d, &req);
+        mem_event_put_request(d, &d->mem_event, &req);
     }
 }
 
@@ -775,7 +775,7 @@
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
     /* Check that there's space on the ring for this request */
-    if ( mem_event_check_ring(d) )
+    if ( mem_event_check_ring(d, &d->mem_event) )
         return;
 
     memset(&req, 0, sizeof(req));
@@ -803,7 +803,7 @@
     else if ( p2mt != p2m_ram_paging_out && p2mt != p2m_ram_paged )
     {
         /* gfn is already on its way back and vcpu is not paused */
-        mem_event_put_req_producers(d);
+        mem_event_put_req_producers(&d->mem_event);
         return;
     }
 
@@ -812,7 +812,7 @@
     req.p2mt = p2mt;
     req.vcpu_id = v->vcpu_id;
 
-    mem_event_put_request(d, &req);
+    mem_event_put_request(d, &d->mem_event, &req);
 }
 
 int p2m_mem_paging_prep(struct domain *d, unsigned long gfn)
@@ -842,7 +842,7 @@
     mfn_t mfn;
 
     /* Pull the response off the ring */
-    mem_event_get_response(d, &rsp);
+    mem_event_get_response(&d->mem_event, &rsp);
 
     /* Fix p2m entry if the page was not dropped */
     if ( !(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) )
@@ -889,7 +889,7 @@
     p2m_unlock(p2m);
 
     /* Otherwise, check if there is a memory event listener, and send the message along */
-    res = mem_event_check_ring(d);
+    res = mem_event_check_ring(d, &d->mem_event);
     if ( res < 0 ) 
     {
         /* No listener */
@@ -933,7 +933,7 @@
     
     req.vcpu_id = v->vcpu_id;
 
-    mem_event_put_request(d, &req);   
+    mem_event_put_request(d, &d->mem_event, &req);
 
     /* VCPU paused, mem event request sent */
 }
@@ -943,7 +943,7 @@
     struct domain *d = p2m->domain;
     mem_event_response_t rsp;
 
-    mem_event_get_response(d, &rsp);
+    mem_event_get_response(&d->mem_event, &rsp);
 
     /* Unpause domain */
     if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
diff -r 4815be3af73c -r ed7586b1d515 xen/include/asm-x86/mem_event.h
--- a/xen/include/asm-x86/mem_event.h   Thu Sep 15 15:26:07 2011 +0100
+++ b/xen/include/asm-x86/mem_event.h   Fri Sep 16 12:13:31 2011 +0100
@@ -26,10 +26,10 @@
 
 /* Pauses VCPU while marking pause flag for mem event */
 void mem_event_mark_and_pause(struct vcpu *v);
-int mem_event_check_ring(struct domain *d);
-void mem_event_put_req_producers(struct domain *d);
-void mem_event_put_request(struct domain *d, mem_event_request_t *req);
-void mem_event_get_response(struct domain *d, mem_event_response_t *rsp);
+int mem_event_check_ring(struct domain *d, struct mem_event_domain *med);
+void mem_event_put_req_producers(struct mem_event_domain *med);
+void mem_event_put_request(struct domain *d, struct mem_event_domain *med, mem_event_request_t *req);
+void mem_event_get_response(struct mem_event_domain *med, mem_event_response_t *rsp);
 void mem_event_unpause_vcpus(struct domain *d);
 
 int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,

