WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-changelog

[Xen-changelog] [xen-unstable] Core support for memory events.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] Core support for memory events.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Wed, 16 Dec 2009 22:40:20 -0800
Delivery-date: Wed, 16 Dec 2009 22:41:54 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1261031275 0
# Node ID a1ab94c514b80b3f0845465752ed72cfe4fafa73
# Parent  49ad2a499edba92874590ecf72a7c7d216576099
Core support for memory events.
This includes enable/disable, ring functions, and vcpu pause/unpause.

Signed-off-by: Patrick Colp <Patrick.Colp@xxxxxxxxxx>
---
 xen/arch/x86/mm/Makefile        |    1 
 xen/arch/x86/mm/mem_event.c     |  199 ++++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/mem_event.h |   68 +++++++++++++
 3 files changed, 268 insertions(+)

diff -r 49ad2a499edb -r a1ab94c514b8 xen/arch/x86/mm/Makefile
--- a/xen/arch/x86/mm/Makefile  Thu Dec 17 06:27:55 2009 +0000
+++ b/xen/arch/x86/mm/Makefile  Thu Dec 17 06:27:55 2009 +0000
@@ -6,6 +6,7 @@ obj-y += guest_walk_2.o
 obj-y += guest_walk_2.o
 obj-y += guest_walk_3.o
 obj-$(x86_64) += guest_walk_4.o
+obj-y += mem_event.o
 
 guest_walk_%.o: guest_walk.c Makefile
        $(CC) $(CFLAGS) -DGUEST_PAGING_LEVELS=$* -c $< -o $@
diff -r 49ad2a499edb -r a1ab94c514b8 xen/arch/x86/mm/mem_event.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/x86/mm/mem_event.c       Thu Dec 17 06:27:55 2009 +0000
@@ -0,0 +1,199 @@
+/******************************************************************************
+ * arch/x86/mm/mem_event.c
+ *
+ * Memory event support.
+ *
+ * Copyright (c) 2009 Citrix (R&D) Ltd. (Patrick Colp)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+
+#include <xen/event.h>
+#include <asm/p2m.h>
+#include <asm/mem_event.h>
+
+
+#define xen_mb()   mb()
+#define xen_rmb()  rmb()
+#define xen_wmb()  wmb()
+
+
+#define MEM_EVENT_RING_THRESHOLD 4
+
+
+/*
+ * Wake the listener bound to @d's mem_event Xen event channel.  Installed as
+ * a tasklet callback by mem_event_enable(), so it runs asynchronously after
+ * mem_event_put_request() schedules it.
+ *
+ * NOTE(review): prepare_wait_on_xen_event_channel() is normally invoked by a
+ * vcpu that is about to block on the channel; calling it from tasklet context
+ * looks unusual -- confirm the intended semantics.
+ */
+static void mem_event_notify(struct domain *d)
+{
+    prepare_wait_on_xen_event_channel(d->mem_event.xen_port);
+    notify_via_xen_event_channel(d->mem_event.xen_port);
+}
+
+
+/*
+ * Enable memory-event delivery for domain @d.
+ *
+ * @d:          target domain.
+ * @ring_mfn:   machine frame backing the request/response ring
+ *              (mem_event_sring_t, one page).
+ * @shared_mfn: machine frame backing the shared page through which the
+ *              event-channel port number is published to the listener.
+ *
+ * Maps both pages into Xen, allocates an unbound event channel for the
+ * calling domain to bind, sets up the notification tasklet and the front
+ * ring, and finally flips d->mem_event.enabled.
+ *
+ * Returns 0 on success, 1 on failure.  NOTE(review): callers must test for
+ * non-zero rather than a negative errno value -- confirm this matches the
+ * callers' convention.
+ */
+int mem_event_enable(struct domain *d, mfn_t ring_mfn, mfn_t shared_mfn)
+{
+    int rc;
+
+    /* Map ring and shared pages */
+    d->mem_event.ring_page = map_domain_page(mfn_x(ring_mfn));
+    if ( d->mem_event.ring_page == NULL )
+        goto err;
+
+    d->mem_event.shared_page = map_domain_page(mfn_x(shared_mfn));
+    if ( d->mem_event.shared_page == NULL )
+        goto err_ring;
+
+    /* Allocate event channel */
+    rc = alloc_unbound_xen_event_channel(d->vcpu[0],
+                                         current->domain->domain_id);
+    if ( rc < 0 )
+        goto err_shared;
+
+    /* Publish the port to the listener via the shared page. */
+    ((mem_event_shared_page_t *)d->mem_event.shared_page)->port = rc;
+    d->mem_event.xen_port = rc;
+
+    /* Initialise tasklet */
+    /* NOTE(review): casting mem_event_notify(struct domain *) to
+     * void(*)(unsigned long) and calling through the mismatched type is
+     * technically undefined behaviour in ISO C, though a common kernel
+     * idiom -- confirm it is acceptable here. */
+    tasklet_init(&d->mem_event.tasklet,
+                 (void(*)(unsigned long))mem_event_notify,
+                 (unsigned long)d);
+
+    /* Prepare ring buffer */
+    FRONT_RING_INIT(&d->mem_event.front_ring,
+                    (mem_event_sring_t *)d->mem_event.ring_page,
+                    PAGE_SIZE);
+
+    mem_event_ring_lock_init(d);
+
+    /* Only mark enabled once everything above is in place. */
+    d->mem_event.paused = 0;
+    d->mem_event.enabled = 1;
+
+    return 0;
+
+    /* Error unwind: drop mappings in reverse order of acquisition. */
+ err_shared:
+    unmap_domain_page(d->mem_event.shared_page);
+    d->mem_event.shared_page = NULL;
+ err_ring:
+    unmap_domain_page(d->mem_event.ring_page);
+    d->mem_event.ring_page = NULL;
+ err:
+    return 1;
+}
+
+/*
+ * Disable memory-event delivery for domain @d and unmap the ring and shared
+ * pages mapped by mem_event_enable().  Always returns 0.
+ *
+ * NOTE(review): the Xen event channel allocated in mem_event_enable() and
+ * the notification tasklet are not torn down here, and there is no guard
+ * against being called when mem_event was never enabled (ring_page would be
+ * NULL) -- confirm both are handled by the callers.
+ */
+int mem_event_disable(struct domain *d)
+{
+    /* Clear flags first so no new requests are produced. */
+    d->mem_event.enabled = 0;
+    d->mem_event.paused = 0;
+
+    unmap_domain_page(d->mem_event.ring_page);
+    d->mem_event.ring_page = NULL;
+
+    unmap_domain_page(d->mem_event.shared_page);
+    d->mem_event.shared_page = NULL;
+
+    return 0;
+}
+
+/*
+ * Producer side: copy *@req into the next free request slot of @d's
+ * mem_event ring and notify the listener.
+ *
+ * The ring lock serialises producers; the actual notification is deferred
+ * to the tasklet (mem_event_notify) and scheduled after the lock is
+ * dropped.  Callers are expected to have checked for ring space via
+ * mem_event_check_ring() beforehand -- nothing here guards against
+ * overrunning a full ring.
+ */
+void mem_event_put_request(struct domain *d, mem_event_request_t *req)
+{
+    mem_event_front_ring_t *front_ring;
+    RING_IDX req_prod;
+
+    mem_event_ring_lock(d);
+
+    front_ring = &d->mem_event.front_ring;
+    req_prod = front_ring->req_prod_pvt;
+
+    /* Copy request */
+    memcpy(RING_GET_REQUEST(front_ring, req_prod), req, sizeof(*req));
+    req_prod++;
+
+    /* Update ring */
+    /* Commit the private producer index, then make it visible to the
+     * consumer (RING_PUSH_REQUESTS publishes req_prod with a barrier). */
+    front_ring->req_prod_pvt = req_prod;
+    RING_PUSH_REQUESTS(front_ring);
+
+    mem_event_ring_unlock(d);
+
+    tasklet_schedule(&d->mem_event.tasklet);
+}
+
+/*
+ * Consumer side: copy the next response off @d's mem_event ring into *@rsp
+ * and advance the consumer index.
+ *
+ * rsp_event is set to rsp_cons + 1 so the other end knows when a
+ * notification for the next response is wanted.
+ *
+ * NOTE(review): there is no check that an unconsumed response actually
+ * exists; calling this on an empty ring would read stale data -- confirm
+ * callers only invoke it after a notification.
+ */
+void mem_event_get_response(struct domain *d, mem_event_response_t *rsp)
+{
+    mem_event_front_ring_t *front_ring;
+    RING_IDX rsp_cons;
+
+    mem_event_ring_lock(d);
+
+    front_ring = &d->mem_event.front_ring;
+    rsp_cons = front_ring->rsp_cons;
+
+    /* Copy response */
+    memcpy(rsp, RING_GET_RESPONSE(front_ring, rsp_cons), sizeof(*rsp));
+    rsp_cons++;
+
+    /* Update ring */
+    front_ring->rsp_cons = rsp_cons;
+    front_ring->sring->rsp_event = rsp_cons + 1;
+
+    mem_event_ring_unlock(d);
+}
+
+/*
+ * Unpause every vcpu of @d that was previously paused via
+ * mem_event_pause_vcpu(), clearing its entry in the paused_vcpus bookkeeping
+ * array so a later call is a no-op for already-resumed vcpus.
+ */
+void mem_event_unpause_vcpus(struct domain *d)
+{
+    struct vcpu *v;
+
+    for_each_vcpu(d, v)
+    {
+        if ( d->mem_event.paused_vcpus[v->vcpu_id] )
+        {
+            vcpu_unpause(v);
+            d->mem_event.paused_vcpus[v->vcpu_id] = 0;
+        }
+    }
+}
+
+/*
+ * Pause @v (a vcpu of @d) and record the fact in paused_vcpus so that
+ * mem_event_unpause_vcpus() can later resume exactly the vcpus paused here.
+ *
+ * Uses vcpu_pause_nosync(), i.e. it does not wait for the vcpu to be
+ * descheduled -- presumably because this is called for 'current' from
+ * mem_event_check_ring(); confirm no caller relies on synchronous pause.
+ *
+ * Always returns 0.
+ */
+int mem_event_pause_vcpu(struct domain *d, struct vcpu *v)
+{
+    vcpu_pause_nosync(v);
+    d->mem_event.paused_vcpus[v->vcpu_id] = 1;
+
+    return 0;
+}
+
+/*
+ * Check whether @d's mem_event ring is close to full.
+ *
+ * Returns non-zero ("ring full") when fewer than MEM_EVENT_RING_THRESHOLD
+ * request slots remain.  If the caller is a vcpu of the target domain
+ * itself, that vcpu is additionally paused (nosync) so it stops generating
+ * events until the listener drains the ring and unpauses it.
+ *
+ * Note the pause is performed while still holding the ring lock;
+ * vcpu_pause_nosync() does not block, so this does not self-deadlock.
+ */
+int mem_event_check_ring(struct domain *d)
+{
+    int free_requests;
+    int ring_full;
+
+    mem_event_ring_lock(d);
+
+    free_requests = RING_FREE_REQUESTS(&d->mem_event.front_ring);
+    ring_full = free_requests < MEM_EVENT_RING_THRESHOLD;
+
+    if ( (current->domain->domain_id == d->domain_id) && ring_full )
+        mem_event_pause_vcpu(d, current);
+
+    mem_event_ring_unlock(d);
+
+    return ring_full;
+}
+
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r 49ad2a499edb -r a1ab94c514b8 xen/include/asm-x86/mem_event.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-x86/mem_event.h   Thu Dec 17 06:27:55 2009 +0000
@@ -0,0 +1,68 @@
+/******************************************************************************
+ * include/asm-x86/mem_event.h
+ *
+ * Common interface for memory event support.
+ *
+ * Copyright (c) 2009 Citrix (R&D) Ltd. (Patrick Colp)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+
+#ifndef __MEM_EVENT_H__
+#define __MEM_EVENT_H__
+
+
+/* Printouts */
+#define MEM_EVENT_PRINTK(_f, _a...)                                      \
+    debugtrace_printk("mem_event: %s(): " _f, __func__, ##_a)
+#define MEM_EVENT_ERROR(_f, _a...)                                       \
+    printk("mem_event error: %s(): " _f, __func__, ##_a)
+#define MEM_EVENT_DEBUG(flag, _f, _a...)                                 \
+    do {                                                                  \
+        if (MEM_EVENT_DEBUG_ ## flag)                                    \
+            debugtrace_printk("mem_event debug: %s(): " _f, __func__, ##_a); \
+    } while (0)
+
+
+#define mem_event_enabled(_d) (_d)->mem_event.enabled
+
+
+/* Ring lock */
+#define mem_event_ring_lock_init(_d)  spin_lock_init(&(_d)->mem_event.ring_lock)
+#define mem_event_ring_lock(_d)       spin_lock(&(_d)->mem_event.ring_lock)
+#define mem_event_ring_unlock(_d)     spin_unlock(&(_d)->mem_event.ring_lock)
+
+
+int mem_event_enable(struct domain *d, mfn_t ring_mfn, mfn_t shared_mfn);
+int mem_event_disable(struct domain *d);
+
+int mem_event_check_ring(struct domain *d);
+void mem_event_put_request(struct domain *d, mem_event_request_t *req);
+void mem_event_get_response(struct domain *d, mem_event_response_t *rsp);
+void mem_event_unpause_vcpus(struct domain *d);
+
+
+#endif /* __MEM_EVENT_H__ */
+
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

[Prev in Thread] Current Thread [Next in Thread]
  • [Xen-changelog] [xen-unstable] Core support for memory events., Xen patchbot-unstable <=