[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [RFC PATCH V5 13/15] evtchn: infrastructure to manipulate 3-level event channel pages



Introduce evtchn_{,un}map_l3_bitmaps, evtchn_{,un}map_l2_selector for 3-level
event channel ABI.

Introduce evtchn_unregister_extended, called from the domain teardown path,
to clean up any extended event channel ABI state.

Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
 xen/common/event_channel.c     |   72 +++++++++++++++++++++
 xen/common/evtchn_bitmap_abi.c |  135 ++++++++++++++++++++++++++++++++++++++++
 xen/include/xen/event.h        |    7 +++
 xen/include/xen/sched.h        |    3 +
 4 files changed, 217 insertions(+)

diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 152b77a..d5b2e37 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -946,6 +946,53 @@ out:
 }
 
 
+static long evtchn_register_3level(evtchn_register_3level_t *arg)
+{
+    struct domain *d = current->domain;
+    int rc;
+
+    /*
+     * This domain must be in one of the two states:
+     *  a) it has no active extended ABI in use and tries to register
+     *     L3 bitmaps
+     *  b) it has activated 3-level ABI and tries to register L2
+     *     selector
+     */
+    if ( !((d->evtchn_extended  == EVTCHN_EXTENDED_NONE &&
+            arg->cmd == REGISTER_BITMAPS) ||
+           (d->evtchn_extended == EVTCHN_EXTENDED_L3 &&
+            arg->cmd == REGISTER_L2_SELECTOR)) )
+    {
+        rc = -EINVAL;
+        goto out;
+    }
+
+    switch ( arg->cmd )
+    {
+    case REGISTER_BITMAPS:
+        rc = evtchn_map_l3_bitmaps(d, arg);
+        break;
+    case REGISTER_L2_SELECTOR: {
+        int vcpu_id = arg->u.l2_selector.cpu_id;
+        struct vcpu *v;
+        if ( vcpu_id >= d->max_vcpus )
+            rc = -EINVAL;
+        else
+        {
+            v = d->vcpu[vcpu_id];
+            rc = evtchn_map_l2_selector(v, arg);
+        }
+        break;
+    }
+    default:
+        rc = -EINVAL;
+    }
+
+ out:
+    return rc;
+}
+
+
 long do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
 {
     long rc;
@@ -1063,6 +1110,14 @@ long do_event_channel_op(int cmd, 
XEN_GUEST_HANDLE_PARAM(void) arg)
         break;
     }
 
+    case EVTCHNOP_register_3level: {
+        struct evtchn_register_3level reg;
+        if ( copy_from_guest(&reg, arg, 1) != 0 )
+            return -EFAULT;
+        rc = evtchn_register_3level(&reg);
+        break;
+    }
+
     default:
         rc = -ENOSYS;
         break;
@@ -1202,6 +1257,21 @@ int evtchn_init(struct domain *d)
     return 0;
 }
 
+/* Clean up all extended event channel ABI mappings */
+static void evtchn_unregister_extended(struct domain *d)
+{
+    switch ( d->evtchn_extended )
+    {
+    case EVTCHN_EXTENDED_NONE:
+        /* Nothing to do */
+        break;
+    case EVTCHN_EXTENDED_L3:
+        evtchn_unmap_all_3level(d);
+        break;
+    default:
+        BUG();
+    }
+}
 
 void evtchn_destroy(struct domain *d)
 {
@@ -1239,6 +1309,8 @@ void evtchn_destroy(struct domain *d)
     spin_unlock(&d->event_lock);
 
     clear_global_virq_handlers(d);
+
+    evtchn_unregister_extended(d);
 }
 
 
diff --git a/xen/common/evtchn_bitmap_abi.c b/xen/common/evtchn_bitmap_abi.c
index 180a4bc..e358691 100644
--- a/xen/common/evtchn_bitmap_abi.c
+++ b/xen/common/evtchn_bitmap_abi.c
@@ -21,6 +21,141 @@
 #include <xen/errno.h>
 #include <xen/sched.h>
 #include <xen/event.h>
+#include <xen/guest_access.h>
+
+long evtchn_map_l3_bitmaps(struct domain *d, evtchn_register_3level_t *reg)
+{
+    int rc;
+    void *pending_mapping, *mask_mapping;
+    xen_pfn_t evtchn_pending[EVTCHN_MAX_L3_PAGES];
+    xen_pfn_t evtchn_mask[EVTCHN_MAX_L3_PAGES];
+    uint32_t nr_pages;
+
+    /* Return if we've mapped those bitmaps */
+    if ( d->evtchn_extended == EVTCHN_EXTENDED_L3 )
+        return -EBUSY;
+
+    nr_pages = reg->u.bitmaps.nr_pages;
+
+    if ( nr_pages > EVTCHN_MAX_L3_PAGES )
+    {
+        rc = -EINVAL;
+        goto out;
+    }
+
+    memset(evtchn_pending, 0, sizeof(xen_pfn_t) * EVTCHN_MAX_L3_PAGES);
+    memset(evtchn_mask, 0, sizeof(xen_pfn_t) * EVTCHN_MAX_L3_PAGES);
+
+    rc = -EFAULT; /* common error code for following operations */
+    if ( copy_from_guest(evtchn_pending, reg->u.bitmaps.evtchn_pending,
+                         nr_pages) )
+        goto out;
+    if ( copy_from_guest(evtchn_mask, reg->u.bitmaps.evtchn_mask,
+                         nr_pages) )
+        goto out;
+
+    rc = -ENOMEM;
+    pending_mapping = vmap(evtchn_pending, nr_pages);
+    if ( !pending_mapping )
+        goto out;
+
+
+    mask_mapping = vmap(evtchn_mask, nr_pages);
+    if ( !mask_mapping )
+    {
+        vunmap(pending_mapping);
+        goto out;
+    }
+
+    d->evtchn_pending = pending_mapping;
+    d->evtchn_mask = mask_mapping;
+
+    evtchn_set_abi(d, EVTCHN_EXTENDED_L3);
+
+    rc = 0;
+ out:
+    return rc;
+}
+
+void evtchn_unmap_l3_bitmaps(struct domain *d)
+{
+    if ( d->evtchn_pending )
+    {
+        vunmap(d->evtchn_pending);
+        d->evtchn_pending = NULL;
+    }
+
+    if ( d->evtchn_mask )
+    {
+        vunmap(d->evtchn_mask);
+        d->evtchn_mask = NULL;
+    }
+
+    evtchn_set_abi(d, EVTCHN_EXTENDED_NONE);
+}
+
+long evtchn_map_l2_selector(struct vcpu *v, evtchn_register_3level_t *reg)
+{
+    int rc;
+    void *mapping;
+    xen_pfn_t mfn = 0;
+    xen_pfn_t offset = 0;
+
+    mfn = reg->u.l2_selector.mfn;
+    offset = reg->u.l2_selector.offset;
+
+    /* Already mapped? */
+    if ( v->evtchn_pending_sel_l2 )
+           return -EBUSY;
+
+    /* must within one page */
+    if ( offset + sizeof(xen_ulong_t)*sizeof(xen_ulong_t)*8 > PAGE_SIZE )
+    {
+        rc = -EINVAL;
+        goto out;
+    }
+
+    mapping = vmap(&mfn, 1);
+
+    if ( mapping == NULL )
+    {
+        rc = -ENOMEM;
+        goto out;
+    }
+
+    v->evtchn_pending_sel_l2 = mapping + offset;
+
+    memcpy(&v->evtchn_pending_sel_l2[0],
+           &vcpu_info(v, evtchn_pending_sel),
+           sizeof(vcpu_info(v, evtchn_pending_sel)));
+    memset(&vcpu_info(v, evtchn_pending_sel), 0,
+           sizeof(vcpu_info(v, evtchn_pending_sel)));
+    set_bit(0, &vcpu_info(v, evtchn_pending_sel));
+
+    rc = 0;
+
+ out:
+    return rc;
+}
+
+void evtchn_unmap_l2_selector(struct vcpu *v)
+{
+    if ( v->evtchn_pending_sel_l2 )
+    {
+        unsigned long addr =
+            (unsigned long)(v->evtchn_pending_sel_l2) & PAGE_MASK;
+        vunmap((void *)addr);
+        v->evtchn_pending_sel_l2 = NULL;
+    }
+}
+
+void evtchn_unmap_all_3level(struct domain *d)
+{
+    struct vcpu *v;
+    for_each_vcpu ( d, v )
+        evtchn_unmap_l2_selector(v);
+    evtchn_unmap_l3_bitmaps(d);
+}
 
 int evtchn_bitmap_is_pending(struct domain *d, int port)
 {
diff --git a/xen/include/xen/event.h b/xen/include/xen/event.h
index 2387461..182546c 100644
--- a/xen/include/xen/event.h
+++ b/xen/include/xen/event.h
@@ -145,6 +145,13 @@ void evtchn_bitmap_set_pending(struct vcpu *v, int port);
 void evtchn_bitmap_clear_pending(struct domain *d, int port);
 int evtchn_bitmap_unmask(unsigned int port);
 
+/* Functions used to manipulate 3-level event channel pages */
+long evtchn_map_l3_bitmaps(struct domain *d, evtchn_register_3level_t *reg);
+void evtchn_unmap_l3_bitmaps(struct domain *d);
+long evtchn_map_l2_selector(struct vcpu *v, evtchn_register_3level_t *reg);
+void evtchn_unmap_l2_selector(struct vcpu *v);
+void evtchn_unmap_all_3level(struct domain *d);
+
 /* A bitmap of supported extended event channel ABIs */
 extern uint64_t extended_event_channel;
 
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index a4d9df7..8e330bf 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -109,6 +109,9 @@ struct vcpu
 
     struct domain   *domain;
 
+    /* For 3-level event channel ABI */
+    xen_ulong_t     *evtchn_pending_sel_l2;
+
     struct vcpu     *next_in_list;
 
     s_time_t         periodic_period;
-- 
1.7.10.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.