[Xen-devel] [RFC PATCH V2 13/14] Infrastructure for manipulating 3-level event channel pages



NOTE: the registration call always fails because other parts of the code are
not yet complete.
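
For reference, a guest would register the new ABI roughly as follows. This
is only a sketch: the structure and constant names are those used by this
patch (the public header definitions live elsewhere in the series), and
HYPERVISOR_event_channel_op() / set_xen_guest_handle() stand in for
whatever guest-side wrappers are available.

    /*
     * Illustrative guest-side registration, not part of this patch.
     * Assumes the guest has allocated and zeroed nr_pages bitmap pages
     * for each of pending/mask, plus one L2 selector per vcpu.
     */
    static int register_3level(xen_pfn_t *pending_gfns, xen_pfn_t *mask_gfns,
                               uint32_t nr_pages, xen_pfn_t *l2sel_mfns,
                               xen_pfn_t *l2sel_offsets, uint32_t nr_vcpus)
    {
        struct evtchn_register_3level l3 = {
            .nr_pages = nr_pages,
            .nr_vcpus = nr_vcpus,
        };
        struct evtchn_register_nlevel reg = {
            .level = EVTCHN_3_LEVEL,
        };

        /* GFNs of the zeroed L3 pending/mask bitmap pages. */
        set_xen_guest_handle(l3.evtchn_pending, pending_gfns);
        set_xen_guest_handle(l3.evtchn_mask, mask_gfns);
        /* Per-vcpu frame and intra-page offset of each L2 selector. */
        set_xen_guest_handle(l3.l2sel_mfns, l2sel_mfns);
        set_xen_guest_handle(l3.l2sel_offsets, l2sel_offsets);

        set_xen_guest_handle(reg.u.l3, &l3);

        return HYPERVISOR_event_channel_op(EVTCHNOP_register_nlevel, &reg);
    }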

Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
 xen/common/event_channel.c |  287 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 287 insertions(+)
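
A note on sizing, assuming 64-bit unsigned long: the existing 2-level ABI
covers 64 * 64 = 4096 event channels, and a third level multiplies that by
another factor of 64, giving 64^3 = 262144 ports. The pending bitmap then
occupies 262144 / 8 = 32768 bytes, i.e. 8 pages of 4KiB (and the same again
for the mask array), which is presumably the motivation for the small
EVTCHN_MAX_L3_PAGES bound checked in evtchn_register_3level() below.

The per-port delivery changes are in other patches of this series. Purely
to illustrate the intended layout (L1 bit i covers L2 word i, L2 bit j
covers L3 word j), marking a port pending would look something like the
sketch below; mask handling and the pending/masked checks are omitted.

    /* Illustrative only, not part of this patch. */
    static void sketch_set_pending_l3(struct vcpu *v, unsigned int port)
    {
        struct domain *d = v->domain;

        /* L3: the pending bit itself, PAGE_SIZE * 8 bits per page. */
        set_bit(port % (PAGE_SIZE * 8),
                d->evtchn_pending[port / (PAGE_SIZE * 8)]);
        /* L2: one bit per L3 word, in this vcpu's mapped selector. */
        set_bit(port / BITS_PER_LONG, v->evtchn_pending_sel_l2);
        /* L1: one bit per L2 word, in the vcpu_info selector. */
        set_bit(port / (BITS_PER_LONG * BITS_PER_LONG),
                &vcpu_info(v, evtchn_pending_sel));

        vcpu_mark_events_pending(v);
    }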

diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index e8faf7d..54a847e 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -26,6 +26,7 @@
 #include <xen/compat.h>
 #include <xen/guest_access.h>
 #include <xen/keyhandler.h>
+#include <xen/paging.h>
 #include <asm/current.h>
 
 #include <public/xen.h>
@@ -1008,6 +1009,267 @@ out:
 }
 
 
+static long __map_l3_arrays(struct domain *d, xen_pfn_t *pending,
+                            xen_pfn_t *mask, int nr_pages)
+{
+    int rc;
+    void *mapping;
+    struct page_info *pginfo;
+    unsigned long gfn;
+    int pending_count = 0, mask_count = 0;
+
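+    /* Map each GFN in src[] writably; record each global mapping in dst[]. */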
+#define __MAP(src, dst, cnt)                                    \
+    for ( (cnt) = 0; (cnt) < nr_pages; (cnt)++ )                \
+    {                                                           \
+        rc = -EINVAL;                                           \
+        gfn = (src)[(cnt)];                                     \
+        pginfo = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);    \
+        if ( !pginfo )                                          \
+            goto err;                                           \
+        if ( !get_page_type(pginfo, PGT_writable_page) )        \
+        {                                                       \
+            put_page(pginfo);                                   \
+            goto err;                                           \
+        }                                                       \
+        mapping = __map_domain_page_global(pginfo);             \
+        if ( !mapping )                                         \
+        {                                                       \
+            put_page_and_type(pginfo);                          \
+            rc = -ENOMEM;                                       \
+            goto err;                                           \
+        }                                                       \
+        (dst)[(cnt)] = mapping;                                 \
+    }
+
+    __MAP(pending, d->evtchn_pending, pending_count)
+    __MAP(mask, d->evtchn_mask, mask_count)
+#undef __MAP
+
+    rc = 0;
+
+ err:
+    return rc;
+}
+
+static void __unmap_l3_arrays(struct domain *d)
+{
+    int i;
+    unsigned long mfn;
+
+    for ( i = 0; i < EVTCHN_MAX_L3_PAGES; i++ )
+    {
+        if ( d->evtchn_pending[i] != 0 )
+        {
+            mfn = domain_page_map_to_mfn(d->evtchn_pending[i]);
+            unmap_domain_page_global(d->evtchn_pending[i]);
+            put_page_and_type(mfn_to_page(mfn));
+            d->evtchn_pending[i] = 0;
+        }
+        if ( d->evtchn_mask[i] != 0 )
+        {
+            mfn = domain_page_map_to_mfn(d->evtchn_mask[i]);
+            unmap_domain_page_global(d->evtchn_mask[i]);
+            put_page_and_type(mfn_to_page(mfn));
+            d->evtchn_mask[i] = 0;
+        }
+    }
+}
+
+static long __map_l2_selector(struct vcpu *v, unsigned long gfn,
+                              unsigned long off)
+{
+    void *mapping;
+    int rc;
+    struct page_info *page;
+    struct domain *d = v->domain;
+
+    rc = -EINVAL;   /* common errno for the following operations */
+
+    /* Sanity check: the L2 selector has a maximum size of
+     * sizeof(unsigned long) * 8, which equals the size of the shared
+     * bitmap array of the 2-level event channel, so it must not
+     * cross a page boundary. */
+    if ( off + sizeof(unsigned long) * 8 >= PAGE_SIZE )
+        goto out;
+
+    page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
+    if ( !page )
+        goto out;
+
+    if ( !get_page_type(page, PGT_writable_page) )
+    {
+        put_page(page);
+        goto out;
+    }
+
+    /* Use a global mapping here, because we may need to map the
+     * selector for a vcpu other than the current one (v != current).
+     * The mapping is only used by v itself while the guest runs. */
+    mapping = __map_domain_page_global(page);
+
+    if ( mapping == NULL )
+    {
+        put_page_and_type(page);
+        rc = -ENOMEM;
+        goto out;
+    }
+
+    v->evtchn_pending_sel_l2 = mapping + off;
+    rc = 0;
+
+ out:
+    return rc;
+}
+
+static void __unmap_l2_selector(struct vcpu *v)
+{
+    unsigned long mfn;
+
+    if ( v->evtchn_pending_sel_l2 )
+    {
+        mfn = domain_page_map_to_mfn(v->evtchn_pending_sel_l2);
+        unmap_domain_page_global(v->evtchn_pending_sel_l2);
+        put_page_and_type(mfn_to_page(mfn));
+        v->evtchn_pending_sel_l2 = NULL;
+    }
+}
+
+static void __evtchn_unmap_all_3level(struct domain *d)
+{
+    struct vcpu *v;
+    for_each_vcpu ( d, v )
+        __unmap_l2_selector(v);
+    __unmap_l3_arrays(d);
+}
+
+static void __evtchn_setup_bitmap_l3(struct domain *d)
+{
+    struct vcpu *v;
+
+    /* Set up the 3-level bitmap: move each vcpu's existing selector
+     * down one level, then copy the pending and mask arrays. */
+    for_each_vcpu ( d, v )
+    {
+        memcpy(&v->evtchn_pending_sel_l2[0],
+               &vcpu_info(v, evtchn_pending_sel),
+               sizeof(vcpu_info(v, evtchn_pending_sel)));
+        memset(&vcpu_info(v, evtchn_pending_sel), 0,
+               sizeof(vcpu_info(v, evtchn_pending_sel)));
+        set_bit(0, &vcpu_info(v, evtchn_pending_sel));
+    }
+
+    memcpy(d->evtchn_pending[0], &shared_info(d, evtchn_pending),
+           sizeof(shared_info(d, evtchn_pending)));
+    memcpy(d->evtchn_mask[0], &shared_info(d, evtchn_mask),
+           sizeof(shared_info(d, evtchn_mask)));
+}
+
+static long evtchn_register_3level(
+    XEN_GUEST_HANDLE_PARAM(evtchn_register_3level_t) arg)
+{
+    struct domain *d = current->domain;
+    struct evtchn_register_3level r;
+    struct vcpu *v;
+    int rc = 0;
+    xen_pfn_t *evtchn_pending = NULL;
+    xen_pfn_t *evtchn_mask = NULL;
+    xen_pfn_t *l2sel_mfns = NULL;
+    xen_pfn_t *l2sel_offsets = NULL;
+
+    if ( d->evtchn_level == EVTCHN_3_LEVEL )
+    {
+        rc = -EINVAL;
+        goto out;
+    }
+
+    if ( copy_from_guest(&r, arg, 1) )
+    {
+        rc = -EFAULT;
+        goto out;
+    }
+
+    /* Every vcpu needs an L2 selector entry, so require exactly
+     * d->max_vcpus of them; bound the L3 page count as well. */
+    if ( r.nr_vcpus != d->max_vcpus ||
+         r.nr_pages > EVTCHN_MAX_L3_PAGES )
+    {
+        rc = -EINVAL;
+        goto out;
+    }
+
+    evtchn_pending = xzalloc_array(xen_pfn_t, r.nr_pages);
+    evtchn_mask = xzalloc_array(xen_pfn_t, r.nr_pages);
+    l2sel_mfns = xzalloc_array(xen_pfn_t, r.nr_vcpus);
+    l2sel_offsets = xzalloc_array(xen_pfn_t, r.nr_vcpus);
+
+    if ( !evtchn_pending || !evtchn_mask ||
+         !l2sel_mfns || !l2sel_offsets )
+    {
+        rc = -ENOMEM;
+        goto out_free;
+    }
+
+#define __COPY_ARRAY(_d, _s, _nr)                       \
+    if ( copy_from_guest((_d), (_s), (_nr)) )           \
+    {                                                   \
+        rc = -EFAULT;                                   \
+        goto out_free;                                  \
+    }
+    __COPY_ARRAY(evtchn_pending, r.evtchn_pending, r.nr_pages)
+    __COPY_ARRAY(evtchn_mask, r.evtchn_mask, r.nr_pages)
+    __COPY_ARRAY(l2sel_mfns, r.l2sel_mfns, r.nr_vcpus)
+    __COPY_ARRAY(l2sel_offsets, r.l2sel_offsets, r.nr_vcpus)
+#undef __COPY_ARRAY
+
+    rc = __map_l3_arrays(d, evtchn_pending, evtchn_mask, r.nr_pages);
+    if ( rc )
+    {
+        /* Undo any mappings set up before the failure. */
+        __evtchn_unmap_all_3level(d);
+        goto out_free;
+    }
+
+    for_each_vcpu ( d, v )
+    {
+        if ( (rc = __map_l2_selector(v, l2sel_mfns[v->vcpu_id],
+                                     l2sel_offsets[v->vcpu_id])) )
+        {
+            __evtchn_unmap_all_3level(d);
+            goto out_free;
+        }
+    }
+
+    __evtchn_setup_bitmap_l3(d);
+
+    d->evtchn_level = EVTCHN_3_LEVEL;
+
+ out_free:
+    xfree(evtchn_pending);
+    xfree(evtchn_mask);
+    xfree(l2sel_mfns);
+    xfree(l2sel_offsets);
+ out:
+    return rc;
+}
+
+static long evtchn_register_nlevel(struct evtchn_register_nlevel *reg)
+{
+    struct domain *d = current->domain;
+    int rc;
+
+    spin_lock(&d->event_lock);
+
+    switch ( reg->level )
+    {
+    case EVTCHN_3_LEVEL:
+        rc = evtchn_register_3level(reg->u.l3);
+        break;
+    default:
+        rc = -EINVAL;
+    }
+
+    spin_unlock(&d->event_lock);
+
+    return rc;
+}
+
 long do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
 {
     long rc;
@@ -1116,6 +1378,18 @@ long do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         break;
     }
 
+    case EVTCHNOP_register_nlevel: {
+        struct evtchn_register_nlevel reg;
+        if ( copy_from_guest(&reg, arg, 1) != 0 )
+            return -EFAULT;
+        rc = evtchn_register_nlevel(&reg);
+
+        /* XXX: fail unconditionally until the rest of the series is in place. */
+        rc = -EINVAL;
+
+        break;
+    }
+
     default:
         rc = -ENOSYS;
         break;
@@ -1245,6 +1519,17 @@ int evtchn_init(struct domain *d)
     return 0;
 }
 
+static void evtchn_unmap_nlevel(struct domain *d)
+{
+    switch ( d->evtchn_level )
+    {
+    case EVTCHN_3_LEVEL:
+        __evtchn_unmap_all_3level(d);
+        break;
+    default:
+        break;
+    }
+}
 
 void evtchn_destroy(struct domain *d)
 {
@@ -1273,6 +1558,8 @@ void evtchn_destroy(struct domain *d)
 
     clear_global_virq_handlers(d);
 
+    evtchn_unmap_nlevel(d);
+
     free_xenheap_page(d->evtchn);
 }
 
-- 
1.7.10.4

