[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 6/8] evtchn: alter internal object handling scheme



From: David Vrabel <david.vrabel@xxxxxxxxxx>

Originally, evtchn objects were stored directly in buckets. This patch
adds another layer called a group: struct domain holds an array of
pointers to evtchn groups, and each group holds pointers to buckets.

With this change, each domain can accommodate more struct evtchn
objects in a space-efficient way.

Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>

Allocate the array of group pointers, shrinking the size of struct
domain.

Calculate the grouping and bucket parameters at compile time to achieve
optimum packing into PAGE_SIZE memory allocations.

Signed-off-by: Malcolm Crossley <malcolm.crossley@xxxxxxxxxx>
Signed-off-by: David Vrabel <david.vrabel@xxxxxxxxxx>
---
 xen/common/event_channel.c |   29 ++++++++++++++++++++++++-----
 xen/include/xen/event.h    |   12 ++++++++++--
 xen/include/xen/sched.h    |   22 ++++++++++++++++++----
 3 files changed, 52 insertions(+), 11 deletions(-)

diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 126cf84..67dcdbc 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -124,6 +124,7 @@ static int virq_is_global(uint32_t virq)
 static int get_free_port(struct domain *d)
 {
     struct evtchn *chn;
+    struct evtchn **grp;
     int            port;
     int            i, j;
 
@@ -137,6 +138,15 @@ static int get_free_port(struct domain *d)
     if ( port == d->max_evtchns )
         return -ENOSPC;
 
+    if ( unlikely(group_from_port(d, port)  == NULL ) )
+    {
+        grp = xzalloc_array(struct evtchn *, BUCKETS_PER_GROUP);
+        if ( unlikely(grp == NULL) )
+            return -ENOMEM;
+        else
+            group_from_port(d, port) = grp;
+    }
+
     chn = xzalloc_array(struct evtchn, EVTCHNS_PER_BUCKET);
     if ( unlikely(chn == NULL) )
         return -ENOMEM;
@@ -1163,8 +1173,7 @@ int evtchn_init(struct domain *d)
     /* Default to N-level ABI. */
     evtchn_2l_init(d);
 
-    BUILD_BUG_ON(sizeof(struct evtchn *) * NR_EVTCHN_BUCKETS > PAGE_SIZE);
-    d->evtchn = xzalloc_array(struct evtchn *, NR_EVTCHN_BUCKETS);
+    d->evtchn = xzalloc_array(struct evtchn **, NR_EVTCHN_GROUPS);
     if ( d->evtchn == NULL )
         return -ENOMEM;
 
@@ -1192,7 +1201,7 @@ int evtchn_init(struct domain *d)
 
 void evtchn_destroy(struct domain *d)
 {
-    int i;
+    unsigned i, j;
 
     /* After this barrier no new event-channel allocations can occur. */
     BUG_ON(!d->is_dying);
@@ -1207,9 +1216,19 @@ void evtchn_destroy(struct domain *d)
 
     /* Free all event-channel buckets. */
     spin_lock(&d->event_lock);
-    for ( i = 0; i < NR_EVTCHN_BUCKETS; i++ )
+    for ( i = 0; i < NR_EVTCHN_GROUPS; i++ )
     {
-        xsm_free_security_evtchn(d->evtchn[i]);
+        if ( d->evtchn[i] == NULL )
+            continue;
+
+        for ( j = 0; j < BUCKETS_PER_GROUP; j++ )
+        {
+            if ( d->evtchn[i][j] == NULL )
+                continue;
+            xsm_free_security_evtchn(d->evtchn[i][j]);
+            xfree(d->evtchn[i][j]);
+            d->evtchn[i][j] = NULL;
+        }
         xfree(d->evtchn[i]);
         d->evtchn[i] = NULL;
     }
diff --git a/xen/include/xen/event.h b/xen/include/xen/event.h
index a49697c..a795ae6 100644
--- a/xen/include/xen/event.h
+++ b/xen/include/xen/event.h
@@ -69,11 +69,19 @@ int guest_enabled_event(struct vcpu *v, uint32_t virq);
 /* Notify remote end of a Xen-attached event channel.*/
 void notify_via_xen_event_channel(struct domain *ld, int lport);
 
-/* Internal event channel object accessors */
+/*
+ * Internal event channel object storage:
+ * Objects are organized in two level scheme: group and bucket
+ * A group consists of several buckets, a bucket is an array of struct evtchn
+ */
+#define group_from_port(d,p) \
+    ((d)->evtchn[(p)/EVTCHNS_PER_GROUP])
+/* User should make sure group is not NULL */
 #define bucket_from_port(d,p) \
-    ((d)->evtchn[(p)/EVTCHNS_PER_BUCKET])
+    ((group_from_port(d,p))[((p)%EVTCHNS_PER_GROUP)/EVTCHNS_PER_BUCKET])
 #define port_is_valid(d,p)    \
     (((p) >= 0) && ((p) < d->max_evtchns) && \
+    (group_from_port(d,p) != NULL) && \
      (bucket_from_port(d,p) != NULL))
 #define evtchn_from_port(d,p) \
     (&(bucket_from_port(d,p))[(p)&(EVTCHNS_PER_BUCKET-1)])
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 3070555..f95cf99 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -50,9 +50,23 @@ extern struct domain *dom0;
 #else
 #define BITS_PER_EVTCHN_WORD(d) (has_32bit_shinfo(d) ? 32 : BITS_PER_XEN_ULONG)
 #endif
-#define MAX_EVTCHNS(d) (BITS_PER_EVTCHN_WORD(d) * BITS_PER_EVTCHN_WORD(d))
-#define EVTCHNS_PER_BUCKET 128
-#define NR_EVTCHN_BUCKETS  (NR_EVENT_CHANNELS / EVTCHNS_PER_BUCKET)
+
+#define BUCKETS_PER_GROUP  (PAGE_SIZE/sizeof(struct evtchn *))
+/* Round size of struct evtchn up to power of 2 size */
+#define b2(x)   (   (x) | (   (x) >> 1) )
+#define b4(x)   ( b2(x) | ( b2(x) >> 2) )
+#define b8(x)   ( b4(x) | ( b4(x) >> 4) )
+#define b16(x)  ( b8(x) | ( b8(x) >> 8) )
+#define b32(x)  (b16(x) | (b16(x) >>16) )
+#define next_power_of_2(x)      (b32(x-1) + 1)
+
+/* Maximum number of event channels for any ABI. */
+#define MAX_NR_EVTCHNS (max_t(unsigned, NR_EVENT_CHANNELS,  \
+                              1 << EVTCHN_FIFO_LINK_BITS))
+
+#define EVTCHNS_PER_BUCKET (PAGE_SIZE / next_power_of_2(sizeof(struct evtchn)))
+#define EVTCHNS_PER_GROUP  (BUCKETS_PER_GROUP * EVTCHNS_PER_BUCKET)
+#define NR_EVTCHN_GROUPS   DIV_ROUND_UP(MAX_NR_EVTCHNS, EVTCHNS_PER_GROUP)
 
 struct evtchn
 {
@@ -272,7 +286,7 @@ struct domain
     spinlock_t       rangesets_lock;
 
     /* Event channel information. */
-    struct evtchn  **evtchn;
+    struct evtchn ***evtchn;
     unsigned         max_evtchns;
     spinlock_t       event_lock;
     const struct evtchn_port_ops *evtchn_port_ops;
-- 
1.7.2.5


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.