To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH 7/7] xencomm take 3: linux side remove xencomm page size limit
From: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
Date: Tue, 14 Aug 2007 18:50:23 +0900
Cc: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>, xen-ia64-devel@xxxxxxxxxxxxxxxxxxx, xen-ppc-devel@xxxxxxxxxxxxxxxxxxx
Delivery-date: Tue, 14 Aug 2007 02:54:06 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Mutt/1.4.2.1i
# HG changeset patch
# User yamahata@xxxxxxxxxxxxx
# Date 1185948239 -32400
# Node ID d2884d84d3fd5c52a6699eb7b98ffaf3b8f7a422
# Parent  7b88b56c310b11abe510cf32fc5095bc98979168
[linux, xencomm] remove xencomm page size limit.
Currently xencomm has a page size limit, so a domain with a large amount
of memory (e.g. 100GB or more) can't be created.

Now that the Xen side of xencomm accepts a struct xencomm_desc whose
address array crosses a page boundary, it is no longer necessary to
allocate a whole page just to avoid crossing one.  We can allocate exactly
the amount of memory that is needed.
Note that struct xencomm_desc itself still must not cross a page boundary,
and the slab allocator only guarantees sizeof(void*)-aligned pointers.
Where sizeof(*desc) > sizeof(void*), e.g. in a 32 bit environment, a
pointer returned by the slab allocator does not guarantee that
struct xencomm_desc stays within a single page, so we fall back to the
page allocator.
PATCHNAME: remove_xencomm_page_size_limit_common_code

Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
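
For illustration only (not part of the patch): a self-contained sketch of
the size arithmetic the new xencomm_alloc() below performs.  The 16KB page
size, the 8-byte descriptor header, and the sample buffer address are
assumptions made for this example; the authoritative computation is the
one in the hunk that follows.

#include <stdio.h>

#define PAGE_SHIFT	14UL			/* assume 16KB pages (ia64) */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long buffer = 0x100003000UL;	/* hypothetical buffer address */
	unsigned long bytes  = 200UL << 20;	/* 200MB payload */

	/* Same page-count computation as the patched xencomm_alloc(). */
	unsigned long start    = buffer & PAGE_MASK;
	unsigned long end      = (buffer + bytes) | ~PAGE_MASK;
	unsigned long nr_addrs = (end - start + 1) >> PAGE_SHIFT;

	/* Assumes an 8-byte struct xencomm_desc header plus one 8-byte
	 * address[] entry per page spanned by the buffer. */
	unsigned long size = 8 + 8 * nr_addrs;

	/* With the sizes assumed here, the old single-page descriptor
	 * capped nr_addrs at (PAGE_SIZE - 8) / 8 = 2047 entries, i.e.
	 * roughly 32MB of buffer; the new code just allocates 'size'
	 * bytes instead. */
	printf("nr_addrs = %lu, descriptor size = %lu bytes\n",
	       nr_addrs, size);
	return 0;
}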

diff -r 7b88b56c310b -r d2884d84d3fd drivers/xen/core/xencomm.c
--- a/drivers/xen/core/xencomm.c        Tue Aug 07 16:54:04 2007 +0900
+++ b/drivers/xen/core/xencomm.c        Wed Aug 01 15:03:59 2007 +0900
@@ -68,25 +68,54 @@ static int xencomm_init(struct xencomm_d
        return 0;
 }
 
-/* XXX use slab allocator */
-static struct xencomm_desc *xencomm_alloc(gfp_t gfp_mask)
-{
-       struct xencomm_desc *desc;
-
-       desc = (struct xencomm_desc *)__get_free_page(gfp_mask);
-       if (desc == NULL)
-               return NULL;
-
-       desc->nr_addrs = (PAGE_SIZE - sizeof(struct xencomm_desc)) /
+static struct xencomm_desc *xencomm_alloc(gfp_t gfp_mask,
+                                         void *buffer, unsigned long bytes)
+{
+       struct xencomm_desc *desc;
+       unsigned long buffer_ulong = (unsigned long)buffer;
+       unsigned long start = buffer_ulong & PAGE_MASK;
+       unsigned long end = (buffer_ulong + bytes) | ~PAGE_MASK;
+       unsigned long nr_addrs = (end - start + 1) >> PAGE_SHIFT;
+       unsigned long size = sizeof(*desc) +
+               sizeof(desc->address[0]) * nr_addrs;
+
+       /*
+        * slab allocator returns at least sizeof(void*) aligned pointer.
+        * When sizeof(*desc) > sizeof(void*), struct xencomm_desc might
+        * cross page boundary.
+        */
+       if (sizeof(*desc) > sizeof(void*)) {
+               unsigned long order = get_order(size);
+               desc = (struct xencomm_desc *)__get_free_pages(gfp_mask,
+                                                              order);
+               if (desc == NULL)
+                       return NULL;
+
+               desc->nr_addrs =
+                       ((PAGE_SIZE << order) - sizeof(struct xencomm_desc)) /
                        sizeof(*desc->address);
-
+       } else {
+               desc = kmalloc(size, gfp_mask);
+               if (desc == NULL)
+                       return NULL;
+
+               desc->nr_addrs = nr_addrs;
+       }
        return desc;
 }
 
 void xencomm_free(struct xencomm_handle *desc)
 {
-       if (desc && !((ulong)desc & XENCOMM_INLINE_FLAG))
-               free_page((unsigned long)__va(desc));
+       if (desc && !((ulong)desc & XENCOMM_INLINE_FLAG)) {
+               struct xencomm_desc *desc__ = (struct xencomm_desc*)desc;
+               if (sizeof(*desc__) > sizeof(void*)) {
+                       unsigned long size = sizeof(*desc__) +
+                               sizeof(desc__->address[0]) * desc__->nr_addrs;
+                       unsigned long order = get_order(size);
+                       free_pages((unsigned long)__va(desc), order);
+               } else
+                       kfree(__va(desc));
+       }
 }
 
 static int xencomm_create(void *buffer, unsigned long bytes, struct xencomm_desc **ret, gfp_t gfp_mask)
@@ -105,7 +134,7 @@ static int xencomm_create(void *buffer, 
 
        BUG_ON(buffer == NULL); /* 'bytes' is non-zero */
 
-       desc = xencomm_alloc(gfp_mask);
+       desc = xencomm_alloc(gfp_mask, buffer, bytes);
        if (!desc) {
                printk("%s failure\n", "xencomm_alloc");
                return -ENOMEM;

Attachment: 159_d2884d84d3fd_remove_xencomm_page_size_limit_common_code.patch
Description: Text Data

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel