On Tue, Jul 31, 2007 at 02:30:43PM -0600, Alex Williamson wrote:
> The 32bit restriction on this one is unfortunate, especially if we
> want to also apply this to the common xencomm code. IIRC, ppc supports
> 32bit guests. Maybe we need to keep a page allocation interface for
> 32bit and a smaller granularity allocation for 64bit? I think a ~100GB
> limit would be more than acceptable for a 32bit guest. Thanks,
Here is the patch for the common code.
The "if (sizeof(*desc) > sizeof(void*))" check is somewhat ugly, but it should work.
I have only compile-tested it; the patch needs review and testing by a PPC
developer before it is committed. A small userspace sketch of the alignment
argument follows, and a sketch of the new page-count calculation is appended
after the patch.
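
To illustrate the check: a minimal standalone sketch (not part of the patch) of
when the fixed xencomm_desc header can cross a page boundary. It assumes 4KB
pages, kmalloc() alignment of only sizeof(void*), and the header layout of the
public Xen xencomm interface (32-bit magic plus 32-bit nr_addrs, followed by a
uint64_t address[] array); the helper name is made up for illustration.

/*
 * Standalone illustration (not from the patch): when can the fixed
 * xencomm_desc header cross a page boundary?  kmalloc() is only assumed
 * to return a pointer aligned to sizeof(void *), so the worst case is
 * the last such slot in a page.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

struct xencomm_desc {                   /* layout as in the public Xen header */
        uint32_t magic;
        uint32_t nr_addrs;
        uint64_t address[];
};

/* Worst case: the allocator returns the last 'align'-aligned offset in a page. */
static int header_can_cross_page(unsigned long align)
{
        unsigned long worst_offset = PAGE_SIZE - align;

        return worst_offset + sizeof(struct xencomm_desc) > PAGE_SIZE;
}

int main(void)
{
        /* prints 1: 32-bit void * -> fall back to the page allocator */
        printf("align 4: can cross page = %d\n", header_can_cross_page(4));
        /* prints 0: 64-bit void * -> kmalloc() is safe */
        printf("align 8: can cross page = %d\n", header_can_cross_page(8));
        return 0;
}
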
# HG changeset patch
# User yamahata@xxxxxxxxxxxxx
# Date 1185948239 -32400
# Node ID 99bc011645da5b4a10bc5aa48f806f2993313a5e
# Parent 14380a7179e6a0d6759454132fa1f2135bd3b8e3
remove xencomm page size limit.
Currently xencomm has a page size limit, so a domain with a large amount of
memory (e.g. ~100GB) can't be created.
Now that the Xen side of xencomm accepts a struct xencomm_desc whose address
array crosses a page boundary, it is no longer necessary to allocate a single
page just to avoid crossing one. We can allocate exactly the amount of memory
needed.
Note that struct xencomm_desc itself must not cross a page boundary, and the
slab allocator only guarantees sizeof(void*) alignment. Where
sizeof(*desc) > sizeof(void*), e.g. in a 32-bit environment, a pointer
returned by the slab allocator does not guarantee that struct xencomm_desc
stays within one page, so we fall back to the page allocator.
PATCHNAME: remove_xencomm_page_size_limit_common_code
Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
diff -r 14380a7179e6 -r 99bc011645da drivers/xen/core/xencomm.c
--- a/drivers/xen/core/xencomm.c Mon Jul 30 11:38:32 2007 +0900
+++ b/drivers/xen/core/xencomm.c Wed Aug 01 15:03:59 2007 +0900
@@ -65,25 +65,54 @@ static int xencomm_init(struct xencomm_d
return 0;
}
-/* XXX use slab allocator */
-static struct xencomm_desc *xencomm_alloc(gfp_t gfp_mask)
-{
- struct xencomm_desc *desc;
-
- desc = (struct xencomm_desc *)__get_free_page(gfp_mask);
- if (desc == NULL)
- return NULL;
-
- desc->nr_addrs = (PAGE_SIZE - sizeof(struct xencomm_desc)) /
+static struct xencomm_desc *xencomm_alloc(gfp_t gfp_mask,
+ void *buffer, unsigned long bytes)
+{
+ struct xencomm_desc *desc;
+ unsigned long buffer_ulong = (unsigned long)buffer;
+ unsigned long start = buffer_ulong & PAGE_MASK;
+ unsigned long end = (buffer_ulong + bytes) | ~PAGE_MASK;
+ unsigned long nr_addrs = (end - start + 1) >> PAGE_SHIFT;
+ unsigned long size = sizeof(*desc) +
+ sizeof(desc->address[0]) * nr_addrs;
+
+ /*
+ * slab allocator returns at least sizeof(void*) aligned pointer.
+ * When sizeof(*desc) > sizeof(void*), struct xencomm_desc might
+ * cross page boundary.
+ */
+ if (sizeof(*desc) > sizeof(void*)) {
+ unsigned long order = get_order(size);
+ desc = (struct xencomm_desc *)__get_free_pages(gfp_mask,
+ order);
+ if (desc == NULL)
+ return NULL;
+
+ desc->nr_addrs =
+ ((PAGE_SIZE << order) - sizeof(struct xencomm_desc)) /
sizeof(*desc->address);
-
+ } else {
+ desc = kmalloc(size, gfp_mask);
+ if (desc == NULL)
+ return NULL;
+
+ desc->nr_addrs = nr_addrs;
+ }
return desc;
}
void xencomm_free(void *desc)
{
- if (desc && !((ulong)desc & XENCOMM_INLINE_FLAG))
- free_page((unsigned long)__va(desc));
+ if (desc && !((ulong)desc & XENCOMM_INLINE_FLAG)) {
+ struct xencomm_desc *desc__ = (struct xencomm_desc*)desc;
+ if (sizeof(*desc__) > sizeof(void*)) {
+ unsigned long size = sizeof(*desc__) +
+ sizeof(desc__->address[0]) * desc__->nr_addrs;
+ unsigned long order = get_order(size);
+ free_pages((unsigned long)__va(desc), order);
+ } else
+ kfree(__va(desc));
+ }
}
static int xencomm_create(void *buffer, unsigned long bytes, struct xencomm_desc **ret, gfp_t gfp_mask)
@@ -102,7 +131,7 @@ static int xencomm_create(void *buffer,
BUG_ON(buffer == NULL); /* 'bytes' is non-zero */
- desc = xencomm_alloc(gfp_mask);
+ desc = xencomm_alloc(gfp_mask, buffer, bytes);
if (!desc) {
printk("%s failure\n", "xencomm_alloc");
return -ENOMEM;
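
For reference, here is a small userspace sketch of the page-count calculation
the new xencomm_alloc() performs (my own illustration, not part of the patch;
it assumes 4KB pages and defines PAGE_SHIFT/PAGE_MASK the way the kernel does):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/*
 * Same arithmetic as the patched xencomm_alloc(): round the buffer start
 * down to a page boundary, take the last byte of the page containing
 * buffer + bytes, and count the pages in between.
 */
static unsigned long nr_addrs_for(unsigned long buffer, unsigned long bytes)
{
        unsigned long start = buffer & PAGE_MASK;
        unsigned long end = (buffer + bytes) | ~PAGE_MASK;

        return (end - start + 1) >> PAGE_SHIFT;
}

int main(void)
{
        /* 8 bytes inside one page -> 1 address entry */
        printf("%lu\n", nr_addrs_for(0x10000100UL, 8));
        /* 8 bytes straddling a page boundary -> 2 address entries */
        printf("%lu\n", nr_addrs_for(0x10000ffcUL, 8));
        return 0;
}

The second case shows why even a small buffer can need two address entries
once it straddles a page boundary.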
--
yamahata