
[Xen-devel] [PATCH V2 08/10] arm/io: Use separate memory allocation for mmio handlers



The number of mmio handlers is limited to the compile-time macro
MAX_IO_HANDLER, which is 16. This is not sufficient to support
per-CPU distributor regions. Either the limit needs to be raised
to a larger value, at least CONFIG_NR_CPUS+16, or the memory for
mmio handlers needs to be allocated dynamically during domain
build.

This patch takes the dynamic allocation approach instead of static
allocation, reducing the memory footprint of 'struct domain'.

Signed-off-by: Shanker Donthineni <shankerd@xxxxxxxxxxxxxx>
---
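Not for commit: below is a minimal, standalone sketch of the allocation
scheme this patch moves to, using plain calloc()/free() in place of Xen's
xzalloc_array()/xfree() and illustrative struct fields. The point is only
that the handler table is sized from a caller-supplied count at init time
rather than from the fixed MAX_IO_HANDLER:

/* Standalone model of the dynamic mmio-handler table (not Xen code).
 * calloc/free stand in for xzalloc_array/xfree; field names are
 * illustrative. */
#include <stdio.h>
#include <stdlib.h>

struct mmio_handler { void *ops; void *priv; };   /* placeholder fields */

struct vmmio {
    int num_entries;
    int max_num_entries;
    struct mmio_handler *handlers;                /* now heap-allocated */
};

static int vmmio_init(struct vmmio *vmmio, int max_count)
{
    vmmio->num_entries = 0;
    vmmio->max_num_entries = max_count;
    vmmio->handlers = calloc(max_count, sizeof(*vmmio->handlers));
    return vmmio->handlers ? 0 : -1;              /* -ENOMEM in the patch */
}

static void vmmio_free(struct vmmio *vmmio)
{
    free(vmmio->handlers);
    vmmio->handlers = NULL;
}

int main(void)
{
    struct vmmio v;

    /* A per-CPU distributor needs roughly one extra region per CPU on
     * top of the base handlers, hence a count like CONFIG_NR_CPUS+16. */
    if ( vmmio_init(&v, 16 + 8) )
        return 1;
    printf("room for %d handlers\n", v.max_num_entries);
    vmmio_free(&v);
    return 0;
}

In the patch itself arch_domain_create() still passes MAX_IO_HANDLER,
presumably so a later patch in the series can raise the count to cover
the per-CPU distributor regions.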
 xen/arch/arm/domain.c      |  6 ++++--
 xen/arch/arm/io.c          | 14 ++++++++++++--
 xen/include/asm-arm/mmio.h |  6 ++++--
 3 files changed, 20 insertions(+), 6 deletions(-)

diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index 1365b4a..4010ff2 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -527,7 +527,7 @@ void vcpu_destroy(struct vcpu *v)
 int arch_domain_create(struct domain *d, unsigned int domcr_flags,
                        struct xen_arch_domainconfig *config)
 {
-    int rc;
+    int rc, count;
 
     d->arch.relmem = RELMEM_not_started;
 
@@ -550,7 +550,8 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
     share_xen_page_with_guest(
         virt_to_page(d->shared_info), d, XENSHARE_writable);
 
-    if ( (rc = domain_io_init(d)) != 0 )
+    count = MAX_IO_HANDLER;
+    if ( (rc = domain_io_init(d, count)) != 0 )
         goto fail;
 
     if ( (rc = p2m_alloc_table(d)) != 0 )
@@ -644,6 +645,7 @@ void arch_domain_destroy(struct domain *d)
     free_xenheap_pages(d->arch.efi_acpi_table,
                        get_order_from_bytes(d->arch.efi_acpi_len));
 #endif
+    domain_io_free(d);
 }
 
 void arch_domain_shutdown(struct domain *d)
diff --git a/xen/arch/arm/io.c b/xen/arch/arm/io.c
index 0156755..a5b2c2d 100644
--- a/xen/arch/arm/io.c
+++ b/xen/arch/arm/io.c
@@ -102,7 +102,7 @@ void register_mmio_handler(struct domain *d,
     struct vmmio *vmmio = &d->arch.vmmio;
     struct mmio_handler *handler;
 
-    BUG_ON(vmmio->num_entries >= MAX_IO_HANDLER);
+    BUG_ON(vmmio->num_entries >= vmmio->max_num_entries);
 
     spin_lock(&vmmio->lock);
 
@@ -125,14 +125,24 @@ void register_mmio_handler(struct domain *d,
     spin_unlock(&vmmio->lock);
 }
 
-int domain_io_init(struct domain *d)
+int domain_io_init(struct domain *d, int max_count)
 {
    spin_lock_init(&d->arch.vmmio.lock);
    d->arch.vmmio.num_entries = 0;
 
+   d->arch.vmmio.max_num_entries = max_count;
+   d->arch.vmmio.handlers = xzalloc_array(struct mmio_handler, max_count);
+   if ( !d->arch.vmmio.handlers )
+       return -ENOMEM;
+
    return 0;
 }
 
+void domain_io_free(struct domain *d)
+{
+    xfree(d->arch.vmmio.handlers);
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/asm-arm/mmio.h b/xen/include/asm-arm/mmio.h
index da1cc2e..276b263 100644
--- a/xen/include/asm-arm/mmio.h
+++ b/xen/include/asm-arm/mmio.h
@@ -51,15 +51,17 @@ struct mmio_handler {
 
 struct vmmio {
     int num_entries;
+    int max_num_entries;
     spinlock_t lock;
-    struct mmio_handler handlers[MAX_IO_HANDLER];
+    struct mmio_handler *handlers;
 };
 
 extern int handle_mmio(mmio_info_t *info);
 void register_mmio_handler(struct domain *d,
                            const struct mmio_handler_ops *ops,
                            paddr_t addr, paddr_t size, void *priv);
-int domain_io_init(struct domain *d);
+int domain_io_init(struct domain *d, int max_count);
+void domain_io_free(struct domain *d);
 
 #endif  /* __ASM_ARM_MMIO_H__ */
 
-- 
Qualcomm Technologies, Inc. on behalf of Qualcomm Innovation Center, Inc. 
Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum, 
a Linux Foundation Collaborative Project

