[PATCH] xenbus: avoid stack overflow warning



The __xenbus_map_ring() function has two large arrays, 'map' and
'unmap', on its stack. When clang decides to inline it into its
caller, xenbus_map_ring_valloc_hvm(), the total stack usage exceeds
the warning limit for the stack frame size on 32-bit architectures:

drivers/xen/xenbus/xenbus_client.c:592:12: error: stack frame size of 1104 bytes in function 'xenbus_map_ring_valloc_hvm' [-Werror,-Wframe-larger-than=]

As far as I can tell, other compilers don't inline it here, so they
don't emit the warning, but the combined stack usage of the two frames
is the same. It would be possible for both arrays to share the same
stack location, but the compiler cannot prove that this is safe,
because both are passed to external functions that may keep using
them until they go out of scope.

Move the two arrays into separate block scopes to limit their
lifetimes, which lets them share stack space and keeps the total
stack usage down regardless of the inlining decision.
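
To illustrate the idea outside of the xenbus code, here is a minimal
standalone sketch (with hypothetical names, not the actual driver
code) of why confining each array to its own block lets the compiler
reuse the stack slot:

    /*
     * Hypothetical reduction, not the xenbus code: consume() stands in
     * for an external call that the compiler cannot see through.
     */
    #include <stddef.h>
    #include <string.h>

    void consume(void *buf, size_t len);    /* assumed external function */

    void one_scope(void)
    {
            char a[512], b[512];    /* both live to the end: ~1 KiB of stack */

            memset(a, 0, sizeof(a));
            consume(a, sizeof(a));
            memset(b, 0, sizeof(b));
            consume(b, sizeof(b));
    }

    void two_scopes(void)
    {
            {
                    char a[512];    /* only live inside this block */

                    memset(a, 0, sizeof(a));
                    consume(a, sizeof(a));
            }
            {
                    char b[512];    /* may reuse the slot that held 'a' */

                    memset(b, 0, sizeof(b));
                    consume(b, sizeof(b));
            }
    }

Whether the two slots actually overlap is still up to the compiler,
but narrowing the lifetimes at least makes the overlap legal, which
is all this patch relies on.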

Signed-off-by: Arnd Bergmann <arnd@xxxxxxxx>
---
 drivers/xen/xenbus/xenbus_client.c | 74 +++++++++++++++++-------------
 1 file changed, 41 insertions(+), 33 deletions(-)

diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 040d2a43e8e3..23ca70378e36 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -470,54 +470,62 @@ static int __xenbus_map_ring(struct xenbus_device *dev,
                             unsigned int flags,
                             bool *leaked)
 {
-       struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
-       struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
        int i, j;
        int err = GNTST_okay;
 
-       if (nr_grefs > XENBUS_MAX_RING_GRANTS)
-               return -EINVAL;
+       {
+               struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
 
-       for (i = 0; i < nr_grefs; i++) {
-               memset(&map[i], 0, sizeof(map[i]));
-               gnttab_set_map_op(&map[i], addrs[i], flags, gnt_refs[i],
-                                 dev->otherend_id);
-               handles[i] = INVALID_GRANT_HANDLE;
-       }
+               if (nr_grefs > XENBUS_MAX_RING_GRANTS)
+                       return -EINVAL;
 
-       gnttab_batch_map(map, i);
+               for (i = 0; i < nr_grefs; i++) {
+                       memset(&map[i], 0, sizeof(map[i]));
+                       gnttab_set_map_op(&map[i], addrs[i], flags,
+                                         gnt_refs[i], dev->otherend_id);
+                       handles[i] = INVALID_GRANT_HANDLE;
+               }
+
+               gnttab_batch_map(map, i);
 
-       for (i = 0; i < nr_grefs; i++) {
-               if (map[i].status != GNTST_okay) {
-                       err = map[i].status;
-                       xenbus_dev_fatal(dev, map[i].status,
+               for (i = 0; i < nr_grefs; i++) {
+                       if (map[i].status != GNTST_okay) {
+                               err = map[i].status;
+                               xenbus_dev_fatal(dev, map[i].status,
                                         "mapping in shared page %d from domain 
%d",
                                         gnt_refs[i], dev->otherend_id);
-                       goto fail;
-               } else
-                       handles[i] = map[i].handle;
+                               goto fail;
+                       } else
+                               handles[i] = map[i].handle;
+               }
        }
-
        return GNTST_okay;
 
  fail:
-       for (i = j = 0; i < nr_grefs; i++) {
-               if (handles[i] != INVALID_GRANT_HANDLE) {
-                       memset(&unmap[j], 0, sizeof(unmap[j]));
-                       gnttab_set_unmap_op(&unmap[j], (phys_addr_t)addrs[i],
-                                           GNTMAP_host_map, handles[i]);
-                       j++;
+       {
+               struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
+
+               for (i = j = 0; i < nr_grefs; i++) {
+                       if (handles[i] != INVALID_GRANT_HANDLE) {
+                               memset(&unmap[j], 0, sizeof(unmap[j]));
+                               gnttab_set_unmap_op(&unmap[j],
+                                                   (phys_addr_t)addrs[i],
+                                                   GNTMAP_host_map,
+                                                   handles[i]);
+                               j++;
+                       }
                }
-       }
 
-       if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, j))
-               BUG();
+               if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
+                                             unmap, j))
+                       BUG();
 
-       *leaked = false;
-       for (i = 0; i < j; i++) {
-               if (unmap[i].status != GNTST_okay) {
-                       *leaked = true;
-                       break;
+               *leaked = false;
+               for (i = 0; i < j; i++) {
+                       if (unmap[i].status != GNTST_okay) {
+                               *leaked = true;
+                               break;
+                       }
                }
        }
 
-- 
2.26.0