xen-devel

[Xen-devel] RE: [PATCH] fix domain reference leaks

To: Jan Beulich <JBeulich@xxxxxxxxxx>, xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] RE: [PATCH] fix domain reference leaks
From: Dan Magenheimer <dan.magenheimer@xxxxxxxxxx>
Date: Thu, 11 Feb 2010 13:41:03 -0800 (PST)
Cc:
Delivery-date: Thu, 11 Feb 2010 13:41:39 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <4B717207020000780002E766@xxxxxxxxxxxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <4B717207020000780002E766@xxxxxxxxxxxxxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
It has my blessing (though I see Keir already applied it :-)

> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@xxxxxxxxxx]
> Sent: Tuesday, February 09, 2010 6:33 AM
> To: xen-devel@xxxxxxxxxxxxxxxxxxx
> Cc: Dan Magenheimer
> Subject: [PATCH] fix domain reference leaks
> 
> Besides two unlikely/rarely hit ones in x86 code, the main offender
> was tmh_client_from_cli_id(), which didn't even have a counterpart
> (although it had a comment correctly saying that it causes d->refcnt to
> get incremented). Unfortunately(?) this required a bit of code
> restructuring (as I needed to change the code anyway, I also fixed
> a couple of missing bounds checks which would sooner or later be
> reported as security vulnerabilities), so I would hope Dan could give
> it his blessing before it gets applied.
> 
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
> Cc: Dan Magenheimer <dan.magenheimer@xxxxxxxxxx>
> 
> --- 2010-02-09.orig/xen/arch/x86/debug.c      2010-02-09 10:50:02.000000000 +0100
> +++ 2010-02-09/xen/arch/x86/debug.c   2010-02-09 09:24:58.000000000 +0100
> @@ -252,10 +252,11 @@ dbg_rw_mem(dbgva_t addr, dbgbyte_t *buf,
>          else
>              len = __copy_from_user(buf, (void *)addr, len);
>      }
> -    else
> +    else if ( dp )
>      {
> -        if ( dp && !dp->is_dying )   /* make sure guest is still there */
> +        if ( !dp->is_dying )   /* make sure guest is still there */
>              len= dbg_rw_guest_mem(addr, buf, len, dp, toaddr, pgd3);
> +        put_domain(dp);
>      }
> 
>      DBGP2("gmem:exit:len:$%d\n", len);
> --- 2010-02-09.orig/xen/arch/x86/mm.c 2010-01-06 11:22:26.000000000 +0100
> +++ 2010-02-09/xen/arch/x86/mm.c      2010-02-09 09:39:36.000000000 +0100
> @@ -3805,6 +3805,7 @@ int steal_page(
>      struct domain *d, struct page_info *page, unsigned int memflags)
>  {
>      unsigned long x, y;
> +    bool_t drop_dom_ref = 0;
> 
>      spin_lock(&d->page_alloc_lock);
> 
> @@ -3832,11 +3833,13 @@ int steal_page(
>      } while ( (y = cmpxchg(&page->count_info, x, x | 1)) != x );
> 
>      /* Unlink from original owner. */
> -    if ( !(memflags & MEMF_no_refcount) )
> -        d->tot_pages--;
> +    if ( !(memflags & MEMF_no_refcount) && !--d->tot_pages )
> +        drop_dom_ref = 1;
>      page_list_del(page, &d->page_list);
> 
>      spin_unlock(&d->page_alloc_lock);
> +    if ( unlikely(drop_dom_ref) )
> +        put_domain(d);
>      return 0;
> 
>   fail:
> --- 2010-02-09.orig/xen/common/tmem.c 2010-02-09 10:50:02.000000000 +0100
> +++ 2010-02-09/xen/common/tmem.c      2010-02-09 11:40:21.000000000 +0100
> @@ -912,14 +912,14 @@ static client_t *client_create(cli_id_t
>          return NULL;
>      }
>      memset(client,0,sizeof(client_t));
> -    if ( (client->tmh = tmh_client_init()) == NULL )
> +    if ( (client->tmh = tmh_client_init(cli_id)) == NULL )
>      {
>          printk("failed... can't allocate host-dependent part of
> client\n");
>          if ( client )
>              tmh_free_infra(client);
>          return NULL;
>      }
> -    tmh_set_client_from_id(client,cli_id);
> +    tmh_set_client_from_id(client, client->tmh, cli_id);
>      client->cli_id = cli_id;
>  #ifdef __i386__
>      client->compress = 0;
> @@ -1528,7 +1528,7 @@ static NOINLINE int do_tmem_destroy_pool
>  }
> 
>  static NOINLINE int do_tmem_new_pool(cli_id_t this_cli_id,
> -                                     uint32_t this_pool_id, uint32_t flags,
> +                                     uint32_t d_poolid, uint32_t flags,
>                                       uint64_t uuid_lo, uint64_t uuid_hi)
>  {
>      client_t *client;
> @@ -1540,19 +1540,13 @@ static NOINLINE int do_tmem_new_pool(cli
>      int specversion = (flags >> TMEM_POOL_VERSION_SHIFT)
>           & TMEM_POOL_VERSION_MASK;
>      pool_t *pool, *shpool;
> -    int s_poolid, d_poolid, first_unused_s_poolid;
> +    int s_poolid, first_unused_s_poolid;
>      int i;
> 
>      if ( this_cli_id == CLI_ID_NULL )
> -    {
> -        client = tmh_client_from_current();
>          cli_id = tmh_get_cli_id_from_current();
> -    } else {
> -        if ( (client = tmh_client_from_cli_id(this_cli_id)) == NULL)
> -            return -EPERM;
> +    else
>          cli_id = this_cli_id;
> -    }
> -    ASSERT(client != NULL);
>      printk("tmem: allocating %s-%s tmem pool for %s=%d...",
>          persistent ? "persistent" : "ephemeral" ,
>          shared ? "shared" : "private", cli_id_str, cli_id);
> @@ -1573,19 +1567,24 @@ static NOINLINE int do_tmem_new_pool(cli
>      }
>      if ( this_cli_id != CLI_ID_NULL )
>      {
> -        d_poolid = this_pool_id;
> -        if ( client->pools[d_poolid] != NULL )
> -            return -EPERM;
> -        d_poolid = this_pool_id;
> +        if ( (client = tmh_client_from_cli_id(this_cli_id)) == NULL
> +             || d_poolid >= MAX_POOLS_PER_DOMAIN
> +             || client->pools[d_poolid] != NULL )
> +            goto fail;
>      }
> -    else for ( d_poolid = 0; d_poolid < MAX_POOLS_PER_DOMAIN; d_poolid++ )
> -        if ( client->pools[d_poolid] == NULL )
> -            break;
> -    if ( d_poolid >= MAX_POOLS_PER_DOMAIN )
> +    else
>      {
> -        printk("failed... no more pool slots available for this %s\n",
> -            client_str);
> -        goto fail;
> +        client = tmh_client_from_current();
> +        ASSERT(client != NULL);
> +        for ( d_poolid = 0; d_poolid < MAX_POOLS_PER_DOMAIN; d_poolid++ )
> +            if ( client->pools[d_poolid] == NULL )
> +                break;
> +        if ( d_poolid >= MAX_POOLS_PER_DOMAIN )
> +        {
> +            printk("failed... no more pool slots available for this
> %s\n",
> +                   client_str);
> +            goto fail;
> +        }
>      }
>      if ( shared )
>      {
> @@ -1618,6 +1617,8 @@ static NOINLINE int do_tmem_new_pool(cli
>                      client->pools[d_poolid] = global_shared_pools[s_poolid];
>                      shared_pool_join(global_shared_pools[s_poolid], client);
>                      pool_free(pool);
> +                    if ( this_cli_id != CLI_ID_NULL )
> +                        tmh_client_put(client->tmh);
>                      return d_poolid;
>                  }
>              }
> @@ -1638,6 +1639,8 @@ static NOINLINE int do_tmem_new_pool(cli
>          }
>      }
>      client->pools[d_poolid] = pool;
> +    if ( this_cli_id != CLI_ID_NULL )
> +        tmh_client_put(client->tmh);
>      list_add_tail(&pool->pool_list, &global_pool_list);
>      pool->pool_id = d_poolid;
>      pool->persistent = persistent;
> @@ -1647,6 +1650,8 @@ static NOINLINE int do_tmem_new_pool(cli
> 
>  fail:
>      pool_free(pool);
> +    if ( this_cli_id != CLI_ID_NULL )
> +        tmh_client_put(client->tmh);
>      return -EPERM;
>  }
> 
> @@ -1672,6 +1677,7 @@ static int tmemc_freeze_pools(cli_id_t c
>          if ( (client = tmh_client_from_cli_id(cli_id)) == NULL)
>              return -1;
>          client_freeze(client,freeze);
> +        tmh_client_put(client->tmh);
>          printk("tmem: all pools %s for %s=%d\n",s,cli_id_str,cli_id);
>      }
>      return 0;
> @@ -1876,8 +1882,10 @@ static int tmemc_list(cli_id_t cli_id, t
>      }
>      else if ( (client = tmh_client_from_cli_id(cli_id)) == NULL)
>          return -1;
> -    else
> +    else {
>          off = tmemc_list_client(client, buf, 0, len, use_long);
> +        tmh_client_put(client->tmh);
> +    }
> 
>      return 0;
>  }
> @@ -1925,7 +1933,10 @@ static int tmemc_set_var(cli_id_t cli_id
>      else if ( (client = tmh_client_from_cli_id(cli_id)) == NULL)
>          return -1;
>      else
> -            tmemc_set_var_one(client, subop, arg1);
> +    {
> +        tmemc_set_var_one(client, subop, arg1);
> +        tmh_client_put(client->tmh);
> +    }
>      return 0;
>  }
> 
> @@ -1941,6 +1952,8 @@ static NOINLINE int tmemc_shared_pool_au
>          return 1;
>      }
>      client = tmh_client_from_cli_id(cli_id);
> +    if ( client == NULL )
> +        return -EINVAL;
>      for ( i = 0; i < MAX_GLOBAL_SHARED_POOLS; i++)
>      {
>          if ( (client->shared_auth_uuid[i][0] == uuid_lo) &&
> @@ -1949,6 +1962,7 @@ static NOINLINE int tmemc_shared_pool_au
>              if ( auth == 0 )
>                  client->shared_auth_uuid[i][0] =
>                      client->shared_auth_uuid[i][1] = -1L;
> +            tmh_client_put(client->tmh);
>              return 1;
>          }
>          if ( (auth == 1) && (client->shared_auth_uuid[i][0] == -1L) &&
> @@ -1956,11 +1970,15 @@ static NOINLINE int tmemc_shared_pool_au
>              free = i;
>      }
>      if ( auth == 0 )
> +    {
> +        tmh_client_put(client->tmh);
>          return 0;
> +    }
>      if ( auth == 1 && free == -1 )
>          return -ENOMEM;
>      client->shared_auth_uuid[free][0] = uuid_lo;
>      client->shared_auth_uuid[free][1] = uuid_hi;
> +    tmh_client_put(client->tmh);
>      return 1;
>  }
> 
> @@ -1968,10 +1986,12 @@ static NOINLINE int tmemc_save_subop(int
>                          uint32_t subop, tmem_cli_va_t buf, uint32_t arg1)
>  {
>      client_t *client = tmh_client_from_cli_id(cli_id);
> -    pool_t *pool =  (client == NULL) ? NULL : client->pools[pool_id];
> +    pool_t *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
> +                   ? NULL : client->pools[pool_id];
>      uint32_t p;
>      uint64_t *uuid;
>      pgp_t *pgp, *pgp2;
> +    int rc = -1;
> 
>      switch(subop)
>      {
> @@ -1982,45 +2002,55 @@ static NOINLINE int tmemc_save_subop(int
>              if ( client->pools[p] != NULL )
>                  break;
>          if ( p == MAX_POOLS_PER_DOMAIN )
> -            return 0;
> +        {
> +            rc = 0;
> +            break;
> +        }
>          client->was_frozen = client->frozen;
>          client->frozen = 1;
>          if ( arg1 != 0 )
>              client->live_migrating = 1;
> -        return 1;
> +        rc = 1;
> +        break;
>      case TMEMC_RESTORE_BEGIN:
> -        ASSERT(client == NULL);
> -        if ( (client = client_create(cli_id)) == NULL )
> -            return -1;
> -        return 1;
> +        if ( client == NULL && (client = client_create(cli_id)) != NULL )
> +            return 1;
> +        break;
>      case TMEMC_SAVE_GET_VERSION:
> -        return TMEM_SPEC_VERSION;
> +        rc = TMEM_SPEC_VERSION;
> +        break;
>      case TMEMC_SAVE_GET_MAXPOOLS:
> -        return MAX_POOLS_PER_DOMAIN;
> +        rc = MAX_POOLS_PER_DOMAIN;
> +        break;
>      case TMEMC_SAVE_GET_CLIENT_WEIGHT:
> -        return client->weight == -1 ? -2 : client->weight;
> +        rc = client->weight == -1 ? -2 : client->weight;
> +        break;
>      case TMEMC_SAVE_GET_CLIENT_CAP:
> -        return client->cap == -1 ? -2 : client->cap;
> +        rc = client->cap == -1 ? -2 : client->cap;
> +        break;
>      case TMEMC_SAVE_GET_CLIENT_FLAGS:
> -        return (client->compress ? TMEM_CLIENT_COMPRESS : 0 ) |
> -               (client->was_frozen ? TMEM_CLIENT_FROZEN : 0 );
> +        rc = (client->compress ? TMEM_CLIENT_COMPRESS : 0 ) |
> +             (client->was_frozen ? TMEM_CLIENT_FROZEN : 0 );
> +        break;
>      case TMEMC_SAVE_GET_POOL_FLAGS:
>           if ( pool == NULL )
> -             return -1;
> -         return (pool->persistent ? TMEM_POOL_PERSIST : 0) |
> -                (pool->shared ? TMEM_POOL_SHARED : 0) |
> -                (pool->pageshift << TMEM_POOL_PAGESIZE_SHIFT);
> +             break;
> +         rc = (pool->persistent ? TMEM_POOL_PERSIST : 0) |
> +              (pool->shared ? TMEM_POOL_SHARED : 0) |
> +              (pool->pageshift << TMEM_POOL_PAGESIZE_SHIFT);
> +        break;
>      case TMEMC_SAVE_GET_POOL_NPAGES:
>           if ( pool == NULL )
> -             return -1;
> -        return _atomic_read(pool->pgp_count);
> +             break;
> +        rc = _atomic_read(pool->pgp_count);
> +        break;
>      case TMEMC_SAVE_GET_POOL_UUID:
>           if ( pool == NULL )
> -             return -1;
> +             break;
>          uuid = (uint64_t *)buf.p;
>          *uuid++ = pool->uuid[0];
>          *uuid = pool->uuid[1];
> -        return 0;
> +        rc = 0;
>      case TMEMC_SAVE_END:
>          client->live_migrating = 0;
>          if ( !list_empty(&client->persistent_invalidated_list) )
> @@ -2028,27 +2058,34 @@ static NOINLINE int tmemc_save_subop(int
>                &client->persistent_invalidated_list, client_inv_pages)
>                  pgp_free_from_inv_list(client,pgp);
>          client->frozen = client->was_frozen;
> -        return 0;
> +        rc = 0;
>      }
> -    return -1;
> +    if ( client )
> +        tmh_client_put(client->tmh);
> +    return rc;
>  }
> 
>  static NOINLINE int tmemc_save_get_next_page(int cli_id, int pool_id,
>                          tmem_cli_va_t buf, uint32_t bufsize)
>  {
>      client_t *client = tmh_client_from_cli_id(cli_id);
> -    pool_t *pool =  (client == NULL) ? NULL : client->pools[pool_id];
> +    pool_t *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
> +                   ? NULL : client->pools[pool_id];
>      pgp_t *pgp;
>      int ret = 0;
>      struct tmem_handle *h;
>      unsigned int pagesize = 1 << (pool->pageshift+12);
> 
> -    if ( pool == NULL )
> -        return -1;
> -    if ( is_ephemeral(pool) )
> +    if ( pool == NULL || is_ephemeral(pool) )
> +    {
> +        tmh_client_put(client->tmh);
>          return -1;
> +    }
>      if ( bufsize < pagesize + sizeof(struct tmem_handle) )
> +    {
> +        tmh_client_put(client->tmh);
>          return -ENOMEM;
> +    }
> 
>      tmem_spin_lock(&pers_lists_spinlock);
>      if ( list_empty(&pool->persistent_page_list) )
> @@ -2080,6 +2117,7 @@ static NOINLINE int tmemc_save_get_next_
> 
>  out:
>      tmem_spin_unlock(&pers_lists_spinlock);
> +    tmh_client_put(client->tmh);
>      return ret;
>  }
> 
> @@ -2094,7 +2132,10 @@ static NOINLINE int tmemc_save_get_next_
>      if ( client == NULL )
>          return 0;
>      if ( bufsize < sizeof(struct tmem_handle) )
> +    {
> +        tmh_client_put(client->tmh);
>          return 0;
> +    }
>      tmem_spin_lock(&pers_lists_spinlock);
>      if ( list_empty(&client->persistent_invalidated_list) )
>          goto out;
> @@ -2121,6 +2162,7 @@ static NOINLINE int tmemc_save_get_next_
>      ret = 1;
>  out:
>      tmem_spin_unlock(&pers_lists_spinlock);
> +    tmh_client_put(client->tmh);
>      return ret;
>  }
> 
> @@ -2128,22 +2170,26 @@ static int tmemc_restore_put_page(int cl
>                        uint32_t index, tmem_cli_va_t buf, uint32_t bufsize)
>  {
>      client_t *client = tmh_client_from_cli_id(cli_id);
> -    pool_t *pool =  (client == NULL) ? NULL : client->pools[pool_id];
> +    pool_t *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
> +                   ? NULL : client->pools[pool_id];
> +    int rc = pool ? do_tmem_put(pool,oid,index,0,0,0,bufsize,buf.p) : -1;
> 
> -    if ( pool == NULL )
> -        return -1;
> -    return do_tmem_put(pool,oid,index,0,0,0,bufsize,buf.p);
> +    if ( client )
> +        tmh_client_put(client->tmh);
> +    return rc;
>  }
> 
>  static int tmemc_restore_flush_page(int cli_id, int pool_id, uint64_t oid,
>                          uint32_t index)
>  {
>      client_t *client = tmh_client_from_cli_id(cli_id);
> -    pool_t *pool =  (client == NULL) ? NULL : client->pools[pool_id];
> +    pool_t *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN)
> +                   ? NULL : client->pools[pool_id];
> +    int rc = pool ? do_tmem_flush_page(pool, oid, index) : -1;
> 
> -    if ( pool == NULL )
> -        return -1;
> -    return do_tmem_flush_page(pool, oid, index);
> +    if ( client )
> +        tmh_client_put(client->tmh);
> +    return rc;
>  }
> 
>  static NOINLINE int do_tmem_control(struct tmem_op *op)
> --- 2010-02-09.orig/xen/common/tmem_xen.c     2010-02-09 10:50:02.000000000 +0100
> +++ 2010-02-09/xen/common/tmem_xen.c  2010-02-09 10:03:16.000000000 +0100
> @@ -290,17 +290,16 @@ static void tmh_persistent_pool_page_put
> 
>  /******************  XEN-SPECIFIC CLIENT HANDLING ********************/
> 
> -EXPORT tmh_client_t *tmh_client_init(void)
> +EXPORT tmh_client_t *tmh_client_init(cli_id_t cli_id)
>  {
>      tmh_client_t *tmh;
>      char name[5];
> -    domid_t domid = current->domain->domain_id;
>      int i, shift;
> 
>      if ( (tmh = xmalloc(tmh_client_t)) == NULL )
>          return NULL;
>      for (i = 0, shift = 12; i < 4; shift -=4, i++)
> -        name[i] = (((unsigned short)domid >> shift) & 0xf) + '0';
> +        name[i] = (((unsigned short)cli_id >> shift) & 0xf) + '0';
>      name[4] = '\0';
>  #ifndef __i386__
>      tmh->persistent_pool = xmem_pool_create(name, tmh_persistent_pool_page_get,
> @@ -311,7 +310,6 @@ EXPORT tmh_client_t *tmh_client_init(voi
>          return NULL;
>      }
>  #endif
> -    tmh->domain = current->domain;
>      return tmh;
>  }
> 
> @@ -321,6 +319,7 @@ EXPORT void tmh_client_destroy(tmh_clien
>      xmem_pool_destroy(tmh->persistent_pool);
>  #endif
>      put_domain(tmh->domain);
> +    tmh->domain = NULL;
>  }
> 
>  /******************  XEN-SPECIFIC HOST INITIALIZATION ********************/
> --- 2010-02-09.orig/xen/include/xen/tmem_xen.h        2010-02-09 10:50:02.000000000 +0100
> +++ 2010-02-09/xen/include/xen/tmem_xen.h     2010-02-09 11:39:13.000000000 +0100
> @@ -43,8 +43,6 @@ extern rwlock_t tmem_rwlock;
> 
>  extern void tmh_copy_page(char *to, char*from);
>  extern int tmh_init(void);
> -extern tmh_client_t *tmh_client_init(void);
> -extern void tmh_client_destroy(tmh_client_t *);
>  #define tmh_hash hash_long
> 
>  extern void tmh_release_avail_pages_to_host(void);
> @@ -281,6 +279,9 @@ typedef domid_t cli_id_t;
>  typedef struct domain tmh_cli_ptr_t;
>  typedef struct page_info pfp_t;
> 
> +extern tmh_client_t *tmh_client_init(cli_id_t);
> +extern void tmh_client_destroy(tmh_client_t *);
> +
>  /* this appears to be unreliable when a domain is being shut down */
>  static inline struct client *tmh_client_from_cli_id(cli_id_t cli_id)
>  {
> @@ -290,6 +291,11 @@ static inline struct client *tmh_client_
>      return (struct client *)(d->tmem);
>  }
> 
> +static inline void tmh_client_put(tmh_client_t *tmh)
> +{
> +    put_domain(tmh->domain);
> +}
> +
>  static inline struct client *tmh_client_from_current(void)
>  {
>      return (struct client *)(current->domain->tmem);
> @@ -307,10 +313,12 @@ static inline tmh_cli_ptr_t *tmh_get_cli
>      return current->domain;
>  }
> 
> -static inline void tmh_set_client_from_id(struct client *client,cli_id_t cli_id)
> +static inline void tmh_set_client_from_id(struct client *client,
> +                                          tmh_client_t *tmh, cli_id_t cli_id)
>  {
>      struct domain *d = get_domain_by_id(cli_id);
>      d->tmem = client;
> +    tmh->domain = d;
>  }
> 
>  static inline bool_t tmh_current_is_privileged(void)
> 
> 

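[Editorial aside, not part of the original mail: the leak class this patch closes is the unpaired lookup/put pattern. tmh_client_from_cli_id() takes a domain reference via get_domain_by_id(), so every caller must drop that reference (in the patch, via the new tmh_client_put()) on every exit path, including early error returns. A minimal, self-contained C sketch of that invariant, using hypothetical obj_get()/obj_put() helpers rather than the real Xen functions:

    #include <assert.h>
    #include <stddef.h>

    struct obj {
        int refcnt;              /* references held by callers of obj_get() */
        int busy;                /* example payload */
    };

    static struct obj table[4];

    /* Look up an object by id; on success the caller owns one reference. */
    static struct obj *obj_get(unsigned int id)
    {
        if (id >= 4)
            return NULL;
        table[id].refcnt++;
        return &table[id];
    }

    /* Drop the reference taken by obj_get(). */
    static void obj_put(struct obj *o)
    {
        assert(o->refcnt > 0);
        o->refcnt--;
    }

    /* Every exit path after a successful obj_get() must call obj_put(). */
    int do_work(unsigned int id)
    {
        struct obj *o = obj_get(id);

        if (o == NULL)
            return -1;
        if (o->busy) {
            obj_put(o);          /* without this, the reference above leaks */
            return -1;
        }
        /* ... operate on o ... */
        obj_put(o);
        return 0;
    }
]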
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
