[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH 15/16] tmem: refactor function tmem_ensure_avail_pages()



On Wed, Nov 20, 2013 at 04:46:24PM +0800, Bob Liu wrote:
> tmem_ensure_avail_pages() doesn't return a value, which is incorrect because
> the caller needs to confirm there is enough memory.

Which it was not doing until now. Ouch.
> 
> Signed-off-by: Bob Liu <bob.liu@xxxxxxxxxx>
> ---
>  xen/common/tmem.c          |   43 +++++++++++++++++++------------------------
>  xen/include/xen/tmem_xen.h |    6 ------
>  2 files changed, 19 insertions(+), 30 deletions(-)
> 
> diff --git a/xen/common/tmem.c b/xen/common/tmem.c
> index 2e807d4..0bc33ee 100644
> --- a/xen/common/tmem.c
> +++ b/xen/common/tmem.c
> @@ -1296,22 +1296,28 @@ static unsigned long tmem_relinquish_npages(unsigned 
> long n)
>      return avail_pages;
>  }
>  
> -/* Under certain conditions (e.g. if each client is putting pages for exactly
> +/*
> + * Under certain conditions (e.g. if each client is putting pages for exactly
>   * one object), once locks are held, freeing up memory may
>   * result in livelocks and very long "put" times, so we try to ensure there
>   * is a minimum amount of memory (1MB) available BEFORE any data structure
> - * locks are held */
> -static inline void tmem_ensure_avail_pages(void)
> + * locks are held.
> + */
> +static inline bool_t tmem_ensure_avail_pages(void)
>  {
>      int failed_evict = 10;
> +    unsigned long free_mem;
>  
> -    while ( !tmem_free_mb() )
> -    {
> -        if ( tmem_evict() )
> -            continue;
> -        else if ( failed_evict-- <= 0 )
> -            break;
> -    }
> +    do {
> +        free_mem = (tmem_page_list_pages + total_free_pages())
> +                        >> (20 - PAGE_SHIFT);
> +        if ( free_mem )
> +            return 1;
> +        if ( !tmem_evict() )
> +            failed_evict--;
> +    } while ( failed_evict > 0 );
> +
> +    return 0;
>  }
>  
>  /************ TMEM CORE OPERATIONS ************************************/
> @@ -2282,9 +2288,6 @@ long do_tmem_op(tmem_cli_op_t uops)
>      struct tmem_pool *pool = NULL;
>      struct oid *oidp;
>      int rc = 0;
> -    bool_t succ_get = 0, succ_put = 0;
> -    bool_t non_succ_get = 0, non_succ_put = 0;
> -    bool_t flush = 0, flush_obj = 0;

Um, OK, they seem to belong to a different patch?

>      bool_t write_lock_set = 0, read_lock_set = 0;
>  
>      if ( !tmem_initialized )
> @@ -2299,8 +2302,7 @@ long do_tmem_op(tmem_cli_op_t uops)
>      if ( unlikely(tmem_get_tmemop_from_client(&op, uops) != 0) )
>      {
>          tmem_client_err("tmem: can't get tmem struct from %s\n", 
> tmem_client_str);
> -        rc = -EFAULT;
> -     return rc;
> +        return -EFAULT;

This too.
>      }
>  
>      if ( op.cmd == TMEM_CONTROL )
> @@ -2369,28 +2371,21 @@ long do_tmem_op(tmem_cli_op_t uops)
>                                op.u.creat.uuid[0], op.u.creat.uuid[1]);
>          break;
>      case TMEM_PUT_PAGE:
> -        tmem_ensure_avail_pages();
> -        rc = do_tmem_put(pool, oidp, op.u.gen.index, op.u.gen.cmfn,
> +        if ( tmem_ensure_avail_pages() )
> +            rc = do_tmem_put(pool, oidp, op.u.gen.index, op.u.gen.cmfn,
>                          tmem_cli_buf_null);
> -        if (rc == 1) succ_put = 1;
> -        else non_succ_put = 1;

which we ignore..
>          break;
>      case TMEM_GET_PAGE:
>          rc = do_tmem_get(pool, oidp, op.u.gen.index, op.u.gen.cmfn,
>                          tmem_cli_buf_null);
> -        if (rc == 1) succ_get = 1;
> -        else non_succ_get = 1;

ugh.
>          break;
>      case TMEM_FLUSH_PAGE:
> -        flush = 1;
>          rc = do_tmem_flush_page(pool, oidp, op.u.gen.index);
>          break;
>      case TMEM_FLUSH_OBJECT:
>          rc = do_tmem_flush_object(pool, oidp);
> -        flush_obj = 1;
>          break;
>      case TMEM_DESTROY_POOL:
> -        flush = 1;

Those really belong to a separate cleanup patch.

>          rc = do_tmem_destroy_pool(op.pool_id);
>          break;
>      default:
> diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
> index bf3be62..258e8e7 100644
> --- a/xen/include/xen/tmem_xen.h
> +++ b/xen/include/xen/tmem_xen.h
> @@ -164,13 +164,7 @@ static inline void __tmem_free_page(struct page_info *pi)
>      atomic_dec(&freeable_page_count);
>  }
>  
> -static inline unsigned long tmem_free_mb(void)
> -{
> -    return (tmem_page_list_pages + total_free_pages()) >> (20 - PAGE_SHIFT);
> -}
> -
>  /*  "Client" (==domain) abstraction */
> -
>  struct client;
>  static inline struct client *tmem_client_from_cli_id(domid_t cli_id)
>  {
> -- 
> 1.7.10.4
> 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.