
Re: [Xen-devel] [PATCH 10/16] tmem: cleanup: drop runtime statistics



On Wed, Nov 20, 2013 at 04:46:19PM +0800, Bob Liu wrote:
> There are so many runtime statistics that they are unreadable and hard to
> maintain. This patch drops them temporarily, as a code cleanup, until we have
> a clear definition of which runtime statistics are really needed.

We should really also have a TODO so that it is not forgotten.
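Even something as simple as this near the top of tmem.c would keep it from
getting lost (just a sketch, exact wording and placement up to you):

    /*
     * TODO: Re-introduce a well-defined set of runtime statistics once
     * we know which counters are actually useful and how they should
     * be exposed, instead of the ad-hoc ones removed by this cleanup.
     */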
> 
> Signed-off-by: Bob Liu <bob.liu@xxxxxxxxxx>
> ---
>  xen/common/tmem.c |  187 ++++-------------------------------------------------
>  1 file changed, 13 insertions(+), 174 deletions(-)
> 
> diff --git a/xen/common/tmem.c b/xen/common/tmem.c
> index 48b67bf..c1b3e21 100644
> --- a/xen/common/tmem.c
> +++ b/xen/common/tmem.c
> @@ -26,27 +26,7 @@
>  
>  #define TMEM_SPEC_VERSION 1
>  
> -/* global statistics (none need to be locked) */
> -static unsigned long total_tmem_ops = 0;
> -static unsigned long errored_tmem_ops = 0;
> -static unsigned long alloc_failed = 0, alloc_page_failed = 0;
> -static unsigned long evicted_pgs = 0, evict_attempts = 0;
> -static unsigned long relinq_pgs = 0, relinq_attempts = 0;
> -static unsigned long max_evicts_per_relinq = 0;
> -static unsigned long deduped_puts = 0;
> -static unsigned long tot_good_eph_puts = 0;
> -static int global_obj_count_max = 0;
> -static int global_pgp_count_max = 0;
> -static int global_pcd_count_max = 0;
> -static int global_page_count_max = 0;
> -static int global_rtree_node_count_max = 0;
> -static long global_eph_count_max = 0;
> -static unsigned long failed_copies;
> -static unsigned long pcd_tot_tze_size = 0;
> -static unsigned long pcd_tot_csize = 0;
> -
>  /************ CORE DATA STRUCTURES ************************************/
> -
>  #define MAX_POOLS_PER_DOMAIN 16
>  #define MAX_GLOBAL_SHARED_POOLS  16
>  
> @@ -59,7 +39,7 @@ struct client {
>      struct domain *domain;
>      struct xmem_pool *persistent_pool;
>      struct list_head ephemeral_page_list;
> -    long eph_count, eph_count_max;
> +    long eph_count;
>      domid_t cli_id;
>      uint32_t weight;
>      uint32_t cap;
> @@ -71,12 +51,6 @@ struct client {
>      bool_t was_frozen;
>      struct list_head persistent_invalidated_list;
>      struct tmem_page_descriptor *cur_pgp;
> -    /* statistics collection */
> -    unsigned long compress_poor, compress_nomem;
> -    unsigned long compressed_pages;
> -    uint64_t compressed_sum_size;
> -    uint64_t total_cycles;
> -    unsigned long succ_pers_puts, succ_eph_gets, succ_pers_gets;
>      /* shared pool authentication */
>      uint64_t shared_auth_uuid[MAX_GLOBAL_SHARED_POOLS][2];
>  };
> @@ -107,17 +81,6 @@ struct tmem_pool {
>      struct tmem_page_descriptor *cur_pgp;
>      /* statistics collection */
>      atomic_t pgp_count;
> -    int pgp_count_max;
> -    long obj_count;  /* atomicity depends on pool_rwlock held for write */
> -    long obj_count_max;  
> -    unsigned long objnode_count, objnode_count_max;
> -    uint64_t sum_life_cycles;
> -    uint64_t sum_evicted_cycles;
> -    unsigned long puts, good_puts, no_mem_puts;
> -    unsigned long dup_puts_flushed, dup_puts_replaced;
> -    unsigned long gets, found_gets;
> -    unsigned long flushs, flushs_found;
> -    unsigned long flush_objs, flush_objs_found;
>  };
>  
>  #define is_persistent(_p)  (_p->persistent)
> @@ -130,7 +93,6 @@ struct oid {
>  struct tmem_object_root {
>      struct oid oid;
>      struct rb_node rb_tree_node; /* protected by pool->pool_rwlock */
> -    unsigned long objnode_count; /* atomicity depends on obj_spinlock */
>      long pgp_count; /* atomicity depends on obj_spinlock */
>      struct radix_tree_root tree_root; /* tree of pages within object */
>      struct tmem_pool *pool;
> @@ -196,7 +158,6 @@ struct rb_root pcd_tree_roots[256]; /* choose based on first byte of page */
>  rwlock_t pcd_tree_rwlocks[256]; /* poor man's concurrency for now */
>  
>  static LIST_HEAD(global_ephemeral_page_list); /* all pages in ephemeral pools */
> -
>  static LIST_HEAD(global_client_list);
>  static LIST_HEAD(global_pool_list);
>  
> @@ -206,7 +167,6 @@ static atomic_t client_weight_total = ATOMIC_INIT(0);
>  static int tmem_initialized = 0;
>  
>  /************ CONCURRENCY  ***********************************************/
> -
>  DEFINE_SPINLOCK(tmem_spinlock);  /* used iff tmem_lock_all */
>  DEFINE_RWLOCK(tmem_rwlock);      /* used iff !tmem_lock_all */
>  static DEFINE_SPINLOCK(eph_lists_spinlock); /* protects global AND clients */
> @@ -226,23 +186,6 @@ static DEFINE_SPINLOCK(pers_lists_spinlock);
>  
>  /* global counters (should use long_atomic_t access) */
>  static long global_eph_count = 0; /* atomicity depends on eph_lists_spinlock */
> -static atomic_t global_obj_count = ATOMIC_INIT(0);
> -static atomic_t global_pgp_count = ATOMIC_INIT(0);
> -static atomic_t global_pcd_count = ATOMIC_INIT(0);
> -static atomic_t global_page_count = ATOMIC_INIT(0);
> -static atomic_t global_rtree_node_count = ATOMIC_INIT(0);
> -
> -#define atomic_inc_and_max(_c) do { \
> -    atomic_inc(&_c); \
> -    if ( _atomic_read(_c) > _c##_max ) \
> -        _c##_max = _atomic_read(_c); \
> -} while (0)
> -
> -#define atomic_dec_and_assert(_c) do { \
> -    atomic_dec(&_c); \
> -    ASSERT(_atomic_read(_c) >= 0); \
> -} while (0)
> -
>  
>  /************ MEMORY ALLOCATION INTERFACE *****************************/
>  static void *tmem_malloc(size_t size, struct tmem_pool *pool)
> @@ -259,8 +202,6 @@ static void *tmem_malloc(size_t size, struct tmem_pool *pool)
>          ASSERT( tmem_mempool != NULL );
>          v = xmem_pool_alloc(size, tmem_mempool);
>      }
> -    if ( v == NULL )
> -        alloc_failed++;
>      return v;
>  }
>  
> @@ -286,10 +227,6 @@ static struct page_info *tmem_page_alloc(struct tmem_pool *pool)
>          pfp = tmem_alloc_page_thispool(pool->client->domain);
>      else
>          pfp = tmem_alloc_page(pool,0);
> -    if ( pfp == NULL )
> -        alloc_page_failed++;
> -    else
> -        atomic_inc_and_max(global_page_count);
>      return pfp;
>  }
>  
> @@ -300,7 +237,6 @@ static void tmem_page_free(struct tmem_pool *pool, struct page_info *pfp)
>          tmem_free_page(pfp);
>      else
>          tmem_free_page_thispool(pfp);
> -    atomic_dec_and_assert(global_page_count);
>  }
>  
>  /************ PAGE CONTENT DESCRIPTOR MANIPULATION ROUTINES ***********/
> @@ -339,7 +275,6 @@ static void pcd_disassociate(struct tmem_page_descriptor *pgp, struct tmem_pool
>      pagesize_t pcd_size = pcd->size;
>      pagesize_t pgp_size = pgp->size;
>      char *pcd_cdata = pgp->pcd->cdata;
> -    pagesize_t pcd_csize = pgp->pcd->size;
>  
>      ASSERT(tmem_dedup_enabled());
>      ASSERT(firstbyte != NOT_SHAREABLE);
> @@ -368,25 +303,18 @@ static void pcd_disassociate(struct tmem_page_descriptor *pgp, struct tmem_pool
>      RB_CLEAR_NODE(&pcd->pcd_rb_tree_node);
>      /* now free up the pcd memory */
>      tmem_free(pcd, NULL);
> -    atomic_dec_and_assert(global_pcd_count);
>      if ( pgp_size != 0 && pcd_size < PAGE_SIZE )
>      {
>          /* compressed data */
>          tmem_free(pcd_cdata, pool);
> -        pcd_tot_csize -= pcd_csize;
>      }
>      else if ( pcd_size != PAGE_SIZE )
>      {
>          /* trailing zero data */
> -        pcd_tot_tze_size -= pcd_size;
>          if ( pcd_size )
>              tmem_free(pcd_tze, pool);
>      } else {
>          /* real physical page */
> -        if ( tmem_tze_enabled() )
> -            pcd_tot_tze_size -= PAGE_SIZE;
> -        if ( tmem_compression_enabled() )
> -            pcd_tot_csize -= PAGE_SIZE;
>          tmem_page_free(pool,pfp);
>      }
>      tmem_write_unlock(&pcd_tree_rwlocks[firstbyte]);
> @@ -467,7 +395,6 @@ static int pcd_associate(struct tmem_page_descriptor *pgp, char *cdata, pagesize
>              /* but if compressed, data is assumed static so don't free! */
>              if ( cdata == NULL )
>                  tmem_page_free(pgp->us.obj->pool,pgp->pfp);
> -            deduped_puts++;
>              goto match;
>          }
>      }
> @@ -485,7 +412,6 @@ static int pcd_associate(struct tmem_page_descriptor *pgp, char *cdata, pagesize
>              goto unlock;
>          }
>      }
> -    atomic_inc_and_max(global_pcd_count);
>      RB_CLEAR_NODE(&pcd->pcd_rb_tree_node);  /* is this necessary */
>      INIT_LIST_HEAD(&pcd->pgp_list);  /* is this necessary */
>      pcd->pgp_ref_count = 0;
> @@ -493,7 +419,6 @@ static int pcd_associate(struct tmem_page_descriptor *pgp, char *cdata, pagesize
>      {
>          memcpy(pcd->cdata,cdata,csize);
>          pcd->size = csize;
> -        pcd_tot_csize += csize;
>      } else if ( pfp_size == 0 ) {
>          ASSERT(tmem_tze_enabled());
>          pcd->size = 0;
> @@ -502,15 +427,10 @@ static int pcd_associate(struct tmem_page_descriptor *pgp, char *cdata, pagesize
>           ((pcd->tze = tmem_malloc(pfp_size,pgp->us.obj->pool)) != NULL) ) {
>          tmem_tze_copy_from_pfp(pcd->tze,pgp->pfp,pfp_size);
>          pcd->size = pfp_size;
> -        pcd_tot_tze_size += pfp_size;
>          tmem_page_free(pgp->us.obj->pool,pgp->pfp);
>      } else {
>          pcd->pfp = pgp->pfp;
>          pcd->size = PAGE_SIZE;
> -        if ( tmem_tze_enabled() )
> -            pcd_tot_tze_size += PAGE_SIZE;
> -        if ( tmem_compression_enabled() )
> -            pcd_tot_csize += PAGE_SIZE;
>      }
>      rb_link_node(&pcd->pcd_rb_tree_node, parent, new);
>      rb_insert_color(&pcd->pcd_rb_tree_node, root);
> @@ -553,8 +473,7 @@ static struct tmem_page_descriptor *pgp_alloc(struct tmem_object_root *obj)
>      pgp->size = -1;
>      pgp->index = -1;
>      pgp->timestamp = get_cycles();
> -    atomic_inc_and_max(global_pgp_count);
> -    atomic_inc_and_max(pool->pgp_count);
> +    atomic_inc(&pool->pgp_count);
>      return pgp;
>  }
>  
> @@ -578,11 +497,6 @@ static void pgp_free_data(struct tmem_page_descriptor *pgp, struct tmem_pool *po
>          tmem_free(pgp->cdata, pool);
>      else
>          tmem_page_free(pgp->us.obj->pool,pgp->pfp);
> -    if ( pool != NULL && pgp_size )
> -    {
> -        pool->client->compressed_pages--;
> -        pool->client->compressed_sum_size -= pgp_size;
> -    }
>      pgp->pfp = NULL;
>      pgp->size = -1;
>  }
> @@ -603,8 +517,7 @@ static void pgp_free(struct tmem_page_descriptor *pgp, int from_delete)
>          ASSERT(list_empty(&pgp->us.client_eph_pages));
>      }
>      pgp_free_data(pgp, pool);
> -    atomic_dec_and_assert(global_pgp_count);
> -    atomic_dec_and_assert(pool->pgp_count);
> +    atomic_dec(&pool->pgp_count);
>      pgp->size = -1;
>      if ( is_persistent(pool) && pool->client->live_migrating )
>      {
> @@ -676,7 +589,6 @@ static void pgp_delete(struct tmem_page_descriptor *pgp, bool_t no_eph_lock)
>      ASSERT(pgp->us.obj != NULL);
>      ASSERT(pgp->us.obj->pool != NULL);
>      life = get_cycles() - pgp->timestamp;
> -    pgp->us.obj->pool->sum_life_cycles += life;
>      pgp_delist(pgp, no_eph_lock);
>      pgp_free(pgp,1);
>  }
> @@ -734,10 +646,6 @@ static struct radix_tree_node *rtn_alloc(void *arg)
>          return NULL;
>      objnode->obj = obj;
>      memset(&objnode->rtn, 0, sizeof(struct radix_tree_node));
> -    if (++obj->pool->objnode_count > obj->pool->objnode_count_max)
> -        obj->pool->objnode_count_max = obj->pool->objnode_count;
> -    atomic_inc_and_max(global_rtree_node_count);
> -    obj->objnode_count++;
>      return &objnode->rtn;
>  }
>  
> @@ -753,11 +661,8 @@ static void rtn_free(struct radix_tree_node *rtn, void *arg)
>      ASSERT_SPINLOCK(&objnode->obj->obj_spinlock);
>      pool = objnode->obj->pool;
>      ASSERT(pool != NULL);
> -    pool->objnode_count--;
> -    objnode->obj->objnode_count--;
>      objnode->obj = NULL;
>      tmem_free(objnode, pool);
> -    atomic_dec_and_assert(global_rtree_node_count);
>  }
>  
>  /************ POOL OBJECT COLLECTION MANIPULATION ROUTINES *******************/
> @@ -850,15 +755,11 @@ static void obj_free(struct tmem_object_root *obj, int no_rebalance)
>      ASSERT_WRITELOCK(&pool->pool_rwlock);
>      if ( obj->tree_root.rnode != NULL ) /* may be a "stump" with no leaves */
>          radix_tree_destroy(&obj->tree_root, pgp_destroy);
> -    ASSERT((long)obj->objnode_count == 0);
>      ASSERT(obj->tree_root.rnode == NULL);
> -    pool->obj_count--;
> -    ASSERT(pool->obj_count >= 0);
>      obj->pool = NULL;
>      old_oid = obj->oid;
>      oid_set_invalid(&obj->oid);
>      obj->last_client = TMEM_CLI_ID_NULL;
> -    atomic_dec_and_assert(global_obj_count);
>      /* use no_rebalance only if all objects are being destroyed anyway */
>      if ( !no_rebalance )
>          rb_erase(&obj->rb_tree_node,&pool->obj_rb_root[oid_hash(&old_oid)]);
> @@ -905,16 +806,11 @@ static struct tmem_object_root * obj_new(struct tmem_pool *pool, struct oid *oid
>      ASSERT_WRITELOCK(&pool->pool_rwlock);
>      if ( (obj = tmem_malloc(sizeof(struct tmem_object_root), pool)) == NULL )
>          return NULL;
> -    pool->obj_count++;
> -    if (pool->obj_count > pool->obj_count_max)
> -        pool->obj_count_max = pool->obj_count;
> -    atomic_inc_and_max(global_obj_count);
>      radix_tree_init(&obj->tree_root);
>      radix_tree_set_alloc_callbacks(&obj->tree_root, rtn_alloc, rtn_free, obj);
>      spin_lock_init(&obj->obj_spinlock);
>      obj->pool = pool;
>      obj->oid = *oidp;
> -    obj->objnode_count = 0;
>      obj->pgp_count = 0;
>      obj->last_client = TMEM_CLI_ID_NULL;
>      tmem_spin_lock(&obj->obj_spinlock);
> @@ -978,16 +874,9 @@ static struct tmem_pool * pool_alloc(void)
>      INIT_LIST_HEAD(&pool->persistent_page_list);
>      pool->cur_pgp = NULL;
>      rwlock_init(&pool->pool_rwlock);
> -    pool->pgp_count_max = pool->obj_count_max = 0;
> -    pool->objnode_count = pool->objnode_count_max = 0;
>      atomic_set(&pool->pgp_count,0);
> -    pool->obj_count = 0; pool->shared_count = 0;
> +    pool->shared_count = 0;
>      pool->pageshift = PAGE_SHIFT - 12;
> -    pool->good_puts = pool->puts = pool->dup_puts_flushed = 0;
> -    pool->dup_puts_replaced = pool->no_mem_puts = 0;
> -    pool->found_gets = pool->gets = 0;
> -    pool->flushs_found = pool->flushs = 0;
> -    pool->flush_objs_found = pool->flush_objs = 0;
>      pool->is_dying = 0;
>      return pool;
>  }
> @@ -1163,9 +1052,7 @@ static struct client *client_create(domid_t cli_id)
>      INIT_LIST_HEAD(&client->ephemeral_page_list);
>      INIT_LIST_HEAD(&client->persistent_invalidated_list);
>      client->cur_pgp = NULL;
> -    client->eph_count = client->eph_count_max = 0;
> -    client->total_cycles = 0; client->succ_pers_puts = 0;
> -    client->succ_eph_gets = 0; client->succ_pers_gets = 0;
> +    client->eph_count = 0;
>      tmem_client_info("ok\n");
>      return client;
>  
> @@ -1273,7 +1160,6 @@ static int tmem_evict(void)
>      int ret = 0;
>      bool_t hold_pool_rwlock = 0;
>  
> -    evict_attempts++;
>      tmem_spin_lock(&eph_lists_spinlock);
>      if ( (client != NULL) && client_over_quota(client) &&
>           !list_empty(&client->ephemeral_page_list) )
> @@ -1318,7 +1204,6 @@ found:
>          tmem_spin_unlock(&obj->obj_spinlock);
>      if ( hold_pool_rwlock )
>          tmem_write_unlock(&pool->pool_rwlock);
> -    evicted_pgs++;
>      ret = 1;
>  
>  out:
> @@ -1404,8 +1289,6 @@ static int do_tmem_put_compress(struct tmem_page_descriptor *pgp, xen_pfn_t cmfn
>          pgp->cdata = p;
>      }
>      pgp->size = size;
> -    pgp->us.obj->pool->client->compressed_pages++;
> -    pgp->us.obj->pool->client->compressed_sum_size += size;
>      ret = 1;
>  
>  out:
> @@ -1443,7 +1326,7 @@ static int do_tmem_dup_put(struct tmem_page_descriptor *pgp, xen_pfn_t cmfn,
>          else if ( ret == -ENOMEM )
>              goto failed_dup;
>          else if ( ret == -EFAULT )
> -            goto bad_copy;
> +            goto cleanup;
>      }
>  
>  copy_uncompressed:
> @@ -1454,7 +1337,7 @@ copy_uncompressed:
>      pgp->size = 0;
>      ret = tmem_copy_from_client(pgp->pfp, cmfn, tmem_cli_buf_null);
>      if ( ret < 0 )
> -        goto bad_copy;
> +        goto cleanup;
>      if ( tmem_dedup_enabled() && !is_persistent(pool) )
>      {
>          if ( pcd_associate(pgp,NULL,0) == -ENOMEM )
> @@ -1467,16 +1350,8 @@ done:
>          obj->last_client = client->cli_id;
>      obj->no_evict = 0;
>      tmem_spin_unlock(&obj->obj_spinlock);
> -    pool->dup_puts_replaced++;
> -    pool->good_puts++;
> -    if ( is_persistent(pool) )
> -        client->succ_pers_puts++;
>      return 1;
>  
> -bad_copy:
> -    failed_copies++;
> -    goto cleanup;
> -
>  failed_dup:
>     /* couldn't change out the data, flush the old data and return
>      * -ENOSPC instead of -ENOMEM to differentiate failed _dup_ put */
> @@ -1494,7 +1369,6 @@ cleanup:
>          obj->no_evict = 0;
>          tmem_spin_unlock(&obj->obj_spinlock);
>      }
> -    pool->dup_puts_flushed++;
>      return ret;
>  }
>  
> @@ -1510,7 +1384,6 @@ static int do_tmem_put(struct tmem_pool *pool,
>      ASSERT(pool != NULL);
>      client = pool->client;
>      ret = client->frozen ? -EFROZEN : -ENOMEM;
> -    pool->puts++;
>      /* does page already exist (dup)?  if so, handle specially */
>      if ( (obj = obj_find(pool,oidp)) != NULL )
>      {
> @@ -1560,16 +1433,14 @@ static int do_tmem_put(struct tmem_pool *pool,
>              goto insert_page;
>          if ( ret == -ENOMEM )
>          {
> -            client->compress_nomem++;
>              goto del_pgp_from_obj;
>          }
>          if ( ret == 0 )
>          {
> -            client->compress_poor++;
>              goto copy_uncompressed;
>          }
>          if ( ret == -EFAULT )
> -            goto bad_copy;
> +            goto del_pgp_from_obj;
>      }
>  
>  copy_uncompressed:
> @@ -1580,7 +1451,7 @@ copy_uncompressed:
>      }
>      ret = tmem_copy_from_client(pgp->pfp, cmfn, clibuf);
>      if ( ret < 0 )
> -        goto bad_copy;
> +        goto del_pgp_from_obj;
>  
>      if ( tmem_dedup_enabled() && !is_persistent(pool) )
>      {
> @@ -1594,12 +1465,10 @@ insert_page:
>          tmem_spin_lock(&eph_lists_spinlock);
>          list_add_tail(&pgp->global_eph_pages,
>              &global_ephemeral_page_list);
> -        if (++global_eph_count > global_eph_count_max)
> -            global_eph_count_max = global_eph_count;
> +        ++global_eph_count;
>          list_add_tail(&pgp->us.client_eph_pages,
>              &client->ephemeral_page_list);
> -        if (++client->eph_count > client->eph_count_max)
> -            client->eph_count_max = client->eph_count;
> +        ++client->eph_count;
>          tmem_spin_unlock(&eph_lists_spinlock);
>      }
>      else
> @@ -1616,17 +1485,8 @@ insert_page:
>  
>      /* free the obj spinlock */
>      tmem_spin_unlock(&obj->obj_spinlock);
> -    pool->good_puts++;
> -
> -    if ( is_persistent(pool) )
> -        client->succ_pers_puts++;
> -    else
> -        tot_good_eph_puts++;
>      return 1;
>  
> -bad_copy:
> -    failed_copies++;
> -
>  del_pgp_from_obj:
>      ASSERT((obj != NULL) && (pgp != NULL) && (pgp->index != -1));
>      pgp_delete_from_obj(obj, pgp->index);
> @@ -1645,7 +1505,6 @@ unlock_obj:
>          obj->no_evict = 0;
>          tmem_spin_unlock(&obj->obj_spinlock);
>      }
> -    pool->no_mem_puts++;
>      return ret;
>  }
>  
> @@ -1660,7 +1519,6 @@ static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index,
>      if ( !_atomic_read(pool->pgp_count) )
>          return -EEMPTY;
>  
> -    pool->gets++;
>      obj = obj_find(pool,oidp);
>      if ( obj == NULL )
>          return 0;
> @@ -1687,7 +1545,7 @@ static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index,
>      else
>          rc = tmem_copy_to_client(cmfn, pgp->pfp, clibuf);
>      if ( rc <= 0 )
> -        goto bad_copy;
> +        goto out;
>  
>      if ( !is_persistent(pool) )
>      {
> @@ -1716,17 +1574,11 @@ static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index,
>          obj->no_evict = 0;
>          tmem_spin_unlock(&obj->obj_spinlock);
>      }
> -    pool->found_gets++;
> -    if ( is_persistent(pool) )
> -        client->succ_pers_gets++;
> -    else
> -        client->succ_eph_gets++;
>      return 1;
>  
> -bad_copy:
> +out:
>      obj->no_evict = 0;
>      tmem_spin_unlock(&obj->obj_spinlock);
> -    failed_copies++;
>      return rc;
>  }
>  
> @@ -1735,7 +1587,6 @@ static int do_tmem_flush_page(struct tmem_pool *pool, struct oid *oidp, uint32_t
>      struct tmem_object_root *obj;
>      struct tmem_page_descriptor *pgp;
>  
> -    pool->flushs++;
>      obj = obj_find(pool,oidp);
>      if ( obj == NULL )
>          goto out;
> @@ -1756,7 +1607,6 @@ static int do_tmem_flush_page(struct tmem_pool *pool, struct oid *oidp, uint32_t
>          obj->no_evict = 0;
>          tmem_spin_unlock(&obj->obj_spinlock);
>      }
> -    pool->flushs_found++;
>  
>  out:
>      if ( pool->client->frozen )
> @@ -1769,13 +1619,11 @@ static int do_tmem_flush_object(struct tmem_pool *pool, struct oid *oidp)
>  {
>      struct tmem_object_root *obj;
>  
> -    pool->flush_objs++;
>      obj = obj_find(pool,oidp);
>      if ( obj == NULL )
>          goto out;
>      tmem_write_lock(&pool->pool_rwlock);
>      obj_destroy(obj,0);
> -    pool->flush_objs_found++;
>      tmem_write_unlock(&pool->pool_rwlock);
>  
>  out:
> @@ -2386,8 +2234,6 @@ long do_tmem_op(tmem_cli_op_t uops)
>      if ( !tmem_current_permitted() )
>          return -EPERM;
>  
> -    total_tmem_ops++;
> -
>      if ( tmem_lock_all )
>      {
>          if ( tmem_lock_all > 1 )
> @@ -2402,7 +2248,6 @@ long do_tmem_op(tmem_cli_op_t uops)
>          if ( tmem_lock_all )
>              goto out;
>   simple_error:
> -        errored_tmem_ops++;
>          return rc;
>      }
>  
> @@ -2512,8 +2357,6 @@ long do_tmem_op(tmem_cli_op_t uops)
>      }
>  
>  out:
> -    if ( rc < 0 )
> -        errored_tmem_ops++;
>      if ( tmem_lock_all )
>      {
>          if ( tmem_lock_all > 1 )
> @@ -2590,7 +2433,6 @@ void *tmem_relinquish_pages(unsigned int order, unsigned int memflags)
>      if (!tmem_enabled() || !tmem_freeable_pages())
>          return NULL;
>  
> -    relinq_attempts++;
>      if ( order > 0 )
>      {
>  #ifndef NDEBUG
> @@ -2613,13 +2455,10 @@ void *tmem_relinquish_pages(unsigned int order, unsigned int memflags)
>              break;
>          evicts_per_relinq++;
>      }
> -    if ( evicts_per_relinq > max_evicts_per_relinq )
> -        max_evicts_per_relinq = evicts_per_relinq;
>      if ( pfp != NULL )
>      {
>          if ( !(memflags & MEMF_tmem) )
>              scrub_one_page(pfp);
> -        relinq_pgs++;
>      }
>  
>      if ( tmem_called_from_tmem(memflags) )
> -- 
> 1.7.10.4
> 



 

