[PATCH v4 13/17] tools/xenstore: switch hashtable to use the talloc framework

Instead of using malloc() and friends, let the hashtable implementation
use the talloc framework. This is more consistent with the rest of
xenstored and allows tracking memory usage via
"xenstore-control memreport".

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
Reviewed-by: Julien Grall <jgrall@xxxxxxxxxx>
---
V3:
- make key and value children of element (if flagged)
---
 tools/xenstore/hashtable.c        | 98 +++++++++++--------------------
 tools/xenstore/hashtable.h        |  3 +-
 tools/xenstore/xenstored_core.c   |  5 +-
 tools/xenstore/xenstored_domain.c |  2 +-
 4 files changed, 38 insertions(+), 70 deletions(-)

diff --git a/tools/xenstore/hashtable.c b/tools/xenstore/hashtable.c
index ddca1591a2..30eb9f21d2 100644
--- a/tools/xenstore/hashtable.c
+++ b/tools/xenstore/hashtable.c
@@ -6,6 +6,8 @@
 #include <string.h>
 #include <math.h>
 #include <stdint.h>
+#include <stdarg.h>
+#include "talloc.h"
 
 struct entry
 {
@@ -50,7 +52,7 @@ indexFor(unsigned int tablelength, unsigned int hashvalue) {
 
 /*****************************************************************************/
 struct hashtable *
-create_hashtable(unsigned int minsize,
+create_hashtable(const void *ctx, unsigned int minsize,
                  unsigned int (*hashf) (void*),
                  int (*eqf) (void*,void*),
                  unsigned int flags)
@@ -66,10 +68,10 @@ create_hashtable(unsigned int minsize,
         if (primes[pindex] > minsize) { size = primes[pindex]; break; }
     }
 
-    h = (struct hashtable *)calloc(1, sizeof(struct hashtable));
+    h = talloc_zero(ctx, struct hashtable);
     if (NULL == h) goto err0;
 
-    h->table = (struct entry **)calloc(size, sizeof(struct entry *));
+    h->table = talloc_zero_array(h, struct entry *, size);
     if (NULL == h->table) goto err1;
 
@@ -83,7 +85,7 @@ create_hashtable(unsigned int minsize,
     return h;
 
 err1:
-    free(h);
+    talloc_free(h);
 err0:
     return NULL;
 }
@@ -115,47 +117,32 @@ hashtable_expand(struct hashtable *h)
     if (h->primeindex == (prime_table_length - 1)) return 0;
     newsize = primes[++(h->primeindex)];
 
-    newtable = (struct entry **)calloc(newsize, sizeof(struct entry*));
-    if (NULL != newtable)
+    newtable = talloc_realloc(h, h->table, struct entry *, newsize);
+    if (!newtable)
     {
-        /* This algorithm is not 'stable'. ie. it reverses the list
-         * when it transfers entries between the tables */
-        for (i = 0; i < h->tablelength; i++) {
-            while (NULL != (e = h->table[i])) {
-                h->table[i] = e->next;
-                index = indexFor(newsize,e->h);
+        h->primeindex--;
+        return 0;
+    }
+
+    h->table = newtable;
+    memset(newtable + h->tablelength, 0,
+           (newsize - h->tablelength) * sizeof(*newtable));
+    for (i = 0; i < h->tablelength; i++) {
+        for (pE = &(newtable[i]), e = *pE; e != NULL; e = *pE) {
+            index = indexFor(newsize, e->h);
+            if (index == i)
+            {
+                pE = &(e->next);
+            }
+            else
+            {
+                *pE = e->next;
                 e->next = newtable[index];
                 newtable[index] = e;
             }
         }
-        free(h->table);
-        h->table = newtable;
-    }
-    /* Plan B: realloc instead */
-    else
-    {
-        newtable = (struct entry **)
-                   realloc(h->table, newsize * sizeof(struct entry *));
-        if (NULL == newtable) { (h->primeindex)--; return 0; }
-        h->table = newtable;
-        memset(newtable + h->tablelength, 0,
-               (newsize - h->tablelength) * sizeof(*newtable));
-        for (i = 0; i < h->tablelength; i++) {
-            for (pE = &(newtable[i]), e = *pE; e != NULL; e = *pE) {
-                index = indexFor(newsize,e->h);
-                if (index == i)
-                {
-                    pE = &(e->next);
-                }
-                else
-                {
-                    *pE = e->next;
-                    e->next = newtable[index];
-                    newtable[index] = e;
-                }
-            }
-        }
     }
+
     h->tablelength = newsize;
     h->loadlimit = (unsigned int)
         (((uint64_t)newsize * max_load_factor) / 100);
@@ -184,12 +171,16 @@ hashtable_insert(struct hashtable *h, void *k, void *v)
          * element may be ok. Next time we insert, we'll try expanding again.*/
         hashtable_expand(h);
     }
-    e = (struct entry *)calloc(1, sizeof(struct entry));
+    e = talloc_zero(h, struct entry);
     if (NULL == e) { --(h->entrycount); return 0; } /*oom*/
     e->h = hash(h,k);
     index = indexFor(h->tablelength,e->h);
     e->k = k;
+    if (h->flags & HASHTABLE_FREE_KEY)
+        talloc_steal(e, k);
     e->v = v;
+    if (h->flags & HASHTABLE_FREE_VALUE)
+        talloc_steal(e, v);
     e->next = h->table[index];
     h->table[index] = e;
     return -1;
@@ -235,11 +226,7 @@ hashtable_remove(struct hashtable *h, void *k)
         {
             *pE = e->next;
             h->entrycount--;
-            if (h->flags & HASHTABLE_FREE_KEY)
-                free(e->k);
-            if (h->flags & HASHTABLE_FREE_VALUE)
-                free(e->v);
-            free(e);
+            talloc_free(e);
             return;
         }
         pE = &(e->next);
@@ -278,26 +265,7 @@ hashtable_iterate(struct hashtable *h,
 void
 hashtable_destroy(struct hashtable *h)
 {
-    unsigned int i;
-    struct entry *e, *f;
-    struct entry **table = h->table;
-
-    for (i = 0; i < h->tablelength; i++)
-    {
-        e = table[i];
-        while (NULL != e)
-        {
-            f = e;
-            e = e->next;
-            if (h->flags & HASHTABLE_FREE_KEY)
-                free(f->k);
-            if (h->flags & HASHTABLE_FREE_VALUE)
-                free(f->v);
-            free(f);
-        }
-    }
-    free(h->table);
-    free(h);
+    talloc_free(h);
 }
 
 /*
diff --git a/tools/xenstore/hashtable.h b/tools/xenstore/hashtable.h
index 780ad3c8f7..4e2823134e 100644
--- a/tools/xenstore/hashtable.h
+++ b/tools/xenstore/hashtable.h
@@ -9,6 +9,7 @@ struct hashtable;
  * create_hashtable
 
  * @name          create_hashtable
+ * @param   ctx           talloc context to use for allocations
  * @param   minsize       minimum initial size of hashtable
  * @param   hashfunction  function for hashing keys
  * @param   key_eq_fn     function for determining key equality
@@ -22,7 +23,7 @@ struct hashtable;
 #define HASHTABLE_FREE_KEY   (1U << 1)
 
 struct hashtable *
-create_hashtable(unsigned int minsize,
+create_hashtable(const void *ctx, unsigned int minsize,
                  unsigned int (*hashfunction) (void*),
                  int (*key_eq_fn) (void*,void*),
                  unsigned int flags
diff --git a/tools/xenstore/xenstored_core.c b/tools/xenstore/xenstored_core.c
index 9cfde76898..17e678256e 100644
--- a/tools/xenstore/xenstored_core.c
+++ b/tools/xenstore/xenstored_core.c
@@ -2405,11 +2405,10 @@ static int keys_equal_fn(void *key1, void *key2)
 
 int remember_string(struct hashtable *hash, const char *str)
 {
-        char *k = malloc(strlen(str) + 1);
+        char *k = talloc_strdup(NULL, str);
 
         if (!k)
                 return 0;
-        strcpy(k, str);
         return hashtable_insert(hash, k, (void *)1);
 }
 
@@ -2504,7 +2503,7 @@ void check_store(void)
         };
 
         /* Don't free values (they are all void *1) */
-        reachable = create_hashtable(16, hash_from_key_fn, keys_equal_fn,
+        reachable = create_hashtable(NULL, 16, hash_from_key_fn, keys_equal_fn,
                                      HASHTABLE_FREE_KEY);
         if (!reachable) {
                 log("check_store: ENOMEM");
diff --git a/tools/xenstore/xenstored_domain.c b/tools/xenstore/xenstored_domain.c
index 391395060e..4f4c0a727c 100644
--- a/tools/xenstore/xenstored_domain.c
+++ b/tools/xenstore/xenstored_domain.c
@@ -931,7 +931,7 @@ void domain_init(int evtfd)
         int rc;
 
         /* Start with a random rather low domain count for the hashtable. */
-        domhash = create_hashtable(8, domhash_fn, domeq_fn, 0);
+        domhash = create_hashtable(NULL, 8, domhash_fn, domeq_fn, 0);
         if (!domhash)
                 barf_perror("Failed to allocate domain hashtable");
 
-- 
2.35.3
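
To illustrate the resulting ownership model, here is a minimal usage
sketch (not part of the patch; hashtable_usage_example, hash_fn and
eq_fn are made-up stand-ins for a real caller and its callbacks, the
rest follows the interfaces touched above). With HASHTABLE_FREE_KEY and
HASHTABLE_FREE_VALUE set, hashtable_insert() talloc_steal()s the key
and value into the new entry, so a single talloc_free() of the table,
here via hashtable_destroy(), releases the bucket array, all entries
and everything they own:

    #include "talloc.h"
    #include "hashtable.h"

    /* Hypothetical caller, for illustration only. */
    static void hashtable_usage_example(const void *ctx,
                                        unsigned int (*hash_fn)(void *),
                                        int (*eq_fn)(void *, void *))
    {
        struct hashtable *ht;
        char *key, *val;

        /* The table and its bucket array become talloc children of ctx. */
        ht = create_hashtable(ctx, 16, hash_fn, eq_fn,
                              HASHTABLE_FREE_KEY | HASHTABLE_FREE_VALUE);
        if (!ht)
            return;

        key = talloc_strdup(NULL, "key");
        val = talloc_strdup(NULL, "value");

        /* On success the new entry becomes the parent of key and val
         * (they are "stolen"); on failure nothing was stolen, so drop
         * them here. hashtable_insert() returns 0 on allocation failure. */
        if (!key || !val || !hashtable_insert(ht, key, val)) {
            talloc_free(key);
            talloc_free(val);
        }

        /* Frees the table, all entries and, via the talloc hierarchy,
         * every owned key and value in one call. */
        hashtable_destroy(ht);
    }

Because all of these allocations hang off one talloc hierarchy rooted
at ctx, they become visible to the memory usage tracking mentioned in
the commit message.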