static uint64_t stat_mem_deallocated_total = 0;
static uint64_t stat_mem_high = 0;
static uint64_t stat_mem_peak = 0;
+static uint64_t stat_mem_strdups = 0;
static uint64_t stat_used_strdups = 0;
static uint64_t stat_used_vectors = 0;
static uint64_t stat_used_hashtables = 0;
stat_mem_block_t *info = (stat_mem_block_t*)malloc(sizeof(stat_mem_block_t) + size);
void *data = (void*)(info + 1);
- if(!info)
+ if(GMQCC_UNLIKELY(!info))
return NULL;
info->line = line;
info->prev = NULL;
info->next = stat_mem_block_root;
- if (stat_mem_block_root)
+ /* unlikely since it only happens once */
+ if (GMQCC_UNLIKELY(stat_mem_block_root != NULL))
stat_mem_block_root->prev = info;
stat_mem_block_root = info;
void stat_mem_deallocate(void *ptr) {
stat_mem_block_t *info = NULL;
- if (!ptr)
+ if (GMQCC_UNLIKELY(!ptr))
return;
info = ((stat_mem_block_t*)ptr - 1);
stat_mem_block_t *oldinfo = NULL;
stat_mem_block_t *newinfo;
- if (!ptr)
+ if (GMQCC_UNLIKELY(!ptr))
return stat_mem_allocate(size, line, file);
- /* stay consistent with glic */
- if (!size) {
+ /* stay consistent with glibc */
+ if (GMQCC_UNLIKELY(!size)) {
stat_mem_deallocate(ptr);
return NULL;
}
oldinfo = ((stat_mem_block_t*)ptr - 1);
newinfo = ((stat_mem_block_t*)malloc(sizeof(stat_mem_block_t) + size));
- if (!newinfo) {
+ if (GMQCC_UNLIKELY(!newinfo)) {
stat_mem_deallocate(ptr);
return NULL;
}
newinfo->prev = NULL;
newinfo->next = stat_mem_block_root;
- if (stat_mem_block_root)
+ /*
+ * likely since the only time there is no root is when it's
+ * being initialized first.
+ */
+ if (GMQCC_LIKELY(stat_mem_block_root != NULL))
stat_mem_block_root->prev = newinfo;
stat_mem_block_root = newinfo;
stat_mem_peak = stat_mem_high;
free(oldinfo);
-
return newinfo + 1;
}
}
stat_used_strdups ++;
+ stat_mem_strdups += len;
return ptr;
}
* This is a patched version of the Murmur2 hashing function to use
 * a proper pre-mix and post-mix setup. In fact, this is Murmur3 for
* the most part just reinvented.
- *
+ *
* Murmur 2 contains an inner loop such as:
* while (l >= 4) {
* u32 k = *(u32*)d;
* k *= m;
* k ^= k >> r;
* k *= m;
- *
+ *
* h *= m;
* h ^= k;
* d += 4;
* l -= 4;
* }
- *
+ *
* The two u32s that form the key are the same value x (pulled from data)
* this premix stage will perform the same results for both values. Unrolled
* this produces just:
* h ^= x;
* h *= m;
* h ^= x;
- *
+ *
* This appears to be fine, except what happens when m == 1? well x
 * cancels out entirely, leaving just:
* x ^= x >> r;
* h ^= x;
* h ^= x;
- *
+ *
* So all keys hash to the same value, but how often does m == 1?
 * well, it turns out testing x for all possible values yields only
* 172,013,942 unique results instead of 2^32. So nearly ~4.6 bits
* are cancelled out on average!
- *
+ *
* This means we have a 14.5% (rounded) chance of colliding more, which
* results in another bucket/chain for the hashtable.
*
return util_htgeth(ht, key, util_hthash(ht, key));
}
+void *code_util_str_htgeth(hash_table_t *ht, const char *key, size_t bin);
void *code_util_str_htgeth(hash_table_t *ht, const char *key, size_t bin) {
hash_node_t *pair;
size_t len, keylen;
uint64_t mem = 0;
con_out("Memory Statistics:\n\
- Total vectors allocated: %llu\n\
- Total string duplicates: %llu\n\
- Total hashtables allocated: %llu\n\
- Total unique vector sizes: %llu\n",
+ Total vectors allocated: %llu\n\
+ Total string duplicates: %llu\n\
+ Total string duplicate memory: %f (MB)\n\
+ Total hashtables allocated: %llu\n\
+ Total unique vector sizes: %llu\n",
stat_used_vectors,
stat_used_strdups,
+ (float)(stat_mem_strdups) / 1048576.0f,
stat_used_hashtables,
stat_type_vectors
);