Diffstat (limited to 'src/basic/hashmap.c')
-rw-r--r-- | src/basic/hashmap.c | 125
1 file changed, 124 insertions, 1 deletion
diff --git a/src/basic/hashmap.c b/src/basic/hashmap.c
index fbc8d397a..876a10eeb 100644
--- a/src/basic/hashmap.c
+++ b/src/basic/hashmap.c
@@ -229,7 +229,8 @@ struct HashmapBase {
         unsigned n_direct_entries:3; /* Number of entries in direct storage.
                                       * Only valid if !has_indirect. */
         bool from_pool:1;            /* whether was allocated from mempool */
-        bool dirty:1;                /* whether dirtied since cache sync */
+        bool dirty:1;                /* whether dirtied since last iterated_cache_get() */
+        bool cached:1;               /* whether this hashmap is being cached */
         HASHMAP_DEBUG_FIELDS         /* optional hashmap_debug_info */
 };
 
@@ -249,6 +250,17 @@ struct Set {
         struct HashmapBase b;
 };
 
+typedef struct CacheMem {
+        const void **ptr;
+        size_t n_populated, n_allocated;
+        bool active:1;
+} CacheMem;
+
+struct IteratedCache {
+        HashmapBase *hashmap;
+        CacheMem keys, values;
+};
+
 DEFINE_MEMPOOL(hashmap_pool, Hashmap, 8);
 DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8); /* No need for a separate Set pool */
 
@@ -744,6 +756,25 @@ bool set_iterate(Set *s, Iterator *i, void **value) {
              (idx != IDX_NIL); \
             (idx) = hashmap_iterate_entry((h), &(i)))
 
+IteratedCache *internal_hashmap_iterated_cache_new(HashmapBase *h) {
+        IteratedCache *cache;
+
+        assert(h);
+        assert(!h->cached);
+
+        if (h->cached)
+                return NULL;
+
+        cache = new0(IteratedCache, 1);
+        if (!cache)
+                return NULL;
+
+        cache->hashmap = h;
+        h->cached = true;
+
+        return cache;
+}
+
 static void reset_direct_storage(HashmapBase *h) {
         const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
         void *p;
@@ -1870,3 +1901,95 @@ int set_put_strsplit(Set *s, const char *v, const char *separators, ExtractFlags
         }
 }
 #endif // 0
+
+/* expand the cachemem if needed, return true if newly (re)activated. */
+static int cachemem_maintain(CacheMem *mem, unsigned size) {
+        int r = false;
+
+        assert(mem);
+
+        if (!GREEDY_REALLOC(mem->ptr, mem->n_allocated, size)) {
+                if (size > 0)
+                        return -ENOMEM;
+        }
+
+        if (!mem->active)
+                mem->active = r = true;
+
+        return r;
+}
+
+int iterated_cache_get(IteratedCache *cache, const void ***res_keys, const void ***res_values, unsigned *res_n_entries) {
+        bool sync_keys = false, sync_values = false;
+        unsigned size;
+        int r;
+
+        assert(cache);
+        assert(cache->hashmap);
+
+        size = n_entries(cache->hashmap);
+
+        if (res_keys) {
+                r = cachemem_maintain(&cache->keys, size);
+                if (r < 0)
+                        return r;
+
+                sync_keys = r;
+        } else
+                cache->keys.active = false;
+
+        if (res_values) {
+                r = cachemem_maintain(&cache->values, size);
+                if (r < 0)
+                        return r;
+
+                sync_values = r;
+        } else
+                cache->values.active = false;
+
+        if (cache->hashmap->dirty) {
+                if (cache->keys.active)
+                        sync_keys = true;
+                if (cache->values.active)
+                        sync_values = true;
+
+                cache->hashmap->dirty = false;
+        }
+
+        if (sync_keys || sync_values) {
+                unsigned i, idx;
+                Iterator iter;
+
+                i = 0;
+                HASHMAP_FOREACH_IDX(idx, cache->hashmap, iter) {
+                        struct hashmap_base_entry *e;
+
+                        e = bucket_at(cache->hashmap, idx);
+
+                        if (sync_keys)
+                                cache->keys.ptr[i] = e->key;
+                        if (sync_values)
+                                cache->values.ptr[i] = entry_value(cache->hashmap, e);
+                        i++;
+                }
+        }
+
+        if (res_keys)
+                *res_keys = cache->keys.ptr;
+        if (res_values)
+                *res_values = cache->values.ptr;
+        if (res_n_entries)
+                *res_n_entries = size;
+
+        return 0;
+}
+
+IteratedCache *iterated_cache_free(IteratedCache *cache) {
+        if (cache) {
+                free(cache->keys.ptr);
+                free(cache->values.ptr);
+                free(cache);
+        }
+
+        return NULL;
+}
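
For orientation, below is a minimal usage sketch of the API this diff introduces; it is not part of the change itself. It assumes the matching hashmap.h declarations that accompany this commit (IteratedCache, a hashmap_iterated_cache_new() wrapper around internal_hashmap_iterated_cache_new(), iterated_cache_get() and iterated_cache_free()) and a hashmap whose keys and values are NUL-terminated strings; print_entries() is a hypothetical caller.

#include <errno.h>
#include <stdio.h>

#include "hashmap.h"

/* Hypothetical caller: dumps every entry of h via the iterated cache. */
static int print_entries(Hashmap *h) {
        IteratedCache *cache;
        const void **keys, **values;
        unsigned n, i;
        int r;

        /* Marks h as cached; per the diff, a second cache for the same
         * hashmap is refused. Assumes the hashmap_iterated_cache_new()
         * wrapper from hashmap.h. */
        cache = hashmap_iterated_cache_new(h);
        if (!cache)
                return -ENOMEM;

        /* The first call fills the flat key/value arrays; later calls return
         * the same arrays and only re-walk the hashmap if it was dirtied
         * since the previous call. */
        r = iterated_cache_get(cache, &keys, &values, &n);
        if (r < 0)
                goto finish;

        for (i = 0; i < n; i++)
                printf("%s -> %s\n", (const char*) keys[i], (const char*) values[i]);

        r = 0;
finish:
        iterated_cache_free(cache);
        return r;
}

Note how iterated_cache_get() clears the hashmap's dirty flag after a sync, so the cost of re-indexing is paid at most once per batch of modifications, and passing NULL for res_keys or res_values deactivates that half of the cache until it is requested again.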