#ifndef HASHMAP_H
#define HASHMAP_H

#include "hash-ll.h"

/*
 * Generic implementation of hash-based key-value mappings.
 *
 * An example that maps a long to a string:
 * For the sake of the example this also allows exact values to be looked up
 * (i.e. it is operated as a set, the value is part of the key)
 * -------------------------------------
 *
 * struct hashmap map;
 * struct long2string {
 *     struct hashmap_entry ent;
 *     long key;
 *     char value[FLEX_ARRAY]; // be careful with allocating on stack!
 * };
 *
 * #define COMPARE_VALUE 1
 *
 * static int long2string_cmp(const void *hashmap_cmp_fn_data,
 *                            const struct hashmap_entry *eptr,
 *                            const struct hashmap_entry *entry_or_key,
 *                            const void *keydata)
 * {
 *     const char *string = keydata;
 *     unsigned flags = *(unsigned *)hashmap_cmp_fn_data;
 *     const struct long2string *e1, *e2;
 *
 *     e1 = container_of(eptr, const struct long2string, ent);
 *     e2 = container_of(entry_or_key, const struct long2string, ent);
 *
 *     if (flags & COMPARE_VALUE)
 *         return e1->key != e2->key ||
 *                strcmp(e1->value, string ? string : e2->value);
 *     else
 *         return e1->key != e2->key;
 * }
 *
 * int main(int argc, char **argv)
 * {
 *     long key;
 *     char value[255], action[32];
 *     unsigned flags = 0;
 *
 *     hashmap_init(&map, long2string_cmp, &flags, 0);
 *
 *     while (scanf("%s %ld %s", action, &key, value)) {
 *
 *         if (!strcmp("add", action)) {
 *             struct long2string *e;
 *             FLEX_ALLOC_STR(e, value, value);
 *             hashmap_entry_init(&e->ent, memhash(&key, sizeof(long)));
 *             e->key = key;
 *             hashmap_add(&map, &e->ent);
 *         }
 *
 *         if (!strcmp("print_all_by_key", action)) {
 *             struct long2string k, *e;
 *             hashmap_entry_init(&k.ent, memhash(&key, sizeof(long)));
 *             k.key = key;
 *
 *             flags &= ~COMPARE_VALUE;
 *             e = hashmap_get_entry(&map, &k, ent, NULL);
 *             if (e) {
 *                 printf("first: %ld %s\n", e->key, e->value);
 *                 while ((e = hashmap_get_next_entry(&map, e, ent))) {
 *                     printf("found more: %ld %s\n", e->key, e->value);
 *                 }
 *             }
 *         }
 *
 *         if (!strcmp("has_exact_match", action)) {
 *             struct long2string *e;
 *             FLEX_ALLOC_STR(e, value, value);
 *             hashmap_entry_init(&e->ent, memhash(&key, sizeof(long)));
 *             e->key = key;
 *
 *             flags |= COMPARE_VALUE;
 *             printf("%sfound\n",
 *                    hashmap_get(&map, &e->ent, NULL) ? "" : "not ");
 *             free(e);
 *         }
 *
 *         if (!strcmp("has_exact_match_no_heap_alloc", action)) {
 *             struct long2string k;
 *             hashmap_entry_init(&k.ent, memhash(&key, sizeof(long)));
 *             k.key = key;
 *
 *             flags |= COMPARE_VALUE;
 *             printf("%sfound\n",
 *                    hashmap_get(&map, &k.ent, value) ? "" : "not ");
 *         }
 *
 *         if (!strcmp("end", action)) {
 *             hashmap_clear_and_free(&map, struct long2string, ent);
 *             break;
 *         }
 *     }
 *
 *     return 0;
 * }
 */

/*
 * Ready-to-use hash functions for strings, using the FNV-1 algorithm (see
 * http://www.isthe.com/chongo/tech/comp/fnv).
 * `strhash` and `strihash` take 0-terminated strings, while `memhash` and
 * `memihash` operate on arbitrary-length memory.
 * `strihash` and `memihash` are case insensitive versions.
 * `memihash_cont` is a variant of `memihash` that allows a computation to be
 * continued with another chunk of data.
 */
unsigned int strhash(const char *buf);
unsigned int strihash(const char *buf);
unsigned int memhash(const void *buf, size_t len);
unsigned int memihash(const void *buf, size_t len);
unsigned int memihash_cont(unsigned int hash_seed, const void *buf, size_t len);
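
/*
 * Illustrative sketch (not part of the API above): hashing data in chunks
 * with `memihash_cont` is expected to yield the same result as hashing the
 * concatenated buffer in a single `memihash` call:
 *
 *     unsigned int h = memihash("foo", 3);
 *     h = memihash_cont(h, "bar", 3);  // same as memihash("foobar", 6)
 */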

/*
 * Converts a cryptographic hash (e.g. SHA-1) into an int-sized hash code
 * for use in hash tables. Cryptographic hashes are supposed to have
 * uniform distribution, so in contrast to `memhash()`, this just copies
 * the first `sizeof(int)` bytes without shuffling any bits. Note that
 * the results will be different on big-endian and little-endian
 * platforms, so they should not be stored or transferred over the net.
 */
static inline unsigned int oidhash(const struct object_id *oid)
{
        /*
         * Equivalent to 'return *(unsigned int *)oid->hash;', but safe on
         * platforms that don't support unaligned reads.
         */
        unsigned int hash;
        memcpy(&hash, oid->hash, sizeof(hash));
        return hash;
}
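
/*
 * A minimal sketch of keying a map by object id (the entry type
 * `struct my_obj_entry` with members `ent` and `oid` is hypothetical):
 *
 *     struct my_obj_entry *e = xmalloc(sizeof(*e));
 *     oidcpy(&e->oid, &oid);
 *     hashmap_entry_init(&e->ent, oidhash(&e->oid));
 *     hashmap_add(&map, &e->ent);
 */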

/*
 * struct hashmap_entry is an opaque structure representing an entry in the
 * hash table.
 * Ideally it should be followed by an int-sized member to prevent unused
 * memory on 64-bit systems due to alignment.
 */
struct hashmap_entry {
        /*
         * next points to the next entry in case of collisions (i.e. if
         * multiple entries map to the same bucket)
         */
        struct hashmap_entry *next;

        /* entry's hash code */
        unsigned int hash;
};

/*
 * User-supplied function to test two hashmap entries for equality. Shall
 * return 0 if the entries are equal.
 *
 * This function is always called with non-NULL `entry` and `entry_or_key`
 * parameters that have the same hash code.
 *
 * When looking up an entry, the `key` and `keydata` parameters to hashmap_get
 * and hashmap_remove are always passed as the second argument
 * (`entry_or_key`) and the third argument (`keydata`), respectively.
 * Otherwise, `keydata` is NULL.
 *
 * When it is too expensive to allocate a user entry (either because it is
 * large or variable-sized, such that it cannot live on the stack), the
 * relevant data to check for equality should be passed via `keydata`.
 * In this case `key` can be a stripped down version of the user key data
 * or even just a hashmap_entry having the correct hash.
 *
 * The `hashmap_cmp_fn_data` entry is the pointer given in the init function.
 */
typedef int (*hashmap_cmp_fn)(const void *hashmap_cmp_fn_data,
                              const struct hashmap_entry *entry,
                              const struct hashmap_entry *entry_or_key,
                              const void *keydata);

/*
 * struct hashmap is the hash table structure. Members can be used as follows,
 * but should not be modified directly.
 */
struct hashmap {
        struct hashmap_entry **table;

        /* Stores the comparison function specified in `hashmap_init()`. */
        hashmap_cmp_fn cmpfn;
        const void *cmpfn_data;

        /* total number of entries (0 means the hashmap is empty) */
        unsigned int private_size; /* use hashmap_get_size() */

        /*
         * tablesize is the allocated size of the hash table. A non-0 value
         * indicates that the hashmap is initialized. It may also be useful
         * for statistical purposes (i.e. `size / tablesize` is the current
         * load factor).
         */
        unsigned int tablesize;

        unsigned int grow_at;
        unsigned int shrink_at;

        unsigned int do_count_items : 1;
};

/* hashmap functions */

#define HASHMAP_INIT(fn, data) { .cmpfn = fn, .cmpfn_data = data, \
                                 .do_count_items = 1 }
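
/*
 * Sketch: a map with static storage duration can be set up at compile time
 * with HASHMAP_INIT instead of calling hashmap_init() at run time
 * (the names below are hypothetical):
 *
 *     static struct hashmap config_map = HASHMAP_INIT(config_entry_cmp, NULL);
 */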

/*
 * Initializes a hashmap structure.
 *
 * `map` is the hashmap to initialize.
 *
 * The `equals_function` can be specified to compare two entries for equality.
 * If NULL, entries are considered equal if their hash codes are equal.
 *
 * The `equals_function_data` parameter can be used to provide additional data
 * (a callback cookie) that will be passed to `equals_function` each time it
 * is called. This allows a single `equals_function` to implement multiple
 * comparison functions.
 *
 * If the total number of entries is known in advance, the `initial_size`
 * parameter may be used to preallocate a sufficiently large table and thus
 * prevent expensive resizing. If 0, the table is dynamically resized.
 */
void hashmap_init(struct hashmap *map,
                  hashmap_cmp_fn equals_function,
                  const void *equals_function_data,
                  size_t initial_size);

/* internal functions for clearing or freeing hashmap */
void hashmap_partial_clear_(struct hashmap *map, ssize_t offset);
void hashmap_clear_(struct hashmap *map, ssize_t offset);

/*
 * Frees a hashmap structure and allocated memory for the table, but does not
 * free the entries nor anything they point to.
 *
 * Usage note:
 *
 * Many callers will need to iterate over all entries and free the data each
 * entry points to; in such a case, they can free the entry itself while at it.
 * Thus, you might see:
 *
 *     hashmap_for_each_entry(map, hashmap_iter, e, hashmap_entry_name) {
 *         free(e->somefield);
 *         free(e);
 *     }
 *     hashmap_clear(map);
 *
 * instead of
 *
 *     hashmap_for_each_entry(map, hashmap_iter, e, hashmap_entry_name) {
 *         free(e->somefield);
 *     }
 *     hashmap_clear_and_free(map, struct my_entry_struct, hashmap_entry_name);
 *
 * to avoid the implicit extra loop over the entries. However, if there are
 * no special fields in your entry that need to be freed beyond the entry
 * itself, it is probably simpler to avoid the explicit loop and just call
 * hashmap_clear_and_free().
 */
#define hashmap_clear(map) hashmap_clear_(map, -1)

/*
 * Similar to hashmap_clear(), except that the table is not deallocated; it
 * is merely zeroed out but left the same size as before. If the hashmap
 * will be reused, this avoids the overhead of deallocating and
 * reallocating map->table. As with hashmap_clear(), you may need to free
 * the entries yourself before calling this function.
 */
#define hashmap_partial_clear(map) hashmap_partial_clear_(map, -1)

/*
 * Similar to hashmap_clear() but also frees all entries. @type is the
 * struct type of the entry, and @member is the name of its hashmap_entry
 * member used to associate it with @map.
 *
 * See usage note above hashmap_clear().
 */
#define hashmap_clear_and_free(map, type, member) \
        hashmap_clear_(map, offsetof(type, member))

/*
 * Similar to hashmap_partial_clear() but also frees all entries. @type is
 * the struct type of the entry, and @member is the name of its hashmap_entry
 * member used to associate it with @map.
 *
 * See usage note above hashmap_clear().
 */
#define hashmap_partial_clear_and_free(map, type, member) \
        hashmap_partial_clear_(map, offsetof(type, member))

/* hashmap_entry functions */

/*
 * Initializes a hashmap_entry structure.
 *
 * `entry` points to the entry to initialize.
 * `hash` is the hash code of the entry.
 *
 * The hashmap_entry structure does not hold references to external resources,
 * and it is safe to just discard it once you are done with it (i.e. if
 * your structure was allocated with xmalloc(), you can just free(3) it,
 * and if it is on the stack, you can just let it go out of scope).
 */
static inline void hashmap_entry_init(struct hashmap_entry *e,
                                      unsigned int hash)
{
        e->hash = hash;
        e->next = NULL;
}

/*
 * Return the number of items in the map.
 */
static inline unsigned int hashmap_get_size(struct hashmap *map)
{
        if (map->do_count_items)
                return map->private_size;

        BUG("hashmap_get_size: size not set");
        return 0;
}

/*
 * Returns the hashmap entry for the specified key, or NULL if not found.
 *
 * `map` is the hashmap structure.
 *
 * `key` is a user data structure that starts with hashmap_entry that has at
 * least been initialized with the proper hash code (via `hashmap_entry_init`).
 *
 * `keydata` is a data structure that holds just enough information to check
 * for equality to a given entry.
 *
 * If the key data is variable-sized (e.g. a FLEX_ARRAY string) or quite large,
 * it is undesirable to create a full-fledged entry structure on the heap and
 * copy all the key data into the structure.
 *
 * In this case, the `keydata` parameter can be used to pass
 * variable-sized key data directly to the comparison function, and the `key`
 * parameter can be a stripped-down, fixed size entry structure allocated on the
 * stack.
 *
 * If an entry with matching hash code is found, `key` and `keydata` are passed
 * to `hashmap_cmp_fn` to decide whether the entry matches the key.
 */
struct hashmap_entry *hashmap_get(const struct hashmap *map,
                                  const struct hashmap_entry *key,
                                  const void *keydata);

/*
 * Returns the hashmap entry for the specified hash code and key data,
 * or NULL if not found.
 *
 * `map` is the hashmap structure.
 * `hash` is the hash code of the entry to look up.
 *
 * If an entry with matching hash code is found, `keydata` is passed to
 * `hashmap_cmp_fn` to decide whether the entry matches the key. The
 * `entry_or_key` parameter of `hashmap_cmp_fn` points to a hashmap_entry
 * structure that should not be used in the comparison.
 */
static inline struct hashmap_entry *hashmap_get_from_hash(
                const struct hashmap *map,
                unsigned int hash,
                const void *keydata)
{
        struct hashmap_entry key;
        hashmap_entry_init(&key, hash);
        return hashmap_get(map, &key, keydata);
}
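
/*
 * Sketch (assuming a map whose entries are keyed by NUL-terminated strings
 * hashed with `strhash`, and whose cmp function compares against `keydata`):
 *
 *     struct hashmap_entry *found =
 *             hashmap_get_from_hash(&map, strhash(name), name);
 */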

/*
 * Returns the next equal hashmap entry, or NULL if not found. This can be
 * used to iterate over duplicate entries (see `hashmap_add`).
 *
 * `map` is the hashmap structure.
 * `entry` is the hashmap_entry to start the search from, obtained via a
 * previous call to `hashmap_get` or `hashmap_get_next`.
 */
struct hashmap_entry *hashmap_get_next(const struct hashmap *map,
                                       const struct hashmap_entry *entry);

/*
 * Adds a hashmap entry. This allows adding duplicate entries (i.e.
 * separate values with the same key according to hashmap_cmp_fn).
 *
 * `map` is the hashmap structure.
 * `entry` is the entry to add.
 */
void hashmap_add(struct hashmap *map, struct hashmap_entry *entry);

/*
 * Adds or replaces a hashmap entry. If the hashmap contains duplicate
 * entries equal to the specified entry, only one of them will be replaced.
 *
 * `map` is the hashmap structure.
 * `entry` is the entry to add or replace.
 * Returns the replaced entry, or NULL if not found (i.e. the entry was added).
 */
struct hashmap_entry *hashmap_put(struct hashmap *map,
                                  struct hashmap_entry *entry);

/*
 * Adds or replaces a hashmap entry contained within @keyvar,
 * where @keyvar is a pointer to a struct containing a
 * "struct hashmap_entry" @member.
 *
 * Returns a pointer to the replaced entry, of the same type as @keyvar,
 * or NULL if not found (i.e. the entry was added).
 */
#define hashmap_put_entry(map, keyvar, member) \
        container_of_or_null_offset(hashmap_put(map, &(keyvar)->member), \
                                    OFFSETOF_VAR(keyvar, member))
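
/*
 * Typical replace-and-free pattern (sketch; `struct my_entry` and its
 * heap-allocated instances are hypothetical):
 *
 *     struct my_entry *old = hashmap_put_entry(&map, e, ent);
 *     free(old);  // non-NULL means an equal entry was replaced
 */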

/*
 * Removes a hashmap entry matching the specified key. If the hashmap contains
 * duplicate entries equal to the specified key, only one of them will be
 * removed. Returns the removed entry, or NULL if not found.
 *
 * Argument explanation is the same as in `hashmap_get`.
 */
struct hashmap_entry *hashmap_remove(struct hashmap *map,
                                     const struct hashmap_entry *key,
                                     const void *keydata);

/*
 * Removes a hashmap entry contained within @keyvar,
 * where @keyvar is a pointer to a struct containing a
 * "struct hashmap_entry" @member.
 *
 * See `hashmap_get` for an explanation of @keydata.
 *
 * Returns a pointer to the removed entry, of the same type as @keyvar,
 * or NULL if not found.
 */
#define hashmap_remove_entry(map, keyvar, member, keydata) \
        container_of_or_null_offset( \
                        hashmap_remove(map, &(keyvar)->member, keydata), \
                        OFFSETOF_VAR(keyvar, member))
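
/*
 * Sketch of removing and freeing a heap-allocated entry via a stack key
 * (hypothetical `struct my_entry` with hashmap_entry member `ent`, in a map
 * whose cmp function compares against the string passed as `keydata`):
 *
 *     struct my_entry k, *removed;
 *     hashmap_entry_init(&k.ent, strhash(name));
 *     removed = hashmap_remove_entry(&map, &k, ent, name);
 *     free(removed);  // NULL if nothing matched
 */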

/*
 * Returns the `bucket` an entry is stored in.
 * Useful for multithreaded read access.
 */
int hashmap_bucket(const struct hashmap *map, unsigned int hash);

/*
 * Used to iterate over all entries of a hashmap. Note that it is
 * not safe to add entries to or remove entries from the hashmap while
 * iterating.
 */
struct hashmap_iter {
        struct hashmap *map;
        struct hashmap_entry *next;
        unsigned int tablepos;
};

/* Initializes a `hashmap_iter` structure. */
void hashmap_iter_init(struct hashmap *map, struct hashmap_iter *iter);

/* Returns the next hashmap_entry, or NULL if there are no more entries. */
struct hashmap_entry *hashmap_iter_next(struct hashmap_iter *iter);

/* Initializes the iterator and returns the first entry, if any. */
static inline struct hashmap_entry *hashmap_iter_first(struct hashmap *map,
                                                       struct hashmap_iter *iter)
{
        hashmap_iter_init(map, iter);
        return hashmap_iter_next(iter);
}

/*
 * returns the first entry in @map using @iter, where the entry is of
 * @type (e.g. "struct foo") and @member is the name of the
 * "struct hashmap_entry" in @type
 */
#define hashmap_iter_first_entry(map, iter, type, member) \
        container_of_or_null(hashmap_iter_first(map, iter), type, member)

/* internal macro for hashmap_for_each_entry */
#define hashmap_iter_next_entry_offset(iter, offset) \
        container_of_or_null_offset(hashmap_iter_next(iter), offset)

/* internal macro for hashmap_for_each_entry */
#define hashmap_iter_first_entry_offset(map, iter, offset) \
        container_of_or_null_offset(hashmap_iter_first(map, iter), offset)

/*
 * iterate through @map using @iter, @var is a pointer to a type
 * containing a @member which is a "struct hashmap_entry"
 */
#define hashmap_for_each_entry(map, iter, var, member) \
        for (var = NULL, /* for systems without typeof */ \
             var = hashmap_iter_first_entry_offset(map, iter, \
                                                   OFFSETOF_VAR(var, member)); \
             var; \
             var = hashmap_iter_next_entry_offset(iter, \
                                                  OFFSETOF_VAR(var, member)))
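
/*
 * Iteration sketch (hypothetical `struct my_entry` with a hashmap_entry
 * member `ent` and a string field `name`):
 *
 *     struct hashmap_iter iter;
 *     struct my_entry *e;
 *
 *     hashmap_for_each_entry(&map, &iter, e, ent) {
 *         printf("%s\n", e->name);
 *     }
 */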

/*
 * returns a pointer of type matching @keyvar, or NULL if nothing found.
 * @keyvar is a pointer to a struct containing a
 * "struct hashmap_entry" @member.
 */
#define hashmap_get_entry(map, keyvar, member, keydata) \
        container_of_or_null_offset( \
                        hashmap_get(map, &(keyvar)->member, keydata), \
                        OFFSETOF_VAR(keyvar, member))

#define hashmap_get_entry_from_hash(map, hash, keydata, type, member) \
        container_of_or_null(hashmap_get_from_hash(map, hash, keydata), \
                             type, member)

/*
 * returns the next equal pointer to @var, or NULL if not found.
 * @var is a pointer of any type containing "struct hashmap_entry"
 * @member is the name of the "struct hashmap_entry" field
 */
#define hashmap_get_next_entry(map, var, member) \
        container_of_or_null_offset(hashmap_get_next(map, &(var)->member), \
                                    OFFSETOF_VAR(var, member))

/*
 * iterate @map starting from @var, where @var is a pointer of @type
 * and @member is the name of the "struct hashmap_entry" field in @type
 */
#define hashmap_for_each_entry_from(map, var, member) \
        for (; \
             var; \
             var = hashmap_get_next_entry(map, var, member))

/*
 * Disable item counting and automatic rehashing when adding/removing items.
 *
 * Normally, the hashmap keeps track of the number of items in the map
 * and uses it to dynamically resize it. This (both the counting and
 * the resizing) can cause problems when the map is being used by
 * threaded callers (because the hashmap code does not know about the
 * locking strategy used by the threaded callers and therefore does
 * not know how to protect the "private_size" counter).
 */
static inline void hashmap_disable_item_counting(struct hashmap *map)
{
        map->do_count_items = 0;
}

/*
 * Re-enable item counting when adding/removing items.
 * If counting is currently disabled, it will force a count of the items.
 * It WILL NOT automatically rehash the map.
 */
static inline void hashmap_enable_item_counting(struct hashmap *map)
{
        unsigned int n = 0;
        struct hashmap_iter iter;

        if (map->do_count_items)
                return;

        hashmap_iter_init(map, &iter);
        while (hashmap_iter_next(&iter))
                n++;

        map->do_count_items = 1;
        map->private_size = n;
}
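
/*
 * Sketch of the threaded pattern these helpers are meant for (the locking
 * used by the worker threads is the caller's concern and omitted here):
 *
 *     hashmap_disable_item_counting(&map);
 *     // ... threads add/remove entries under their own locking ...
 *     hashmap_enable_item_counting(&map);  // recounts items, does not rehash
 */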

/* String interning */

/*
 * Returns the unique, interned version of the specified string or data,
 * similar to the `String.intern` APIs in Java and .NET.
 * Interned strings remain valid for the entire lifetime of the process.
 *
 * Can be used as a `[x]strdup()` or `xmemdupz` replacement, except that
 * interned strings / data must not be modified or freed.
 *
 * Interned strings are best used for short strings with a high probability of
 * duplicates.
 *
 * Uses a hashmap to store the pool of interned strings.
 */
const void *memintern(const void *data, size_t len);
static inline const char *strintern(const char *string)
{
        return memintern(string, strlen(string));
}
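
/*
 * Interning sketch: equal inputs yield the same pointer, so many repeated
 * short strings can share one allocation and be compared by address:
 *
 *     const char *a = strintern("refs/heads/main");
 *     const char *b = strintern("refs/heads/main");
 *     // a == b; the interned string must not be modified or freed
 */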

#endif