Allow passing stack-allocated structs to ht_new

2025-09-07 00:22:44 -07:00
parent 9a1b271cfd
commit c6bdb38bb7

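For context, here is a minimal sketch of the usage this change enables: because ht_new now copies the configuration structs into the table (see the diff below), they can be stack-allocated and do not need to outlive the call. The header name "ht.h", the callback prototypes, and the helper functions below are assumptions inferred from this diff rather than the library's actual API.

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "ht.h" /* assumed header name for HTTable, ht_new, and friends */

/* Illustrative callbacks; the prototypes are inferred from how ht->fns.hash,
 * ht->fns.equal, ht->alloc.malloc and ht->alloc.free are invoked in this
 * file and may not match the real typedefs. */
static uint64_t str_hash(const void *key, void *user_data) {
    (void) user_data;
    uint64_t h = 0xcbf29ce484222325ULL; /* FNV-1a over the key string */
    for (const char *p = key; *p; ++p) {
        h ^= (unsigned char) *p;
        h *= 0x100000001b3ULL;
    }
    return h;
}

static bool str_equal(const void *a, const void *b, void *user_data) {
    (void) user_data;
    return strcmp(a, b) == 0;
}

static void *demo_malloc(size_t size, void *user_data) {
    (void) user_data;
    return malloc(size);
}

static void demo_free(void *ptr, void *user_data) {
    (void) user_data;
    free(ptr);
}

HTTable *make_string_table(void) {
    /* Before this commit these structs had to outlive the table, because
     * HTTable stored only pointers to them; now ht_new copies them, so
     * plain stack storage is fine. */
    HTTableFunctions fns = {
        .hash = str_hash,
        .equal = str_equal,
        .destroy_key = NULL,   /* keys are not owned by the table */
        .destroy_value = NULL, /* values are not owned by the table */
        .user_data = NULL,
    };
    HTAllocator alloc = {
        .malloc = demo_malloc,
        .free = demo_free,
        .user_data = NULL,
    };
    HTThreshold thresh = {
        .initial_size = 16,      /* illustrative numbers, not library defaults */
        .growth_factor = 2,
        .growth_threshold = 0.75,
        .shrink_threshold = 0.25,
    };
    return ht_new(&fns, &alloc, &thresh);
}

Alternatively, per the ht_new documentation, alloc can be NULL to fall back to the standard C malloc and free.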

@@ -59,9 +59,9 @@ typedef struct Bucket {
  * A hash table.
  */
 struct HTTable {
-    const HTTableFunctions *fns; //!< Set of functions to use for hashing, etc.
-    const HTAllocator *alloc; //!< Set of memory allocation functions.
-    const HTThreshold *thresh; //!< Thresholds for resizing, etc.
+    HTTableFunctions fns; //!< Set of functions to use for hashing, etc.
+    HTAllocator alloc; //!< Set of memory allocation functions.
+    HTThreshold thresh; //!< Thresholds for resizing, etc.
     size_t table_size; //<! Number of buckets.
     size_t count; //<! Number of elements.
@@ -75,7 +75,7 @@ struct HTTable {
  * @return The newly allocated memory, or NULL if an error occurred
  */
 static inline void *malloc_for_table(const HTTable *ht, size_t size) {
-    return ht->alloc->malloc(size, ht->alloc->user_data);
+    return ht->alloc.malloc(size, ht->alloc.user_data);
 }
 
 /**
@@ -99,7 +99,7 @@ static void *zeroed_malloc(const HTTable *ht, size_t size) {
  * @param ptr The pointer to free
  */
 static inline void free_for_table(const HTTable *ht, void *ptr) {
-    ht->alloc->free(ptr, ht->alloc->user_data);
+    ht->alloc.free(ptr, ht->alloc.user_data);
 }
 
 /**
@@ -108,8 +108,8 @@ static inline void free_for_table(const HTTable *ht, void *ptr) {
  * @param key The key to free
  */
 static inline void free_table_key(const HTTable *ht, void *key) {
-    if (ht->fns->destroy_key) {
-        ht->fns->destroy_key(key, ht->fns->user_data);
+    if (ht->fns.destroy_key) {
+        ht->fns.destroy_key(key, ht->fns.user_data);
     }
 }
@@ -119,13 +119,14 @@ static inline void free_table_key(const HTTable *ht, void *key) {
  * @param value The value to free
  */
 static inline void free_table_value(const HTTable *ht, void *value) {
-    if (ht->fns->destroy_value) {
-        ht->fns->destroy_value(value, ht->fns->user_data);
+    if (ht->fns.destroy_value) {
+        ht->fns.destroy_value(value, ht->fns.user_data);
     }
 }
 
 /**
- * Create and return a new hash table.
+ * Create and return a new hash table. A copy of all of the passed values is
+ * made.
  * @param fns The HTTableFunctions to use for the table
  * @param alloc The HTAllocator to use, or NULL to use the standard C malloc and
  * free
@@ -144,12 +145,12 @@ HTTable *ht_new(const HTTableFunctions *fns, const HTAllocator *alloc,
     if (!ht) {
         return NULL;
     }
-    ht->fns = fns;
-    ht->alloc = alloc;
-    ht->thresh = thresh;
+    ht->fns = *fns;
+    ht->alloc = *alloc;
+    ht->thresh = *thresh;
     ht->count = 0;
-    ht->table_size = ht->thresh->initial_size;
+    ht->table_size = ht->thresh.initial_size;
     ht->buckets = zeroed_malloc(ht, sizeof(Bucket *) * ht->table_size);
     if (!ht->buckets) {
         free_for_table(ht, ht);
@@ -281,7 +282,7 @@ bool ht_equal(const HTTable *ht1, const HTTable *ht2) {
  * @return The growth threshold
  */
 static inline size_t calculate_growth_threshold(HTTable *ht) {
-    return (size_t) (ht->table_size * ht->thresh->growth_threshold);
+    return (size_t) (ht->table_size * ht->thresh.growth_threshold);
 }
 
 /**
@@ -291,7 +292,7 @@ static inline size_t calculate_growth_threshold(HTTable *ht) {
  * @return The shrink threshold
  */
 static inline size_t calculate_shrink_threshold(HTTable *ht) {
-    return (size_t) (ht->table_size * ht->thresh->shrink_threshold);
+    return (size_t) (ht->table_size * ht->thresh.shrink_threshold);
 }
 
 /**
@@ -301,8 +302,8 @@ static inline size_t calculate_shrink_threshold(HTTable *ht) {
  * @return Wether the operation succeeded (true means success)
  */
 static bool resize_to(HTTable *ht, size_t new_size) {
-    if (new_size < ht->thresh->initial_size) {
-        new_size = ht->thresh->initial_size;
+    if (new_size < ht->thresh.initial_size) {
+        new_size = ht->thresh.initial_size;
     }
     if (ht->table_size == new_size) {
         return true;
@@ -331,11 +332,11 @@ static bool resize_to(HTTable *ht, size_t new_size) {
 static bool maybe_resize(HTTable *ht, size_t delta) {
     size_t new_count = ht->count + delta;
     if (new_count >= calculate_growth_threshold(ht)) {
-        size_t new_size = ht->table_size * ht->thresh->growth_factor;
+        size_t new_size = ht->table_size * ht->thresh.growth_factor;
         return resize_to(ht, new_size);
     } else if (new_count <= calculate_shrink_threshold(ht)
-               && ht->table_size != ht->thresh->initial_size) {
-        size_t new_size = ht->table_size / ht->thresh->growth_factor;
+               && ht->table_size != ht->thresh.initial_size) {
+        size_t new_size = ht->table_size / ht->thresh.growth_factor;
         return resize_to(ht, new_size);
     }
     return true;
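To make the resize arithmetic above concrete (illustrative numbers, not the library's defaults): with initial_size = 16, growth_factor = 2, growth_threshold = 0.75 and shrink_threshold = 0.25, a 16-bucket table grows to 32 buckets once the element count reaches 12 (16 × 0.75), and a 32-bucket table shrinks back to 16 once the count falls to 8 (32 × 0.25); resize_to never drops below initial_size.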
@@ -352,7 +353,7 @@ static bool maybe_resize(HTTable *ht, size_t delta) {
 static bool bucket_matches(const HTTable *ht, Bucket *bucket, uint64_t hash,
                            const void *key) {
     return bucket && bucket->hash == hash
-           && ht->fns->equal(bucket->key, key, ht->fns->user_data);
+           && ht->fns.equal(bucket->key, key, ht->fns.user_data);
 }
 
 /**
@@ -387,7 +388,7 @@ static Bucket **find_bucket(const HTTable *ht, uint64_t hash, const void *key) {
  * @return The key's hash
  */
 static inline uint64_t hash_for_table(const HTTable *ht, const void *key) {
-    return ht->fns->hash(key, ht->fns->user_data);
+    return ht->fns.hash(key, ht->fns.user_data);
 }
 
 /**
@@ -436,7 +437,7 @@ bool ht_clear(HTTable *ht) {
         free_bucket(ht, bucket);
     }
     free_for_table(ht, ht->buckets);
-    ht->table_size = ht->thresh->initial_size;
+    ht->table_size = ht->thresh.initial_size;
     ht->buckets = zeroed_malloc(ht, sizeof(Bucket) * ht->table_size);
     ht->count = 0;
     if (!ht->buckets) {
@@ -544,7 +545,7 @@ bool ht_steal_all(HTTable *ht, void ***keys, void ***values) {
         }
     }
     Bucket **new_buckets =
-        zeroed_malloc(ht, sizeof(Bucket) * ht->thresh->initial_size);
+        zeroed_malloc(ht, sizeof(Bucket) * ht->thresh.initial_size);
     if (!new_buckets) {
         if (keys) {
             free_for_table(ht, *keys);
@@ -556,7 +557,7 @@ bool ht_steal_all(HTTable *ht, void ***keys, void ***values) {
     }
     size_t old_table_size = ht->table_size;
     Bucket **old_buckets = ht->buckets;
-    ht->table_size = ht->thresh->initial_size;
+    ht->table_size = ht->thresh.initial_size;
     ht->count = 0;
     ht->buckets = new_buckets;
     size_t out_i = 0;
@@ -731,7 +732,7 @@ bool ht_find(HTTable *ht, ht_foreach_callback_t callback, void *user_data,
  */
 bool ht_steal_from(HTTable *ht, HTTable *src) {
     Bucket **new_buckets =
-        zeroed_malloc(src, sizeof(Bucket) * src->thresh->initial_size);
+        zeroed_malloc(src, sizeof(Bucket) * src->thresh.initial_size);
     if (!new_buckets) {
         return NULL;
     }
@@ -739,7 +740,7 @@ bool ht_steal_from(HTTable *ht, HTTable *src) {
     Bucket **old_buckets = src->buckets;
     src->buckets = new_buckets;
     src->count = 0;
-    src->table_size = src->thresh->initial_size;
+    src->table_size = src->thresh.initial_size;
     bool no_errors = true;
     for (size_t i = 0; i < old_table_size; ++i) {