/**
|
|
* @file
|
|
* Reference counting subroutines.
|
|
*/
|
|
#include "refcount/refcount.h"
|
|
|
|
#include "refcount/allocator.h"
|
|
|
|
#include <stdbool.h>
|
|
#include <stdlib.h>
|
|
|
|
/**
|
|
* Internal magic macro.
|
|
*/
|
|
#define ENTRY (REFCOUNT_OBJECT_ENTRY(ctx, obj))
|
|
|
|
// Some utility macros for threads
|
|
#ifdef REFCOUNT_HAS_THREADS
|
|
# define lock_mutex(m) (mtx_lock(m) == thrd_success)
|
|
# define unlock_mutex(m) (mtx_unlock(m))
|
|
# define store_wr_count(obj, c) \
|
|
atomic_store_explicit(obj, (c), memory_order_relaxed)
|
|
# define inc_wr_count(obj) \
|
|
(atomic_fetch_add_explicit(obj, 1, memory_order_relaxed))
|
|
# define dec_wr_count(obj) \
|
|
(atomic_fetch_sub_explicit(obj, 1, memory_order_relaxed))
|
|
# define lock_entry_mtx(obj) (lock_mutex(&obj->weak_ref->mtx))
|
|
# define unlock_entry_mtx(obj) (unlock_mutex(&obj->weak_ref->mtx))
|
|
# define store_flag_maybe_atomic(obj, value) \
|
|
(atomic_store_explicit(obj, value, memory_order_relaxed))
|
|
#else
|
|
# define lock_mutex(m) true
|
|
# define unlock_mutex(m) true
|
|
# define store_wr_count(obj, c) (*(obj) = (c))
|
|
# define inc_wr_count(obj) ((*(obj))++)
|
|
# define dec_wr_count(obj) ((*(obj))--)
|
|
# define lock_entry_mtx(obj) true
|
|
# define unlock_entry_mtx(obj) true
|
|
# define store_flag_maybe_atomic(obj, value) (*(obj) = (value))
|
|
#endif
|
|
|
|
/**
|
|
* Default context to use for functions that do not take a context. You must
|
|
* initialize this before use.
|
|
*
|
|
* > The default value of this is NULL.
|
|
*/
|
|
RefcountContext *refcount_default_context = NULL;
|
|
|
|
/**
|
|
* Create a new context.
|
|
* @param entry_offset The offset to the #RefcountEntry member
|
|
* @param held_refs_callback A function that will list all references held by an
|
|
* object
|
|
* @param destroy_callback A function to be called when an object's reference
|
|
* count drops to zero
|
|
* @param user_data Extra data to pass to the callbacks
|
|
* @param alloc The #RefcountAllocator to use for all internal allocations
|
|
* @return The new context, or NULL in the case of an error
|
|
*/
|
|
RefcountContext *
|
|
refcount_make_context(size_t entry_offset,
|
|
refcount_held_refs_callback_t held_refs_callback,
|
|
refcount_destroy_callback_t destroy_callback,
|
|
void *user_data, const RefcountAllocator *alloc) {
|
|
if (!alloc) {
|
|
alloc = refcount_global_allocator;
|
|
}
|
|
RefcountContext *ctx = refcount_malloc(alloc, sizeof(RefcountContext));
|
|
if (!ctx) {
|
|
return NULL;
|
|
}
|
|
#ifdef REFCOUNT_HAS_THREADS
|
|
if (mtx_init(&ctx->so_mtx, mtx_plain) != thrd_success) {
|
|
refcount_free(alloc, ctx);
|
|
return NULL;
|
|
}
|
|
if (mtx_init(&ctx->gr_mtx, mtx_recursive) != thrd_success) {
|
|
refcount_free(alloc, ctx);
|
|
mtx_destroy(&ctx->so_mtx);
|
|
return NULL;
|
|
}
|
|
#endif
|
|
ctx->entry_offset = entry_offset;
|
|
ctx->held_refs_callback = held_refs_callback;
|
|
ctx->destroy_callback = destroy_callback;
|
|
ctx->user_data = user_data;
|
|
ctx->alloc = *alloc;
|
|
refcount_allocator_to_ht_allocator(&ctx->alloc, &ctx->ht_alloc);
|
|
|
|
ctx->static_objects = NULL;
|
|
ctx->gc_roots = NULL;
|
|
ctx->doing_gc = false;
|
|
return ctx;
|
|
}
|
|
|
|
/**
|
|
* Callback that deinits a static, but sets its static_entry field to NULL
|
|
* first. Used in #refcount_context_destroy.
|
|
*/
|
|
static void deinit_static_for_context_destroy(void *obj, void *ctx_raw) {
|
|
RefcountContext *ctx = ctx_raw;
|
|
ENTRY->impl.static_entry = NULL;
|
|
refcount_context_deinit_static(ctx, obj);
|
|
}
|
|
|
|
/**
|
|
* Cleanup a #RefcountContext and free any associated resources. This first
|
|
* frees all static objects, then runs the garbage collector.
|
|
* @param ctx The #RefcountContext
|
|
*/
|
|
void refcount_context_destroy(RefcountContext *ctx) {
    // Deinit every registered static object. The helper clears each
    // object's static_entry first so deinit does not try to unlink it from
    // the very list being freed here.
    refcount_list_free_with_data_full(ctx->static_objects,
                                      deinit_static_for_context_destroy, ctx,
                                      &ctx->alloc);

    // collect any remaining reference cycles before tearing down
    refcount_context_garbage_collect(ctx);

#ifdef REFCOUNT_HAS_THREADS
    mtx_destroy(&ctx->so_mtx);
    mtx_destroy(&ctx->gr_mtx);
#endif

    // ctx was allocated from the allocator it carries a copy of
    refcount_free(&ctx->alloc, ctx);
}
|
|
|
|
/**
|
|
* Initialize the weak_ref field on an object.
|
|
* @param ctx The #RefcountContext
|
|
* @param obj The object
|
|
* @return True on success
|
|
*/
|
|
static bool init_obj_weakref(const RefcountContext *ctx, void *obj) {
    ENTRY->weak_ref = refcount_malloc(&ctx->alloc, sizeof(RefcountWeakref));
    if (!ENTRY->weak_ref) {
        return false;
    }
    // data points back at the owning object; it is set to NULL when the
    // owner dies, which is how weakref validity is detected
    ENTRY->weak_ref->data = obj;
#ifdef REFCOUNT_HAS_THREADS
    if (mtx_init(&ENTRY->weak_ref->mtx, mtx_recursive) != thrd_success) {
        refcount_free(&ctx->alloc, ENTRY->weak_ref);
        return false;
    }
#endif
    // the object itself holds the first reference on its own weakref
    store_wr_count(&ENTRY->weak_ref->ref_count, 1);
    return true;
}
|
|
|
|
/**
|
|
* @struct DestructorEntry
|
|
* Hash table entry used to hold destructor callback information.
|
|
*/
|
|
struct DestructorEntry {
    refcount_destructor_callback_t callback; //!< The callback itself.
    // Entries are allocated from the context's allocator and freed by
    // free_destructor_entry_callback when removed from the table.
    void *user_data; //!< User specified data to pass to the callback.
};
|
|
|
|
/**
|
|
* Free a #DestructorEntry using the given context.
|
|
* @param entry The entry to free
|
|
* @param ctx_raw The #RefcountContext
|
|
*/
|
|
static void free_destructor_entry_callback(void *entry, void *ctx_raw) {
|
|
const RefcountContext *ctx = ctx_raw;
|
|
refcount_free(&ctx->alloc, entry);
|
|
}
|
|
|
|
/**
|
|
* Initialize the destructor table for an object.
|
|
* @param ctx The #RefcountContext
|
|
* @param obj The object to initialize
|
|
* @return True on success
|
|
*/
|
|
static bool init_obj_destructor_table(const RefcountContext *ctx, void *obj) {
    ENTRY->destructors = ht_new(
        &(HTTableFunctions) {
            // keys are caller-supplied opaque pointers, compared by identity
            .equal = ht_intptr_equal_callback,
            .hash = ht_intptr_hash_callback,
            .destroy_key = NULL,
            // values are DestructorEntry allocations owned by the table
            .destroy_value = free_destructor_entry_callback,
            .user_data = (void *) ctx,
        },
        &ctx->ht_alloc, NULL);
    // ht_new returns NULL on failure; the pointer doubles as success flag
    return ENTRY->destructors;
}
|
|
|
|
/**
|
|
* Initialize the #RefcountEntry member of an object. After this call, the
|
|
* object will have a reference count of 1. Note that it is not safe to call
|
|
* this multiple times on the same object.
|
|
* @param ctx The #RefcountContext
|
|
* @param obj The object to initialize
|
|
* @return True on success, false on failure. Note that you don't have to do
|
|
* anything special to clean up the #RefcountEntry structure on failure and it
|
|
* is safe to call this a second time on the same object (though it will
|
|
* probably fail for the same reason).
|
|
*/
|
|
bool refcount_context_init_obj(const RefcountContext *ctx, void *obj) {
|
|
if (obj) {
|
|
ENTRY->is_static = false;
|
|
ENTRY->impl.counted.gc_root = NULL;
|
|
ENTRY->impl.counted.ref_count = 1;
|
|
if (!init_obj_destructor_table(ctx, obj)) {
|
|
return false;
|
|
}
|
|
if (!init_obj_weakref(ctx, obj)) {
|
|
ht_free(ENTRY->destructors);
|
|
return false;
|
|
}
|
|
}
|
|
return true;
|
|
}
|
|
|
|
/**
|
|
* Register a static object in a context.
|
|
* @param ctx The #RefcountContext
|
|
* @param obj The object to register
|
|
* @return True on success, false otherwise
|
|
*/
|
|
bool refcount_context_init_static(RefcountContext *ctx, void *obj) {
    if (!lock_mutex(&ctx->so_mtx)) {
        return false;
    }
    bool success = false;
    ENTRY->is_static = true;
    if (!init_obj_destructor_table(ctx, obj)) {
        goto end;
    }
    if (!init_obj_weakref(ctx, obj)) {
        // The destructor table is a hash table: release it with ht_free,
        // matching refcount_context_init_obj. (The original used
        // refcount_free here, leaking the table's internal storage.)
        ht_free(ENTRY->destructors);
        goto end;
    }
    RefcountList *new_static_objects =
        refcount_list_push_full(ctx->static_objects, obj, &ctx->alloc);
    if (!new_static_objects) {
        // Roll back everything initialized above so a failed call leaves
        // the object untouched (the original leaked both pieces here).
        // The weakref count is still 1, so it can be torn down directly.
        ht_free(ENTRY->destructors);
#ifdef REFCOUNT_HAS_THREADS
        mtx_destroy(&ENTRY->weak_ref->mtx);
#endif
        refcount_free(&ctx->alloc, ENTRY->weak_ref);
        goto end;
    }
    ctx->static_objects = new_static_objects;
    // remember our list node so deinit can unlink it in O(1)
    ENTRY->impl.static_entry = ctx->static_objects;
    success = true;
end:
    unlock_mutex(&ctx->so_mtx);
    return success;
}
|
|
|
|
/**
|
|
* Return the references held by an object.
|
|
* @param ctx The #RefcountContext
|
|
* @param obj The object
|
|
* @param refs Where to store the refs
|
|
* @return True on success
|
|
*/
|
|
static inline bool obj_held_refs(const RefcountContext *ctx, void *obj,
|
|
RefcountList **refs) {
|
|
if (ctx->held_refs_callback) {
|
|
return ctx->held_refs_callback(obj, refs, ctx->user_data);
|
|
}
|
|
return true;
|
|
}
|
|
|
|
/**
|
|
* Remove a reference from a weakref, possibly freeing it if its reference
|
|
* count falls to zero.
|
|
* @param ctx The #RefcountContext
|
|
* @param wr The weak reference
|
|
*/
|
|
static void unref_weakref(const RefcountContext *ctx, RefcountWeakref *wr) {
    // dec_wr_count returns the value *before* decrementing (fetch_sub /
    // post-decrement), so 1 means the count has just fallen to zero
    if (dec_wr_count(&wr->ref_count) == 1) {
#ifdef REFCOUNT_HAS_THREADS
        mtx_destroy(&wr->mtx);
#endif
        refcount_free(&ctx->alloc, wr);
    }
}
|
|
|
|
/**
|
|
* Used to pass two values to #call_object_destructors_foreach_callback.
|
|
*/
|
|
struct ContextAndObject {
    const RefcountContext *ctx; //!< The #RefcountContext.
    void *obj; //!< The object whose destructors are being run.
};
|
|
|
|
/**
|
|
* Callback used from #call_object_destructors.
|
|
* @param key The hash table key
|
|
* @param entry_raw The #DestructorEntry
|
|
* @param ctx_and_obj_raw The #ContextAndObject
|
|
* @return True if the object's refcount is non-zero, false otherwise
|
|
*/
|
|
static bool call_object_destructors_foreach_callback(void *key, void *entry_raw,
                                                     void *ctx_and_obj_raw) {
    // key is intentionally unused: every registered destructor runs,
    // regardless of the key it was registered under
    struct ContextAndObject *ctx_and_obj = ctx_and_obj_raw;
    const struct DestructorEntry *entry = entry_raw;
    entry->callback(ctx_and_obj->obj, entry->user_data);
    // if the refcount has increased past 0, stop looping
    // (a true return here makes ht_foreach terminate early, consistent with
    // the other foreach callbacks in this file)
    return refcount_context_num_refs(ctx_and_obj->ctx, ctx_and_obj->obj);
}
|
|
|
|
/**
|
|
* Call destructors for an object.
|
|
* @param ctx The #RefcountContext
|
|
* @param obj The object to call destructors for
|
|
*/
|
|
static void call_object_destructors(const RefcountContext *ctx, void *obj) {
|
|
ht_foreach(ENTRY->destructors, call_object_destructors_foreach_callback,
|
|
&(struct ContextAndObject) {.ctx = ctx, .obj = obj});
|
|
}
|
|
|
|
/**
|
|
* Unregister a static object in a context.
|
|
* @param ctx The #RefcountContext
|
|
* @param obj The object to unregister
|
|
* @return True on success, false otherwise
|
|
*/
|
|
bool refcount_context_deinit_static(RefcountContext *ctx, void *obj) {
    if (!lock_mutex(&ctx->so_mtx)) {
        return false;
    }
    bool success = false;
    // only objects registered via refcount_context_init_static qualify
    if (!refcount_context_is_static(ctx, obj)) {
        goto end;
    }
    // collect the references this object holds; they are released only
    // after the object itself has been torn down
    RefcountList *held_refs = NULL;
    if (!obj_held_refs(ctx, obj, &held_refs)) {
        goto end;
    }
    if (!lock_entry_mtx(ENTRY)) {
        // can't safely touch the entry: free the collected list and bail
        refcount_list_free_full(held_refs, NULL, &ctx->alloc);
        goto end;
    }
    // invalidate outstanding weak references before running destructors
    ENTRY->weak_ref->data = NULL;
    call_object_destructors(ctx, obj);
    ht_free(ENTRY->destructors);
    unref_weakref(ctx, ENTRY->weak_ref);
    unlock_entry_mtx(ENTRY);
    // this is set to null if we are destroying the context
    if (ENTRY->impl.static_entry) {
        ctx->static_objects = refcount_list_remove_full(
            ctx->static_objects, ENTRY->impl.static_entry, NULL, &ctx->alloc);
    }
    // finally drop every reference the object was holding
    refcount_list_free_with_data_full(
        held_refs, refcount_context_unref_as_callback, ctx, &ctx->alloc);
    success = true;
end:
    unlock_mutex(&ctx->so_mtx);
    return success;
}
|
|
|
|
/**
|
|
* Increment the reference count of an object.
|
|
* @param ctx The #RefcountContext
|
|
* @param obj The object to reference
|
|
* @return The input object
|
|
*/
|
|
void *refcount_context_ref(const RefcountContext *ctx, void *obj) {
|
|
if (!obj) {
|
|
return NULL;
|
|
}
|
|
if (!lock_entry_mtx(ENTRY)) {
|
|
return obj;
|
|
}
|
|
if (!ENTRY->is_static) {
|
|
++ENTRY->impl.counted.ref_count;
|
|
}
|
|
unlock_entry_mtx(ENTRY);
|
|
return obj;
|
|
}
|
|
|
|
/**
|
|
* Track an object as a GC root in a context. It is safe to call this on an
|
|
* already tracked object.
|
|
* @param ctx the #RefcountContext
|
|
* @param obj The object to track
|
|
* @return True on success
|
|
*/
|
|
static bool track_gc_root(RefcountContext *ctx, void *obj) {
    if (!lock_mutex(&ctx->gr_mtx)) {
        return false;
    }
    bool success = false;
    // already-tracked objects are left alone (gc_root points at their node)
    if (!ENTRY->impl.counted.gc_root) {
        // Push through a temporary so an allocation failure does not
        // clobber ctx->gc_roots. (The original assigned the push result
        // directly, losing the entire root list on failure.)
        RefcountList *new_roots =
            refcount_list_push_full(ctx->gc_roots, obj, &ctx->alloc);
        if (!new_roots) {
            goto end;
        }
        ctx->gc_roots = new_roots;
        // remember the node so remove_gc_root can unlink in O(1)
        ENTRY->impl.counted.gc_root = ctx->gc_roots;
    }
    success = true;
end:
    unlock_mutex(&ctx->gr_mtx);
    return success;
}
|
|
|
|
/**
|
|
* Remove an object from the GC root list of a context. It is safe to call this
|
|
* on an object that is not currently tracked.
|
|
* @param ctx The #RefcountContext
|
|
* @param obj The object to untrack
|
|
*/
|
|
static void remove_gc_root(RefcountContext *ctx, void *obj) {
    // silently does nothing if the mutex cannot be taken
    if (lock_mutex(&ctx->gr_mtx)) {
        // NOTE(review): when the object is not tracked, gc_root is NULL;
        // this relies on refcount_list_remove_full treating a NULL node as
        // a no-op -- confirm against the list implementation
        ctx->gc_roots = refcount_list_remove_full(
            ctx->gc_roots, ENTRY->impl.counted.gc_root, NULL, &ctx->alloc);
        ENTRY->impl.counted.gc_root = NULL;
        unlock_mutex(&ctx->gr_mtx);
    }
}
|
|
|
|
/**
|
|
* Decrement the reference count of an object. If the reference count dropped to
|
|
* zero, add it to the queue. It is safe to call this on a static object.
|
|
* @param ctx The #RefcountContext
|
|
* @param obj The object to unref
|
|
* @param queue A double pointer to a #RefcountList acting as a queue
|
|
* @return NULL if the reference count fell to 0, the given object otherwise
|
|
*/
|
|
static void *unref_to_queue(RefcountContext *ctx, void *obj,
                            RefcountList **queue) {
    if (!obj) {
        return NULL;
    } else if (ENTRY->is_static) {
        // static objects are never reference counted
        return obj;
    }
    if (!lock_entry_mtx(ENTRY)) {
        // if this fails, we prefer a memory leak to causing undefined behavior
        // and possibly crashing
        return obj;
    }
    if (ENTRY->impl.counted.ref_count <= 1) {
        // last reference: run destructors, which may resurrect the object
        // by taking a new reference
        ENTRY->impl.counted.ref_count = 0;
        call_object_destructors(ctx, obj);
        if (!ENTRY->impl.counted.ref_count) {
            // if we still have no refs after calling destructors, it's really
            // time to free this object
            // NOTE(review): if this push fails it returns NULL and any
            // previously queued objects are lost -- consistent with the
            // leak-over-crash policy above, but worth confirming
            *queue = refcount_list_push_full(*queue, obj, &ctx->alloc);
        }
        unlock_entry_mtx(ENTRY);
        return NULL;
    } else {
        --ENTRY->impl.counted.ref_count;
        // the object survives with a lower count; it may now sit in an
        // unreachable cycle, so record it as a GC root candidate
        track_gc_root(ctx, obj);
        unlock_entry_mtx(ENTRY);
        return obj;
    }
}
|
|
|
|
/**
|
|
* A pair of a context and double pointer to a queue. This is for internal use.
|
|
* @see unref_to_queue_as_callback
|
|
*/
|
|
struct ContextAndQueue {
    RefcountContext *ctx; //!< The context.
    RefcountList **queue; //!< The queue of objects pending destruction.
};
|
|
|
|
/**
|
|
* Unref an object using a combined context and queue parameter.
|
|
* @param obj The object
|
|
* @param ctx_and_queue_raw The context and queue
|
|
*/
|
|
static void unref_to_queue_as_callback(void *obj, void *ctx_and_queue_raw) {
|
|
struct ContextAndQueue *ctx_and_queue = ctx_and_queue_raw;
|
|
unref_to_queue(ctx_and_queue->ctx, obj, ctx_and_queue->queue);
|
|
}
|
|
|
|
/**
|
|
* Destroy an object by calling its destructor.
|
|
* @param ctx The #RefcountContext
|
|
* @param obj The object to destroy
|
|
*/
|
|
static inline void destroy_object(RefcountContext *ctx, void *obj) {
    if (!lock_entry_mtx(ENTRY)) {
        return;
    }
    remove_gc_root(ctx, obj);
    // invalidate weak references before the storage goes away
    ENTRY->weak_ref->data = NULL;
    ht_free(ENTRY->destructors);
    unlock_entry_mtx(ENTRY);
    // NOTE(review): ENTRY->weak_ref is read after the entry mutex is
    // released; safe only if nothing else mutates the entry at this point
    // -- confirm
    unref_weakref(ctx, ENTRY->weak_ref);
    // the user's destroy callback performs the actual free of obj
    if (ctx->destroy_callback) {
        ctx->destroy_callback(obj, ctx->user_data);
    }
}
|
|
|
|
/**
|
|
* Continually release held references and objects held in a queue.
|
|
* @param ctx The #RefcountContext
|
|
* @param queue The queue
|
|
* @param toplevel Toplevel object that triggered the unref
|
|
*/
|
|
static void process_unref_queue(RefcountContext *ctx, RefcountList *queue,
                                void *toplevel) {
    // toplevel is currently unused in the body; kept for the interface
    struct ContextAndQueue ctx_and_queue = {.ctx = ctx, .queue = &queue};
    // breadth-first teardown: each destroyed object may enqueue the objects
    // it was holding references to
    while (queue) {
        void *cur = refcount_list_peek(queue);
        RefcountList *held_refs = NULL;
        queue = refcount_list_pop_full(queue, NULL, &ctx->alloc);
        if (!cur) {
            continue;
        }
        if (obj_held_refs(ctx, cur, &held_refs)) {
            // I don't really know how else to handle this as I can't think of a
            // good way to undo all the unrefs that have already been processed,
            // so we can't really make this atomic without going over all
            // objects twice.
            refcount_list_free_with_data_full(held_refs,
                                              unref_to_queue_as_callback,
                                              &ctx_and_queue, &ctx->alloc);
        }
        destroy_object(ctx, cur);
    }
}
|
|
|
|
/**
|
|
* Decrement the reference count of a object. It is safe to call this on a
|
|
* static object.
|
|
* @param ctx The #RefcountContext
|
|
* @param obj The object
|
|
* @return NULL if the object's reference counter fell to 0, otherwise the
|
|
* object
|
|
*/
|
|
void *refcount_context_unref(RefcountContext *ctx, void *obj) {
    if (!obj) {
        return NULL;
    }
    // Drop one reference; if the count reached zero the object lands on
    // `pending`, and the queue walk below tears down it and everything it
    // transitively held.
    RefcountList *pending = NULL;
    void *result = unref_to_queue(ctx, obj, &pending);
    process_unref_queue(ctx, pending, obj);
    return result;
}
|
|
|
|
/**
|
|
* Set of hash table functions used in #check_gc_root.
|
|
*/
|
|
static const HTTableFunctions ROOT_COUNTS_FNS = {
    // keys are object pointers hashed/compared by identity
    .hash = ht_intptr_hash_callback,
    .equal = ht_intptr_equal_callback,
    // the table owns neither keys nor values (values are stuffed integers)
    .destroy_key = NULL,
    .destroy_value = NULL,
    .user_data = NULL,
};
|
|
|
|
/**
|
|
* Holds data for #free_roots_foreach, used in #check_gc_root.
|
|
*/
|
|
struct ContextAndRootPtr {
    RefcountContext *ctx; //!< The context.
    RefcountList **root_ptr; //!< Double pointer to the root.
    bool did_update; //!< Whether or not *root_ptr was changed.
};
|
|
|
|
/**
|
|
* Foreach function to free roots from a hash table. Used in #check_gc_root.
|
|
* @param obj The object to free
|
|
* @param ignored Ignored
|
|
* @param user_data A #ContextAndRootPtr
|
|
* @return Always false
|
|
*/
|
|
static bool free_roots_foreach(void *obj, void *ignored, void *user_data) {
    // ignored is the (unused) hash table value
    if (!obj) {
        return false;
    }
    struct ContextAndRootPtr *data = user_data;
    // if this object is the root the caller is currently iterating over,
    // advance the caller's cursor past it before the node is destroyed
    if (*data->root_ptr
        && *data->root_ptr
        == REFCOUNT_OBJECT_ENTRY(data->ctx, obj)->impl.counted.gc_root) {
        *data->root_ptr = (*data->root_ptr)->next;
        data->did_update = true;
    }
    destroy_object(data->ctx, obj);
    // always false so ht_foreach visits every object in the table
    return false;
}
|
|
|
|
/**
|
|
* @struct ContextAndFlag
|
|
* #RefcountContext and a boolean flag.
|
|
*/
|
|
struct ContextAndFlag {
    const RefcountContext *ctx; //!< The context.
    bool flag; //!< Set when a destructor resurrected an object.
};
|
|
|
|
/**
|
|
* Call the destructors for an object. Used from #call_destructors_for_gc.
|
|
* @param obj The object
|
|
* @param ignored Ignored
|
|
* @param ctx_and_flag_raw A #ContextAndFlag. The flag is set to the same value as the
|
|
* return value.
|
|
* @return True if the object's reference count increased above 0
|
|
*/
|
|
static bool call_destructors_for_gc_callback(void *obj, void *ignored,
                                             void *ctx_and_flag_raw) {
    // ignored is the stuffed count value; only the key (object) matters
    struct ContextAndFlag *ctx_and_flag = ctx_and_flag_raw;
    const RefcountContext *ctx = ctx_and_flag->ctx;
    // temporarily zero the count so destructors observe a dying object
    uint64_t old_ref_count = ENTRY->impl.counted.ref_count;
    ENTRY->impl.counted.ref_count = 0;
    call_object_destructors(ctx, obj);
    // non-zero means a destructor took a new reference (resurrection)
    ctx_and_flag->flag = ENTRY->impl.counted.ref_count;
    if (!ENTRY->impl.counted.ref_count) {
        // the object can still be saved by another object in the reference loop
        // having its refcount incremented. Therefore, we restore the original
        // reference count.
        ENTRY->impl.counted.ref_count = old_ref_count;
    }
    return ctx_and_flag->flag;
}
|
|
|
|
/**
|
|
* Call destructors for a hash table where the keys are objects. Stop if any
|
|
* object had it's reference count increase past 0.
|
|
*
|
|
* > Calling this will set the reference count of all objects in counts to 0!
|
|
*
|
|
* @param ctx The #RefcountContext
|
|
* @param counts The table for which to call destructors
|
|
* @return True if an object had its reference count increase above 0.
|
|
*/
|
|
static bool call_destructors_for_gc(const RefcountContext *ctx,
|
|
HTTable *counts) {
|
|
struct ContextAndFlag ctx_and_flag = {
|
|
.ctx = ctx,
|
|
.flag = false,
|
|
};
|
|
ht_foreach(counts, call_destructors_for_gc_callback, &ctx_and_flag);
|
|
return ctx_and_flag.flag;
|
|
}
|
|
|
|
/**
|
|
* Check the root pointed to by the double pointer root_ptr. After the call,
|
|
* root_ptr is set to the next root to be checked.
|
|
* @param ctx The context
|
|
* @param root_ptr Double pointer to one of the GC roots
|
|
* @return The number of object's freed, or -1 if an error happened
|
|
*/
|
|
static ptrdiff_t check_gc_root(RefcountContext *ctx, RefcountList **root_ptr) {
    // counts maps object -> (stuffed) remaining unaccounted reference count
    HTTable *counts = ht_new(&ROOT_COUNTS_FNS, &ctx->ht_alloc, NULL);
    if (!counts) {
        *root_ptr = (*root_ptr)->next;
        return -1;
    }
    RefcountList *root = *root_ptr;
    RefcountList *queue = NULL;
    if (!obj_held_refs(ctx, root->data, &queue)) {
        ht_free(counts);
        *root_ptr = (*root_ptr)->next;
        return -1;
    }
    size_t seen_objects = 0;  // distinct counted objects reached
    size_t clear_objects = 0; // of those, how many reached count 0
    // ignore allocation errors until I decide how to deal with them (in the far
    // future)
    while (queue) {
        void *obj = queue->data;
        queue = refcount_list_pop_full(queue, NULL, &ctx->alloc);
        if (!obj || refcount_context_is_static(ctx, obj)) {
            continue;
        }
        uintptr_t count;
        if (ht_has(counts, obj)) {
            count = HT_UUNSTUFF(ht_get(counts, obj));
        } else {
            // first visit: start from the object's real reference count
            count = REFCOUNT_OBJECT_ENTRY(ctx, obj)->impl.counted.ref_count;
            ++seen_objects;
            // don't recurse into objects multiple times
            obj_held_refs(ctx, obj, &queue);
        }
        // each traversed edge accounts for one reference; if every reference
        // to an object is accounted for, only the cycle is keeping it alive
        if (count > 0) {
            ht_insert(counts, obj, HT_STUFF(--count));
            if (count == 0) {
                ++clear_objects;
            }
        }
    }
    ptrdiff_t freed_count = 0;
    if (seen_objects == clear_objects
        && !call_destructors_for_gc(ctx, counts)) {
        // all objects still have a refcount of zero, even after calling
        // destructors, proceed with freeing them
        struct ContextAndRootPtr data = {
            .ctx = ctx, .root_ptr = root_ptr, .did_update = false};
        ht_foreach(counts, free_roots_foreach, &data);
        // free_roots_foreach advances *root_ptr if it destroys the current
        // root node; otherwise we advance it ourselves
        if (!data.did_update) {
            *root_ptr = (*root_ptr)->next;
        }
        freed_count = seen_objects;
    } else {
        // either something still had a reference, or it was re-refed by its
        // destructor. Either way, don't free anything
        *root_ptr = (*root_ptr)->next;
    }
    ht_free(counts);
    return freed_count;
}
|
|
|
|
/**
|
|
* Run the garbage collector on a context.
|
|
* @param ctx The #RefcountContext
|
|
* @return The number of object's freed, or -1 if an error occurred
|
|
*/
|
|
ptrdiff_t refcount_context_garbage_collect(RefcountContext *ctx) {
    if (!ctx->held_refs_callback) {
        // no loops possible
        return 0;
    }
    if (!lock_mutex(&ctx->gr_mtx)) {
        return -1;
    }
    store_flag_maybe_atomic(&ctx->doing_gc, true);
    ptrdiff_t total_cleared = 0;
    RefcountList *root = ctx->gc_roots;
    while (root) {
        // check_gc_root advances `root` to the next candidate itself
        ptrdiff_t res = check_gc_root(ctx, &root);
        if (res < 0) {
            // Record the error but fall through to the common exit so the
            // mutex is released and doing_gc is cleared. (The original
            // returned here, leaving gr_mtx locked and doing_gc set.)
            total_cleared = -1;
            break;
        }
        total_cleared += res;
    }
    store_flag_maybe_atomic(&ctx->doing_gc, false);
    unlock_mutex(&ctx->gr_mtx);
    return total_cleared;
}
|
|
|
|
/**
|
|
* Create a new weak reference for an object. A weak reference will allow safe
|
|
* access to the referenced object without holding a reference. That is, the
|
|
* referenced object can be accessed until its reference count falls to 0 and
|
|
* it is freed. After this, attempts to use the weak reference will just return
|
|
* NULL to indicate that the referenced object is no longer in existence.
|
|
* @param ctx The #RefcountContext
|
|
* @param obj The object for which to create a weak reference
|
|
* @return The newly created weak reference
|
|
*/
|
|
RefcountWeakref *refcount_context_make_weakref(const RefcountContext *ctx,
|
|
void *obj) {
|
|
inc_wr_count(&ENTRY->weak_ref->ref_count);
|
|
return ENTRY->weak_ref;
|
|
}
|
|
|
|
/**
|
|
* Destroy a weak reference. This has no effect on the reference count of the
|
|
* original object.
|
|
* @param ctx The #RefcountContext
|
|
* @param wr The weak reference
|
|
*/
|
|
void refcount_context_destroy_weakref(const RefcountContext *ctx,
                                      RefcountWeakref *wr) {
    // dropping this reference frees wr if it was the last one; the
    // referenced object's own reference count is unaffected
    unref_weakref(ctx, wr);
}
|
|
|
|
/**
|
|
* Return whether the object referenced by a weak reference still exists.
|
|
* @param ctx The #RefcountContext
|
|
* @param wr The weak reference
|
|
* @return Whether the reference is still valid
|
|
*/
|
|
bool refcount_context_weakref_is_valid(const RefcountContext *ctx,
|
|
RefcountWeakref *wr) {
|
|
// we need the locks because accessing the data member is not atomic
|
|
if (!lock_mutex(&wr->mtx)) {
|
|
return NULL; // we can't be sure, so play it safe
|
|
}
|
|
bool is_valid = wr->data;
|
|
unlock_mutex(&wr->mtx);
|
|
return is_valid;
|
|
}
|
|
|
|
/**
|
|
* Add a reference to an object referenced by a weak reference and return the
|
|
* object. If the referenced object no longer exists, return NULL.
|
|
* @param ctx The #RefcountContext
|
|
* @param wr The weak reference
|
|
* @return The newly referenced object, or NULL
|
|
*/
|
|
void *refcount_context_ref_weakref(const RefcountContext *ctx,
|
|
RefcountWeakref *wr) {
|
|
if (!lock_mutex(&wr->mtx)) {
|
|
return NULL; // we can't be sure, so play it safe
|
|
}
|
|
void *obj = NULL;
|
|
if (wr->data) {
|
|
obj = refcount_context_ref(ctx, wr->data);
|
|
}
|
|
unlock_mutex(&wr->mtx);
|
|
return obj;
|
|
}
|
|
|
|
/**
|
|
* Register a destructor to be called right before an object is freed. If the
|
|
* destructor adds a new reference to the object, the object is not freed. The
|
|
* destructor will then be run again the next time the object is about to be
|
|
* freed.
|
|
*
|
|
* Note that if a destructor already exists for the given key, it will be
|
|
* replaced without calling it.
|
|
* @param ctx The #RefcountContext
|
|
* @param obj The object onto which to register the destructor
|
|
* @param key An arbitrary value that can be later used to unregister the
|
|
* destructor
|
|
* @param callback The destructor itself
|
|
* @param user_data Extra data to pass to the destructor
|
|
* @return True on success, false on failure. On failure, nothing is registered.
|
|
*/
|
|
bool refcount_context_add_destructor(const RefcountContext *ctx, void *obj,
|
|
void *key,
|
|
refcount_destructor_callback_t callback,
|
|
void *user_data) {
|
|
struct DestructorEntry *entry =
|
|
refcount_malloc(&ctx->alloc, sizeof(struct DestructorEntry));
|
|
if (!entry) {
|
|
return false;
|
|
}
|
|
entry->callback = callback;
|
|
entry->user_data = user_data;
|
|
if (!lock_entry_mtx(ENTRY)) {
|
|
refcount_free(&ctx->alloc, entry);
|
|
return false;
|
|
}
|
|
bool success = true;
|
|
if (!ht_insert(ENTRY->destructors, key, entry)) {
|
|
refcount_free(&ctx->alloc, entry);
|
|
success = false;
|
|
}
|
|
unlock_entry_mtx(ENTRY);
|
|
return success;
|
|
}
|
|
|
|
/**
|
|
* Unregister a destructor from the given object. The destructor will not be
|
|
* called. If no destructor exists for the given key, this will do nothing.
|
|
* @param ctx The #RefcountContext
|
|
* @param obj The object for which to unregister the destructor
|
|
* @param key The destructors key
|
|
* @return True on success, false on error. On error, nothing is unregistered.
|
|
*/
|
|
bool refcount_context_remove_destructor(const RefcountContext *ctx, void *obj,
|
|
void *key) {
|
|
if (!lock_entry_mtx(ENTRY)) {
|
|
return false;
|
|
}
|
|
bool success = ht_remove(ENTRY->destructors, key);
|
|
unlock_entry_mtx(ENTRY);
|
|
return success;
|
|
}
|
|
|
|
// Debug Functions
|
|
|
|
/**
|
|
* Count all instances of a target object by walking the references of some root
|
|
* object. This is for debug purposes only. The root is not included in the
|
|
* count (as in, if `obj == target`, it will not be counted).
|
|
* @param ctx The #RefcountContext
|
|
* @param obj The root object
|
|
* @param target The object to look for
|
|
* @return The number of times the target appeared in the reference tree of the
|
|
* root
|
|
*/
|
|
uint64_t refcount_debug_context_count_object(const RefcountContext *ctx,
|
|
void *obj, void *target) {
|
|
static const HTTableFunctions SEEN_FNS = {
|
|
.destroy_key = NULL,
|
|
.destroy_value = NULL,
|
|
.equal = ht_intptr_equal_callback,
|
|
.hash = ht_intptr_hash_callback,
|
|
.user_data = NULL,
|
|
};
|
|
if (!obj) {
|
|
return 0;
|
|
}
|
|
RefcountList *queue = NULL;
|
|
obj_held_refs(ctx, obj, &queue);
|
|
uint64_t total_count = 0;
|
|
HTTable *seen = ht_new(&SEEN_FNS, &ctx->ht_alloc, NULL);
|
|
while (queue) {
|
|
void *cur = queue->data;
|
|
queue = refcount_list_pop_full(queue, NULL, &ctx->alloc);
|
|
// count NULL
|
|
if (cur == target) {
|
|
++total_count;
|
|
}
|
|
// but don't try to descend into it
|
|
if (cur && !ht_has(seen, cur)) {
|
|
ht_insert(seen, cur, NULL);
|
|
obj_held_refs(ctx, cur, &queue);
|
|
}
|
|
}
|
|
ht_free(seen);
|
|
return total_count;
|
|
}
|