#ifndef UTIL_H
|
|
#define UTIL_H
|
|
|
|
#include "sys.h"
|
|
#include "string.h"
|
|
#include "memory.h"
|
|
#include "arena.h"
|
|
#include "atomic.h"
|
|
#include "math.h"
|
|
#include "scratch.h"
|
|
|
|
/* Utility functions and stuff that don't have a home :( */
|
|
|
|
/* ========================== *
|
|
* Hash utils
|
|
* ========================== */
|
|
|
|
/* FNV-1a parameters for different hash sizes:
|
|
* https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function#FNV_hash_parameters
|
|
*/
|
|
|
|
#define HASH_FNV64_BASIS 0xCBF29CE484222325
|
|
INLINE u64 hash_fnv64(u64 seed, struct buffer buff)
|
|
{
|
|
u64 hash = seed;
|
|
for (u64 i = 0; i < buff.size; ++i) {
|
|
hash ^= (u8)buff.data[i];
|
|
hash *= 0x100000001B3;
|
|
}
|
|
return hash;
|
|
}
|
|
|
|
#define HASH_FNV128_BASIS U128(0x6C62272E07BB0142, 0x62B821756295C58D)
|
|
INLINE u128 hash_fnv128(u128 seed, struct buffer buff)
|
|
{
|
|
/* FIXME: Verify MSVC version of 128 is same */
|
|
u128 hash = seed;
|
|
for (u64 i = 0; i < buff.size; ++i) {
|
|
u8 c = (u8)buff.data[i];
|
|
hash = u128_xor_u8(hash, c);
|
|
hash = u128_mul(hash, U128(0x1000000, 0x000000000000013B));
|
|
}
|
|
return hash;
|
|
}
|
|
|
|
/* ========================== *
|
|
* Merge sort
|
|
* ========================== */
|
|
|
|
/* Compare functions should
 * return an int < 0 if a < b
 * return an int = 0 if a == b
 * return an int > 0 if a > b
 * (qsort-style three-way comparison; udata is passed through to the
 * callback untouched so callers can thread context without globals)
 */
#define SORT_COMPARE_FUNC_DEF(name, arg_a, arg_b, arg_udata) i32 name(void *arg_a, void *arg_b, void *arg_udata)
/* Pointer-to-function type for the comparator; declared via the macro so
 * callback definitions and this typedef cannot drift apart. */
typedef SORT_COMPARE_FUNC_DEF(sort_compare_func, a, b, udata);
|
|
|
|
/* Merge two already-sorted runs into `items`.
 *
 * left / left_count   : first sorted run
 * right / right_count : second sorted run
 * items               : destination, room for left_count + right_count
 *                       elements of item_size bytes each
 * callback / udata    : qsort-style comparator and its context
 *
 * Ties take the left item first, so the merge is stable. */
INLINE void merge_sort_internal(u8 *left, u8 *right, u8 *items, u64 left_count, u64 right_count, u64 item_size, sort_compare_func *callback, void *udata)
{
        /* Sort */
        u64 i = 0;
        u64 l = 0;
        u64 r = 0;
        while (l < left_count && r < right_count) {
                u8 *dst = items + (i * item_size);
                u8 *left_item = left + (l * item_size);
                u8 *right_item = right + (r * item_size);
                ++i;
                /* Take left on <= 0 so the output is ascending and equal
                 * items keep their original (left-first) order. The prior
                 * `> 0` test inverted the documented comparator contract,
                 * producing a descending, unstable sort. */
                if (callback(left_item, right_item, udata) <= 0) {
                        MEMCPY(dst, left_item, item_size);
                        ++l;
                } else {
                        MEMCPY(dst, right_item, item_size);
                        ++r;
                }
        }
        /* Copy whatever remains of the run that was not exhausted
         * (at most one of these branches executes). */
        if (l != left_count) {
                u64 remaining_count = left_count - l;
                u64 remaining_bytes = remaining_count * item_size;
                u8 *dst = items + (i * item_size);
                u8 *src = left + (l * item_size);
                MEMCPY(dst, src, remaining_bytes);
        } else if (r != right_count) {
                u64 remaining_count = right_count - r;
                u64 remaining_bytes = remaining_count * item_size;
                u8 *dst = items + (i * item_size);
                u8 *src = right + (r * item_size);
                MEMCPY(dst, src, remaining_bytes);
        }
}
|
|
|
|
/* Generic merge sort of item_count elements of item_size bytes each,
 * ordered by callback (udata is forwarded to every comparison).
 * Recursively splits the input; temporary copies of both halves live in
 * scratch-arena memory released before returning. */
INLINE void merge_sort(void *items, u64 item_count, u64 item_size, sort_compare_func *callback, void *udata)
{
        if (item_count <= 1) {
                return; /* already sorted */
        }

        struct temp_arena scratch = scratch_begin_no_conflict();

        u64 lo_count = item_count / 2;
        u64 hi_count = item_count - lo_count; /* right half takes the odd element */
        u64 lo_size = lo_count * item_size;
        u64 hi_size = hi_count * item_size;

        /* Copy each half out, sort the copies, then merge them back into
         * the caller's buffer. */
        u8 *lo = arena_push_array(scratch.arena, u8, lo_size);
        u8 *hi = arena_push_array(scratch.arena, u8, hi_size);
        MEMCPY(lo, items, lo_size);
        MEMCPY(hi, (u8 *)items + lo_size, hi_size);

        merge_sort(lo, lo_count, item_size, callback, udata);
        merge_sort(hi, hi_count, item_size, callback, udata);
        merge_sort_internal(lo, hi, (u8 *)items, lo_count, hi_count, item_size, callback, udata);

        scratch_end(scratch);
}
|
|
|
|
/* ========================== *
|
|
* Fixed Dict
|
|
*
|
|
* Simple fixed bucket-count string->value chaining dict for generic use
|
|
* ========================== */
|
|
|
|
/* One key/value pair; entries within a bucket form a singly linked chain. */
struct fixed_dict_entry {
        struct string key;             /* not copied: must outlive the dict (see fixed_dict_set) */
        void *value;                   /* caller-owned payload pointer; never dereferenced here */
        u64 hash;                      /* cached FNV-1a hash of key */
        struct fixed_dict_entry *next; /* next entry in this bucket's chain, or NULL */
};

/* A bucket is just the head of its entry chain (NULL when empty). */
struct fixed_dict_bucket {
        struct fixed_dict_entry *entry_head;
};

/* String-keyed chaining hash table whose bucket count is fixed at init. */
struct fixed_dict {
        u64 buckets_count;                 /* always >= 1 (enforced by fixed_dict_init) */
        struct fixed_dict_bucket *buckets; /* array of buckets_count buckets */
};
|
|
|
|
/* Build a dict with buckets_count zeroed buckets allocated from arena.
 * A request for zero buckets is rounded up to one so the hash % count
 * indexing in set/get is always valid. */
INLINE struct fixed_dict fixed_dict_init(struct arena *arena, u64 buckets_count)
{
        __prof;
        u64 count = max_u64(buckets_count, 1); /* Ensure at least 1 bucket */
        struct fixed_dict result = ZI;
        result.buckets_count = count;
        result.buckets = arena_push_array_zero(arena, struct fixed_dict_bucket, count);
        return result;
}
|
|
|
|
/* arena and key must share lifetime with dict (this function does not copy the key) */
|
|
INLINE void fixed_dict_set(struct arena *arena, struct fixed_dict *dict, struct string key, void *value)
|
|
{
|
|
__prof;
|
|
|
|
u64 hash = hash_fnv64(HASH_FNV64_BASIS, BUFFER_FROM_STRING(key));
|
|
u64 index = hash % dict->buckets_count;
|
|
struct fixed_dict_bucket *bucket = &dict->buckets[index];
|
|
|
|
struct fixed_dict_entry *entry = bucket->entry_head;
|
|
while (entry) {
|
|
if (hash == entry->hash) {
|
|
/* Existing match found, replace its contents */
|
|
entry->key = key;
|
|
entry->value = value;
|
|
return;
|
|
}
|
|
entry = entry->next;
|
|
}
|
|
|
|
/* No match found, create new entry */
|
|
entry = arena_push(arena, struct fixed_dict_entry);
|
|
entry->key = key;
|
|
entry->value = value;
|
|
entry->hash = hash;
|
|
entry->next = bucket->entry_head;
|
|
|
|
bucket->entry_head = entry;
|
|
}
|
|
|
|
INLINE void *fixed_dict_get(const struct fixed_dict *dict, struct string key)
|
|
{
|
|
__prof;
|
|
|
|
u64 hash = hash_fnv64(HASH_FNV64_BASIS, BUFFER_FROM_STRING(key));
|
|
u64 index = hash % dict->buckets_count;
|
|
struct fixed_dict_bucket *bucket = &dict->buckets[index];
|
|
|
|
for (struct fixed_dict_entry *entry = bucket->entry_head; entry; entry = entry->next) {
|
|
if (hash == entry->hash) {
|
|
/* Match found */
|
|
return entry->value;
|
|
}
|
|
}
|
|
|
|
return NULL;
|
|
}
|
|
|
|
/* ========================== *
|
|
* Sync flag
|
|
* ========================== */
|
|
|
|
/* Latch-style flag: waiters block until a setter raises it. Create with
 * sync_flag_alloc, free with sync_flag_release. The visible API only ever
 * sets flag to 1; nothing here clears it back to 0. */
struct sync_flag {
        struct sys_mutex mutex;           /* guards flag */
        struct sys_condition_variable cv; /* signaled by sync_flag_set */
        b32 flag;                         /* 0 = unset, 1 = set */
};
|
|
|
|
INLINE struct sync_flag sync_flag_alloc(void)
|
|
{
|
|
struct sync_flag sf = ZI;
|
|
sf.mutex = sys_mutex_alloc();
|
|
sf.cv = sys_condition_variable_alloc();
|
|
return sf;
|
|
}
|
|
|
|
/* Release the OS objects owned by sf.
 * NOTE(review): assumes no thread is still waiting on or holding the
 * flag when this runs — confirm at call sites. */
INLINE void sync_flag_release(struct sync_flag *sf)
{
        sys_mutex_release(&sf->mutex);
        sys_condition_variable_release(&sf->cv);
}
|
|
|
|
/* Raise the flag and wake every thread blocked in sync_flag_wait. */
INLINE void sync_flag_set(struct sync_flag *sf)
{
        __prof;
        /* NOTE(review): _e presumably acquires the exclusive/write side
         * of the mutex — confirm against sys.h. */
        struct sys_lock lock = sys_mutex_lock_e(&sf->mutex);
        sf->flag = 1;
        /* Broadcast while still holding the mutex so a waiter between its
         * predicate check and its wait cannot miss the update. */
        sys_condition_variable_broadcast(&sf->cv);
        sys_mutex_unlock(&lock);
}
|
|
|
|
/* Block the calling thread until sync_flag_set has raised the flag.
 * Returns immediately if the flag is already set. */
INLINE void sync_flag_wait(struct sync_flag *sf)
{
        __prof;
        /* NOTE(review): _s presumably acquires the shared/read side of the
         * mutex — confirm against sys.h. */
        struct sys_lock lock = sys_mutex_lock_s(&sf->mutex);
        /* Re-check the predicate in a loop: condition-variable waits can
         * wake spuriously, so a single wait is not enough. */
        while (sf->flag != 1) {
                sys_condition_variable_wait(&sf->cv, &lock);
        }
        sys_mutex_unlock(&lock);
}
|
|
|
|
/* ========================== *
|
|
* Sleep frame
|
|
* ========================== */
|
|
|
|
/* Sleep off the remainder of a frame: given the timestamp of the previous
 * frame and the desired frame duration target_dt (seconds), sleep for
 * whatever time is left. Does nothing on the first frame
 * (last_frame_time == 0) or when frame pacing is disabled (target_dt <= 0),
 * or when the frame already ran long. */
INLINE void sleep_frame(sys_timestamp_t last_frame_time, f64 target_dt)
{
        __prof;
        if (last_frame_time == 0 || !(target_dt > 0)) {
                return; /* no reference frame yet, or pacing disabled */
        }
        f64 elapsed = sys_timestamp_seconds(sys_timestamp() - last_frame_time);
        f64 remaining = target_dt - elapsed;
        if (remaining > 0) {
                sys_sleep_precise(remaining);
        }
}
|
|
|
|
#endif
|