re-add sim pausing & stepping

This commit is contained in:
jacob 2025-05-15 05:27:33 -05:00
parent 50713144f6
commit 5e81b78ac1
7 changed files with 153 additions and 105 deletions

View File

@ -6,42 +6,34 @@
/* TODO: Remove "..._raw" functions */ /* TODO: Remove "..._raw" functions */
FORCE_INLINE i32 atomic_i32_eval(struct atomic_i32 *x) { return (i32)_InterlockedCompareExchange((volatile long *)&x->_v, 0, 0); } FORCE_INLINE i32 atomic_i32_eval(struct atomic_i32 *x) { return (i32)_InterlockedCompareExchange((volatile long *)&x->_v, 0, 0); }
FORCE_INLINE i32 atomic_i32_inc_eval(struct atomic_i32 *x) { return (i32)_InterlockedIncrement((volatile long *)&x->_v); }
FORCE_INLINE i32 atomic_i32_dec_eval(struct atomic_i32 *x) { return (i32)_InterlockedDecrement((volatile long *)&x->_v); }
FORCE_INLINE i32 atomic_i32_eval_add(struct atomic_i32 *x, i32 a) { return (i32)_InterlockedExchangeAdd((volatile long *)&x->_v, a); }
FORCE_INLINE i32 atomic_i32_eval_exchange(struct atomic_i32 *x, i32 e) { return (i32)_InterlockedExchange((volatile long *)&x->_v, e); } FORCE_INLINE i32 atomic_i32_eval_exchange(struct atomic_i32 *x, i32 e) { return (i32)_InterlockedExchange((volatile long *)&x->_v, e); }
FORCE_INLINE i32 atomic_i32_eval_compare_exchange(struct atomic_i32 *x, i32 c, i32 e) { return (i32)_InterlockedCompareExchange((volatile long *)&x->_v, e, c); } FORCE_INLINE i32 atomic_i32_eval_compare_exchange(struct atomic_i32 *x, i32 c, i32 e) { return (i32)_InterlockedCompareExchange((volatile long *)&x->_v, e, c); }
FORCE_INLINE volatile i32 *atomic_i32_raw(struct atomic_i32 *x) { return &x->_v; } FORCE_INLINE i32 atomic_i32_eval_xor(struct atomic_i32 *x, i32 c) { return (i32)_InterlockedXor((volatile long *)&x->_v, c); }
FORCE_INLINE i32 atomic_i32_eval_add(struct atomic_i32 *x, i32 a) { return (i32)_InterlockedExchangeAdd((volatile long *)&x->_v, a); }
FORCE_INLINE i64 atomic_i64_eval(struct atomic_i64 *x) { return (i64)_InterlockedCompareExchange64(&x->_v, 0, 0); } FORCE_INLINE i64 atomic_i64_eval(struct atomic_i64 *x) { return (i64)_InterlockedCompareExchange64(&x->_v, 0, 0); }
FORCE_INLINE i64 atomic_i64_inc_eval(struct atomic_i64 *x) { return (i64)_InterlockedIncrement64(&x->_v); }
FORCE_INLINE i64 atomic_i64_dec_eval(struct atomic_i64 *x) { return (i64)_InterlockedDecrement64(&x->_v); }
FORCE_INLINE i64 atomic_i64_eval_add(struct atomic_i64 *x, i64 a) { return (i64)_InterlockedExchangeAdd64(&x->_v, a); }
FORCE_INLINE i64 atomic_i64_eval_exchange(struct atomic_i64 *x, i64 e) { return (i64)_InterlockedExchange64(&x->_v, e); } FORCE_INLINE i64 atomic_i64_eval_exchange(struct atomic_i64 *x, i64 e) { return (i64)_InterlockedExchange64(&x->_v, e); }
FORCE_INLINE i64 atomic_i64_eval_compare_exchange(struct atomic_i64 *x, i64 c, i64 e) { return (i64)_InterlockedCompareExchange64(&x->_v, e, c); } FORCE_INLINE i64 atomic_i64_eval_compare_exchange(struct atomic_i64 *x, i64 c, i64 e) { return (i64)_InterlockedCompareExchange64(&x->_v, e, c); }
FORCE_INLINE volatile i64 *atomic_i64_raw(struct atomic_i64 *x) { return &x->_v; } FORCE_INLINE i64 atomic_i64_eval_xor(struct atomic_i64 *x, i64 c) { return (i64)_InterlockedXor64(&x->_v, c); }
FORCE_INLINE i64 atomic_i64_eval_add(struct atomic_i64 *x, i64 a) { return (i64)_InterlockedExchangeAdd64(&x->_v, a); }
FORCE_INLINE u32 atomic_u32_eval(struct atomic_u32 *x) { return (u32)_InterlockedCompareExchange((volatile long *)&x->_v, 0, 0); } FORCE_INLINE u32 atomic_u32_eval(struct atomic_u32 *x) { return (u32)_InterlockedCompareExchange((volatile long *)&x->_v, 0, 0); }
FORCE_INLINE u32 atomic_u32_inc_eval(struct atomic_u32 *x) { return (u32)_InterlockedIncrement((volatile long *)&x->_v); }
FORCE_INLINE u32 atomic_u32_dec_eval(struct atomic_u32 *x) { return (u32)_InterlockedDecrement((volatile long *)&x->_v); }
FORCE_INLINE u32 atomic_u32_eval_add_u32(struct atomic_u32 *x, u32 a) { return (u32)_InterlockedExchangeAdd((volatile long *)&x->_v, (long)a); }
FORCE_INLINE u32 atomic_u32_eval_add_i32(struct atomic_u32 *x, i32 a) { return (u32)_InterlockedExchangeAdd((volatile long *)&x->_v, (long)a); }
FORCE_INLINE u32 atomic_u32_eval_exchange(struct atomic_u32 *x, u32 e) { return (u32)_InterlockedExchange((volatile long *)&x->_v, (long)e); } FORCE_INLINE u32 atomic_u32_eval_exchange(struct atomic_u32 *x, u32 e) { return (u32)_InterlockedExchange((volatile long *)&x->_v, (long)e); }
FORCE_INLINE u32 atomic_u32_eval_compare_exchange(struct atomic_u32 *x, u32 c, u32 e) { return (u32)_InterlockedCompareExchange((volatile long *)&x->_v, (long)e, (long)c); } FORCE_INLINE u32 atomic_u32_eval_compare_exchange(struct atomic_u32 *x, u32 c, u32 e) { return (u32)_InterlockedCompareExchange((volatile long *)&x->_v, (long)e, (long)c); }
FORCE_INLINE volatile u32 *atomic_u32_raw(struct atomic_u32 *x) { return &x->_v; } FORCE_INLINE u32 atomic_u32_eval_xor(struct atomic_u32 *x, u32 c) { return (u32)_InterlockedXor((volatile long *)&x->_v, (long)c); }
FORCE_INLINE u32 atomic_u32_eval_add_u32(struct atomic_u32 *x, u32 a) { return (u32)_InterlockedExchangeAdd((volatile long *)&x->_v, (long)a); }
FORCE_INLINE u32 atomic_u32_eval_add_i32(struct atomic_u32 *x, i32 a) { return (u32)_InterlockedExchangeAdd((volatile long *)&x->_v, (long)a); }
FORCE_INLINE u64 atomic_u64_eval(struct atomic_u64 *x) { return (u64)_InterlockedCompareExchange64((volatile i64 *)&x->_v, 0, 0); } FORCE_INLINE u64 atomic_u64_eval(struct atomic_u64 *x) { return (u64)_InterlockedCompareExchange64((volatile i64 *)&x->_v, 0, 0); }
FORCE_INLINE u64 atomic_u64_inc_eval(struct atomic_u64 *x) { return (u64)_InterlockedIncrement64((volatile i64 *)&x->_v); }
FORCE_INLINE u64 atomic_u64_dec_eval(struct atomic_u64 *x) { return (u64)_InterlockedDecrement64((volatile i64 *)&x->_v); }
FORCE_INLINE u64 atomic_u64_eval_add_u64(struct atomic_u64 *x, u64 a) { return (u64)_InterlockedExchangeAdd64((volatile i64 *)&x->_v, (i64)a); }
FORCE_INLINE u64 atomic_u64_eval_add_i64(struct atomic_u64 *x, i64 a) { return (u64)_InterlockedExchangeAdd64((volatile i64 *)&x->_v, (i64)a); }
FORCE_INLINE u64 atomic_u64_eval_exchange(struct atomic_u64 *x, u64 e) { return (u64)_InterlockedExchange64((volatile i64 *)&x->_v, (i64)e); } FORCE_INLINE u64 atomic_u64_eval_exchange(struct atomic_u64 *x, u64 e) { return (u64)_InterlockedExchange64((volatile i64 *)&x->_v, (i64)e); }
FORCE_INLINE u64 atomic_u64_eval_compare_exchange(struct atomic_u64 *x, u64 c, u64 e) { return (u64)_InterlockedCompareExchange64((volatile i64 *)&x->_v, (i64)e, (i64)c); } FORCE_INLINE u64 atomic_u64_eval_compare_exchange(struct atomic_u64 *x, u64 c, u64 e) { return (u64)_InterlockedCompareExchange64((volatile i64 *)&x->_v, (i64)e, (i64)c); }
FORCE_INLINE volatile u64 *atomic_u64_raw(struct atomic_u64 *x) { return &x->_v; } FORCE_INLINE u64 atomic_u64_eval_xor(struct atomic_u64 *x, u64 c) { return (u64)_InterlockedXor64((volatile i64 *)&x->_v, (i64)c); }
FORCE_INLINE u64 atomic_u64_eval_add_u64(struct atomic_u64 *x, u64 a) { return (u64)_InterlockedExchangeAdd64((volatile i64 *)&x->_v, (i64)a); }
FORCE_INLINE u64 atomic_u64_eval_add_i64(struct atomic_u64 *x, i64 a) { return (u64)_InterlockedExchangeAdd64((volatile i64 *)&x->_v, (i64)a); }
FORCE_INLINE void *atomic_ptr_eval(struct atomic_ptr *x) { return (void *)_InterlockedCompareExchange64((volatile i64 *)&x->_v, 0, 0); } FORCE_INLINE void *atomic_ptr_eval(struct atomic_ptr *x) { return (void *)_InterlockedCompareExchange64((volatile i64 *)&x->_v, 0, 0); }
FORCE_INLINE void *atomic_ptr_eval_exchange(struct atomic_ptr *x, void *e) { return (void *)_InterlockedExchange64((volatile i64 *)&x->_v, (i64)e); }
FORCE_INLINE void *atomic_ptr_eval_compare_exchange(struct atomic_ptr *x, void *c, void *e) { return (void *)_InterlockedCompareExchange64((volatile i64 *)&x->_v, (i64)e, (i64)c); } FORCE_INLINE void *atomic_ptr_eval_compare_exchange(struct atomic_ptr *x, void *c, void *e) { return (void *)_InterlockedCompareExchange64((volatile i64 *)&x->_v, (i64)e, (i64)c); }
FORCE_INLINE volatile void **atomic_ptr_raw(struct atomic_ptr *x) { return &x->_v; }
#else #else
# error "Atomics not implemented" # error "Atomics not implemented"

View File

@ -140,9 +140,7 @@ enum sim_control_flag {
SIM_CONTROL_FLAG_DRAG = 1 << 1, SIM_CONTROL_FLAG_DRAG = 1 << 1,
SIM_CONTROL_FLAG_CLEAR_ALL = 1 << 2, SIM_CONTROL_FLAG_CLEAR_ALL = 1 << 2,
SIM_CONTROL_FLAG_SPAWN_TEST = 1 << 3, SIM_CONTROL_FLAG_SPAWN_TEST = 1 << 3,
SIM_CONTROL_FLAG_PAUSE = 1 << 4, SIM_CONTROL_FLAG_TILE_TEST = 1 << 4,
SIM_CONTROL_FLAG_STEP = 1 << 5,
SIM_CONTROL_FLAG_TILE_TEST = 1 << 6,
}; };
struct sim_control { struct sim_control {

View File

@ -40,7 +40,9 @@ enum sim_ent_prop {
SEPROP_CAMERA, SEPROP_CAMERA,
SEPROP_CAMERA_ACTIVE, SEPROP_CAMERA_ACTIVE,
SEPROP_WEAPON_BULLETS, SEPROP_WEAPON_SMG,
SEPROP_WEAPON_LAUNCHER,
SEPROP_TRIGGERING_EQUIPPED, SEPROP_TRIGGERING_EQUIPPED,
SEPROP_TRIGGERED_THIS_TICK, SEPROP_TRIGGERED_THIS_TICK,
SEPROP_TRIGGER_NEXT_TICK, SEPROP_TRIGGER_NEXT_TICK,

View File

@ -120,7 +120,7 @@ INTERNAL struct sim_ent *spawn_test_employee(struct sim_step_ctx *ctx)
e->attach_slice = LIT("attach.wep"); e->attach_slice = LIT("attach.wep");
e->layer = SIM_LAYER_RELATIVE_WEAPON; e->layer = SIM_LAYER_RELATIVE_WEAPON;
sim_ent_enable_prop(e, SEPROP_WEAPON_BULLETS); sim_ent_enable_prop(e, SEPROP_WEAPON_LAUNCHER);
e->trigger_delay = 1.0f / 10.0f; e->trigger_delay = 1.0f / 10.0f;
//e->trigger_delay = 1.0f / 100.0f; //e->trigger_delay = 1.0f / 100.0f;
@ -877,14 +877,12 @@ void sim_step(struct sim_step_ctx *ctx)
ent->last_triggered_ns = world->sim_time_ns; ent->last_triggered_ns = world->sim_time_ns;
/* Fire weapon */ /* Fire smg */
if (sim_ent_has_prop(ent, SEPROP_WEAPON_BULLETS)) { if (sim_ent_has_prop(ent, SEPROP_WEAPON_SMG)) {
struct sprite_tag sprite = ent->sprite; struct sprite_tag sprite = ent->sprite;
u32 animation_frame = ent->animation_frame; u32 animation_frame = ent->animation_frame;
struct sprite_sheet *sheet = sprite_sheet_from_tag_await(sprite_frame_scope, sprite); struct sprite_sheet *sheet = sprite_sheet_from_tag_await(sprite_frame_scope, sprite);
struct xform sprite_local_xform = ent->sprite_local_xform; struct xform sprite_local_xform = ent->sprite_local_xform;
struct sprite_sheet_slice out_slice = sprite_sheet_get_slice(sheet, LIT("out"), animation_frame); struct sprite_sheet_slice out_slice = sprite_sheet_get_slice(sheet, LIT("out"), animation_frame);
struct v2 rel_pos = xform_mul_v2(sprite_local_xform, out_slice.center); struct v2 rel_pos = xform_mul_v2(sprite_local_xform, out_slice.center);
struct v2 rel_dir = xform_basis_mul_v2(sprite_local_xform, out_slice.dir); struct v2 rel_dir = xform_basis_mul_v2(sprite_local_xform, out_slice.dir);
@ -927,6 +925,50 @@ void sim_step(struct sim_step_ctx *ctx)
bullet->bullet_tracer = tracer->id; bullet->bullet_tracer = tracer->id;
} }
} }
/* Fire launcher */
if (sim_ent_has_prop(ent, SEPROP_WEAPON_LAUNCHER)) {
struct sprite_tag sprite = ent->sprite;
u32 animation_frame = ent->animation_frame;
struct sprite_sheet *sheet = sprite_sheet_from_tag_await(sprite_frame_scope, sprite);
struct xform sprite_local_xform = ent->sprite_local_xform;
struct sprite_sheet_slice out_slice = sprite_sheet_get_slice(sheet, LIT("out"), animation_frame);
struct v2 rel_pos = xform_mul_v2(sprite_local_xform, out_slice.center);
struct v2 rel_dir = xform_basis_mul_v2(sprite_local_xform, out_slice.dir);
/* Spawn bullet */
struct sim_ent *bullet;
{
bullet = sim_ent_alloc_sync_src(root);
bullet->bullet_src = ent->id;
bullet->bullet_src_pos = rel_pos;
bullet->bullet_src_dir = rel_dir;
//bullet->bullet_impulse = 0.75f;
bullet->bullet_impulse = 2.0f;
bullet->bullet_knockback = 10;
bullet->mass_unscaled = 0.04f;
bullet->inertia_unscaled = 0.00001f;
bullet->layer = SIM_LAYER_BULLETS;
/* Point collider */
bullet->local_collider.points[0] = V2(0, 0);
bullet->local_collider.count = 1;
sim_ent_enable_prop(bullet, SEPROP_BULLET);
sim_ent_enable_prop(bullet, SEPROP_SENSOR);
}
/* Spawn tracer */
{
struct sim_ent *tracer = sim_ent_alloc_sync_src(root);
tracer->tracer_fade_duration = 0.025f;
tracer->layer = SIM_LAYER_TRACERS;
sim_ent_enable_prop(tracer, SEPROP_TRACER);
bullet->bullet_tracer = tracer->id;
}
}
} }
/* ========================== * /* ========================== *

View File

@ -1281,7 +1281,8 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg)
b32 cache_over_budget_target = atomic_u64_eval(&G.cache.memory_usage) > CACHE_MEMORY_BUDGET_TARGET; b32 cache_over_budget_target = atomic_u64_eval(&G.cache.memory_usage) > CACHE_MEMORY_BUDGET_TARGET;
struct sys_lock bin_lock = sys_mutex_lock_e(&bin->mutex); struct sys_lock bin_lock = sys_mutex_lock_e(&bin->mutex);
{ {
struct cache_refcount refcount = *(struct cache_refcount *)atomic_u64_raw(&entry->refcount_struct); u64 refcount_uncast = atomic_u64_eval(&entry->refcount_struct);
struct cache_refcount refcount = *(struct cache_refcount *)&refcount_uncast;
if (refcount.count > 0 || (last_ref_cycle >= 0 && refcount.last_ref_cycle != en->last_ref_cycle)) { if (refcount.count > 0 || (last_ref_cycle >= 0 && refcount.last_ref_cycle != en->last_ref_cycle)) {
/* Cache node has been referenced since scan, skip node. */ /* Cache node has been referenced since scan, skip node. */
} else if (cache_over_budget_target || last_ref_cycle < 0) { } else if (cache_over_budget_target || last_ref_cycle < 0) {
@ -1339,7 +1340,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg)
} }
} }
} }
atomic_i32_inc_eval(&G.evictor_cycle); atomic_i32_eval_add(&G.evictor_cycle, 1);
scratch_end(scratch); scratch_end(scratch);
/* Wait */ /* Wait */

View File

@ -1606,7 +1606,7 @@ struct sys_lock sys_mutex_lock_e(struct sys_mutex *mutex)
__proflock_after_exclusive_lock(mutex->profiling_ctx); __proflock_after_exclusive_lock(mutex->profiling_ctx);
#if RTC #if RTC
mutex->owner_tid = (u64)GetCurrentThreadId(); mutex->owner_tid = (u64)GetCurrentThreadId();
atomic_i64_inc_eval(&mutex->count); atomic_i64_eval_add(&mutex->count, 1);
#endif #endif
struct sys_lock lock = ZI; struct sys_lock lock = ZI;
lock.exclusive = true; lock.exclusive = true;
@ -1621,7 +1621,7 @@ struct sys_lock sys_mutex_lock_s(struct sys_mutex *mutex)
AcquireSRWLockShared((SRWLOCK *)&mutex->handle); AcquireSRWLockShared((SRWLOCK *)&mutex->handle);
__proflock_after_shared_lock(mutex->profiling_ctx); __proflock_after_shared_lock(mutex->profiling_ctx);
#if RTC #if RTC
atomic_i64_inc_eval(&mutex->count); atomic_i64_eval_add(&mutex->count, 1);
#endif #endif
struct sys_lock lock = ZI; struct sys_lock lock = ZI;
lock.mutex = mutex; lock.mutex = mutex;
@ -1632,7 +1632,7 @@ void sys_mutex_unlock(struct sys_lock *lock)
{ {
__prof; __prof;
#if RTC #if RTC
atomic_i64_dec_eval(&lock->mutex->count); atomic_i64_eval_add(&lock->mutex->count, -1);
lock->mutex->owner_tid = 0; lock->mutex->owner_tid = 0;
#endif #endif
if (lock->exclusive) { if (lock->exclusive) {
@ -1715,11 +1715,11 @@ void sys_condition_variable_wait(struct sys_condition_variable *cv, struct sys_l
struct sys_mutex *mutex = lock->mutex; struct sys_mutex *mutex = lock->mutex;
b32 exclusive = lock->exclusive; b32 exclusive = lock->exclusive;
#if RTC #if RTC
atomic_i64_inc_eval(&cv->num_waiters); atomic_i64_eval_add(&cv->num_waiters, 1);
if (exclusive) { if (exclusive) {
mutex->owner_tid = 0; mutex->owner_tid = 0;
} }
atomic_i64_dec_eval(&mutex->count); atomic_i64_eval_add(&mutex->count, -1);
#endif #endif
struct win32_condition_variable *w32cv = (struct win32_condition_variable *)cv->handle; struct win32_condition_variable *w32cv = (struct win32_condition_variable *)cv->handle;
@ -1739,11 +1739,11 @@ void sys_condition_variable_wait(struct sys_condition_variable *cv, struct sys_l
} }
#if RTC #if RTC
atomic_i64_inc_eval(&mutex->count); atomic_i64_eval_add(&mutex->count, 1);
if (exclusive) { if (exclusive) {
mutex->owner_tid = (u64)GetCurrentThreadId(); mutex->owner_tid = (u64)GetCurrentThreadId();
} }
atomic_i64_dec_eval(&cv->num_waiters); atomic_i64_eval_add(&cv->num_waiters, -1);
#endif #endif
} }
@ -1753,11 +1753,11 @@ void sys_condition_variable_wait_time(struct sys_condition_variable *cv, struct
struct sys_mutex *mutex = lock->mutex; struct sys_mutex *mutex = lock->mutex;
b32 exclusive = lock->exclusive; b32 exclusive = lock->exclusive;
#if RTC #if RTC
atomic_i64_inc_eval(&cv->num_waiters); atomic_i64_eval_add(&cv->num_waiters, 1);
if (exclusive) { if (exclusive) {
mutex->owner_tid = 0; mutex->owner_tid = 0;
} }
atomic_i64_dec_eval(&mutex->count); atomic_i64_eval_add(&mutex->count, -1);
#endif #endif
struct win32_condition_variable *w32cv = (struct win32_condition_variable *)cv->handle; struct win32_condition_variable *w32cv = (struct win32_condition_variable *)cv->handle;
u32 ms = (u32)math_round_to_int((f32)seconds * 1000.f); u32 ms = (u32)math_round_to_int((f32)seconds * 1000.f);
@ -1778,11 +1778,11 @@ void sys_condition_variable_wait_time(struct sys_condition_variable *cv, struct
} }
#if RTC #if RTC
atomic_i64_inc_eval(&mutex->count); atomic_i64_eval_add(&mutex->count, 1);
if (exclusive) { if (exclusive) {
mutex->owner_tid = (u64)GetCurrentThreadId(); mutex->owner_tid = (u64)GetCurrentThreadId();
} }
atomic_i64_dec_eval(&cv->num_waiters); atomic_i64_eval_add(&cv->num_waiters, -1);
#endif #endif
} }

View File

@ -91,6 +91,9 @@ GLOBAL struct {
u64 last_user_sim_cmd_gen; u64 last_user_sim_cmd_gen;
u64 user_sim_cmd_gen; u64 user_sim_cmd_gen;
struct atomic_i32 user_paused;
struct atomic_i32 user_paused_steps;
/* Local sim -> user */ /* Local sim -> user */
struct sys_mutex local_to_user_client_mutex; struct sys_mutex local_to_user_client_mutex;
struct sim_client_store *local_to_user_client_store; struct sim_client_store *local_to_user_client_store;
@ -511,7 +514,7 @@ INTERNAL void user_update(void)
G.local_sim_last_known_tick = newest_snapshot->tick; G.local_sim_last_known_tick = newest_snapshot->tick;
G.local_sim_predicted_time_ns = newest_snapshot->sim_time_ns + (newest_snapshot->sim_dt_ns * tick_progress); G.local_sim_predicted_time_ns = newest_snapshot->sim_time_ns + (newest_snapshot->sim_dt_ns * tick_progress);
#if USER_INTERP_ENABLED if (USER_INTERP_ENABLED && !atomic_i32_eval(&G.user_paused)) {
/* Determine render time */ /* Determine render time */
G.render_time_target_ns = G.local_sim_predicted_time_ns - (USER_INTERP_RATIO * G.average_local_to_user_snapshot_publish_dt_ns); G.render_time_target_ns = G.local_sim_predicted_time_ns - (USER_INTERP_RATIO * G.average_local_to_user_snapshot_publish_dt_ns);
if (G.average_local_to_user_snapshot_publish_dt_ns > 0) { if (G.average_local_to_user_snapshot_publish_dt_ns > 0) {
@ -552,7 +555,12 @@ INTERNAL void user_update(void)
} else if (right_snapshot->valid) { } else if (right_snapshot->valid) {
G.ss_blended = sim_snapshot_alloc(G.user_blended_client, right_snapshot, right_snapshot->tick); G.ss_blended = sim_snapshot_alloc(G.user_blended_client, right_snapshot, right_snapshot->tick);
} }
#else
/* Release unneeded unblended sim snapshots */
if (left_snapshot->tick > 0) {
sim_snapshot_release_ticks_in_range(G.user_unblended_client, 0, left_snapshot->tick - 1);
}
} else {
/* Interp disabled, just copy latest snapshot */ /* Interp disabled, just copy latest snapshot */
G.render_time_target_ns = newest_snapshot->sim_time_ns; G.render_time_target_ns = newest_snapshot->sim_time_ns;
G.render_time_ns = newest_snapshot->sim_time_ns; G.render_time_ns = newest_snapshot->sim_time_ns;
@ -562,11 +570,6 @@ INTERNAL void user_update(void)
} }
G.ss_blended = sim_snapshot_alloc(G.user_blended_client, newest_snapshot, newest_snapshot->tick); G.ss_blended = sim_snapshot_alloc(G.user_blended_client, newest_snapshot, newest_snapshot->tick);
} }
#endif
/* Release unneeded unblended sim snapshots */
if (left_snapshot->tick > 0) {
sim_snapshot_release_ticks_in_range(G.user_unblended_client, 0, left_snapshot->tick - 1);
} }
/* Release unneeded blended snapshots */ /* Release unneeded blended snapshots */
@ -1583,10 +1586,10 @@ INTERNAL void user_update(void)
struct bind_state fire_state = G.bind_states[USER_BIND_KIND_FIRE]; struct bind_state fire_state = G.bind_states[USER_BIND_KIND_FIRE];
struct bind_state drag_state = G.bind_states[USER_BIND_KIND_DEBUG_DRAG]; struct bind_state drag_state = G.bind_states[USER_BIND_KIND_DEBUG_DRAG];
struct bind_state clear_state = G.bind_states[USER_BIND_KIND_DEBUG_CLEAR]; struct bind_state clear_state = G.bind_states[USER_BIND_KIND_DEBUG_CLEAR];
struct bind_state pause_state = G.bind_states[USER_BIND_KIND_DEBUG_PAUSE];
struct bind_state step_state = G.bind_states[USER_BIND_KIND_DEBUG_STEP];
struct bind_state spawn_state = G.bind_states[USER_BIND_KIND_DEBUG_SPAWN]; struct bind_state spawn_state = G.bind_states[USER_BIND_KIND_DEBUG_SPAWN];
struct bind_state tile_state = G.bind_states[USER_BIND_KIND_TILE_TEST]; struct bind_state tile_state = G.bind_states[USER_BIND_KIND_TILE_TEST];
struct bind_state pause_state = G.bind_states[USER_BIND_KIND_DEBUG_PAUSE];
struct bind_state step_state = G.bind_states[USER_BIND_KIND_DEBUG_STEP];
if (fire_state.num_presses || fire_state.is_held) { if (fire_state.num_presses || fire_state.is_held) {
control.flags |= SIM_CONTROL_FLAG_FIRE; control.flags |= SIM_CONTROL_FLAG_FIRE;
@ -1597,12 +1600,6 @@ INTERNAL void user_update(void)
if (clear_state.num_presses) { if (clear_state.num_presses) {
control.flags |= SIM_CONTROL_FLAG_CLEAR_ALL; control.flags |= SIM_CONTROL_FLAG_CLEAR_ALL;
} }
if (pause_state.num_presses) {
control.flags |= SIM_CONTROL_FLAG_PAUSE;
}
if (step_state.num_presses) {
control.flags |= SIM_CONTROL_FLAG_STEP;
}
if (spawn_state.num_presses) { if (spawn_state.num_presses) {
control.flags |= SIM_CONTROL_FLAG_SPAWN_TEST; control.flags |= SIM_CONTROL_FLAG_SPAWN_TEST;
} }
@ -1610,6 +1607,11 @@ INTERNAL void user_update(void)
control.flags |= SIM_CONTROL_FLAG_TILE_TEST; control.flags |= SIM_CONTROL_FLAG_TILE_TEST;
} }
if (pause_state.num_presses) {
atomic_i32_eval_xor(&G.user_paused, 1);
}
atomic_i32_eval_add(&G.user_paused_steps, step_state.num_presses_and_repeats);
/* Set user sim control */ /* Set user sim control */
{ {
struct sys_lock lock = sys_mutex_lock_e(&G.user_sim_cmd_mutex); struct sys_lock lock = sys_mutex_lock_e(&G.user_sim_cmd_mutex);
@ -2053,8 +2055,6 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(user_local_sim_thread_entry_point, arg)
i64 master_blend_time_ns = 0; i64 master_blend_time_ns = 0;
i64 average_master_receive_dt_ns = 0; i64 average_master_receive_dt_ns = 0;
i64 last_tick_from_master_received_at_ns = 0; i64 last_tick_from_master_received_at_ns = 0;
@ -2249,6 +2249,19 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(user_local_sim_thread_entry_point, arg)
} }
} }
b32 should_step = true;
if (atomic_i32_eval(&G.user_paused)) {
should_step = false;
}
if (atomic_i32_eval(&G.user_paused_steps) > 0) {
should_step = true;
atomic_i32_eval_add(&G.user_paused_steps, -1);
}
if (!should_step) {
goto skip_step;
}
/* Update networked clients */ /* Update networked clients */
u64 oldest_client_ack = 0; u64 oldest_client_ack = 0;
for (u64 i = 0; i < store->num_clients_reserved; ++i) { for (u64 i = 0; i < store->num_clients_reserved; ++i) {
@ -2445,7 +2458,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(user_local_sim_thread_entry_point, arg)
/* We want to simulate ahead of the server to predict client input. /* We want to simulate ahead of the server to predict client input.
* How many ticks ahead we want to simulate is a balance between added latency and the server not receiving our inputs on time. * How many ticks ahead we want to simulate is a balance between added latency and the server not receiving our inputs on time.
* We can take the server's ack - server's tick to determine how many cmds of ours the server has buffered. * We can take the server's ack minus the server's tick to determine how many cmds of ours the server has buffered.
* *
* If this buffer gets too low (because we are lagging behind or the connection is unstable), meaning the server is not getting our input on time: * If this buffer gets too low (because we are lagging behind or the connection is unstable), meaning the server is not getting our input on time:
* - Shorten local compute rate to increase the rate at which we predict ahead & produce cmds, until the server's ack indicates a buffer size within desired range. * - Shorten local compute rate to increase the rate at which we predict ahead & produce cmds, until the server's ack indicates a buffer size within desired range.