From 5e81b78ac17064012bbba4a35de7d9f3f297f456 Mon Sep 17 00:00:00 2001 From: jacob Date: Thu, 15 May 2025 05:27:33 -0500 Subject: [PATCH] re-add sim pausing & stepping --- src/atomic.h | 30 ++++------- src/sim.h | 4 +- src/sim_ent.h | 4 +- src/sim_step.c | 52 ++++++++++++++++-- src/sprite.c | 5 +- src/sys_win32.c | 22 ++++---- src/user.c | 141 ++++++++++++++++++++++++++---------------------- 7 files changed, 153 insertions(+), 105 deletions(-) diff --git a/src/atomic.h b/src/atomic.h index 2313a8d4..6187d05d 100644 --- a/src/atomic.h +++ b/src/atomic.h @@ -6,42 +6,34 @@ /* TODO: Remove "..._raw" functions */ FORCE_INLINE i32 atomic_i32_eval(struct atomic_i32 *x) { return (i32)_InterlockedCompareExchange((volatile long *)&x->_v, 0, 0); } -FORCE_INLINE i32 atomic_i32_inc_eval(struct atomic_i32 *x) { return (i32)_InterlockedIncrement((volatile long *)&x->_v); } -FORCE_INLINE i32 atomic_i32_dec_eval(struct atomic_i32 *x) { return (i32)_InterlockedDecrement((volatile long *)&x->_v); } -FORCE_INLINE i32 atomic_i32_eval_add(struct atomic_i32 *x, i32 a) { return (i32)_InterlockedExchangeAdd((volatile long *)&x->_v, a); } FORCE_INLINE i32 atomic_i32_eval_exchange(struct atomic_i32 *x, i32 e) { return (i32)_InterlockedExchange((volatile long *)&x->_v, e); } FORCE_INLINE i32 atomic_i32_eval_compare_exchange(struct atomic_i32 *x, i32 c, i32 e) { return (i32)_InterlockedCompareExchange((volatile long *)&x->_v, e, c); } -FORCE_INLINE volatile i32 *atomic_i32_raw(struct atomic_i32 *x) { return &x->_v; } +FORCE_INLINE i32 atomic_i32_eval_xor(struct atomic_i32 *x, i32 c) { return (i32)_InterlockedXor((volatile long *)&x->_v,c); } +FORCE_INLINE i32 atomic_i32_eval_add(struct atomic_i32 *x, i32 a) { return (i32)_InterlockedExchangeAdd((volatile long *)&x->_v, a); } FORCE_INLINE i64 atomic_i64_eval(struct atomic_i64 *x) { return (i64)_InterlockedCompareExchange64(&x->_v, 0, 0); } -FORCE_INLINE i64 atomic_i64_inc_eval(struct atomic_i64 *x) { return (i64)_InterlockedIncrement64(&x->_v); 
} -FORCE_INLINE i64 atomic_i64_dec_eval(struct atomic_i64 *x) { return (i64)_InterlockedDecrement64(&x->_v); } -FORCE_INLINE i64 atomic_i64_eval_add(struct atomic_i64 *x, i64 a) { return (i64)_InterlockedExchangeAdd64(&x->_v, a); } FORCE_INLINE i64 atomic_i64_eval_exchange(struct atomic_i64 *x, i64 e) { return (i64)_InterlockedExchange64(&x->_v, e); } FORCE_INLINE i64 atomic_i64_eval_compare_exchange(struct atomic_i64 *x, i64 c, i64 e) { return (i64)_InterlockedCompareExchange64(&x->_v, e, c); } -FORCE_INLINE volatile i64 *atomic_i64_raw(struct atomic_i64 *x) { return &x->_v; } +FORCE_INLINE i64 atomic_i64_eval_xor(struct atomic_i64 *x, i64 c) { return (i64)_InterlockedXor64(&x->_v, c); } +FORCE_INLINE i64 atomic_i64_eval_add(struct atomic_i64 *x, i64 a) { return (i64)_InterlockedExchangeAdd64(&x->_v, a); } FORCE_INLINE u32 atomic_u32_eval(struct atomic_u32 *x) { return (u32)_InterlockedCompareExchange((volatile long *)&x->_v, 0, 0); } -FORCE_INLINE u32 atomic_u32_inc_eval(struct atomic_u32 *x) { return (u32)_InterlockedIncrement((volatile long *)&x->_v); } -FORCE_INLINE u32 atomic_u32_dec_eval(struct atomic_u32 *x) { return (u32)_InterlockedDecrement((volatile long *)&x->_v); } -FORCE_INLINE u32 atomic_u32_eval_add_u32(struct atomic_u32 *x, u32 a) { return (u32)_InterlockedExchangeAdd((volatile long *)&x->_v, (long)a); } -FORCE_INLINE u32 atomic_u32_eval_add_i32(struct atomic_u32 *x, i32 a) { return (u32)_InterlockedExchangeAdd((volatile long *)&x->_v, (long)a); } FORCE_INLINE u32 atomic_u32_eval_exchange(struct atomic_u32 *x, u32 e) { return (u32)_InterlockedExchange((volatile long *)&x->_v, (long)e); } FORCE_INLINE u32 atomic_u32_eval_compare_exchange(struct atomic_u32 *x, u32 c, u32 e) { return (u32)_InterlockedCompareExchange((volatile long *)&x->_v, (long)e, (long)c); } -FORCE_INLINE volatile u32 *atomic_u32_raw(struct atomic_u32 *x) { return &x->_v; } +FORCE_INLINE u32 atomic_u32_eval_xor(struct atomic_u32 *x, u32 c) { return (u32)_InterlockedXor((volatile 
long *)&x->_v, c); } +FORCE_INLINE u32 atomic_u32_eval_add_u32(struct atomic_u32 *x, u32 a) { return (u32)_InterlockedExchangeAdd((volatile long *)&x->_v, (long)a); } +FORCE_INLINE u32 atomic_u32_eval_add_i32(struct atomic_u32 *x, i32 a) { return (u32)_InterlockedExchangeAdd((volatile long *)&x->_v, (long)a); } FORCE_INLINE u64 atomic_u64_eval(struct atomic_u64 *x) { return (u64)_InterlockedCompareExchange64((volatile i64 *)&x->_v, 0, 0); } -FORCE_INLINE u64 atomic_u64_inc_eval(struct atomic_u64 *x) { return (u64)_InterlockedIncrement64((volatile i64 *)&x->_v); } -FORCE_INLINE u64 atomic_u64_dec_eval(struct atomic_u64 *x) { return (u64)_InterlockedDecrement64((volatile i64 *)&x->_v); } -FORCE_INLINE u64 atomic_u64_eval_add_u64(struct atomic_u64 *x, u64 a) { return (u64)_InterlockedExchangeAdd64((volatile i64 *)&x->_v, (i64)a); } -FORCE_INLINE u64 atomic_u64_eval_add_i64(struct atomic_u64 *x, i64 a) { return (u64)_InterlockedExchangeAdd64((volatile i64 *)&x->_v, (i64)a); } FORCE_INLINE u64 atomic_u64_eval_exchange(struct atomic_u64 *x, u64 e) { return (u64)_InterlockedExchange64((volatile i64 *)&x->_v, (i64)e); } FORCE_INLINE u64 atomic_u64_eval_compare_exchange(struct atomic_u64 *x, u64 c, u64 e) { return (u64)_InterlockedCompareExchange64((volatile i64 *)&x->_v, (i64)e, (i64)c); } -FORCE_INLINE volatile u64 *atomic_u64_raw(struct atomic_u64 *x) { return &x->_v; } +FORCE_INLINE u64 atomic_u64_eval_xor(struct atomic_u64 *x, u64 c) { return (u64)_InterlockedXor64((volatile i64 *)&x->_v, c); } +FORCE_INLINE u64 atomic_u64_eval_add_u64(struct atomic_u64 *x, u64 a) { return (u64)_InterlockedExchangeAdd64((volatile i64 *)&x->_v, (i64)a); } +FORCE_INLINE u64 atomic_u64_eval_add_i64(struct atomic_u64 *x, i64 a) { return (u64)_InterlockedExchangeAdd64((volatile i64 *)&x->_v, (i64)a); } FORCE_INLINE void *atomic_ptr_eval(struct atomic_ptr *x) { return (void *)_InterlockedCompareExchange64((volatile i64 *)&x->_v, 0, 0); } +FORCE_INLINE void *atomic_ptr_eval_exchange(struct 
atomic_ptr *x, void *e) { return (void *)_InterlockedExchange64((volatile i64 *)&x->_v, (i64)e); } FORCE_INLINE void *atomic_ptr_eval_compare_exchange(struct atomic_ptr *x, void *c, void *e) { return (void *)_InterlockedCompareExchange64((volatile i64 *)&x->_v, (i64)e, (i64)c); } -FORCE_INLINE volatile void **atomic_ptr_raw(struct atomic_ptr *x) { return &x->_v; } #else # error "Atomics not implemented" diff --git a/src/sim.h b/src/sim.h index 5788a4b2..c102193b 100644 --- a/src/sim.h +++ b/src/sim.h @@ -140,9 +140,7 @@ enum sim_control_flag { SIM_CONTROL_FLAG_DRAG = 1 << 1, SIM_CONTROL_FLAG_CLEAR_ALL = 1 << 2, SIM_CONTROL_FLAG_SPAWN_TEST = 1 << 3, - SIM_CONTROL_FLAG_PAUSE = 1 << 4, - SIM_CONTROL_FLAG_STEP = 1 << 5, - SIM_CONTROL_FLAG_TILE_TEST = 1 << 6, + SIM_CONTROL_FLAG_TILE_TEST = 1 << 4, }; struct sim_control { diff --git a/src/sim_ent.h b/src/sim_ent.h index 95112479..dcc3e393 100644 --- a/src/sim_ent.h +++ b/src/sim_ent.h @@ -40,7 +40,9 @@ enum sim_ent_prop { SEPROP_CAMERA, SEPROP_CAMERA_ACTIVE, - SEPROP_WEAPON_BULLETS, + SEPROP_WEAPON_SMG, + SEPROP_WEAPON_LAUNCHER, + SEPROP_TRIGGERING_EQUIPPED, SEPROP_TRIGGERED_THIS_TICK, SEPROP_TRIGGER_NEXT_TICK, diff --git a/src/sim_step.c b/src/sim_step.c index d711f4c5..05efe4ad 100644 --- a/src/sim_step.c +++ b/src/sim_step.c @@ -120,7 +120,7 @@ INTERNAL struct sim_ent *spawn_test_employee(struct sim_step_ctx *ctx) e->attach_slice = LIT("attach.wep"); e->layer = SIM_LAYER_RELATIVE_WEAPON; - sim_ent_enable_prop(e, SEPROP_WEAPON_BULLETS); + sim_ent_enable_prop(e, SEPROP_WEAPON_LAUNCHER); e->trigger_delay = 1.0f / 10.0f; //e->trigger_delay = 1.0f / 100.0f; @@ -877,14 +877,12 @@ void sim_step(struct sim_step_ctx *ctx) ent->last_triggered_ns = world->sim_time_ns; - /* Fire weapon */ - if (sim_ent_has_prop(ent, SEPROP_WEAPON_BULLETS)) { + /* Fire smg */ + if (sim_ent_has_prop(ent, SEPROP_WEAPON_SMG)) { struct sprite_tag sprite = ent->sprite; u32 animation_frame = ent->animation_frame; struct sprite_sheet *sheet = 
sprite_sheet_from_tag_await(sprite_frame_scope, sprite); - struct xform sprite_local_xform = ent->sprite_local_xform; - struct sprite_sheet_slice out_slice = sprite_sheet_get_slice(sheet, LIT("out"), animation_frame); struct v2 rel_pos = xform_mul_v2(sprite_local_xform, out_slice.center); struct v2 rel_dir = xform_basis_mul_v2(sprite_local_xform, out_slice.dir); @@ -927,6 +925,50 @@ void sim_step(struct sim_step_ctx *ctx) bullet->bullet_tracer = tracer->id; } } + + /* Fire launcher */ + if (sim_ent_has_prop(ent, SEPROP_WEAPON_LAUNCHER)) { + struct sprite_tag sprite = ent->sprite; + u32 animation_frame = ent->animation_frame; + struct sprite_sheet *sheet = sprite_sheet_from_tag_await(sprite_frame_scope, sprite); + struct xform sprite_local_xform = ent->sprite_local_xform; + struct sprite_sheet_slice out_slice = sprite_sheet_get_slice(sheet, LIT("out"), animation_frame); + struct v2 rel_pos = xform_mul_v2(sprite_local_xform, out_slice.center); + struct v2 rel_dir = xform_basis_mul_v2(sprite_local_xform, out_slice.dir); + + /* Spawn bullet */ + struct sim_ent *bullet; + { + bullet = sim_ent_alloc_sync_src(root); + + bullet->bullet_src = ent->id; + bullet->bullet_src_pos = rel_pos; + bullet->bullet_src_dir = rel_dir; + //bullet->bullet_impulse = 0.75f; + bullet->bullet_impulse = 2.0f; + bullet->bullet_knockback = 10; + bullet->mass_unscaled = 0.04f; + bullet->inertia_unscaled = 0.00001f; + bullet->layer = SIM_LAYER_BULLETS; + + /* Point collider */ + bullet->local_collider.points[0] = V2(0, 0); + bullet->local_collider.count = 1; + + sim_ent_enable_prop(bullet, SEPROP_BULLET); + sim_ent_enable_prop(bullet, SEPROP_SENSOR); + } + + /* Spawn tracer */ + { + struct sim_ent *tracer = sim_ent_alloc_sync_src(root); + tracer->tracer_fade_duration = 0.025f; + tracer->layer = SIM_LAYER_TRACERS; + sim_ent_enable_prop(tracer, SEPROP_TRACER); + + bullet->bullet_tracer = tracer->id; + } + } } /* ========================== * diff --git a/src/sprite.c b/src/sprite.c index 
49f819c8..3b5e2bdd 100644 --- a/src/sprite.c +++ b/src/sprite.c @@ -1281,7 +1281,8 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg) b32 cache_over_budget_target = atomic_u64_eval(&G.cache.memory_usage) > CACHE_MEMORY_BUDGET_TARGET; struct sys_lock bin_lock = sys_mutex_lock_e(&bin->mutex); { - struct cache_refcount refcount = *(struct cache_refcount *)atomic_u64_raw(&entry->refcount_struct); + u64 refcount_uncast = atomic_u64_eval(&entry->refcount_struct); + struct cache_refcount refcount = *(struct cache_refcount *)&refcount_uncast; if (refcount.count > 0 || (last_ref_cycle >= 0 && refcount.last_ref_cycle != en->last_ref_cycle)) { /* Cache node has been referenced since scan, skip node. */ } else if (cache_over_budget_target || last_ref_cycle < 0) { @@ -1339,7 +1340,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg) } } } - atomic_i32_inc_eval(&G.evictor_cycle); + atomic_i32_eval_add(&G.evictor_cycle, 1); scratch_end(scratch); /* Wait */ diff --git a/src/sys_win32.c b/src/sys_win32.c index 2c3b9346..56147791 100644 --- a/src/sys_win32.c +++ b/src/sys_win32.c @@ -1606,7 +1606,7 @@ struct sys_lock sys_mutex_lock_e(struct sys_mutex *mutex) __proflock_after_exclusive_lock(mutex->profiling_ctx); #if RTC mutex->owner_tid = (u64)GetCurrentThreadId(); - atomic_i64_inc_eval(&mutex->count); + atomic_i64_eval_add(&mutex->count, 1); #endif struct sys_lock lock = ZI; lock.exclusive = true; @@ -1621,7 +1621,7 @@ struct sys_lock sys_mutex_lock_s(struct sys_mutex *mutex) AcquireSRWLockShared((SRWLOCK *)&mutex->handle); __proflock_after_shared_lock(mutex->profiling_ctx); #if RTC - atomic_i64_inc_eval(&mutex->count); + atomic_i64_eval_add(&mutex->count, 1); #endif struct sys_lock lock = ZI; lock.mutex = mutex; @@ -1632,7 +1632,7 @@ void sys_mutex_unlock(struct sys_lock *lock) { __prof; #if RTC - atomic_i64_dec_eval(&lock->mutex->count); + atomic_i64_eval_add(&lock->mutex->count, -1); lock->mutex->owner_tid = 0; 
#endif if (lock->exclusive) { @@ -1715,11 +1715,11 @@ void sys_condition_variable_wait(struct sys_condition_variable *cv, struct sys_l struct sys_mutex *mutex = lock->mutex; b32 exclusive = lock->exclusive; #if RTC - atomic_i64_inc_eval(&cv->num_waiters); + atomic_i64_eval_add(&cv->num_waiters, 1); if (exclusive) { mutex->owner_tid = 0; } - atomic_i64_dec_eval(&mutex->count); + atomic_i64_eval_add(&mutex->count, -1); #endif struct win32_condition_variable *w32cv = (struct win32_condition_variable *)cv->handle; @@ -1739,11 +1739,11 @@ void sys_condition_variable_wait(struct sys_condition_variable *cv, struct sys_l } #if RTC - atomic_i64_inc_eval(&mutex->count); + atomic_i64_eval_add(&mutex->count, 1); if (exclusive) { mutex->owner_tid = (u64)GetCurrentThreadId(); } - atomic_i64_dec_eval(&cv->num_waiters); + atomic_i64_eval_add(&cv->num_waiters, -1); #endif } @@ -1753,11 +1753,11 @@ void sys_condition_variable_wait_time(struct sys_condition_variable *cv, struct struct sys_mutex *mutex = lock->mutex; b32 exclusive = lock->exclusive; #if RTC - atomic_i64_inc_eval(&cv->num_waiters); + atomic_i64_eval_add(&cv->num_waiters, 1); if (exclusive) { mutex->owner_tid = 0; } - atomic_i64_dec_eval(&mutex->count); + atomic_i64_eval_add(&mutex->count, -1); #endif struct win32_condition_variable *w32cv = (struct win32_condition_variable *)cv->handle; u32 ms = (u32)math_round_to_int((f32)seconds * 1000.f); @@ -1778,11 +1778,11 @@ void sys_condition_variable_wait_time(struct sys_condition_variable *cv, struct } #if RTC - atomic_i64_inc_eval(&mutex->count); + atomic_i64_eval_add(&mutex->count, 1); if (exclusive) { mutex->owner_tid = (u64)GetCurrentThreadId(); } - atomic_i64_dec_eval(&cv->num_waiters); + atomic_i64_eval_add(&cv->num_waiters, -1); #endif } diff --git a/src/user.c b/src/user.c index 6b4d1a7f..f5a9f1eb 100644 --- a/src/user.c +++ b/src/user.c @@ -91,6 +91,9 @@ GLOBAL struct { u64 last_user_sim_cmd_gen; u64 user_sim_cmd_gen; + struct atomic_i32 user_paused; + struct 
atomic_i32 user_paused_steps; + /* Local sim -> user */ struct sys_mutex local_to_user_client_mutex; struct sim_client_store *local_to_user_client_store; @@ -511,62 +514,62 @@ INTERNAL void user_update(void) G.local_sim_last_known_tick = newest_snapshot->tick; G.local_sim_predicted_time_ns = newest_snapshot->sim_time_ns + (newest_snapshot->sim_dt_ns * tick_progress); -#if USER_INTERP_ENABLED - /* Determine render time */ - G.render_time_target_ns = G.local_sim_predicted_time_ns - (USER_INTERP_RATIO * G.average_local_to_user_snapshot_publish_dt_ns); - if (G.average_local_to_user_snapshot_publish_dt_ns > 0) { - /* Increment render time based on average publish dt */ - f64 sim_publish_timescale = (f64)newest_snapshot->sim_dt_ns / (f64)G.average_local_to_user_snapshot_publish_dt_ns; - G.render_time_ns += G.real_dt_ns * sim_publish_timescale; - } - i64 render_time_target_diff_ns = G.render_time_target_ns - G.render_time_ns; - if (render_time_target_diff_ns > NS_FROM_SECONDS(0.010) || render_time_target_diff_ns < NS_FROM_SECONDS(-0.005)) { - /* Snap render time if it gets too out of sync with target render time */ - G.render_time_ns = G.render_time_target_ns; - } - - /* Get two snapshots nearest to render time */ - struct sim_snapshot *left_snapshot = sim_snapshot_nil(); - struct sim_snapshot *right_snapshot = newest_snapshot; - { - struct sim_snapshot *ss = sim_snapshot_from_tick(G.user_unblended_client, G.user_unblended_client->first_tick); - while (ss->valid) { - u64 next_tick = ss->next_tick; - i64 ss_time_ns = ss->sim_time_ns; - if (ss_time_ns < G.render_time_ns && ss_time_ns > left_snapshot->sim_time_ns) { - left_snapshot = ss; - } - if (ss_time_ns > G.render_time_ns && ss_time_ns < right_snapshot->sim_time_ns) { - right_snapshot = ss; - } - ss = sim_snapshot_from_tick(G.user_unblended_client, next_tick); + if (USER_INTERP_ENABLED && !atomic_i32_eval(&G.user_paused)) { + /* Determine render time */ + G.render_time_target_ns = G.local_sim_predicted_time_ns - 
(USER_INTERP_RATIO * G.average_local_to_user_snapshot_publish_dt_ns); + if (G.average_local_to_user_snapshot_publish_dt_ns > 0) { + /* Increment render time based on average publish dt */ + f64 sim_publish_timescale = (f64)newest_snapshot->sim_dt_ns / (f64)G.average_local_to_user_snapshot_publish_dt_ns; + G.render_time_ns += G.real_dt_ns * sim_publish_timescale; } - } - - /* Create world from blended snapshots */ - if (left_snapshot->valid && right_snapshot->valid) { - f64 blend = (f64)(G.render_time_ns - left_snapshot->sim_time_ns) / (f64)(right_snapshot->sim_time_ns - left_snapshot->sim_time_ns); - G.ss_blended = sim_snapshot_alloc_from_lerp(G.user_blended_client, left_snapshot, right_snapshot, blend); - } else if (left_snapshot->valid) { - G.ss_blended = sim_snapshot_alloc(G.user_blended_client, left_snapshot, left_snapshot->tick); - } else if (right_snapshot->valid) { - G.ss_blended = sim_snapshot_alloc(G.user_blended_client, right_snapshot, right_snapshot->tick); - } -#else - /* Interp disabled, just copy latest snapshot */ - G.render_time_target_ns = newest_snapshot->sim_time_ns; - G.render_time_ns = newest_snapshot->sim_time_ns; - if (G.ss_blended->tick != newest_snapshot->tick) { - if (G.ss_blended->valid) { - sim_snapshot_release(G.ss_blended); + i64 render_time_target_diff_ns = G.render_time_target_ns - G.render_time_ns; + if (render_time_target_diff_ns > NS_FROM_SECONDS(0.010) || render_time_target_diff_ns < NS_FROM_SECONDS(-0.005)) { + /* Snap render time if it gets too out of sync with target render time */ + G.render_time_ns = G.render_time_target_ns; } - G.ss_blended = sim_snapshot_alloc(G.user_blended_client, newest_snapshot, newest_snapshot->tick); - } -#endif - /* Release unneeded unblended sim snapshots */ - if (left_snapshot->tick > 0) { - sim_snapshot_release_ticks_in_range(G.user_unblended_client, 0, left_snapshot->tick - 1); + /* Get two snapshots nearest to render time */ + struct sim_snapshot *left_snapshot = sim_snapshot_nil(); + struct 
sim_snapshot *right_snapshot = newest_snapshot; + { + struct sim_snapshot *ss = sim_snapshot_from_tick(G.user_unblended_client, G.user_unblended_client->first_tick); + while (ss->valid) { + u64 next_tick = ss->next_tick; + i64 ss_time_ns = ss->sim_time_ns; + if (ss_time_ns < G.render_time_ns && ss_time_ns > left_snapshot->sim_time_ns) { + left_snapshot = ss; + } + if (ss_time_ns > G.render_time_ns && ss_time_ns < right_snapshot->sim_time_ns) { + right_snapshot = ss; + } + ss = sim_snapshot_from_tick(G.user_unblended_client, next_tick); + } + } + + /* Create world from blended snapshots */ + if (left_snapshot->valid && right_snapshot->valid) { + f64 blend = (f64)(G.render_time_ns - left_snapshot->sim_time_ns) / (f64)(right_snapshot->sim_time_ns - left_snapshot->sim_time_ns); + G.ss_blended = sim_snapshot_alloc_from_lerp(G.user_blended_client, left_snapshot, right_snapshot, blend); + } else if (left_snapshot->valid) { + G.ss_blended = sim_snapshot_alloc(G.user_blended_client, left_snapshot, left_snapshot->tick); + } else if (right_snapshot->valid) { + G.ss_blended = sim_snapshot_alloc(G.user_blended_client, right_snapshot, right_snapshot->tick); + } + + /* Release unneeded unblended sim snapshots */ + if (left_snapshot->tick > 0) { + sim_snapshot_release_ticks_in_range(G.user_unblended_client, 0, left_snapshot->tick - 1); + } + } else { + /* Interp disabled, just copy latest snapshot */ + G.render_time_target_ns = newest_snapshot->sim_time_ns; + G.render_time_ns = newest_snapshot->sim_time_ns; + if (G.ss_blended->tick != newest_snapshot->tick) { + if (G.ss_blended->valid) { + sim_snapshot_release(G.ss_blended); + } + G.ss_blended = sim_snapshot_alloc(G.user_blended_client, newest_snapshot, newest_snapshot->tick); + } } /* Release unneeded blended snapshots */ @@ -1583,10 +1586,10 @@ INTERNAL void user_update(void) struct bind_state fire_state = G.bind_states[USER_BIND_KIND_FIRE]; struct bind_state drag_state = G.bind_states[USER_BIND_KIND_DEBUG_DRAG]; struct 
bind_state clear_state = G.bind_states[USER_BIND_KIND_DEBUG_CLEAR]; - struct bind_state pause_state = G.bind_states[USER_BIND_KIND_DEBUG_PAUSE]; - struct bind_state step_state = G.bind_states[USER_BIND_KIND_DEBUG_STEP]; struct bind_state spawn_state = G.bind_states[USER_BIND_KIND_DEBUG_SPAWN]; struct bind_state tile_state = G.bind_states[USER_BIND_KIND_TILE_TEST]; + struct bind_state pause_state = G.bind_states[USER_BIND_KIND_DEBUG_PAUSE]; + struct bind_state step_state = G.bind_states[USER_BIND_KIND_DEBUG_STEP]; if (fire_state.num_presses || fire_state.is_held) { control.flags |= SIM_CONTROL_FLAG_FIRE; @@ -1597,12 +1600,6 @@ INTERNAL void user_update(void) if (clear_state.num_presses) { control.flags |= SIM_CONTROL_FLAG_CLEAR_ALL; } - if (pause_state.num_presses) { - control.flags |= SIM_CONTROL_FLAG_PAUSE; - } - if (step_state.num_presses) { - control.flags |= SIM_CONTROL_FLAG_STEP; - } if (spawn_state.num_presses) { control.flags |= SIM_CONTROL_FLAG_SPAWN_TEST; } @@ -1610,6 +1607,11 @@ INTERNAL void user_update(void) control.flags |= SIM_CONTROL_FLAG_TILE_TEST; } + if (pause_state.num_presses) { + atomic_i32_eval_xor(&G.user_paused, 1); + } + atomic_i32_eval_add(&G.user_paused_steps, step_state.num_presses_and_repeats); + /* Set user sim control */ { struct sys_lock lock = sys_mutex_lock_e(&G.user_sim_cmd_mutex); @@ -2053,8 +2055,6 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(user_local_sim_thread_entry_point, arg) - - i64 master_blend_time_ns = 0; i64 average_master_receive_dt_ns = 0; i64 last_tick_from_master_received_at_ns = 0; @@ -2249,6 +2249,19 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(user_local_sim_thread_entry_point, arg) } } + b32 should_step = true; + if (atomic_i32_eval(&G.user_paused)) { + should_step = false; + } + if (atomic_i32_eval(&G.user_paused_steps) > 0) { + should_step = true; + atomic_i32_eval_add(&G.user_paused_steps, -1); + } + + if (!should_step) { + goto skip_step; + } + /* Update networked clients */ u64 oldest_client_ack = 0; for 
(u64 i = 0; i < store->num_clients_reserved; ++i) { @@ -2445,7 +2458,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(user_local_sim_thread_entry_point, arg) /* We want to simulate the ahead of the server to predict client input. * How many ticks ahead we want to simulate is a balance between added latency and the server not receiving our inputs on time. - * We can take the server's ack - server's tick to determine how many cmds of ours the server has buffered. + * We can take the server's ack minus the server's tick to determine how many cmds of ours the server has buffered. * * If this buffer gets too low (because we are lagging behind or the connection is unstable), meaning the server is not getting our input on time: * - Shorten local compute rate to increase the rate at which we predict ahead & produce cmds, until the server's ack indicates a buffer size within desired range.