/* WASAPI backend for audio playback
 *
 * Based on mmozeiko's WASAPI examples
 * https://gist.github.com/mmozeiko/5a5b168e61aff4c1eaec0381da62808f#file-win32_wasapi-h
 */
|
|
|
|
PB_WSP_SharedState PB_WSP_shared_state = ZI;
|
|
|
|
////////////////////////////////
//~ Startup
PB_StartupReceipt PB_Startup(MIX_StartupReceipt *mixer_sr)
|
|
{
|
|
__prof;
|
|
PB_WSP_SharedState *g = &PB_WSP_shared_state;
|
|
LAX mixer_sr;
|
|
PB_WSP_InitializeWasapi();
|
|
/* Start playback job */
|
|
RunJob(1, PB_WSP_PlaybackJob, 0, PoolKind_Audio, PriorityKind_High, &g->PB_WSP_PlaybackJob_counter);
|
|
P_OnExit(&PB_WSP_Shutdown);
|
|
|
|
return (PB_StartupReceipt) { 0 };
|
|
}
|
|
|
|
P_ExitFuncDef(PB_WSP_Shutdown)
|
|
{
|
|
__prof;
|
|
PB_WSP_SharedState *g = &PB_WSP_shared_state;
|
|
Atomic32FetchSet(&g->shutdown, 1);
|
|
WaitOnCounter(&g->PB_WSP_PlaybackJob_counter);
|
|
}
|
|
|
|
void PB_WSP_InitializeWasapi(void)
|
|
{
|
|
PB_WSP_SharedState *g = &PB_WSP_shared_state;
|
|
u64 sample_rate = PB_SampleRate;
|
|
u64 channel_count = 2;
|
|
u32 channel_mask = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT;
|
|
|
|
/* Create enumerator to get audio device */
|
|
IMMDeviceEnumerator *enumerator;
|
|
CoCreateInstance(&CLSID_MMDeviceEnumerator, 0, CLSCTX_ALL, &IID_IMMDeviceEnumerator, (LPVOID *)&enumerator);
|
|
|
|
/* Get default playback device */
|
|
IMMDevice *device;
|
|
IMMDeviceEnumerator_GetDefaultAudioEndpoint(enumerator, eRender, eConsole, &device);
|
|
IMMDeviceEnumerator_Release(enumerator);
|
|
|
|
/* Create audio client for device */
|
|
IMMDevice_Activate(device, &IID_IAudioClient, CLSCTX_ALL, 0, (LPVOID *)&g->client);
|
|
IMMDevice_Release(device);
|
|
|
|
WAVEFORMATEXTENSIBLE format_ex = {
|
|
.Format = {
|
|
.wFormatTag = WAVE_FORMAT_EXTENSIBLE,
|
|
.nChannels = (WORD)channel_count,
|
|
.nSamplesPerSec = (WORD)sample_rate,
|
|
.nAvgBytesPerSec = (DWORD)(sample_rate * channel_count * sizeof(f32)),
|
|
.nBlockAlign = (WORD)(channel_count * sizeof(f32)),
|
|
.wBitsPerSample = (WORD)(8 * sizeof(f32)),
|
|
.cbSize = sizeof(format_ex) - sizeof(format_ex.Format),
|
|
},
|
|
.Samples.wValidBitsPerSample = 8 * sizeof(f32),
|
|
.dwChannelMask = channel_mask,
|
|
.SubFormat = MEDIASUBTYPE_IEEE_FLOAT,
|
|
};
|
|
WAVEFORMATEX *wfx = &format_ex.Format;
|
|
|
|
#if 0
|
|
b32 client_initialized = 0;
|
|
IAudioClient3 *client3;
|
|
if (SUCCEEDED(IAudioClient_QueryInterface(g->client, &IID_IAudioClient3, (LPVOID *)&client3)))
|
|
{
|
|
/* From Martins: Minimum buffer size will typically be 480 samples (10msec @ 48khz)
|
|
* but it can be 128 samples (2.66 msec @ 48khz) if driver is properly installed
|
|
* see bullet-point instructions here: https://learn.microsoft.com/en-us/windows-hardware/drivers/audio/low-latency-audio#measurement-tools
|
|
*/
|
|
UINT32 default_period_samples, fundamental_period_samples, min_period_samples, max_period_samples;
|
|
IAudioClient3_GetSharedModeEnginePeriod(client3, wfx, &default_period_samples, &fundamental_period_samples, &min_period_samples, &max_period_samples);
|
|
|
|
const DWORD flags = AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
|
|
if (SUCCEEDED(IAudioClient3_InitializeSharedAudioStream(client3, flags, min_period_samples, wfx, 0)))
|
|
{
|
|
client_initialized = 1;
|
|
}
|
|
|
|
IAudioClient3_Release(client3);
|
|
}
|
|
#else
|
|
b32 client_initialized = 0;
|
|
#endif
|
|
|
|
if (!client_initialized)
|
|
{
|
|
/* Get duration for shared-mode streams, this will typically be 480 samples (10msec @ 48khz) */
|
|
REFERENCE_TIME duration;
|
|
IAudioClient_GetDevicePeriod(g->client, &duration, 0);
|
|
|
|
/* Initialize audio playback
|
|
*
|
|
* NOTE:
|
|
* Passing AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM will tell WASAPI to
|
|
* always convert to native mixing format. This may introduce latency
|
|
* but allows for any input format.
|
|
*/
|
|
const DWORD flags = AUDCLNT_STREAMFLAGS_EVENTCALLBACK | AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM | AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY;
|
|
IAudioClient_Initialize(g->client, AUDCLNT_SHAREMODE_SHARED, flags, duration, 0, wfx, 0);
|
|
}
|
|
|
|
IAudioClient_GetMixFormat(g->client, &g->buffer_format);
|
|
|
|
/* Set up event handler to wait on */
|
|
g->event = CreateEventW(0, 0, 0, 0);
|
|
IAudioClient_SetEventHandle(g->client, g->event);
|
|
|
|
/* Get playback client */
|
|
IAudioClient_GetService(g->client, &IID_IAudioRenderClient, (LPVOID *)&g->playback);
|
|
|
|
/* Start the playback */
|
|
IAudioClient_Start(g->client);
|
|
|
|
/* Get audio buffer size in samples */
|
|
IAudioClient_GetBufferSize(g->client, &g->buffer_frames);
|
|
}
|
|
|
|
////////////////////////////////
//~ Wasapi update
PB_WSP_Buff PB_WSP_BeginUpdate(void)
|
|
{
|
|
__prof;
|
|
PB_WSP_SharedState *g = &PB_WSP_shared_state;
|
|
PB_WSP_Buff wspbuf = ZI;
|
|
|
|
/* Get padding frames */
|
|
u32 padding_frames;
|
|
IAudioClient_GetCurrentPadding(g->client, &padding_frames);
|
|
|
|
/* Get output buffer from WASAPI */
|
|
wspbuf.frames_count = 0;
|
|
if (padding_frames <= g->buffer_frames)
|
|
{
|
|
wspbuf.frames_count = g->buffer_frames - padding_frames;
|
|
}
|
|
IAudioRenderClient_GetBuffer(g->playback, wspbuf.frames_count, &wspbuf.frames);
|
|
|
|
return wspbuf;
|
|
}
|
|
|
|
void PB_WSP_EndUpdate(PB_WSP_Buff *wspbuf, MIX_PcmF32 src)
|
|
{
|
|
__prof;
|
|
PB_WSP_SharedState *g = &PB_WSP_shared_state;
|
|
u32 frames_in_source = src.count / 2;
|
|
u32 frames_in_output = wspbuf->frames_count;
|
|
|
|
u32 flags = 0;
|
|
if (frames_in_source == frames_in_output)
|
|
{
|
|
/* Copy bytes to output */
|
|
u32 bytes_per_sample = g->buffer_format->nBlockAlign / g->buffer_format->nChannels;
|
|
u32 write_size = frames_in_source * 2 * bytes_per_sample;
|
|
CopyBytes(wspbuf->frames, src.samples, write_size);
|
|
}
|
|
else
|
|
{
|
|
/* Submit silence if not enough samples */
|
|
flags = AUDCLNT_BUFFERFLAGS_SILENT;
|
|
|
|
/* This shouldn't occur, mixer should be generating samples equivilent
|
|
* to value returned from `PB_WSP_BeginUpdate`. */
|
|
Assert(0);
|
|
}
|
|
|
|
#if !AUDIO_ENABLED
|
|
flags = AUDCLNT_BUFFERFLAGS_SILENT;
|
|
#endif
|
|
|
|
/* Submit output buffer to WASAPI */
|
|
IAudioRenderClient_ReleaseBuffer(g->playback, frames_in_source, flags);
|
|
__profframe("Audio");
|
|
}
|
|
|
|
////////////////////////////////
//~ Playback job
JobDef(PB_WSP_PlaybackJob, UNUSED sig, UNUSED id)
|
|
{
|
|
__prof;
|
|
PB_WSP_SharedState *g = &PB_WSP_shared_state;
|
|
|
|
/* FIXME: If playback fails at any point and mixer stops advancing, we
|
|
* need to halt mixer to prevent memory leak when sounds are played. */
|
|
/* TODO: Signal counter that running job wiats on, rather than scheduling job manually */
|
|
while (!Atomic32Fetch(&g->shutdown))
|
|
{
|
|
TempArena scratch = BeginScratchNoConflict();
|
|
{
|
|
__profn("Wasapi wait");
|
|
WaitForSingleObject(g->event, INFINITE);
|
|
}
|
|
{
|
|
__profn("Fill sample buffer");
|
|
PB_WSP_Buff wspbuf = PB_WSP_BeginUpdate();
|
|
MIX_PcmF32 pcm = MIX_MixAllTracks(scratch.arena, wspbuf.frames_count);
|
|
PB_WSP_EndUpdate(&wspbuf, pcm);
|
|
}
|
|
EndScratch(scratch);
|
|
}
|
|
}
|