+ #include <sys/types.h>
+ #include <unistd.h>
+ #include <dlfcn.h>
+#endif
+
+/*
+Unfortunately using runtime linking for pthreads causes problems. This has occurred for me when testing on FreeBSD. When
+using runtime linking, deadlocks can occur (for me it happens when loading data from fread()). It turns out that doing
+compile-time linking fixes this. I'm not sure why this happens, but the safest way I can think of to fix this is to simply
+disable runtime linking by default. To enable runtime linking, #define this before the implementation of this file. I am
+not officially supporting this, but I'm leaving it here in case it's useful for somebody, somewhere.
+*/
+/*#define MA_USE_RUNTIME_LINKING_FOR_PTHREAD*/
+
+/* Disable run-time linking on certain backends. */
+#ifndef MA_NO_RUNTIME_LINKING
+ #if defined(MA_EMSCRIPTEN)
+ #define MA_NO_RUNTIME_LINKING
+ #endif
+#endif
+
+
+MA_API void ma_device_info_add_native_data_format(ma_device_info* pDeviceInfo, ma_format format, ma_uint32 channels, ma_uint32 sampleRate, ma_uint32 flags)
+{
+ if (pDeviceInfo == NULL) {
+ return;
+ }
+
+ if (pDeviceInfo->nativeDataFormatCount < ma_countof(pDeviceInfo->nativeDataFormats)) {
+ pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].format = format;
+ pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].channels = channels;
+ pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].sampleRate = sampleRate;
+ pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].flags = flags;
+ pDeviceInfo->nativeDataFormatCount += 1;
+ }
+}
+
+
+MA_API const char* ma_get_backend_name(ma_backend backend)
+{
+ switch (backend)
+ {
+ case ma_backend_wasapi: return "WASAPI";
+ case ma_backend_dsound: return "DirectSound";
+ case ma_backend_winmm: return "WinMM";
+ case ma_backend_coreaudio: return "Core Audio";
+ case ma_backend_sndio: return "sndio";
+ case ma_backend_audio4: return "audio(4)";
+ case ma_backend_oss: return "OSS";
+ case ma_backend_pulseaudio: return "PulseAudio";
+ case ma_backend_alsa: return "ALSA";
+ case ma_backend_jack: return "JACK";
+ case ma_backend_aaudio: return "AAudio";
+ case ma_backend_opensl: return "OpenSL|ES";
+ case ma_backend_webaudio: return "Web Audio";
+ case ma_backend_custom: return "Custom";
+ case ma_backend_null: return "Null";
+ default: return "Unknown";
+ }
+}
+
/*
Determines whether or not the given backend was compiled into the library. A backend is enabled
when its corresponding MA_HAS_* macro was defined at compile time. Note that an enabled backend
is not necessarily usable at runtime - this only reports that the code for it was compiled in.
*/
MA_API ma_bool32 ma_is_backend_enabled(ma_backend backend)
{
    /*
    This looks a little bit gross, but we want all backends to be included in the switch to avoid warnings on some compilers
    about some enums not being handled by the switch statement.
    */
    switch (backend)
    {
        case ma_backend_wasapi:
        #if defined(MA_HAS_WASAPI)
            return MA_TRUE;
        #else
            return MA_FALSE;
        #endif
        case ma_backend_dsound:
        #if defined(MA_HAS_DSOUND)
            return MA_TRUE;
        #else
            return MA_FALSE;
        #endif
        case ma_backend_winmm:
        #if defined(MA_HAS_WINMM)
            return MA_TRUE;
        #else
            return MA_FALSE;
        #endif
        case ma_backend_coreaudio:
        #if defined(MA_HAS_COREAUDIO)
            return MA_TRUE;
        #else
            return MA_FALSE;
        #endif
        case ma_backend_sndio:
        #if defined(MA_HAS_SNDIO)
            return MA_TRUE;
        #else
            return MA_FALSE;
        #endif
        case ma_backend_audio4:
        #if defined(MA_HAS_AUDIO4)
            return MA_TRUE;
        #else
            return MA_FALSE;
        #endif
        case ma_backend_oss:
        #if defined(MA_HAS_OSS)
            return MA_TRUE;
        #else
            return MA_FALSE;
        #endif
        case ma_backend_pulseaudio:
        #if defined(MA_HAS_PULSEAUDIO)
            return MA_TRUE;
        #else
            return MA_FALSE;
        #endif
        case ma_backend_alsa:
        #if defined(MA_HAS_ALSA)
            return MA_TRUE;
        #else
            return MA_FALSE;
        #endif
        case ma_backend_jack:
        #if defined(MA_HAS_JACK)
            return MA_TRUE;
        #else
            return MA_FALSE;
        #endif
        case ma_backend_aaudio:
        #if defined(MA_HAS_AAUDIO)
            return MA_TRUE;
        #else
            return MA_FALSE;
        #endif
        case ma_backend_opensl:
        #if defined(MA_HAS_OPENSL)
            return MA_TRUE;
        #else
            return MA_FALSE;
        #endif
        case ma_backend_webaudio:
        #if defined(MA_HAS_WEBAUDIO)
            return MA_TRUE;
        #else
            return MA_FALSE;
        #endif
        case ma_backend_custom:
        #if defined(MA_HAS_CUSTOM)
            return MA_TRUE;
        #else
            return MA_FALSE;
        #endif
        case ma_backend_null:
        #if defined(MA_HAS_NULL)
            return MA_TRUE;
        #else
            return MA_FALSE;
        #endif

        default: return MA_FALSE;
    }
}
+
+MA_API ma_result ma_get_enabled_backends(ma_backend* pBackends, size_t backendCap, size_t* pBackendCount)
+{
+ size_t backendCount;
+ size_t iBackend;
+ ma_result result = MA_SUCCESS;
+
+ if (pBackendCount == NULL) {
+ return MA_INVALID_ARGS;
+ }
+
+ backendCount = 0;
+
+ for (iBackend = 0; iBackend <= ma_backend_null; iBackend += 1) {
+ ma_backend backend = (ma_backend)iBackend;
+
+ if (ma_is_backend_enabled(backend)) {
+ /* The backend is enabled. Try adding it to the list. If there's no room, MA_NO_SPACE needs to be returned. */
+ if (backendCount == backendCap) {
+ result = MA_NO_SPACE;
+ break;
+ } else {
+ pBackends[backendCount] = backend;
+ backendCount += 1;
+ }
+ }
+ }
+
+ if (pBackendCount != NULL) {
+ *pBackendCount = backendCount;
+ }
+
+ return result;
+}
+
+MA_API ma_bool32 ma_is_loopback_supported(ma_backend backend)
+{
+ switch (backend)
+ {
+ case ma_backend_wasapi: return MA_TRUE;
+ case ma_backend_dsound: return MA_FALSE;
+ case ma_backend_winmm: return MA_FALSE;
+ case ma_backend_coreaudio: return MA_FALSE;
+ case ma_backend_sndio: return MA_FALSE;
+ case ma_backend_audio4: return MA_FALSE;
+ case ma_backend_oss: return MA_FALSE;
+ case ma_backend_pulseaudio: return MA_FALSE;
+ case ma_backend_alsa: return MA_FALSE;
+ case ma_backend_jack: return MA_FALSE;
+ case ma_backend_aaudio: return MA_FALSE;
+ case ma_backend_opensl: return MA_FALSE;
+ case ma_backend_webaudio: return MA_FALSE;
+ case ma_backend_custom: return MA_FALSE; /* <-- Will depend on the implementation of the backend. */
+ case ma_backend_null: return MA_FALSE;
+ default: return MA_FALSE;
+ }
+}
+
+
+
+#ifdef MA_WIN32
+/* WASAPI error codes. */
+#define MA_AUDCLNT_E_NOT_INITIALIZED ((HRESULT)0x88890001)
+#define MA_AUDCLNT_E_ALREADY_INITIALIZED ((HRESULT)0x88890002)
+#define MA_AUDCLNT_E_WRONG_ENDPOINT_TYPE ((HRESULT)0x88890003)
+#define MA_AUDCLNT_E_DEVICE_INVALIDATED ((HRESULT)0x88890004)
+#define MA_AUDCLNT_E_NOT_STOPPED ((HRESULT)0x88890005)
+#define MA_AUDCLNT_E_BUFFER_TOO_LARGE ((HRESULT)0x88890006)
+#define MA_AUDCLNT_E_OUT_OF_ORDER ((HRESULT)0x88890007)
+#define MA_AUDCLNT_E_UNSUPPORTED_FORMAT ((HRESULT)0x88890008)
+#define MA_AUDCLNT_E_INVALID_SIZE ((HRESULT)0x88890009)
+#define MA_AUDCLNT_E_DEVICE_IN_USE ((HRESULT)0x8889000A)
+#define MA_AUDCLNT_E_BUFFER_OPERATION_PENDING ((HRESULT)0x8889000B)
+#define MA_AUDCLNT_E_THREAD_NOT_REGISTERED ((HRESULT)0x8889000C)
+#define MA_AUDCLNT_E_NO_SINGLE_PROCESS ((HRESULT)0x8889000D)
+#define MA_AUDCLNT_E_EXCLUSIVE_MODE_NOT_ALLOWED ((HRESULT)0x8889000E)
+#define MA_AUDCLNT_E_ENDPOINT_CREATE_FAILED ((HRESULT)0x8889000F)
+#define MA_AUDCLNT_E_SERVICE_NOT_RUNNING ((HRESULT)0x88890010)
+#define MA_AUDCLNT_E_EVENTHANDLE_NOT_EXPECTED ((HRESULT)0x88890011)
+#define MA_AUDCLNT_E_EXCLUSIVE_MODE_ONLY ((HRESULT)0x88890012)
+#define MA_AUDCLNT_E_BUFDURATION_PERIOD_NOT_EQUAL ((HRESULT)0x88890013)
+#define MA_AUDCLNT_E_EVENTHANDLE_NOT_SET ((HRESULT)0x88890014)
+#define MA_AUDCLNT_E_INCORRECT_BUFFER_SIZE ((HRESULT)0x88890015)
+#define MA_AUDCLNT_E_BUFFER_SIZE_ERROR ((HRESULT)0x88890016)
+#define MA_AUDCLNT_E_CPUUSAGE_EXCEEDED ((HRESULT)0x88890017)
+#define MA_AUDCLNT_E_BUFFER_ERROR ((HRESULT)0x88890018)
+#define MA_AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED ((HRESULT)0x88890019)
+#define MA_AUDCLNT_E_INVALID_DEVICE_PERIOD ((HRESULT)0x88890020)
+#define MA_AUDCLNT_E_INVALID_STREAM_FLAG ((HRESULT)0x88890021)
+#define MA_AUDCLNT_E_ENDPOINT_OFFLOAD_NOT_CAPABLE ((HRESULT)0x88890022)
+#define MA_AUDCLNT_E_OUT_OF_OFFLOAD_RESOURCES ((HRESULT)0x88890023)
+#define MA_AUDCLNT_E_OFFLOAD_MODE_ONLY ((HRESULT)0x88890024)
+#define MA_AUDCLNT_E_NONOFFLOAD_MODE_ONLY ((HRESULT)0x88890025)
+#define MA_AUDCLNT_E_RESOURCES_INVALIDATED ((HRESULT)0x88890026)
+#define MA_AUDCLNT_E_RAW_MODE_UNSUPPORTED ((HRESULT)0x88890027)
+#define MA_AUDCLNT_E_ENGINE_PERIODICITY_LOCKED ((HRESULT)0x88890028)
+#define MA_AUDCLNT_E_ENGINE_FORMAT_LOCKED ((HRESULT)0x88890029)
+#define MA_AUDCLNT_E_HEADTRACKING_ENABLED ((HRESULT)0x88890030)
+#define MA_AUDCLNT_E_HEADTRACKING_UNSUPPORTED ((HRESULT)0x88890040)
+#define MA_AUDCLNT_S_BUFFER_EMPTY ((HRESULT)0x08890001)
+#define MA_AUDCLNT_S_THREAD_ALREADY_REGISTERED ((HRESULT)0x08890002)
+#define MA_AUDCLNT_S_POSITION_STALLED ((HRESULT)0x08890003)
+
+#define MA_DS_OK ((HRESULT)0)
+#define MA_DS_NO_VIRTUALIZATION ((HRESULT)0x0878000A)
+#define MA_DSERR_ALLOCATED ((HRESULT)0x8878000A)
+#define MA_DSERR_CONTROLUNAVAIL ((HRESULT)0x8878001E)
+#define MA_DSERR_INVALIDPARAM ((HRESULT)0x80070057) /*E_INVALIDARG*/
+#define MA_DSERR_INVALIDCALL ((HRESULT)0x88780032)
+#define MA_DSERR_GENERIC ((HRESULT)0x80004005) /*E_FAIL*/
+#define MA_DSERR_PRIOLEVELNEEDED ((HRESULT)0x88780046)
+#define MA_DSERR_OUTOFMEMORY ((HRESULT)0x8007000E) /*E_OUTOFMEMORY*/
+#define MA_DSERR_BADFORMAT ((HRESULT)0x88780064)
+#define MA_DSERR_UNSUPPORTED ((HRESULT)0x80004001) /*E_NOTIMPL*/
+#define MA_DSERR_NODRIVER ((HRESULT)0x88780078)
+#define MA_DSERR_ALREADYINITIALIZED ((HRESULT)0x88780082)
+#define MA_DSERR_NOAGGREGATION ((HRESULT)0x80040110) /*CLASS_E_NOAGGREGATION*/
+#define MA_DSERR_BUFFERLOST ((HRESULT)0x88780096)
+#define MA_DSERR_OTHERAPPHASPRIO ((HRESULT)0x887800A0)
+#define MA_DSERR_UNINITIALIZED ((HRESULT)0x887800AA)
+#define MA_DSERR_NOINTERFACE ((HRESULT)0x80004002) /*E_NOINTERFACE*/
+#define MA_DSERR_ACCESSDENIED ((HRESULT)0x80070005) /*E_ACCESSDENIED*/
+#define MA_DSERR_BUFFERTOOSMALL ((HRESULT)0x887800B4)
+#define MA_DSERR_DS8_REQUIRED ((HRESULT)0x887800BE)
+#define MA_DSERR_SENDLOOP ((HRESULT)0x887800C8)
+#define MA_DSERR_BADSENDBUFFERGUID ((HRESULT)0x887800D2)
+#define MA_DSERR_OBJECTNOTFOUND ((HRESULT)0x88781161)
+#define MA_DSERR_FXUNAVAILABLE ((HRESULT)0x887800DC)
+
/*
Maps a Win32 HRESULT to a ma_result. Covers the standard COM errors plus the WASAPI
(AUDCLNT_*) and DirectSound (DSERR_*) codes defined above. DirectSound codes that alias a
standard COM error (E_INVALIDARG, E_FAIL, E_OUTOFMEMORY, etc.) are handled by the standard
case and left commented out below to avoid duplicate case values. Unrecognised codes map
to MA_ERROR.
*/
static ma_result ma_result_from_HRESULT(HRESULT hr)
{
    switch (hr)
    {
        case NOERROR: return MA_SUCCESS;
        /*case S_OK: return MA_SUCCESS;*/

        case E_POINTER: return MA_INVALID_ARGS;
        case E_UNEXPECTED: return MA_ERROR;
        case E_NOTIMPL: return MA_NOT_IMPLEMENTED;
        case E_OUTOFMEMORY: return MA_OUT_OF_MEMORY;
        case E_INVALIDARG: return MA_INVALID_ARGS;
        case E_NOINTERFACE: return MA_API_NOT_FOUND;
        case E_HANDLE: return MA_INVALID_ARGS;
        case E_ABORT: return MA_ERROR;
        case E_FAIL: return MA_ERROR;
        case E_ACCESSDENIED: return MA_ACCESS_DENIED;

        /* WASAPI */
        case MA_AUDCLNT_E_NOT_INITIALIZED: return MA_DEVICE_NOT_INITIALIZED;
        case MA_AUDCLNT_E_ALREADY_INITIALIZED: return MA_DEVICE_ALREADY_INITIALIZED;
        case MA_AUDCLNT_E_WRONG_ENDPOINT_TYPE: return MA_INVALID_ARGS;
        case MA_AUDCLNT_E_DEVICE_INVALIDATED: return MA_UNAVAILABLE;
        case MA_AUDCLNT_E_NOT_STOPPED: return MA_DEVICE_NOT_STOPPED;
        case MA_AUDCLNT_E_BUFFER_TOO_LARGE: return MA_TOO_BIG;
        case MA_AUDCLNT_E_OUT_OF_ORDER: return MA_INVALID_OPERATION;
        case MA_AUDCLNT_E_UNSUPPORTED_FORMAT: return MA_FORMAT_NOT_SUPPORTED;
        case MA_AUDCLNT_E_INVALID_SIZE: return MA_INVALID_ARGS;
        case MA_AUDCLNT_E_DEVICE_IN_USE: return MA_BUSY;
        case MA_AUDCLNT_E_BUFFER_OPERATION_PENDING: return MA_INVALID_OPERATION;
        case MA_AUDCLNT_E_THREAD_NOT_REGISTERED: return MA_DOES_NOT_EXIST;
        case MA_AUDCLNT_E_NO_SINGLE_PROCESS: return MA_INVALID_OPERATION;
        case MA_AUDCLNT_E_EXCLUSIVE_MODE_NOT_ALLOWED: return MA_SHARE_MODE_NOT_SUPPORTED;
        case MA_AUDCLNT_E_ENDPOINT_CREATE_FAILED: return MA_FAILED_TO_OPEN_BACKEND_DEVICE;
        case MA_AUDCLNT_E_SERVICE_NOT_RUNNING: return MA_NOT_CONNECTED;
        case MA_AUDCLNT_E_EVENTHANDLE_NOT_EXPECTED: return MA_INVALID_ARGS;
        case MA_AUDCLNT_E_EXCLUSIVE_MODE_ONLY: return MA_SHARE_MODE_NOT_SUPPORTED;
        case MA_AUDCLNT_E_BUFDURATION_PERIOD_NOT_EQUAL: return MA_INVALID_ARGS;
        case MA_AUDCLNT_E_EVENTHANDLE_NOT_SET: return MA_INVALID_ARGS;
        case MA_AUDCLNT_E_INCORRECT_BUFFER_SIZE: return MA_INVALID_ARGS;
        case MA_AUDCLNT_E_BUFFER_SIZE_ERROR: return MA_INVALID_ARGS;
        case MA_AUDCLNT_E_CPUUSAGE_EXCEEDED: return MA_ERROR;
        case MA_AUDCLNT_E_BUFFER_ERROR: return MA_ERROR;
        case MA_AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED: return MA_INVALID_ARGS;
        case MA_AUDCLNT_E_INVALID_DEVICE_PERIOD: return MA_INVALID_ARGS;
        case MA_AUDCLNT_E_INVALID_STREAM_FLAG: return MA_INVALID_ARGS;
        case MA_AUDCLNT_E_ENDPOINT_OFFLOAD_NOT_CAPABLE: return MA_INVALID_OPERATION;
        case MA_AUDCLNT_E_OUT_OF_OFFLOAD_RESOURCES: return MA_OUT_OF_MEMORY;
        case MA_AUDCLNT_E_OFFLOAD_MODE_ONLY: return MA_INVALID_OPERATION;
        case MA_AUDCLNT_E_NONOFFLOAD_MODE_ONLY: return MA_INVALID_OPERATION;
        case MA_AUDCLNT_E_RESOURCES_INVALIDATED: return MA_INVALID_DATA;
        case MA_AUDCLNT_E_RAW_MODE_UNSUPPORTED: return MA_INVALID_OPERATION;
        case MA_AUDCLNT_E_ENGINE_PERIODICITY_LOCKED: return MA_INVALID_OPERATION;
        case MA_AUDCLNT_E_ENGINE_FORMAT_LOCKED: return MA_INVALID_OPERATION;
        case MA_AUDCLNT_E_HEADTRACKING_ENABLED: return MA_INVALID_OPERATION;
        case MA_AUDCLNT_E_HEADTRACKING_UNSUPPORTED: return MA_INVALID_OPERATION;
        case MA_AUDCLNT_S_BUFFER_EMPTY: return MA_NO_SPACE;
        case MA_AUDCLNT_S_THREAD_ALREADY_REGISTERED: return MA_ALREADY_EXISTS;
        case MA_AUDCLNT_S_POSITION_STALLED: return MA_ERROR;

        /* DirectSound */
        /*case MA_DS_OK: return MA_SUCCESS;*/ /* S_OK */
        case MA_DS_NO_VIRTUALIZATION: return MA_SUCCESS;
        case MA_DSERR_ALLOCATED: return MA_ALREADY_IN_USE;
        case MA_DSERR_CONTROLUNAVAIL: return MA_INVALID_OPERATION;
        /*case MA_DSERR_INVALIDPARAM: return MA_INVALID_ARGS;*/ /* E_INVALIDARG */
        case MA_DSERR_INVALIDCALL: return MA_INVALID_OPERATION;
        /*case MA_DSERR_GENERIC: return MA_ERROR;*/ /* E_FAIL */
        case MA_DSERR_PRIOLEVELNEEDED: return MA_INVALID_OPERATION;
        /*case MA_DSERR_OUTOFMEMORY: return MA_OUT_OF_MEMORY;*/ /* E_OUTOFMEMORY */
        case MA_DSERR_BADFORMAT: return MA_FORMAT_NOT_SUPPORTED;
        /*case MA_DSERR_UNSUPPORTED: return MA_NOT_IMPLEMENTED;*/ /* E_NOTIMPL */
        case MA_DSERR_NODRIVER: return MA_FAILED_TO_INIT_BACKEND;
        case MA_DSERR_ALREADYINITIALIZED: return MA_DEVICE_ALREADY_INITIALIZED;
        case MA_DSERR_NOAGGREGATION: return MA_ERROR;
        case MA_DSERR_BUFFERLOST: return MA_UNAVAILABLE;
        case MA_DSERR_OTHERAPPHASPRIO: return MA_ACCESS_DENIED;
        case MA_DSERR_UNINITIALIZED: return MA_DEVICE_NOT_INITIALIZED;
        /*case MA_DSERR_NOINTERFACE: return MA_API_NOT_FOUND;*/ /* E_NOINTERFACE */
        /*case MA_DSERR_ACCESSDENIED: return MA_ACCESS_DENIED;*/ /* E_ACCESSDENIED */
        case MA_DSERR_BUFFERTOOSMALL: return MA_NO_SPACE;
        case MA_DSERR_DS8_REQUIRED: return MA_INVALID_OPERATION;
        case MA_DSERR_SENDLOOP: return MA_DEADLOCK;
        case MA_DSERR_BADSENDBUFFERGUID: return MA_INVALID_ARGS;
        case MA_DSERR_OBJECTNOTFOUND: return MA_NO_DEVICE;
        case MA_DSERR_FXUNAVAILABLE: return MA_UNAVAILABLE;

        default: return MA_ERROR;
    }
}
+
+typedef HRESULT (WINAPI * MA_PFN_CoInitializeEx)(LPVOID pvReserved, DWORD dwCoInit);
+typedef void (WINAPI * MA_PFN_CoUninitialize)(void);
+typedef HRESULT (WINAPI * MA_PFN_CoCreateInstance)(REFCLSID rclsid, LPUNKNOWN pUnkOuter, DWORD dwClsContext, REFIID riid, LPVOID *ppv);
+typedef void (WINAPI * MA_PFN_CoTaskMemFree)(LPVOID pv);
+typedef HRESULT (WINAPI * MA_PFN_PropVariantClear)(PROPVARIANT *pvar);
+typedef int (WINAPI * MA_PFN_StringFromGUID2)(const GUID* const rguid, LPOLESTR lpsz, int cchMax);
+
+typedef HWND (WINAPI * MA_PFN_GetForegroundWindow)(void);
+typedef HWND (WINAPI * MA_PFN_GetDesktopWindow)(void);
+
+#if defined(MA_WIN32_DESKTOP)
+/* Microsoft documents these APIs as returning LSTATUS, but the Win32 API shipping with some compilers do not define it. It's just a LONG. */
+typedef LONG (WINAPI * MA_PFN_RegOpenKeyExA)(HKEY hKey, LPCSTR lpSubKey, DWORD ulOptions, REGSAM samDesired, PHKEY phkResult);
+typedef LONG (WINAPI * MA_PFN_RegCloseKey)(HKEY hKey);
+typedef LONG (WINAPI * MA_PFN_RegQueryValueExA)(HKEY hKey, LPCSTR lpValueName, LPDWORD lpReserved, LPDWORD lpType, LPBYTE lpData, LPDWORD lpcbData);
+#endif /* MA_WIN32_DESKTOP */
+#endif /* MA_WIN32 */
+
+
+#define MA_DEFAULT_PLAYBACK_DEVICE_NAME "Default Playback Device"
+#define MA_DEFAULT_CAPTURE_DEVICE_NAME "Default Capture Device"
+
+
+
+
+/*******************************************************************************
+
+Timing
+
+*******************************************************************************/
+#ifdef MA_WIN32
+ static LARGE_INTEGER g_ma_TimerFrequency; /* <-- Initialized to zero since it's static. */
    /* Initializes the timer by capturing the current performance counter value. */
    void ma_timer_init(ma_timer* pTimer)
    {
        LARGE_INTEGER counter;

        /*
        Lazily cache the performance counter frequency. It's constant for the lifetime of the process.
        NOTE(review): this lazy init is not thread-safe - concurrent first calls may both write the
        global. Both would write the same value, but confirm timers are initialized from one thread.
        */
        if (g_ma_TimerFrequency.QuadPart == 0) {
            QueryPerformanceFrequency(&g_ma_TimerFrequency);
        }

        QueryPerformanceCounter(&counter);
        pTimer->counter = counter.QuadPart;
    }
+
+ double ma_timer_get_time_in_seconds(ma_timer* pTimer)
+ {
+ LARGE_INTEGER counter;
+ if (!QueryPerformanceCounter(&counter)) {
+ return 0;
+ }
+
+ return (double)(counter.QuadPart - pTimer->counter) / g_ma_TimerFrequency.QuadPart;
+ }
+#elif defined(MA_APPLE) && (__MAC_OS_X_VERSION_MIN_REQUIRED < 101200)
+ static ma_uint64 g_ma_TimerFrequency = 0;
    /* Initializes the timer using mach_absolute_time(), converting the timebase to ticks-per-second. */
    static void ma_timer_init(ma_timer* pTimer)
    {
        mach_timebase_info_data_t baseTime;
        mach_timebase_info(&baseTime);
        /* NOTE(review): the double result is truncated on assignment to the integer g_ma_TimerFrequency - confirm losing the fractional part is acceptable. */
        g_ma_TimerFrequency = (baseTime.denom * 1e9) / baseTime.numer;

        pTimer->counter = mach_absolute_time();
    }
+
+ static double ma_timer_get_time_in_seconds(ma_timer* pTimer)
+ {
+ ma_uint64 newTimeCounter = mach_absolute_time();
+ ma_uint64 oldTimeCounter = pTimer->counter;
+
+ return (newTimeCounter - oldTimeCounter) / g_ma_TimerFrequency;
+ }
+#elif defined(MA_EMSCRIPTEN)
    /* Initializes the timer. emscripten_get_now() returns milliseconds as a double, hence the double counter. */
    static MA_INLINE void ma_timer_init(ma_timer* pTimer)
    {
        pTimer->counterD = emscripten_get_now();
    }
+
    /* Returns the time in seconds elapsed since ma_timer_init() was called. */
    static MA_INLINE double ma_timer_get_time_in_seconds(ma_timer* pTimer)
    {
        return (emscripten_get_now() - pTimer->counterD) / 1000; /* Emscripten is in milliseconds. */
    }
+#else
+ #if defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE >= 199309L
+ #if defined(CLOCK_MONOTONIC)
+ #define MA_CLOCK_ID CLOCK_MONOTONIC
+ #else
+ #define MA_CLOCK_ID CLOCK_REALTIME
+ #endif
+
+ static void ma_timer_init(ma_timer* pTimer)
+ {
+ struct timespec newTime;
+ clock_gettime(MA_CLOCK_ID, &newTime);
+
+ pTimer->counter = (newTime.tv_sec * 1000000000) + newTime.tv_nsec;
+ }
+
+ static double ma_timer_get_time_in_seconds(ma_timer* pTimer)
+ {
+ ma_uint64 newTimeCounter;
+ ma_uint64 oldTimeCounter;
+
+ struct timespec newTime;
+ clock_gettime(MA_CLOCK_ID, &newTime);
+
+ newTimeCounter = (newTime.tv_sec * 1000000000) + newTime.tv_nsec;
+ oldTimeCounter = pTimer->counter;
+
+ return (newTimeCounter - oldTimeCounter) / 1000000000.0;
+ }
+ #else
+ static void ma_timer_init(ma_timer* pTimer)
+ {
+ struct timeval newTime;
+ gettimeofday(&newTime, NULL);
+
+ pTimer->counter = (newTime.tv_sec * 1000000) + newTime.tv_usec;
+ }
+
+ static double ma_timer_get_time_in_seconds(ma_timer* pTimer)
+ {
+ ma_uint64 newTimeCounter;
+ ma_uint64 oldTimeCounter;
+
+ struct timeval newTime;
+ gettimeofday(&newTime, NULL);
+
+ newTimeCounter = (newTime.tv_sec * 1000000) + newTime.tv_usec;
+ oldTimeCounter = pTimer->counter;
+
+ return (newTimeCounter - oldTimeCounter) / 1000000.0;
+ }
+ #endif
+#endif
+
+
+/*******************************************************************************
+
+Dynamic Linking
+
+*******************************************************************************/
+MA_API ma_handle ma_dlopen(ma_context* pContext, const char* filename)
+{
+ ma_handle handle;
+
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "Loading library: %s\n", filename);
+
+#ifdef _WIN32
+#ifdef MA_WIN32_DESKTOP
+ handle = (ma_handle)LoadLibraryA(filename);
+#else
+ /* *sigh* It appears there is no ANSI version of LoadPackagedLibrary()... */
+ WCHAR filenameW[4096];
+ if (MultiByteToWideChar(CP_UTF8, 0, filename, -1, filenameW, sizeof(filenameW)) == 0) {
+ handle = NULL;
+ } else {
+ handle = (ma_handle)LoadPackagedLibrary(filenameW, 0);
+ }
+#endif
+#else
+ handle = (ma_handle)dlopen(filename, RTLD_NOW);
+#endif
+
+ /*
+ I'm not considering failure to load a library an error nor a warning because seamlessly falling through to a lower-priority
+ backend is a deliberate design choice. Instead I'm logging it as an informational message.
+ */
+ if (handle == NULL) {
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_INFO, "Failed to load library: %s\n", filename);
+ }
+
+ (void)pContext; /* It's possible for pContext to be unused. */
+ return handle;
+}
+
/*
Closes a library handle previously opened with ma_dlopen(). pContext is currently unused and
is kept for API symmetry with ma_dlopen()/ma_dlsym().
*/
MA_API void ma_dlclose(ma_context* pContext, ma_handle handle)
{
#ifdef _WIN32
    FreeLibrary((HMODULE)handle);
#else
    dlclose((void*)handle);
#endif

    (void)pContext;
}
+
/*
Retrieves the address of the named symbol from a library handle previously opened with
ma_dlopen(). Returns NULL if the symbol could not be found, in which case a warning is logged.
*/
MA_API ma_proc ma_dlsym(ma_context* pContext, ma_handle handle, const char* symbol)
{
    ma_proc proc;

    ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "Loading symbol: %s\n", symbol);

#ifdef _WIN32
    proc = (ma_proc)GetProcAddress((HMODULE)handle, symbol);
#else
#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))
    #pragma GCC diagnostic push
    #pragma GCC diagnostic ignored "-Wpedantic"
#endif
    /* Casting the void* from dlsym() to a function pointer is not strictly ISO C, hence the -Wpedantic suppression above. */
    proc = (ma_proc)dlsym((void*)handle, symbol);
#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))
    #pragma GCC diagnostic pop
#endif
#endif

    if (proc == NULL) {
        ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_WARNING, "Failed to load symbol: %s\n", symbol);
    }

    (void)pContext; /* It's possible for pContext to be unused. */
    return proc;
}
+
+
#if 0
/*
Currently unused (compiled out). Returns the standard sample rate closest to sampleRateIn,
preferring an exact match; ties are broken in favour of the earlier (higher priority) entry
in g_maStandardSampleRatePriorities. Kept for reference.
*/
static ma_uint32 ma_get_closest_standard_sample_rate(ma_uint32 sampleRateIn)
{
    ma_uint32 closestRate = 0;
    ma_uint32 closestDiff = 0xFFFFFFFF;
    size_t iStandardRate;

    for (iStandardRate = 0; iStandardRate < ma_countof(g_maStandardSampleRatePriorities); ++iStandardRate) {
        ma_uint32 standardRate = g_maStandardSampleRatePriorities[iStandardRate];
        ma_uint32 diff;

        if (sampleRateIn > standardRate) {
            diff = sampleRateIn - standardRate;
        } else {
            diff = standardRate - sampleRateIn;
        }

        if (diff == 0) {
            return standardRate; /* The input sample rate is a standard rate. */
        }

        if (closestDiff > diff) {
            closestDiff = diff;
            closestRate = standardRate;
        }
    }

    return closestRate;
}
#endif
+
+
+static MA_INLINE unsigned int ma_device_disable_denormals(ma_device* pDevice)
+{
+ MA_ASSERT(pDevice != NULL);
+
+ if (!pDevice->noDisableDenormals) {
+ return ma_disable_denormals();
+ } else {
+ return 0;
+ }
+}
+
+static MA_INLINE void ma_device_restore_denormals(ma_device* pDevice, unsigned int prevState)
+{
+ MA_ASSERT(pDevice != NULL);
+
+ if (!pDevice->noDisableDenormals) {
+ ma_restore_denormals(prevState);
+ } else {
+ /* Do nothing. */
+ (void)prevState;
+ }
+}
+
+static ma_device_notification ma_device_notification_init(ma_device* pDevice, ma_device_notification_type type)
+{
+ ma_device_notification notification;
+
+ MA_ZERO_OBJECT(¬ification);
+ notification.pDevice = pDevice;
+ notification.type = type;
+
+ return notification;
+}
+
+static void ma_device__on_notification(ma_device_notification notification)
+{
+ MA_ASSERT(notification.pDevice != NULL);
+
+ if (notification.pDevice->onNotification != NULL) {
+ notification.pDevice->onNotification(¬ification);
+ }
+
+ /* TEMP FOR COMPATIBILITY: If it's a stopped notification, fire the onStop callback as well. This is only for backwards compatibility and will be removed. */
+ if (notification.pDevice->onStop != NULL && notification.type == ma_device_notification_type_stopped) {
+ notification.pDevice->onStop(notification.pDevice);
+ }
+}
+
/* Fires the "started" notification, signalling that the device has started processing audio. */
void ma_device__on_notification_started(ma_device* pDevice)
{
    ma_device__on_notification(ma_device_notification_init(pDevice, ma_device_notification_type_started));
}
+
/* Fires the "stopped" notification (which also triggers the legacy onStop callback). */
void ma_device__on_notification_stopped(ma_device* pDevice)
{
    ma_device__on_notification(ma_device_notification_init(pDevice, ma_device_notification_type_stopped));
}
+
/* Fires the "rerouted" notification, signalling that the device has been switched to a different endpoint. */
void ma_device__on_notification_rerouted(ma_device* pDevice)
{
    ma_device__on_notification(ma_device_notification_init(pDevice, ma_device_notification_type_rerouted));
}
+
/* Fires the "interruption began" notification, signalling the device has been interrupted (e.g. by the OS). */
void ma_device__on_notification_interruption_began(ma_device* pDevice)
{
    ma_device__on_notification(ma_device_notification_init(pDevice, ma_device_notification_type_interruption_began));
}
+
/* Fires the "interruption ended" notification, signalling a prior interruption has finished. */
void ma_device__on_notification_interruption_ended(ma_device* pDevice)
{
    ma_device__on_notification(ma_device_notification_init(pDevice, ma_device_notification_type_interruption_ended));
}
+
+
+static void ma_device__on_data_inner(ma_device* pDevice, void* pFramesOut, const void* pFramesIn, ma_uint32 frameCount)
+{
+ MA_ASSERT(pDevice != NULL);
+ MA_ASSERT(pDevice->onData != NULL);
+
+ if (!pDevice->noPreSilencedOutputBuffer && pFramesOut != NULL) {
+ ma_silence_pcm_frames(pFramesOut, frameCount, pDevice->playback.format, pDevice->playback.channels);
+ }
+
+ pDevice->onData(pDevice, pFramesOut, pFramesIn, frameCount);
+}
+
/*
Top-level data dispatch. When noFixedSizedCallback is set, the client callback is invoked
directly with whatever frame count the backend delivered. Otherwise frames are staged through
the capture/playback intermediary buffers so that the client callback always sees exactly
intermediaryBufferCap frames at a time, regardless of the backend's native period size.
*/
static void ma_device__on_data(ma_device* pDevice, void* pFramesOut, const void* pFramesIn, ma_uint32 frameCount)
{
    MA_ASSERT(pDevice != NULL);

    if (pDevice->noFixedSizedCallback) {
        /* Fast path. Not using a fixed sized callback. Process directly from the specified buffers. */
        ma_device__on_data_inner(pDevice, pFramesOut, pFramesIn, frameCount);
    } else {
        /* Slow path. Using a fixed sized callback. Need to use the intermediary buffer. */
        ma_uint32 totalFramesProcessed = 0;

        while (totalFramesProcessed < frameCount) {
            ma_uint32 totalFramesRemaining = frameCount - totalFramesProcessed;
            ma_uint32 framesToProcessThisIteration = 0;

            if (pFramesIn != NULL) {
                /* Capturing. Write to the intermediary buffer. If there's no room, fire the callback to empty it. */
                if (pDevice->capture.intermediaryBufferLen < pDevice->capture.intermediaryBufferCap) {
                    /* There's some room left in the intermediary buffer. Write to it without firing the callback. */
                    framesToProcessThisIteration = totalFramesRemaining;
                    if (framesToProcessThisIteration > pDevice->capture.intermediaryBufferCap - pDevice->capture.intermediaryBufferLen) {
                        framesToProcessThisIteration = pDevice->capture.intermediaryBufferCap - pDevice->capture.intermediaryBufferLen;
                    }

                    ma_copy_pcm_frames(
                        ma_offset_pcm_frames_ptr(pDevice->capture.pIntermediaryBuffer, pDevice->capture.intermediaryBufferLen, pDevice->capture.format, pDevice->capture.channels),
                        ma_offset_pcm_frames_const_ptr(pFramesIn, totalFramesProcessed, pDevice->capture.format, pDevice->capture.channels),
                        framesToProcessThisIteration,
                        pDevice->capture.format, pDevice->capture.channels);

                    pDevice->capture.intermediaryBufferLen += framesToProcessThisIteration;
                }

                if (pDevice->capture.intermediaryBufferLen == pDevice->capture.intermediaryBufferCap) {
                    /* No room left in the intermediary buffer. Fire the data callback. */
                    if (pDevice->type == ma_device_type_duplex) {
                        /* We'll do the duplex data callback later after we've processed the playback data. */
                    } else {
                        ma_device__on_data_inner(pDevice, NULL, pDevice->capture.pIntermediaryBuffer, pDevice->capture.intermediaryBufferCap);

                        /* The intermediary buffer has just been drained. */
                        pDevice->capture.intermediaryBufferLen = 0;
                    }
                }
            }

            if (pFramesOut != NULL) {
                /* Playing back. Read from the intermediary buffer. If there's nothing in it, fire the callback to fill it. */
                if (pDevice->playback.intermediaryBufferLen > 0) {
                    /* There's some content in the intermediary buffer. Read from that without firing the callback. */
                    if (pDevice->type == ma_device_type_duplex) {
                        /* The frames processed this iteration for a duplex device will always be based on the capture side. Leave it unmodified. */
                    } else {
                        framesToProcessThisIteration = totalFramesRemaining;
                        if (framesToProcessThisIteration > pDevice->playback.intermediaryBufferLen) {
                            framesToProcessThisIteration = pDevice->playback.intermediaryBufferLen;
                        }
                    }

                    /* The unread frames sit at the tail of the buffer, hence reading from offset (cap - len). */
                    ma_copy_pcm_frames(
                        ma_offset_pcm_frames_ptr(pFramesOut, totalFramesProcessed, pDevice->playback.format, pDevice->playback.channels),
                        ma_offset_pcm_frames_ptr(pDevice->playback.pIntermediaryBuffer, pDevice->playback.intermediaryBufferCap - pDevice->playback.intermediaryBufferLen, pDevice->playback.format, pDevice->playback.channels),
                        framesToProcessThisIteration,
                        pDevice->playback.format, pDevice->playback.channels);

                    pDevice->playback.intermediaryBufferLen -= framesToProcessThisIteration;
                }

                if (pDevice->playback.intermediaryBufferLen == 0) {
                    /* There's nothing in the intermediary buffer. Fire the data callback to fill it. */
                    if (pDevice->type == ma_device_type_duplex) {
                        /* In duplex mode, the data callback will be fired later. Nothing to do here. */
                    } else {
                        ma_device__on_data_inner(pDevice, pDevice->playback.pIntermediaryBuffer, NULL, pDevice->playback.intermediaryBufferCap);

                        /* The intermediary buffer has just been filled. */
                        pDevice->playback.intermediaryBufferLen = pDevice->playback.intermediaryBufferCap;
                    }
                }
            }

            /* If we're in duplex mode we might need to do a refill of the data. */
            if (pDevice->type == ma_device_type_duplex) {
                if (pDevice->capture.intermediaryBufferLen == pDevice->capture.intermediaryBufferCap) {
                    ma_device__on_data_inner(pDevice, pDevice->playback.pIntermediaryBuffer, pDevice->capture.pIntermediaryBuffer, pDevice->capture.intermediaryBufferCap);

                    pDevice->playback.intermediaryBufferLen = pDevice->playback.intermediaryBufferCap; /* The playback buffer will have just been filled. */
                    pDevice->capture.intermediaryBufferLen  = 0;                                       /* The intermediary buffer has just been drained. */
                }
            }

            /* Make sure this is only incremented once in the duplex case. */
            totalFramesProcessed += framesToProcessThisIteration;
        }
    }
}
+
/*
Wraps the data callback with master volume handling, denormal disabling and output clipping.
When the master volume is < 1 and there is capture input, the volume is applied to the input
pre-callback (staged through a stack buffer because the input is read-only). For playback-only
paths the volume is instead applied to the output post-callback. f32 playback output is
clipped unless the device has opted out via noClip.
*/
static void ma_device__handle_data_callback(ma_device* pDevice, void* pFramesOut, const void* pFramesIn, ma_uint32 frameCount)
{
    float masterVolumeFactor;

    ma_device_get_master_volume(pDevice, &masterVolumeFactor); /* Use ma_device_get_master_volume() to ensure the volume is loaded atomically. */

    if (pDevice->onData) {
        unsigned int prevDenormalState = ma_device_disable_denormals(pDevice);
        {
            /* Volume control of input makes things a bit awkward because the input buffer is read-only. We'll need to use a temp buffer and loop in this case. */
            if (pFramesIn != NULL && masterVolumeFactor < 1) {
                ma_uint8 tempFramesIn[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
                ma_uint32 bpfCapture  = ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels);
                ma_uint32 bpfPlayback = ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels);
                ma_uint32 totalFramesProcessed = 0;
                while (totalFramesProcessed < frameCount) {
                    /* Process in chunks no larger than the stack buffer can hold. */
                    ma_uint32 framesToProcessThisIteration = frameCount - totalFramesProcessed;
                    if (framesToProcessThisIteration > sizeof(tempFramesIn)/bpfCapture) {
                        framesToProcessThisIteration = sizeof(tempFramesIn)/bpfCapture;
                    }

                    ma_copy_and_apply_volume_factor_pcm_frames(tempFramesIn, ma_offset_ptr(pFramesIn, totalFramesProcessed*bpfCapture), framesToProcessThisIteration, pDevice->capture.format, pDevice->capture.channels, masterVolumeFactor);

                    ma_device__on_data(pDevice, ma_offset_ptr(pFramesOut, totalFramesProcessed*bpfPlayback), tempFramesIn, framesToProcessThisIteration);

                    totalFramesProcessed += framesToProcessThisIteration;
                }
            } else {
                ma_device__on_data(pDevice, pFramesOut, pFramesIn, frameCount);
            }

            /* Volume control and clipping for playback devices. */
            if (pFramesOut != NULL) {
                if (masterVolumeFactor < 1) {
                    if (pFramesIn == NULL) { /* <-- In full-duplex situations, the volume will have been applied to the input samples before the data callback. Applying it again post-callback will incorrectly compound it. */
                        ma_apply_volume_factor_pcm_frames(pFramesOut, frameCount, pDevice->playback.format, pDevice->playback.channels, masterVolumeFactor);
                    }
                }

                if (!pDevice->noClip && pDevice->playback.format == ma_format_f32) {
                    ma_clip_samples_f32((float*)pFramesOut, (const float*)pFramesOut, frameCount * pDevice->playback.channels); /* Intentionally specifying the same pointer for both input and output for in-place processing. */
                }
            }
        }
        ma_device_restore_denormals(pDevice, prevDenormalState);
    }
}
+
+
+
/*
A helper function for reading sample data from the client. Pulls frames from the data callback in
client format, converts them to the device's internal playback format, and writes frameCount frames
to pFramesOut. pFramesOut is in the device's internal format (internalFormat/internalChannels).
*/
static void ma_device__read_frames_from_client(ma_device* pDevice, ma_uint32 frameCount, void* pFramesOut)
{
    MA_ASSERT(pDevice != NULL);
    MA_ASSERT(frameCount > 0);
    MA_ASSERT(pFramesOut != NULL);

    if (pDevice->playback.converter.isPassthrough) {
        /* Fast path. No conversion needed, so the callback can write directly into the output buffer. */
        ma_device__handle_data_callback(pDevice, pFramesOut, NULL, frameCount);
    } else {
        ma_result result;
        ma_uint64 totalFramesReadOut;
        void* pRunningFramesOut;

        totalFramesReadOut = 0;
        pRunningFramesOut  = pFramesOut;

        /*
        We run slightly different logic depending on whether or not we're using a heap-allocated
        buffer for caching input data. This will be the case if the data converter does not have
        the ability to retrieve the required input frame count for a given output frame count.
        */
        if (pDevice->playback.pInputCache != NULL) {
            /* Heap-cache path: drain the cache into the converter; refill it from the client when empty. */
            while (totalFramesReadOut < frameCount) {
                ma_uint64 framesToReadThisIterationIn;
                ma_uint64 framesToReadThisIterationOut;

                /* If there's any data available in the cache, that needs to get processed first. */
                if (pDevice->playback.inputCacheRemaining > 0) {
                    framesToReadThisIterationOut = (frameCount - totalFramesReadOut);
                    framesToReadThisIterationIn = framesToReadThisIterationOut;
                    if (framesToReadThisIterationIn > pDevice->playback.inputCacheRemaining) {
                        framesToReadThisIterationIn = pDevice->playback.inputCacheRemaining;
                    }

                    /* Convert from the cache (client format, offset by the consumed count) into the output buffer. */
                    result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, ma_offset_pcm_frames_ptr(pDevice->playback.pInputCache, pDevice->playback.inputCacheConsumed, pDevice->playback.format, pDevice->playback.channels), &framesToReadThisIterationIn, pRunningFramesOut, &framesToReadThisIterationOut);
                    if (result != MA_SUCCESS) {
                        break;
                    }

                    /* The converter updated the in/out counts to what was actually consumed/generated. */
                    pDevice->playback.inputCacheConsumed  += framesToReadThisIterationIn;
                    pDevice->playback.inputCacheRemaining -= framesToReadThisIterationIn;

                    totalFramesReadOut += framesToReadThisIterationOut;
                    pRunningFramesOut   = ma_offset_ptr(pRunningFramesOut, framesToReadThisIterationOut * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels));

                    if (framesToReadThisIterationIn == 0 && framesToReadThisIterationOut == 0) {
                        break;  /* We're done. */
                    }
                }

                /* Getting here means there's no data in the cache and we need to fill it up with data from the client. */
                if (pDevice->playback.inputCacheRemaining == 0) {
                    ma_device__handle_data_callback(pDevice, pDevice->playback.pInputCache, NULL, (ma_uint32)pDevice->playback.inputCacheCap);

                    pDevice->playback.inputCacheConsumed  = 0;
                    pDevice->playback.inputCacheRemaining = pDevice->playback.inputCacheCap;
                }
            }
        } else {
            /* Stack-buffer path: the converter can report its required input count, so a fixed intermediary buffer suffices. */
            while (totalFramesReadOut < frameCount) {
                ma_uint8 pIntermediaryBuffer[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In client format. */
                ma_uint64 intermediaryBufferCap = sizeof(pIntermediaryBuffer) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels);
                ma_uint64 framesToReadThisIterationIn;
                ma_uint64 framesReadThisIterationIn;
                ma_uint64 framesToReadThisIterationOut;
                ma_uint64 framesReadThisIterationOut;
                ma_uint64 requiredInputFrameCount;

                framesToReadThisIterationOut = (frameCount - totalFramesReadOut);
                framesToReadThisIterationIn = framesToReadThisIterationOut;
                if (framesToReadThisIterationIn > intermediaryBufferCap) {
                    framesToReadThisIterationIn = intermediaryBufferCap;
                }

                /* Ask the converter how many input frames it needs; don't read more than that from the client. */
                ma_data_converter_get_required_input_frame_count(&pDevice->playback.converter, framesToReadThisIterationOut, &requiredInputFrameCount);
                if (framesToReadThisIterationIn > requiredInputFrameCount) {
                    framesToReadThisIterationIn = requiredInputFrameCount;
                }

                if (framesToReadThisIterationIn > 0) {
                    ma_device__handle_data_callback(pDevice, pIntermediaryBuffer, NULL, (ma_uint32)framesToReadThisIterationIn);
                }

                /*
                At this point we have our decoded data in input format and now we need to convert to output format. Note that even if we didn't read any
                input frames, we still want to try processing frames because there may some output frames generated from cached input data.
                */
                framesReadThisIterationIn  = framesToReadThisIterationIn;
                framesReadThisIterationOut = framesToReadThisIterationOut;
                result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, pIntermediaryBuffer, &framesReadThisIterationIn, pRunningFramesOut, &framesReadThisIterationOut);
                if (result != MA_SUCCESS) {
                    break;
                }

                totalFramesReadOut += framesReadThisIterationOut;
                pRunningFramesOut   = ma_offset_ptr(pRunningFramesOut, framesReadThisIterationOut * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels));

                /* No input consumed and no output generated means the converter is fully drained. */
                if (framesReadThisIterationIn == 0 && framesReadThisIterationOut == 0) {
                    break;  /* We're done. */
                }
            }
        }
    }
}
+
/*
A helper for sending sample data to the client. Takes frames in the device's internal capture format,
converts them to client format and delivers them via the data callback.
*/
static void ma_device__send_frames_to_client(ma_device* pDevice, ma_uint32 frameCountInDeviceFormat, const void* pFramesInDeviceFormat)
{
    MA_ASSERT(pDevice != NULL);
    MA_ASSERT(frameCountInDeviceFormat > 0);
    MA_ASSERT(pFramesInDeviceFormat != NULL);

    if (pDevice->capture.converter.isPassthrough) {
        /* Fast path. No conversion needed, so the device buffer can be handed straight to the callback. */
        ma_device__handle_data_callback(pDevice, NULL, pFramesInDeviceFormat, frameCountInDeviceFormat);
    } else {
        ma_result result;
        ma_uint8 pFramesInClientFormat[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
        ma_uint64 framesInClientFormatCap = sizeof(pFramesInClientFormat) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels);
        ma_uint64 totalDeviceFramesProcessed = 0;
        ma_uint64 totalClientFramesProcessed = 0;
        const void* pRunningFramesInDeviceFormat = pFramesInDeviceFormat;

        /* We just keep going until we've exhaused all of our input frames and cannot generate any more output frames. */
        for (;;) {
            ma_uint64 deviceFramesProcessedThisIteration;
            ma_uint64 clientFramesProcessedThisIteration;

            deviceFramesProcessedThisIteration = (frameCountInDeviceFormat - totalDeviceFramesProcessed);
            clientFramesProcessedThisIteration = framesInClientFormatCap;

            /* The converter updates both counts to what was actually consumed/generated. */
            result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningFramesInDeviceFormat, &deviceFramesProcessedThisIteration, pFramesInClientFormat, &clientFramesProcessedThisIteration);
            if (result != MA_SUCCESS) {
                break;
            }

            if (clientFramesProcessedThisIteration > 0) {
                ma_device__handle_data_callback(pDevice, NULL, pFramesInClientFormat, (ma_uint32)clientFramesProcessedThisIteration);  /* Safe cast. */
            }

            pRunningFramesInDeviceFormat = ma_offset_ptr(pRunningFramesInDeviceFormat, deviceFramesProcessedThisIteration * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels));
            totalDeviceFramesProcessed  += deviceFramesProcessedThisIteration;
            totalClientFramesProcessed  += clientFramesProcessedThisIteration;

            if (deviceFramesProcessedThisIteration == 0 && clientFramesProcessedThisIteration == 0) {
                break;  /* We're done. */
            }
        }
    }
}
+
/*
Duplex capture helper. Converts captured device-format frames to client format and writes them to the
intermediary ring buffer (pRB) for later consumption by the playback side. Always returns MA_SUCCESS;
errors are logged and simply terminate the loop early.
*/
static ma_result ma_device__handle_duplex_callback_capture(ma_device* pDevice, ma_uint32 frameCountInDeviceFormat, const void* pFramesInDeviceFormat, ma_pcm_rb* pRB)
{
    ma_result result;
    ma_uint32 totalDeviceFramesProcessed = 0;
    const void* pRunningFramesInDeviceFormat = pFramesInDeviceFormat;

    MA_ASSERT(pDevice != NULL);
    MA_ASSERT(frameCountInDeviceFormat > 0);
    MA_ASSERT(pFramesInDeviceFormat != NULL);
    MA_ASSERT(pRB != NULL);

    /* Write to the ring buffer. The ring buffer is in the client format which means we need to convert. */
    for (;;) {
        ma_uint32 framesToProcessInDeviceFormat = (frameCountInDeviceFormat - totalDeviceFramesProcessed);
        ma_uint32 framesToProcessInClientFormat = MA_DATA_CONVERTER_STACK_BUFFER_SIZE / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels);
        ma_uint64 framesProcessedInDeviceFormat;
        ma_uint64 framesProcessedInClientFormat;
        void* pFramesInClientFormat;

        /* Acquire a writable region directly inside the ring buffer; the converter writes into it in place. */
        result = ma_pcm_rb_acquire_write(pRB, &framesToProcessInClientFormat, &pFramesInClientFormat);
        if (result != MA_SUCCESS) {
            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "Failed to acquire capture PCM frames from ring buffer.");
            break;
        }

        if (framesToProcessInClientFormat == 0) {
            if (ma_pcm_rb_pointer_distance(pRB) == (ma_int32)ma_pcm_rb_get_subbuffer_size(pRB)) {
                break;  /* Overrun. Not enough room in the ring buffer for input frame. Excess frames are dropped. */
            }
        }

        /* Convert. */
        framesProcessedInDeviceFormat = framesToProcessInDeviceFormat;
        framesProcessedInClientFormat = framesToProcessInClientFormat;
        result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningFramesInDeviceFormat, &framesProcessedInDeviceFormat, pFramesInClientFormat, &framesProcessedInClientFormat);
        if (result != MA_SUCCESS) {
            break;
        }

        /* Commit only what the converter actually produced. */
        result = ma_pcm_rb_commit_write(pRB, (ma_uint32)framesProcessedInClientFormat);  /* Safe cast. */
        if (result != MA_SUCCESS) {
            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "Failed to commit capture PCM frames to ring buffer.");
            break;
        }

        pRunningFramesInDeviceFormat = ma_offset_ptr(pRunningFramesInDeviceFormat, framesProcessedInDeviceFormat * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels));
        totalDeviceFramesProcessed  += (ma_uint32)framesProcessedInDeviceFormat;  /* Safe cast. */

        /* We're done when we're unable to process any client nor device frames. */
        if (framesProcessedInClientFormat == 0 && framesProcessedInDeviceFormat == 0) {
            break;  /* Done. */
        }
    }

    return MA_SUCCESS;
}
+
/*
Duplex playback helper. Reads captured client-format frames from the ring buffer (pRB), runs the data
callback with them, then converts the callback's output to the device's internal playback format,
writing frameCount frames to pFramesInInternalFormat. Uses the heap-allocated input cache
(playback.pInputCache) as the staging area for callback output.
*/
static ma_result ma_device__handle_duplex_callback_playback(ma_device* pDevice, ma_uint32 frameCount, void* pFramesInInternalFormat, ma_pcm_rb* pRB)
{
    ma_result result;
    ma_uint8 silentInputFrames[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
    ma_uint32 totalFramesReadOut = 0;

    MA_ASSERT(pDevice != NULL);
    MA_ASSERT(frameCount > 0);
    MA_ASSERT(pFramesInInternalFormat != NULL);
    MA_ASSERT(pRB != NULL);
    MA_ASSERT(pDevice->playback.pInputCache != NULL);

    /*
    Sitting in the ring buffer should be captured data from the capture callback in external format. If there's not enough data in there for
    the whole frameCount frames we just use silence instead for the input data.
    */
    MA_ZERO_MEMORY(silentInputFrames, sizeof(silentInputFrames));

    while (totalFramesReadOut < frameCount && ma_device_is_started(pDevice)) {
        /*
        We should have a buffer allocated on the heap. Any playback frames still sitting in there
        need to be sent to the internal device before we process any more data from the client.
        */
        if (pDevice->playback.inputCacheRemaining > 0) {
            ma_uint64 framesConvertedIn  = pDevice->playback.inputCacheRemaining;
            ma_uint64 framesConvertedOut = (frameCount - totalFramesReadOut);
            /* NOTE(review): the result of this conversion is not checked; on failure the counts stay at their requested values — confirm this is intentional. */
            ma_data_converter_process_pcm_frames(&pDevice->playback.converter, ma_offset_pcm_frames_ptr(pDevice->playback.pInputCache, pDevice->playback.inputCacheConsumed, pDevice->playback.format, pDevice->playback.channels), &framesConvertedIn, pFramesInInternalFormat, &framesConvertedOut);

            pDevice->playback.inputCacheConsumed  += framesConvertedIn;
            pDevice->playback.inputCacheRemaining -= framesConvertedIn;

            totalFramesReadOut += (ma_uint32)framesConvertedOut; /* Safe cast. */
            pFramesInInternalFormat = ma_offset_ptr(pFramesInInternalFormat, framesConvertedOut * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels));
        }

        /* If there's no more data in the cache we'll need to fill it with some. */
        if (totalFramesReadOut < frameCount && pDevice->playback.inputCacheRemaining == 0) {
            ma_uint32 inputFrameCount;
            void* pInputFrames;

            inputFrameCount = (ma_uint32)pDevice->playback.inputCacheCap;
            result = ma_pcm_rb_acquire_read(pRB, &inputFrameCount, &pInputFrames);
            if (result == MA_SUCCESS) {
                if (inputFrameCount > 0) {
                    /* Captured data is available; feed it into the callback as the input side. */
                    ma_device__handle_data_callback(pDevice, pDevice->playback.pInputCache, pInputFrames, inputFrameCount);
                } else {
                    if (ma_pcm_rb_pointer_distance(pRB) == 0) {
                        break;  /* Underrun. */
                    }
                }
            } else {
                /* No capture data available. Feed in silence. */
                inputFrameCount = (ma_uint32)ma_min(pDevice->playback.inputCacheCap, sizeof(silentInputFrames) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels));
                ma_device__handle_data_callback(pDevice, pDevice->playback.pInputCache, silentInputFrames, inputFrameCount);
            }

            /* The cache now holds inputFrameCount freshly-generated playback frames. */
            pDevice->playback.inputCacheConsumed  = 0;
            pDevice->playback.inputCacheRemaining = inputFrameCount;

            result = ma_pcm_rb_commit_read(pRB, inputFrameCount);
            if (result != MA_SUCCESS) {
                return result;  /* Should never happen. */
            }
        }
    }

    return MA_SUCCESS;
}
+
/* A helper for changing the state of the device. Uses an atomic exchange so the new state is visible to other threads immediately. */
static MA_INLINE void ma_device__set_state(ma_device* pDevice, ma_device_state newState)
{
    c89atomic_exchange_i32((ma_int32*)&pDevice->state, (ma_int32)newState);
}
+
+
#ifdef MA_WIN32
    /* WAVEFORMATEXTENSIBLE subformat GUIDs (the standard KSDATAFORMAT_SUBTYPE_* values), defined locally. A-law/mu-law are currently unused and kept commented out. */
    GUID MA_GUID_KSDATAFORMAT_SUBTYPE_PCM        = {0x00000001, 0x0000, 0x0010, {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}};
    GUID MA_GUID_KSDATAFORMAT_SUBTYPE_IEEE_FLOAT = {0x00000003, 0x0000, 0x0010, {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}};
    /*GUID MA_GUID_KSDATAFORMAT_SUBTYPE_ALAW = {0x00000006, 0x0000, 0x0010, {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}};*/
    /*GUID MA_GUID_KSDATAFORMAT_SUBTYPE_MULAW = {0x00000007, 0x0000, 0x0010, {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}};*/
#endif
+
+
+
+MA_API ma_uint32 ma_get_format_priority_index(ma_format format) /* Lower = better. */
+{
+ ma_uint32 i;
+ for (i = 0; i < ma_countof(g_maFormatPriorities); ++i) {
+ if (g_maFormatPriorities[i] == format) {
+ return i;
+ }
+ }
+
+ /* Getting here means the format could not be found or is equal to ma_format_unknown. */
+ return (ma_uint32)-1;
+}
+
+static ma_result ma_device__post_init_setup(ma_device* pDevice, ma_device_type deviceType);
+
+static ma_bool32 ma_device_descriptor_is_valid(const ma_device_descriptor* pDeviceDescriptor)
+{
+ if (pDeviceDescriptor == NULL) {
+ return MA_FALSE;
+ }
+
+ if (pDeviceDescriptor->format == ma_format_unknown) {
+ return MA_FALSE;
+ }
+
+ if (pDeviceDescriptor->channels == 0 || pDeviceDescriptor->channels > MA_MAX_CHANNELS) {
+ return MA_FALSE;
+ }
+
+ if (pDeviceDescriptor->sampleRate == 0) {
+ return MA_FALSE;
+ }
+
+ return MA_TRUE;
+}
+
+
/*
Default audio-thread loop for backends that implement blocking onDeviceRead/onDeviceWrite callbacks.
Runs until the device leaves the "started" state or a backend callback fails. All conversion between
device and client formats goes through fixed-size stack buffers of MA_DATA_CONVERTER_STACK_BUFFER_SIZE
bytes, so each device type processes its period in chunks.
*/
static ma_result ma_device_audio_thread__default_read_write(ma_device* pDevice)
{
    ma_result result = MA_SUCCESS;
    ma_bool32 exitLoop = MA_FALSE;
    ma_uint8 capturedDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];   /* Capture staging buffer, device format. */
    ma_uint8 playbackDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];   /* Playback staging buffer, device format. */
    ma_uint32 capturedDeviceDataCapInFrames = 0;
    ma_uint32 playbackDeviceDataCapInFrames = 0;

    MA_ASSERT(pDevice != NULL);

    /* Just some quick validation on the device type and the available callbacks. */
    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex || pDevice->type == ma_device_type_loopback) {
        if (pDevice->pContext->callbacks.onDeviceRead == NULL) {
            return MA_NOT_IMPLEMENTED;
        }

        capturedDeviceDataCapInFrames = sizeof(capturedDeviceData) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
    }

    if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
        if (pDevice->pContext->callbacks.onDeviceWrite == NULL) {
            return MA_NOT_IMPLEMENTED;
        }

        playbackDeviceDataCapInFrames = sizeof(playbackDeviceData) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
    }

    /* NOTE: The device was started outside of this function, in the worker thread. */

    while (ma_device_get_state(pDevice) == ma_device_state_started && !exitLoop) {
        switch (pDevice->type) {
            case ma_device_type_duplex:
            {
                /* The process is: onDeviceRead() -> convert -> callback -> convert -> onDeviceWrite() */
                ma_uint32 totalCapturedDeviceFramesProcessed = 0;
                /* Use the smaller of the two period sizes so neither side is over-driven. */
                ma_uint32 capturedDevicePeriodSizeInFrames = ma_min(pDevice->capture.internalPeriodSizeInFrames, pDevice->playback.internalPeriodSizeInFrames);

                while (totalCapturedDeviceFramesProcessed < capturedDevicePeriodSizeInFrames) {
                    ma_uint32 capturedDeviceFramesRemaining;
                    ma_uint32 capturedDeviceFramesProcessed;
                    ma_uint32 capturedDeviceFramesToProcess;
                    ma_uint32 capturedDeviceFramesToTryProcessing = capturedDevicePeriodSizeInFrames - totalCapturedDeviceFramesProcessed;
                    if (capturedDeviceFramesToTryProcessing > capturedDeviceDataCapInFrames) {
                        capturedDeviceFramesToTryProcessing = capturedDeviceDataCapInFrames;
                    }

                    /* Pull a chunk of captured frames from the backend in device format. */
                    result = pDevice->pContext->callbacks.onDeviceRead(pDevice, capturedDeviceData, capturedDeviceFramesToTryProcessing, &capturedDeviceFramesToProcess);
                    if (result != MA_SUCCESS) {
                        exitLoop = MA_TRUE;
                        break;
                    }

                    capturedDeviceFramesRemaining = capturedDeviceFramesToProcess;
                    capturedDeviceFramesProcessed = 0;

                    /* At this point we have our captured data in device format and we now need to convert it to client format. */
                    for (;;) {
                        ma_uint8 capturedClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
                        ma_uint8 playbackClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
                        ma_uint32 capturedClientDataCapInFrames = sizeof(capturedClientData) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels);
                        ma_uint32 playbackClientDataCapInFrames = sizeof(playbackClientData) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels);
                        /* The callback consumes and produces the same client frame count, so cap by the smaller of the two buffers. */
                        ma_uint64 capturedClientFramesToProcessThisIteration = ma_min(capturedClientDataCapInFrames, playbackClientDataCapInFrames);
                        ma_uint64 capturedDeviceFramesToProcessThisIteration = capturedDeviceFramesRemaining;
                        ma_uint8* pRunningCapturedDeviceFrames = ma_offset_ptr(capturedDeviceData, capturedDeviceFramesProcessed * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels));

                        /* Convert capture data from device format to client format. */
                        result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningCapturedDeviceFrames, &capturedDeviceFramesToProcessThisIteration, capturedClientData, &capturedClientFramesToProcessThisIteration);
                        if (result != MA_SUCCESS) {
                            break;
                        }

                        /*
                        If we weren't able to generate any output frames it must mean we've exhaused all of our input. The only time this would not be the case is if capturedClientData was too small
                        which should never be the case when it's of the size MA_DATA_CONVERTER_STACK_BUFFER_SIZE.
                        */
                        if (capturedClientFramesToProcessThisIteration == 0) {
                            break;
                        }

                        /* Run the client callback: captured client frames in, playback client frames out. */
                        ma_device__handle_data_callback(pDevice, playbackClientData, capturedClientData, (ma_uint32)capturedClientFramesToProcessThisIteration);    /* Safe cast .*/

                        capturedDeviceFramesProcessed += (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */
                        capturedDeviceFramesRemaining -= (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */

                        /* At this point the playbackClientData buffer should be holding data that needs to be written to the device. */
                        for (;;) {
                            ma_uint64 convertedClientFrameCount = capturedClientFramesToProcessThisIteration;
                            ma_uint64 convertedDeviceFrameCount = playbackDeviceDataCapInFrames;
                            result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, playbackClientData, &convertedClientFrameCount, playbackDeviceData, &convertedDeviceFrameCount);
                            if (result != MA_SUCCESS) {
                                break;
                            }

                            /* Push the converted chunk to the backend. */
                            result = pDevice->pContext->callbacks.onDeviceWrite(pDevice, playbackDeviceData, (ma_uint32)convertedDeviceFrameCount, NULL);   /* Safe cast. */
                            if (result != MA_SUCCESS) {
                                exitLoop = MA_TRUE;
                                break;
                            }

                            capturedClientFramesToProcessThisIteration -= (ma_uint32)convertedClientFrameCount;  /* Safe cast. */
                            if (capturedClientFramesToProcessThisIteration == 0) {
                                break;
                            }
                        }

                        /* In case an error happened from ma_device_write__null()... */
                        if (result != MA_SUCCESS) {
                            exitLoop = MA_TRUE;
                            break;
                        }
                    }

                    /* Make sure we don't get stuck in the inner loop. */
                    if (capturedDeviceFramesProcessed == 0) {
                        break;
                    }

                    totalCapturedDeviceFramesProcessed += capturedDeviceFramesProcessed;
                }
            } break;

            case ma_device_type_capture:
            case ma_device_type_loopback:
            {
                /* Read one period per outer iteration, in stack-buffer-sized chunks. */
                ma_uint32 periodSizeInFrames = pDevice->capture.internalPeriodSizeInFrames;
                ma_uint32 framesReadThisPeriod = 0;
                while (framesReadThisPeriod < periodSizeInFrames) {
                    ma_uint32 framesRemainingInPeriod = periodSizeInFrames - framesReadThisPeriod;
                    ma_uint32 framesProcessed;
                    ma_uint32 framesToReadThisIteration = framesRemainingInPeriod;
                    if (framesToReadThisIteration > capturedDeviceDataCapInFrames) {
                        framesToReadThisIteration = capturedDeviceDataCapInFrames;
                    }

                    result = pDevice->pContext->callbacks.onDeviceRead(pDevice, capturedDeviceData, framesToReadThisIteration, &framesProcessed);
                    if (result != MA_SUCCESS) {
                        exitLoop = MA_TRUE;
                        break;
                    }

                    /* Make sure we don't get stuck in the inner loop. */
                    if (framesProcessed == 0) {
                        break;
                    }

                    /* Convert to client format and deliver via the data callback. */
                    ma_device__send_frames_to_client(pDevice, framesProcessed, capturedDeviceData);

                    framesReadThisPeriod += framesProcessed;
                }
            } break;

            case ma_device_type_playback:
            {
                /* We write in chunks of the period size, but use a stack allocated buffer for the intermediary. */
                ma_uint32 periodSizeInFrames = pDevice->playback.internalPeriodSizeInFrames;
                ma_uint32 framesWrittenThisPeriod = 0;
                while (framesWrittenThisPeriod < periodSizeInFrames) {
                    ma_uint32 framesRemainingInPeriod = periodSizeInFrames - framesWrittenThisPeriod;
                    ma_uint32 framesProcessed;
                    ma_uint32 framesToWriteThisIteration = framesRemainingInPeriod;
                    if (framesToWriteThisIteration > playbackDeviceDataCapInFrames) {
                        framesToWriteThisIteration = playbackDeviceDataCapInFrames;
                    }

                    /* Pull frames from the client (already converted to device format) and hand them to the backend. */
                    ma_device__read_frames_from_client(pDevice, framesToWriteThisIteration, playbackDeviceData);

                    result = pDevice->pContext->callbacks.onDeviceWrite(pDevice, playbackDeviceData, framesToWriteThisIteration, &framesProcessed);
                    if (result != MA_SUCCESS) {
                        exitLoop = MA_TRUE;
                        break;
                    }

                    /* Make sure we don't get stuck in the inner loop. */
                    if (framesProcessed == 0) {
                        break;
                    }

                    framesWrittenThisPeriod += framesProcessed;
                }
            } break;

            /* Should never get here. */
            default: break;
        }
    }

    return result;
}
+
+
+
+/*******************************************************************************
+
+Null Backend
+
+*******************************************************************************/
+#ifdef MA_HAS_NULL
+
+#define MA_DEVICE_OP_NONE__NULL 0
+#define MA_DEVICE_OP_START__NULL 1
+#define MA_DEVICE_OP_SUSPEND__NULL 2
+#define MA_DEVICE_OP_KILL__NULL 3
+
+static ma_thread_result MA_THREADCALL ma_device_thread__null(void* pData)
+{
+ ma_device* pDevice = (ma_device*)pData;
+ MA_ASSERT(pDevice != NULL);
+
+ for (;;) { /* Keep the thread alive until the device is uninitialized. */
+ ma_uint32 operation;
+
+ /* Wait for an operation to be requested. */
+ ma_event_wait(&pDevice->null_device.operationEvent);
+
+ /* At this point an event should have been triggered. */
+ operation = pDevice->null_device.operation;
+
+ /* Starting the device needs to put the thread into a loop. */
+ if (operation == MA_DEVICE_OP_START__NULL) {
+ /* Reset the timer just in case. */
+ ma_timer_init(&pDevice->null_device.timer);
+
+ /* Getting here means a suspend or kill operation has been requested. */
+ pDevice->null_device.operationResult = MA_SUCCESS;
+ ma_event_signal(&pDevice->null_device.operationCompletionEvent);
+ ma_semaphore_release(&pDevice->null_device.operationSemaphore);
+ continue;
+ }
+
+ /* Suspending the device means we need to stop the timer and just continue the loop. */
+ if (operation == MA_DEVICE_OP_SUSPEND__NULL) {
+ /* We need to add the current run time to the prior run time, then reset the timer. */
+ pDevice->null_device.priorRunTime += ma_timer_get_time_in_seconds(&pDevice->null_device.timer);
+ ma_timer_init(&pDevice->null_device.timer);
+
+ /* We're done. */
+ pDevice->null_device.operationResult = MA_SUCCESS;
+ ma_event_signal(&pDevice->null_device.operationCompletionEvent);
+ ma_semaphore_release(&pDevice->null_device.operationSemaphore);
+ continue;
+ }
+
+ /* Killing the device means we need to get out of this loop so that this thread can terminate. */
+ if (operation == MA_DEVICE_OP_KILL__NULL) {
+ pDevice->null_device.operationResult = MA_SUCCESS;
+ ma_event_signal(&pDevice->null_device.operationCompletionEvent);
+ ma_semaphore_release(&pDevice->null_device.operationSemaphore);
+ break;
+ }
+
+ /* Getting a signal on a "none" operation probably means an error. Return invalid operation. */
+ if (operation == MA_DEVICE_OP_NONE__NULL) {
+ MA_ASSERT(MA_FALSE); /* <-- Trigger this in debug mode to ensure developers are aware they're doing something wrong (or there's a bug in a miniaudio). */
+ pDevice->null_device.operationResult = MA_INVALID_OPERATION;
+ ma_event_signal(&pDevice->null_device.operationCompletionEvent);
+ ma_semaphore_release(&pDevice->null_device.operationSemaphore);
+ continue; /* Continue the loop. Don't terminate. */
+ }
+ }
+
+ return (ma_thread_result)0;
+}
+
/*
Synchronously executes an operation on the null backend's worker thread: acquires the single operation
slot, posts the operation code, wakes the worker, then blocks until the worker reports completion.
Returns the worker's operationResult.
*/
static ma_result ma_device_do_operation__null(ma_device* pDevice, ma_uint32 operation)
{
    ma_result result;

    /*
    TODO: Need to review this and consider just using mutual exclusion. I think the original motivation
    for this was to just post the event to a queue and return immediately, but that has since changed
    and now this function is synchronous. I think this can be simplified to just use a mutex.
    */

    /*
    The first thing to do is wait for an operation slot to become available. We only have a single slot for this, but we could extend this later
    to support queing of operations.
    */
    result = ma_semaphore_wait(&pDevice->null_device.operationSemaphore);
    if (result != MA_SUCCESS) {
        return result;  /* Failed to wait for the event. */
    }

    /*
    When we get here it means the background thread is not referencing the operation code and it can be changed. After changing this we need to
    signal an event to the worker thread to let it know that it can start work.
    */
    pDevice->null_device.operation = operation;

    /* Once the operation code has been set, the worker thread can start work. */
    /* NOTE(review): on the failure paths below the semaphore acquired above is never released, which would leave the operation slot unavailable — confirm events can't fail in practice here. */
    if (ma_event_signal(&pDevice->null_device.operationEvent) != MA_SUCCESS) {
        return MA_ERROR;
    }

    /* We want everything to be synchronous so we're going to wait for the worker thread to complete it's operation. The worker releases the semaphore when it's done. */
    if (ma_event_wait(&pDevice->null_device.operationCompletionEvent) != MA_SUCCESS) {
        return MA_ERROR;
    }

    return pDevice->null_device.operationResult;
}
+
+static ma_uint64 ma_device_get_total_run_time_in_frames__null(ma_device* pDevice)
+{
+ ma_uint32 internalSampleRate;
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ internalSampleRate = pDevice->capture.internalSampleRate;
+ } else {
+ internalSampleRate = pDevice->playback.internalSampleRate;
+ }
+
+ return (ma_uint64)((pDevice->null_device.priorRunTime + ma_timer_get_time_in_seconds(&pDevice->null_device.timer)) * internalSampleRate);
+}
+
+static ma_result ma_context_enumerate_devices__null(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+{
+ ma_bool32 cbResult = MA_TRUE;
+
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(callback != NULL);
+
+ /* Playback. */
+ if (cbResult) {
+ ma_device_info deviceInfo;
+ MA_ZERO_OBJECT(&deviceInfo);
+ ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), "NULL Playback Device", (size_t)-1);
+ deviceInfo.isDefault = MA_TRUE; /* Only one playback and capture device for the null backend, so might as well mark as default. */
+ cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData);
+ }
+
+ /* Capture. */
+ if (cbResult) {
+ ma_device_info deviceInfo;
+ MA_ZERO_OBJECT(&deviceInfo);
+ ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), "NULL Capture Device", (size_t)-1);
+ deviceInfo.isDefault = MA_TRUE; /* Only one playback and capture device for the null backend, so might as well mark as default. */
+ cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData);
+ }
+
+ (void)cbResult; /* Silence a static analysis warning. */
+
+ return MA_SUCCESS;
+}
+
+static ma_result ma_context_get_device_info__null(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo)
+{
+ MA_ASSERT(pContext != NULL);
+
+ if (pDeviceID != NULL && pDeviceID->nullbackend != 0) {
+ return MA_NO_DEVICE; /* Don't know the device. */
+ }
+
+ /* Name / Description */
+ if (deviceType == ma_device_type_playback) {
+ ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), "NULL Playback Device", (size_t)-1);
+ } else {
+ ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), "NULL Capture Device", (size_t)-1);
+ }
+
+ pDeviceInfo->isDefault = MA_TRUE; /* Only one playback and capture device for the null backend, so might as well mark as default. */
+
+ /* Support everything on the null backend. */
+ pDeviceInfo->nativeDataFormats[0].format = ma_format_unknown;
+ pDeviceInfo->nativeDataFormats[0].channels = 0;
+ pDeviceInfo->nativeDataFormats[0].sampleRate = 0;
+ pDeviceInfo->nativeDataFormats[0].flags = 0;
+ pDeviceInfo->nativeDataFormatCount = 1;
+
+ (void)pContext;
+ return MA_SUCCESS;
+}
+
+
/* Uninitializes a null device: kills the worker thread, waits for it to exit, then tears down the synchronization primitives. */
static ma_result ma_device_uninit__null(ma_device* pDevice)
{
    MA_ASSERT(pDevice != NULL);

    /* Keep it clean and wait for the device thread to finish before returning. */
    ma_device_do_operation__null(pDevice, MA_DEVICE_OP_KILL__NULL);

    /* Wait for the thread to finish before continuing. */
    ma_thread_wait(&pDevice->null_device.deviceThread);

    /* At this point the loop in the device thread is as good as terminated so we can uninitialize our events. */
    ma_semaphore_uninit(&pDevice->null_device.operationSemaphore);
    ma_event_uninit(&pDevice->null_device.operationCompletionEvent);
    ma_event_uninit(&pDevice->null_device.operationEvent);

    return MA_SUCCESS;
}
+
+/*
+Initializes the null device. The null backend accepts whatever format/channels/rate the caller
+asks for, falling back to miniaudio defaults for anything left unspecified. A timer thread is
+spawned to simulate real-time playback/capture pacing.
+*/
+static ma_result ma_device_init__null(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture)
+{
+    ma_result result;
+
+    MA_ASSERT(pDevice != NULL);
+
+    MA_ZERO_OBJECT(&pDevice->null_device);
+
+    if (pConfig->deviceType == ma_device_type_loopback) {
+        return MA_DEVICE_TYPE_NOT_SUPPORTED;
+    }
+
+    /* The null backend supports everything exactly as we specify it. */
+    if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
+        pDescriptorCapture->format     = (pDescriptorCapture->format     != ma_format_unknown) ? pDescriptorCapture->format     : MA_DEFAULT_FORMAT;
+        pDescriptorCapture->channels   = (pDescriptorCapture->channels   != 0)                 ? pDescriptorCapture->channels   : MA_DEFAULT_CHANNELS;
+        pDescriptorCapture->sampleRate = (pDescriptorCapture->sampleRate != 0)                 ? pDescriptorCapture->sampleRate : MA_DEFAULT_SAMPLE_RATE;
+
+        if (pDescriptorCapture->channelMap[0] == MA_CHANNEL_NONE) {
+            ma_channel_map_init_standard(ma_standard_channel_map_default, pDescriptorCapture->channelMap, ma_countof(pDescriptorCapture->channelMap), pDescriptorCapture->channels);
+        }
+
+        pDescriptorCapture->periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_descriptor(pDescriptorCapture, pDescriptorCapture->sampleRate, pConfig->performanceProfile);
+    }
+
+    if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+        pDescriptorPlayback->format     = (pDescriptorPlayback->format     != ma_format_unknown) ? pDescriptorPlayback->format     : MA_DEFAULT_FORMAT;
+        pDescriptorPlayback->channels   = (pDescriptorPlayback->channels   != 0)                 ? pDescriptorPlayback->channels   : MA_DEFAULT_CHANNELS;
+        pDescriptorPlayback->sampleRate = (pDescriptorPlayback->sampleRate != 0)                 ? pDescriptorPlayback->sampleRate : MA_DEFAULT_SAMPLE_RATE;
+
+        if (pDescriptorPlayback->channelMap[0] == MA_CHANNEL_NONE) {
+            /* Fix: measure the playback descriptor's own channel map capacity here (was pDescriptorCapture, a copy/paste error). */
+            ma_channel_map_init_standard(ma_standard_channel_map_default, pDescriptorPlayback->channelMap, ma_countof(pDescriptorPlayback->channelMap), pDescriptorPlayback->channels);
+        }
+
+        pDescriptorPlayback->periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_descriptor(pDescriptorPlayback, pDescriptorPlayback->sampleRate, pConfig->performanceProfile);
+    }
+
+    /*
+    In order to get timing right, we need to create a thread that does nothing but keeps track of the timer. This timer is started when the
+    first period is "written" to it, and then stopped in ma_device_stop__null().
+    */
+    result = ma_event_init(&pDevice->null_device.operationEvent);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    result = ma_event_init(&pDevice->null_device.operationCompletionEvent);
+    if (result != MA_SUCCESS) {
+        ma_event_uninit(&pDevice->null_device.operationEvent);   /* Don't leak the first event on failure. */
+        return result;
+    }
+
+    result = ma_semaphore_init(1, &pDevice->null_device.operationSemaphore);    /* <-- It's important that the initial value is set to 1. */
+    if (result != MA_SUCCESS) {
+        ma_event_uninit(&pDevice->null_device.operationCompletionEvent);
+        ma_event_uninit(&pDevice->null_device.operationEvent);
+        return result;
+    }
+
+    result = ma_thread_create(&pDevice->null_device.deviceThread, pDevice->pContext->threadPriority, 0, ma_device_thread__null, pDevice, &pDevice->pContext->allocationCallbacks);
+    if (result != MA_SUCCESS) {
+        ma_semaphore_uninit(&pDevice->null_device.operationSemaphore);
+        ma_event_uninit(&pDevice->null_device.operationCompletionEvent);
+        ma_event_uninit(&pDevice->null_device.operationEvent);
+        return result;
+    }
+
+    return MA_SUCCESS;
+}
+
+/* Starts the null device by resuming the timer thread and flagging the device as running. */
+static ma_result ma_device_start__null(ma_device* pDevice)
+{
+    MA_ASSERT(pDevice != NULL);
+
+    /* Tell the timer thread to transition into its started state, then publish the flag. */
+    ma_device_do_operation__null(pDevice, MA_DEVICE_OP_START__NULL);
+    c89atomic_exchange_32(&pDevice->null_device.isStarted, MA_TRUE);
+
+    return MA_SUCCESS;
+}
+
+/* Stops the null device by suspending the timer thread and clearing the running flag. */
+static ma_result ma_device_stop__null(ma_device* pDevice)
+{
+    MA_ASSERT(pDevice != NULL);
+
+    /* Suspend the timer thread first, then publish the stopped state. */
+    ma_device_do_operation__null(pDevice, MA_DEVICE_OP_SUSPEND__NULL);
+    c89atomic_exchange_32(&pDevice->null_device.isStarted, MA_FALSE);
+
+    return MA_SUCCESS;
+}
+
+/*
+Blocking write for the null backend's playback side. Nothing is actually rendered; frames are
+consumed against the device's timer thread so the call blocks for roughly as long as real
+hardware would take to play the data.
+*/
+static ma_result ma_device_write__null(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten)
+{
+    ma_result result = MA_SUCCESS;
+    ma_uint32 totalPCMFramesProcessed;
+    ma_bool32 wasStartedOnEntry;
+
+    if (pFramesWritten != NULL) {
+        *pFramesWritten = 0;
+    }
+
+    /* Remember the started state on entry so the device is auto-started at most once from within this call. */
+    wasStartedOnEntry = c89atomic_load_32(&pDevice->null_device.isStarted);
+
+    /* Keep going until everything has been read. */
+    totalPCMFramesProcessed = 0;
+    while (totalPCMFramesProcessed < frameCount) {
+        ma_uint64 targetFrame;
+
+        /* If there are any frames remaining in the current period, consume those first. */
+        if (pDevice->null_device.currentPeriodFramesRemainingPlayback > 0) {
+            ma_uint32 framesRemaining = (frameCount - totalPCMFramesProcessed);
+            ma_uint32 framesToProcess = pDevice->null_device.currentPeriodFramesRemainingPlayback;
+            if (framesToProcess > framesRemaining) {
+                framesToProcess = framesRemaining;
+            }
+
+            /* We don't actually do anything with pPCMFrames, so just mark it as unused to prevent a warning. */
+            (void)pPCMFrames;
+
+            pDevice->null_device.currentPeriodFramesRemainingPlayback -= framesToProcess;
+            totalPCMFramesProcessed += framesToProcess;
+        }
+
+        /* If we've consumed the current period we'll need to mark it as such and ensure the device is started if it's not already. */
+        if (pDevice->null_device.currentPeriodFramesRemainingPlayback == 0) {
+            pDevice->null_device.currentPeriodFramesRemainingPlayback = 0;
+
+            /* wasStartedOnEntry prevents restarting a device that was explicitly stopped while this call was in flight. */
+            if (!c89atomic_load_32(&pDevice->null_device.isStarted) && !wasStartedOnEntry) {
+                result = ma_device_start__null(pDevice);
+                if (result != MA_SUCCESS) {
+                    break;
+                }
+            }
+        }
+
+        /* If we've consumed the whole buffer we can return now. */
+        MA_ASSERT(totalPCMFramesProcessed <= frameCount);
+        if (totalPCMFramesProcessed == frameCount) {
+            break;
+        }
+
+        /* Getting here means we've still got more frames to consume, but we need to wait for it to become available. */
+        targetFrame = pDevice->null_device.lastProcessedFramePlayback;
+        for (;;) {
+            ma_uint64 currentFrame;
+
+            /* Stop waiting if the device has been stopped. */
+            if (!c89atomic_load_32(&pDevice->null_device.isStarted)) {
+                break;
+            }
+
+            currentFrame = ma_device_get_total_run_time_in_frames__null(pDevice);
+            if (currentFrame >= targetFrame) {
+                break;
+            }
+
+            /* Getting here means we haven't yet reached the target sample, so continue waiting. */
+            ma_sleep(10);
+        }
+
+        /* Advance to the next period. */
+        pDevice->null_device.lastProcessedFramePlayback += pDevice->playback.internalPeriodSizeInFrames;
+        pDevice->null_device.currentPeriodFramesRemainingPlayback = pDevice->playback.internalPeriodSizeInFrames;
+    }
+
+    if (pFramesWritten != NULL) {
+        *pFramesWritten = totalPCMFramesProcessed;
+    }
+
+    return result;
+}
+
+/*
+Blocking read for the null backend's capture side. The output buffer is filled with silence,
+with the call paced against the device's timer thread so it blocks for roughly as long as real
+hardware would take to capture the requested frames.
+*/
+static ma_result ma_device_read__null(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead)
+{
+    ma_result result = MA_SUCCESS;
+    ma_uint32 totalPCMFramesProcessed;
+
+    if (pFramesRead != NULL) {
+        *pFramesRead = 0;
+    }
+
+    /* Keep going until everything has been read. */
+    totalPCMFramesProcessed = 0;
+    while (totalPCMFramesProcessed < frameCount) {
+        ma_uint64 targetFrame;
+
+        /* If there are any frames remaining in the current period, consume those first. */
+        if (pDevice->null_device.currentPeriodFramesRemainingCapture > 0) {
+            ma_uint32 bpf = ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+            ma_uint32 framesRemaining = (frameCount - totalPCMFramesProcessed);
+            ma_uint32 framesToProcess = pDevice->null_device.currentPeriodFramesRemainingCapture;
+            if (framesToProcess > framesRemaining) {
+                framesToProcess = framesRemaining;
+            }
+
+            /* We need to ensure the output buffer is zeroed. The null backend only ever captures silence. */
+            MA_ZERO_MEMORY(ma_offset_ptr(pPCMFrames, totalPCMFramesProcessed*bpf), framesToProcess*bpf);
+
+            pDevice->null_device.currentPeriodFramesRemainingCapture -= framesToProcess;
+            totalPCMFramesProcessed += framesToProcess;
+        }
+
+        /*
+        Note: unlike the playback path there is nothing to do when the current period is exhausted (capture
+        does not auto-start the device), so the redundant self-assignment that used to live here has been removed.
+        */
+
+        /* If we've consumed the whole buffer we can return now. */
+        MA_ASSERT(totalPCMFramesProcessed <= frameCount);
+        if (totalPCMFramesProcessed == frameCount) {
+            break;
+        }
+
+        /* Getting here means we've still got more frames to consume, but we need to wait for it to become available. */
+        targetFrame = pDevice->null_device.lastProcessedFrameCapture + pDevice->capture.internalPeriodSizeInFrames;
+        for (;;) {
+            ma_uint64 currentFrame;
+
+            /* Stop waiting if the device has been stopped. */
+            if (!c89atomic_load_32(&pDevice->null_device.isStarted)) {
+                break;
+            }
+
+            currentFrame = ma_device_get_total_run_time_in_frames__null(pDevice);
+            if (currentFrame >= targetFrame) {
+                break;
+            }
+
+            /* Getting here means we haven't yet reached the target sample, so continue waiting. */
+            ma_sleep(10);
+        }
+
+        /* Advance to the next period. */
+        pDevice->null_device.lastProcessedFrameCapture += pDevice->capture.internalPeriodSizeInFrames;
+        pDevice->null_device.currentPeriodFramesRemainingCapture = pDevice->capture.internalPeriodSizeInFrames;
+    }
+
+    if (pFramesRead != NULL) {
+        *pFramesRead = totalPCMFramesProcessed;
+    }
+
+    return result;
+}
+
+/* Uninitializes the null context. There is nothing to clean up for this backend. */
+static ma_result ma_context_uninit__null(ma_context* pContext)
+{
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pContext->backend == ma_backend_null);
+
+    (void)pContext;   /* Unused outside of assertions. */
+
+    return MA_SUCCESS;
+}
+
+/* Initializes the null context. This only fills out the backend callback table and cannot fail. */
+static ma_result ma_context_init__null(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks)
+{
+    MA_ASSERT(pContext != NULL);
+
+    (void)pContext;
+    (void)pConfig;
+
+    /* Route everything the null backend implements through the callback table. */
+    pCallbacks->onContextInit             = ma_context_init__null;
+    pCallbacks->onContextUninit           = ma_context_uninit__null;
+    pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__null;
+    pCallbacks->onContextGetDeviceInfo    = ma_context_get_device_info__null;
+    pCallbacks->onDeviceInit              = ma_device_init__null;
+    pCallbacks->onDeviceUninit            = ma_device_uninit__null;
+    pCallbacks->onDeviceStart             = ma_device_start__null;
+    pCallbacks->onDeviceStop              = ma_device_stop__null;
+    pCallbacks->onDeviceRead              = ma_device_read__null;
+    pCallbacks->onDeviceWrite             = ma_device_write__null;
+
+    /* No data loop callback: this backend exposes blocking read/write, so miniaudio drives the audio thread itself. */
+    pCallbacks->onDeviceDataLoop          = NULL;
+
+    /* The null backend always works. */
+    return MA_SUCCESS;
+}
+#endif
+
+
+
+/*******************************************************************************
+
+WIN32 COMMON
+
+*******************************************************************************/
+#if defined(MA_WIN32)
+#if defined(MA_WIN32_DESKTOP)
+ #define ma_CoInitializeEx(pContext, pvReserved, dwCoInit) ((MA_PFN_CoInitializeEx)pContext->win32.CoInitializeEx)(pvReserved, dwCoInit)
+ #define ma_CoUninitialize(pContext) ((MA_PFN_CoUninitialize)pContext->win32.CoUninitialize)()
+ #define ma_CoCreateInstance(pContext, rclsid, pUnkOuter, dwClsContext, riid, ppv) ((MA_PFN_CoCreateInstance)pContext->win32.CoCreateInstance)(rclsid, pUnkOuter, dwClsContext, riid, ppv)
+ #define ma_CoTaskMemFree(pContext, pv) ((MA_PFN_CoTaskMemFree)pContext->win32.CoTaskMemFree)(pv)
+ #define ma_PropVariantClear(pContext, pvar) ((MA_PFN_PropVariantClear)pContext->win32.PropVariantClear)(pvar)
+#else
+ #define ma_CoInitializeEx(pContext, pvReserved, dwCoInit) CoInitializeEx(pvReserved, dwCoInit)
+ #define ma_CoUninitialize(pContext) CoUninitialize()
+ #define ma_CoCreateInstance(pContext, rclsid, pUnkOuter, dwClsContext, riid, ppv) CoCreateInstance(rclsid, pUnkOuter, dwClsContext, riid, ppv)
+ #define ma_CoTaskMemFree(pContext, pv) CoTaskMemFree(pv)
+ #define ma_PropVariantClear(pContext, pvar) PropVariantClear(pvar)
+#endif
+
+#if !defined(MAXULONG_PTR) && !defined(__WATCOMC__)
+typedef size_t DWORD_PTR;
+#endif
+
+#if !defined(WAVE_FORMAT_44M08)
+#define WAVE_FORMAT_44M08 0x00000100
+#define WAVE_FORMAT_44S08 0x00000200
+#define WAVE_FORMAT_44M16 0x00000400
+#define WAVE_FORMAT_44S16 0x00000800
+#define WAVE_FORMAT_48M08 0x00001000
+#define WAVE_FORMAT_48S08 0x00002000
+#define WAVE_FORMAT_48M16 0x00004000
+#define WAVE_FORMAT_48S16 0x00008000
+#define WAVE_FORMAT_96M08 0x00010000
+#define WAVE_FORMAT_96S08 0x00020000
+#define WAVE_FORMAT_96M16 0x00040000
+#define WAVE_FORMAT_96S16 0x00080000
+#endif
+
+#ifndef SPEAKER_FRONT_LEFT
+#define SPEAKER_FRONT_LEFT 0x1
+#define SPEAKER_FRONT_RIGHT 0x2
+#define SPEAKER_FRONT_CENTER 0x4
+#define SPEAKER_LOW_FREQUENCY 0x8
+#define SPEAKER_BACK_LEFT 0x10
+#define SPEAKER_BACK_RIGHT 0x20
+#define SPEAKER_FRONT_LEFT_OF_CENTER 0x40
+#define SPEAKER_FRONT_RIGHT_OF_CENTER 0x80
+#define SPEAKER_BACK_CENTER 0x100
+#define SPEAKER_SIDE_LEFT 0x200
+#define SPEAKER_SIDE_RIGHT 0x400
+#define SPEAKER_TOP_CENTER 0x800
+#define SPEAKER_TOP_FRONT_LEFT 0x1000
+#define SPEAKER_TOP_FRONT_CENTER 0x2000
+#define SPEAKER_TOP_FRONT_RIGHT 0x4000
+#define SPEAKER_TOP_BACK_LEFT 0x8000
+#define SPEAKER_TOP_BACK_CENTER 0x10000
+#define SPEAKER_TOP_BACK_RIGHT 0x20000
+#endif
+
+/*
+The SDK that comes with old versions of MSVC (VC6, for example) does not appear to define WAVEFORMATEXTENSIBLE. We
+define our own implementation in this case.
+*/
+#if (defined(_MSC_VER) && !defined(_WAVEFORMATEXTENSIBLE_)) || defined(__DMC__)
+typedef struct
+{
+ WAVEFORMATEX Format;
+ union
+ {
+ WORD wValidBitsPerSample;
+ WORD wSamplesPerBlock;
+ WORD wReserved;
+ } Samples;
+ DWORD dwChannelMask;
+ GUID SubFormat;
+} WAVEFORMATEXTENSIBLE;
+#endif
+
+#ifndef WAVE_FORMAT_EXTENSIBLE
+#define WAVE_FORMAT_EXTENSIBLE 0xFFFE
+#endif
+
+#ifndef WAVE_FORMAT_IEEE_FLOAT
+#define WAVE_FORMAT_IEEE_FLOAT 0x0003
+#endif
+
+/* Converts an individual Win32-style channel identifier (SPEAKER_FRONT_LEFT, etc.) to miniaudio. */
+static ma_uint8 ma_channel_id_to_ma__win32(DWORD id)
+{
+ switch (id)
+ {
+ case SPEAKER_FRONT_LEFT: return MA_CHANNEL_FRONT_LEFT;
+ case SPEAKER_FRONT_RIGHT: return MA_CHANNEL_FRONT_RIGHT;
+ case SPEAKER_FRONT_CENTER: return MA_CHANNEL_FRONT_CENTER;
+ case SPEAKER_LOW_FREQUENCY: return MA_CHANNEL_LFE;
+ case SPEAKER_BACK_LEFT: return MA_CHANNEL_BACK_LEFT;
+ case SPEAKER_BACK_RIGHT: return MA_CHANNEL_BACK_RIGHT;
+ case SPEAKER_FRONT_LEFT_OF_CENTER: return MA_CHANNEL_FRONT_LEFT_CENTER;
+ case SPEAKER_FRONT_RIGHT_OF_CENTER: return MA_CHANNEL_FRONT_RIGHT_CENTER;
+ case SPEAKER_BACK_CENTER: return MA_CHANNEL_BACK_CENTER;
+ case SPEAKER_SIDE_LEFT: return MA_CHANNEL_SIDE_LEFT;
+ case SPEAKER_SIDE_RIGHT: return MA_CHANNEL_SIDE_RIGHT;
+ case SPEAKER_TOP_CENTER: return MA_CHANNEL_TOP_CENTER;
+ case SPEAKER_TOP_FRONT_LEFT: return MA_CHANNEL_TOP_FRONT_LEFT;
+ case SPEAKER_TOP_FRONT_CENTER: return MA_CHANNEL_TOP_FRONT_CENTER;
+ case SPEAKER_TOP_FRONT_RIGHT: return MA_CHANNEL_TOP_FRONT_RIGHT;
+ case SPEAKER_TOP_BACK_LEFT: return MA_CHANNEL_TOP_BACK_LEFT;
+ case SPEAKER_TOP_BACK_CENTER: return MA_CHANNEL_TOP_BACK_CENTER;
+ case SPEAKER_TOP_BACK_RIGHT: return MA_CHANNEL_TOP_BACK_RIGHT;
+ default: return 0;
+ }
+}
+
+/* Converts an individual miniaudio channel identifier (MA_CHANNEL_FRONT_LEFT, etc.) to Win32-style. */
+static DWORD ma_channel_id_to_win32(DWORD id)
+{
+ switch (id)
+ {
+ case MA_CHANNEL_MONO: return SPEAKER_FRONT_CENTER;
+ case MA_CHANNEL_FRONT_LEFT: return SPEAKER_FRONT_LEFT;
+ case MA_CHANNEL_FRONT_RIGHT: return SPEAKER_FRONT_RIGHT;
+ case MA_CHANNEL_FRONT_CENTER: return SPEAKER_FRONT_CENTER;
+ case MA_CHANNEL_LFE: return SPEAKER_LOW_FREQUENCY;
+ case MA_CHANNEL_BACK_LEFT: return SPEAKER_BACK_LEFT;
+ case MA_CHANNEL_BACK_RIGHT: return SPEAKER_BACK_RIGHT;
+ case MA_CHANNEL_FRONT_LEFT_CENTER: return SPEAKER_FRONT_LEFT_OF_CENTER;
+ case MA_CHANNEL_FRONT_RIGHT_CENTER: return SPEAKER_FRONT_RIGHT_OF_CENTER;
+ case MA_CHANNEL_BACK_CENTER: return SPEAKER_BACK_CENTER;
+ case MA_CHANNEL_SIDE_LEFT: return SPEAKER_SIDE_LEFT;
+ case MA_CHANNEL_SIDE_RIGHT: return SPEAKER_SIDE_RIGHT;
+ case MA_CHANNEL_TOP_CENTER: return SPEAKER_TOP_CENTER;
+ case MA_CHANNEL_TOP_FRONT_LEFT: return SPEAKER_TOP_FRONT_LEFT;
+ case MA_CHANNEL_TOP_FRONT_CENTER: return SPEAKER_TOP_FRONT_CENTER;
+ case MA_CHANNEL_TOP_FRONT_RIGHT: return SPEAKER_TOP_FRONT_RIGHT;
+ case MA_CHANNEL_TOP_BACK_LEFT: return SPEAKER_TOP_BACK_LEFT;
+ case MA_CHANNEL_TOP_BACK_CENTER: return SPEAKER_TOP_BACK_CENTER;
+ case MA_CHANNEL_TOP_BACK_RIGHT: return SPEAKER_TOP_BACK_RIGHT;
+ default: return 0;
+ }
+}
+
+/* Converts a channel mapping to a Win32-style channel mask. */
+static DWORD ma_channel_map_to_channel_mask__win32(const ma_channel* pChannelMap, ma_uint32 channels)
+{
+ DWORD dwChannelMask = 0;
+ ma_uint32 iChannel;
+
+ for (iChannel = 0; iChannel < channels; ++iChannel) {
+ dwChannelMask |= ma_channel_id_to_win32(pChannelMap[iChannel]);
+ }
+
+ return dwChannelMask;
+}
+
+/* Converts a Win32-style channel mask to a miniaudio channel map. */
+static void ma_channel_mask_to_channel_map__win32(DWORD dwChannelMask, ma_uint32 channels, ma_channel* pChannelMap)
+{
+ if (channels == 1 && dwChannelMask == 0) {
+ pChannelMap[0] = MA_CHANNEL_MONO;
+ } else if (channels == 2 && dwChannelMask == 0) {
+ pChannelMap[0] = MA_CHANNEL_FRONT_LEFT;
+ pChannelMap[1] = MA_CHANNEL_FRONT_RIGHT;
+ } else {
+ if (channels == 1 && (dwChannelMask & SPEAKER_FRONT_CENTER) != 0) {
+ pChannelMap[0] = MA_CHANNEL_MONO;
+ } else {
+ /* Just iterate over each bit. */
+ ma_uint32 iChannel = 0;
+ ma_uint32 iBit;
+
+ for (iBit = 0; iBit < 32 && iChannel < channels; ++iBit) {
+ DWORD bitValue = (dwChannelMask & (1UL << iBit));
+ if (bitValue != 0) {
+ /* The bit is set. */
+ pChannelMap[iChannel] = ma_channel_id_to_ma__win32(bitValue);
+ iChannel += 1;
+ }
+ }
+ }
+ }
+}
+
+/*
+GUID equality check. IsEqualGUID has different signatures in C and C++ (references vs.
+pointers), so a small wrapper function is needed for C++ while plain C can use a macro.
+*/
+#ifdef __cplusplus
+static ma_bool32 ma_is_guid_equal(const void* a, const void* b)
+{
+    return IsEqualGUID(*(const GUID*)a, *(const GUID*)b);
+}
+#else
+#define ma_is_guid_equal(a, b) IsEqualGUID((const GUID*)a, (const GUID*)b)
+#endif
+
+/* Determines whether or not the given GUID is all zeros (the null GUID). */
+static MA_INLINE ma_bool32 ma_is_guid_null(const void* guid)
+{
+    static GUID guidNull = {0x00000000, 0x0000, 0x0000, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}};
+
+    return ma_is_guid_equal(guid, &guidNull);
+}
+
+/*
+Derives a miniaudio sample format from a Win32 WAVEFORMATEX. Handles both the extensible
+format (sub-format GUID + valid bits) and the legacy PCM/IEEE-float tags. Returns
+ma_format_unknown for anything miniaudio doesn't support natively.
+*/
+static ma_format ma_format_from_WAVEFORMATEX(const WAVEFORMATEX* pWF)
+{
+    MA_ASSERT(pWF != NULL);
+
+    if (pWF->wFormatTag == WAVE_FORMAT_EXTENSIBLE) {
+        const WAVEFORMATEXTENSIBLE* pWFEX = (const WAVEFORMATEXTENSIBLE*)pWF;
+        if (ma_is_guid_equal(&pWFEX->SubFormat, &MA_GUID_KSDATAFORMAT_SUBTYPE_PCM)) {
+            if (pWFEX->Samples.wValidBitsPerSample == 32) {
+                return ma_format_s32;
+            }
+            if (pWFEX->Samples.wValidBitsPerSample == 24) {
+                if (pWFEX->Format.wBitsPerSample == 32) {
+                    /* 24 valid bits in a 32-bit container (s24_32) is not currently supported; falls through to ma_format_unknown. */
+                    /*return ma_format_s24_32;*/
+                }
+                if (pWFEX->Format.wBitsPerSample == 24) {
+                    return ma_format_s24;
+                }
+            }
+            if (pWFEX->Samples.wValidBitsPerSample == 16) {
+                return ma_format_s16;
+            }
+            if (pWFEX->Samples.wValidBitsPerSample == 8) {
+                return ma_format_u8;
+            }
+        }
+        if (ma_is_guid_equal(&pWFEX->SubFormat, &MA_GUID_KSDATAFORMAT_SUBTYPE_IEEE_FLOAT)) {
+            if (pWFEX->Samples.wValidBitsPerSample == 32) {
+                return ma_format_f32;
+            }
+            /* 64-bit float is not supported by miniaudio. */
+            /*
+            if (pWFEX->Samples.wValidBitsPerSample == 64) {
+                return ma_format_f64;
+            }
+            */
+        }
+    } else {
+        /* Legacy (non-extensible) formats: the tag and bit depth fully determine the format. */
+        if (pWF->wFormatTag == WAVE_FORMAT_PCM) {
+            if (pWF->wBitsPerSample == 32) {
+                return ma_format_s32;
+            }
+            if (pWF->wBitsPerSample == 24) {
+                return ma_format_s24;
+            }
+            if (pWF->wBitsPerSample == 16) {
+                return ma_format_s16;
+            }
+            if (pWF->wBitsPerSample == 8) {
+                return ma_format_u8;
+            }
+        }
+        if (pWF->wFormatTag == WAVE_FORMAT_IEEE_FLOAT) {
+            if (pWF->wBitsPerSample == 32) {
+                return ma_format_f32;
+            }
+            if (pWF->wBitsPerSample == 64) {
+                /* 64-bit float is not supported by miniaudio. */
+                /*return ma_format_f64;*/
+            }
+        }
+    }
+
+    return ma_format_unknown;
+}
+#endif
+
+
+/*******************************************************************************
+
+WASAPI Backend
+
+*******************************************************************************/
+#ifdef MA_HAS_WASAPI
+#if 0
+#if defined(_MSC_VER)
+ #pragma warning(push)
+ #pragma warning(disable:4091) /* 'typedef ': ignored on left of '' when no variable is declared */
+#endif
+#include <audioclient.h>
+#include <mmdeviceapi.h>
+#if defined(_MSC_VER)
+ #pragma warning(pop)
+#endif
+#endif /* 0 */
+
+static ma_result ma_device_reroute__wasapi(ma_device* pDevice, ma_device_type deviceType);
+
+/* Some compilers don't define VerifyVersionInfoW. Need to write this ourselves. */
+#define MA_WIN32_WINNT_VISTA 0x0600
+#define MA_VER_MINORVERSION 0x01
+#define MA_VER_MAJORVERSION 0x02
+#define MA_VER_SERVICEPACKMAJOR 0x20
+#define MA_VER_GREATER_EQUAL 0x03
+
+/*
+Local mirror of the Win32 OSVERSIONINFOEXW structure, declared here so we don't depend on
+the SDK providing it. Used with the runtime-linked VerifyVersionInfoW for OS version checks.
+Field names and layout match the Windows SDK declaration.
+*/
+typedef struct {
+    DWORD dwOSVersionInfoSize;   /* Size of this structure, in bytes. */
+    DWORD dwMajorVersion;
+    DWORD dwMinorVersion;
+    DWORD dwBuildNumber;
+    DWORD dwPlatformId;
+    WCHAR szCSDVersion[128];
+    WORD  wServicePackMajor;
+    WORD  wServicePackMinor;
+    WORD  wSuiteMask;
+    BYTE  wProductType;
+    BYTE  wReserved;
+} ma_OSVERSIONINFOEXW;
+
+typedef BOOL (WINAPI * ma_PFNVerifyVersionInfoW) (ma_OSVERSIONINFOEXW* lpVersionInfo, DWORD dwTypeMask, DWORDLONG dwlConditionMask);
+typedef ULONGLONG (WINAPI * ma_PFNVerSetConditionMask)(ULONGLONG dwlConditionMask, DWORD dwTypeBitMask, BYTE dwConditionMask);
+
+
+#ifndef PROPERTYKEY_DEFINED
+#define PROPERTYKEY_DEFINED
+#ifndef __WATCOMC__
+typedef struct
+{
+ GUID fmtid;
+ DWORD pid;
+} PROPERTYKEY;
+#endif
+#endif
+
+/* Some compilers don't define PropVariantInit(). We just do this ourselves since it's just a memset(). */
+static MA_INLINE void ma_PropVariantInit(PROPVARIANT* pProp)
+{
+ MA_ZERO_OBJECT(pProp);
+}
+
+
+static const PROPERTYKEY MA_PKEY_Device_FriendlyName = {{0xA45C254E, 0xDF1C, 0x4EFD, {0x80, 0x20, 0x67, 0xD1, 0x46, 0xA8, 0x50, 0xE0}}, 14};
+static const PROPERTYKEY MA_PKEY_AudioEngine_DeviceFormat = {{0xF19F064D, 0x82C, 0x4E27, {0xBC, 0x73, 0x68, 0x82, 0xA1, 0xBB, 0x8E, 0x4C}}, 0};
+
+static const IID MA_IID_IUnknown = {0x00000000, 0x0000, 0x0000, {0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46}}; /* 00000000-0000-0000-C000-000000000046 */
+#if !defined(MA_WIN32_DESKTOP) && !defined(MA_WIN32_GDK)
+static const IID MA_IID_IAgileObject = {0x94EA2B94, 0xE9CC, 0x49E0, {0xC0, 0xFF, 0xEE, 0x64, 0xCA, 0x8F, 0x5B, 0x90}}; /* 94EA2B94-E9CC-49E0-C0FF-EE64CA8F5B90 */
+#endif
+
+static const IID MA_IID_IAudioClient = {0x1CB9AD4C, 0xDBFA, 0x4C32, {0xB1, 0x78, 0xC2, 0xF5, 0x68, 0xA7, 0x03, 0xB2}}; /* 1CB9AD4C-DBFA-4C32-B178-C2F568A703B2 = __uuidof(IAudioClient) */
+static const IID MA_IID_IAudioClient2 = {0x726778CD, 0xF60A, 0x4EDA, {0x82, 0xDE, 0xE4, 0x76, 0x10, 0xCD, 0x78, 0xAA}}; /* 726778CD-F60A-4EDA-82DE-E47610CD78AA = __uuidof(IAudioClient2) */
+static const IID MA_IID_IAudioClient3 = {0x7ED4EE07, 0x8E67, 0x4CD4, {0x8C, 0x1A, 0x2B, 0x7A, 0x59, 0x87, 0xAD, 0x42}}; /* 7ED4EE07-8E67-4CD4-8C1A-2B7A5987AD42 = __uuidof(IAudioClient3) */
+static const IID MA_IID_IAudioRenderClient = {0xF294ACFC, 0x3146, 0x4483, {0xA7, 0xBF, 0xAD, 0xDC, 0xA7, 0xC2, 0x60, 0xE2}}; /* F294ACFC-3146-4483-A7BF-ADDCA7C260E2 = __uuidof(IAudioRenderClient) */
+static const IID MA_IID_IAudioCaptureClient = {0xC8ADBD64, 0xE71E, 0x48A0, {0xA4, 0xDE, 0x18, 0x5C, 0x39, 0x5C, 0xD3, 0x17}}; /* C8ADBD64-E71E-48A0-A4DE-185C395CD317 = __uuidof(IAudioCaptureClient) */
+static const IID MA_IID_IMMNotificationClient = {0x7991EEC9, 0x7E89, 0x4D85, {0x83, 0x90, 0x6C, 0x70, 0x3C, 0xEC, 0x60, 0xC0}}; /* 7991EEC9-7E89-4D85-8390-6C703CEC60C0 = __uuidof(IMMNotificationClient) */
+#if !defined(MA_WIN32_DESKTOP) && !defined(MA_WIN32_GDK)
+static const IID MA_IID_DEVINTERFACE_AUDIO_RENDER = {0xE6327CAD, 0xDCEC, 0x4949, {0xAE, 0x8A, 0x99, 0x1E, 0x97, 0x6A, 0x79, 0xD2}}; /* E6327CAD-DCEC-4949-AE8A-991E976A79D2 */
+static const IID MA_IID_DEVINTERFACE_AUDIO_CAPTURE = {0x2EEF81BE, 0x33FA, 0x4800, {0x96, 0x70, 0x1C, 0xD4, 0x74, 0x97, 0x2C, 0x3F}}; /* 2EEF81BE-33FA-4800-9670-1CD474972C3F */
+static const IID MA_IID_IActivateAudioInterfaceCompletionHandler = {0x41D949AB, 0x9862, 0x444A, {0x80, 0xF6, 0xC2, 0x61, 0x33, 0x4D, 0xA5, 0xEB}}; /* 41D949AB-9862-444A-80F6-C261334DA5EB */
+#endif
+
+static const IID MA_CLSID_MMDeviceEnumerator_Instance = {0xBCDE0395, 0xE52F, 0x467C, {0x8E, 0x3D, 0xC4, 0x57, 0x92, 0x91, 0x69, 0x2E}}; /* BCDE0395-E52F-467C-8E3D-C4579291692E = __uuidof(MMDeviceEnumerator) */
+static const IID MA_IID_IMMDeviceEnumerator_Instance = {0xA95664D2, 0x9614, 0x4F35, {0xA7, 0x46, 0xDE, 0x8D, 0xB6, 0x36, 0x17, 0xE6}}; /* A95664D2-9614-4F35-A746-DE8DB63617E6 = __uuidof(IMMDeviceEnumerator) */
+#ifdef __cplusplus
+#define MA_CLSID_MMDeviceEnumerator MA_CLSID_MMDeviceEnumerator_Instance
+#define MA_IID_IMMDeviceEnumerator MA_IID_IMMDeviceEnumerator_Instance
+#else
+#define MA_CLSID_MMDeviceEnumerator &MA_CLSID_MMDeviceEnumerator_Instance
+#define MA_IID_IMMDeviceEnumerator &MA_IID_IMMDeviceEnumerator_Instance
+#endif
+
+typedef struct ma_IUnknown ma_IUnknown;
+#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK)
+#define MA_MM_DEVICE_STATE_ACTIVE 1
+#define MA_MM_DEVICE_STATE_DISABLED 2
+#define MA_MM_DEVICE_STATE_NOTPRESENT 4
+#define MA_MM_DEVICE_STATE_UNPLUGGED 8
+
+typedef struct ma_IMMDeviceEnumerator ma_IMMDeviceEnumerator;
+typedef struct ma_IMMDeviceCollection ma_IMMDeviceCollection;
+typedef struct ma_IMMDevice ma_IMMDevice;
+#else
+typedef struct ma_IActivateAudioInterfaceCompletionHandler ma_IActivateAudioInterfaceCompletionHandler;
+typedef struct ma_IActivateAudioInterfaceAsyncOperation ma_IActivateAudioInterfaceAsyncOperation;
+#endif
+typedef struct ma_IPropertyStore ma_IPropertyStore;
+typedef struct ma_IAudioClient ma_IAudioClient;
+typedef struct ma_IAudioClient2 ma_IAudioClient2;
+typedef struct ma_IAudioClient3 ma_IAudioClient3;
+typedef struct ma_IAudioRenderClient ma_IAudioRenderClient;
+typedef struct ma_IAudioCaptureClient ma_IAudioCaptureClient;
+
+typedef ma_int64 MA_REFERENCE_TIME;
+
+#define MA_AUDCLNT_STREAMFLAGS_CROSSPROCESS 0x00010000
+#define MA_AUDCLNT_STREAMFLAGS_LOOPBACK 0x00020000
+#define MA_AUDCLNT_STREAMFLAGS_EVENTCALLBACK 0x00040000
+#define MA_AUDCLNT_STREAMFLAGS_NOPERSIST 0x00080000
+#define MA_AUDCLNT_STREAMFLAGS_RATEADJUST 0x00100000
+#define MA_AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY 0x08000000
+#define MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM 0x80000000
+#define MA_AUDCLNT_SESSIONFLAGS_EXPIREWHENUNOWNED 0x10000000
+#define MA_AUDCLNT_SESSIONFLAGS_DISPLAY_HIDE 0x20000000
+#define MA_AUDCLNT_SESSIONFLAGS_DISPLAY_HIDEWHENEXPIRED 0x40000000
+
+/* Buffer flags. */
+#define MA_AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY 1
+#define MA_AUDCLNT_BUFFERFLAGS_SILENT 2
+#define MA_AUDCLNT_BUFFERFLAGS_TIMESTAMP_ERROR 4
+
+typedef enum
+{
+ ma_eRender = 0,
+ ma_eCapture = 1,
+ ma_eAll = 2
+} ma_EDataFlow;
+
+typedef enum
+{
+ ma_eConsole = 0,
+ ma_eMultimedia = 1,
+ ma_eCommunications = 2
+} ma_ERole;
+
+typedef enum
+{
+ MA_AUDCLNT_SHAREMODE_SHARED,
+ MA_AUDCLNT_SHAREMODE_EXCLUSIVE
+} MA_AUDCLNT_SHAREMODE;
+
+typedef enum
+{
+ MA_AudioCategory_Other = 0 /* <-- miniaudio is only caring about Other. */
+} MA_AUDIO_STREAM_CATEGORY;
+
+typedef struct
+{
+ ma_uint32 cbSize;
+ BOOL bIsOffload;
+ MA_AUDIO_STREAM_CATEGORY eCategory;
+} ma_AudioClientProperties;
+
+/* IUnknown */
+/* Hand-rolled C binding for the COM IUnknown interface: a vtable struct plus the object that points at it. */
+typedef struct
+{
+    /* IUnknown */
+    HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IUnknown* pThis, const IID* const riid, void** ppObject);
+    ULONG   (STDMETHODCALLTYPE * AddRef)        (ma_IUnknown* pThis);
+    ULONG   (STDMETHODCALLTYPE * Release)       (ma_IUnknown* pThis);
+} ma_IUnknownVtbl;
+struct ma_IUnknown
+{
+    ma_IUnknownVtbl* lpVtbl;
+};
+/* Convenience wrappers that forward through the vtable, emulating C++ method-call syntax from C. */
+static MA_INLINE HRESULT ma_IUnknown_QueryInterface(ma_IUnknown* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
+static MA_INLINE ULONG   ma_IUnknown_AddRef(ma_IUnknown* pThis)                                                 { return pThis->lpVtbl->AddRef(pThis); }
+static MA_INLINE ULONG   ma_IUnknown_Release(ma_IUnknown* pThis)                                                { return pThis->lpVtbl->Release(pThis); }
+
+#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK)
+ /* IMMNotificationClient */
+ typedef struct
+ {
+ /* IUnknown */
+ HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IMMNotificationClient* pThis, const IID* const riid, void** ppObject);
+ ULONG (STDMETHODCALLTYPE * AddRef) (ma_IMMNotificationClient* pThis);
+ ULONG (STDMETHODCALLTYPE * Release) (ma_IMMNotificationClient* pThis);
+
+ /* IMMNotificationClient */
+ HRESULT (STDMETHODCALLTYPE * OnDeviceStateChanged) (ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID, DWORD dwNewState);
+ HRESULT (STDMETHODCALLTYPE * OnDeviceAdded) (ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID);
+ HRESULT (STDMETHODCALLTYPE * OnDeviceRemoved) (ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID);
+ HRESULT (STDMETHODCALLTYPE * OnDefaultDeviceChanged)(ma_IMMNotificationClient* pThis, ma_EDataFlow dataFlow, ma_ERole role, LPCWSTR pDefaultDeviceID);
+ HRESULT (STDMETHODCALLTYPE * OnPropertyValueChanged)(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID, const PROPERTYKEY key);
+ } ma_IMMNotificationClientVtbl;
+
+ /* IMMDeviceEnumerator */
+ typedef struct
+ {
+ /* IUnknown */
+ HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IMMDeviceEnumerator* pThis, const IID* const riid, void** ppObject);
+ ULONG (STDMETHODCALLTYPE * AddRef) (ma_IMMDeviceEnumerator* pThis);
+ ULONG (STDMETHODCALLTYPE * Release) (ma_IMMDeviceEnumerator* pThis);
+
+ /* IMMDeviceEnumerator */
+ HRESULT (STDMETHODCALLTYPE * EnumAudioEndpoints) (ma_IMMDeviceEnumerator* pThis, ma_EDataFlow dataFlow, DWORD dwStateMask, ma_IMMDeviceCollection** ppDevices);
+ HRESULT (STDMETHODCALLTYPE * GetDefaultAudioEndpoint) (ma_IMMDeviceEnumerator* pThis, ma_EDataFlow dataFlow, ma_ERole role, ma_IMMDevice** ppEndpoint);
+ HRESULT (STDMETHODCALLTYPE * GetDevice) (ma_IMMDeviceEnumerator* pThis, LPCWSTR pID, ma_IMMDevice** ppDevice);
+ HRESULT (STDMETHODCALLTYPE * RegisterEndpointNotificationCallback) (ma_IMMDeviceEnumerator* pThis, ma_IMMNotificationClient* pClient);
+ HRESULT (STDMETHODCALLTYPE * UnregisterEndpointNotificationCallback)(ma_IMMDeviceEnumerator* pThis, ma_IMMNotificationClient* pClient);
+ } ma_IMMDeviceEnumeratorVtbl;
+ struct ma_IMMDeviceEnumerator
+ {
+ ma_IMMDeviceEnumeratorVtbl* lpVtbl;
+ };
+ static MA_INLINE HRESULT ma_IMMDeviceEnumerator_QueryInterface(ma_IMMDeviceEnumerator* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
+ static MA_INLINE ULONG ma_IMMDeviceEnumerator_AddRef(ma_IMMDeviceEnumerator* pThis) { return pThis->lpVtbl->AddRef(pThis); }
+ static MA_INLINE ULONG ma_IMMDeviceEnumerator_Release(ma_IMMDeviceEnumerator* pThis) { return pThis->lpVtbl->Release(pThis); }
+ static MA_INLINE HRESULT ma_IMMDeviceEnumerator_EnumAudioEndpoints(ma_IMMDeviceEnumerator* pThis, ma_EDataFlow dataFlow, DWORD dwStateMask, ma_IMMDeviceCollection** ppDevices) { return pThis->lpVtbl->EnumAudioEndpoints(pThis, dataFlow, dwStateMask, ppDevices); }
+ static MA_INLINE HRESULT ma_IMMDeviceEnumerator_GetDefaultAudioEndpoint(ma_IMMDeviceEnumerator* pThis, ma_EDataFlow dataFlow, ma_ERole role, ma_IMMDevice** ppEndpoint) { return pThis->lpVtbl->GetDefaultAudioEndpoint(pThis, dataFlow, role, ppEndpoint); }
+ static MA_INLINE HRESULT ma_IMMDeviceEnumerator_GetDevice(ma_IMMDeviceEnumerator* pThis, LPCWSTR pID, ma_IMMDevice** ppDevice) { return pThis->lpVtbl->GetDevice(pThis, pID, ppDevice); }
+ static MA_INLINE HRESULT ma_IMMDeviceEnumerator_RegisterEndpointNotificationCallback(ma_IMMDeviceEnumerator* pThis, ma_IMMNotificationClient* pClient) { return pThis->lpVtbl->RegisterEndpointNotificationCallback(pThis, pClient); }
+ static MA_INLINE HRESULT ma_IMMDeviceEnumerator_UnregisterEndpointNotificationCallback(ma_IMMDeviceEnumerator* pThis, ma_IMMNotificationClient* pClient) { return pThis->lpVtbl->UnregisterEndpointNotificationCallback(pThis, pClient); }
+
+
+ /* IMMDeviceCollection */
+ typedef struct
+ {
+ /* IUnknown */
+ HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IMMDeviceCollection* pThis, const IID* const riid, void** ppObject);
+ ULONG (STDMETHODCALLTYPE * AddRef) (ma_IMMDeviceCollection* pThis);
+ ULONG (STDMETHODCALLTYPE * Release) (ma_IMMDeviceCollection* pThis);
+
+ /* IMMDeviceCollection */
+ HRESULT (STDMETHODCALLTYPE * GetCount)(ma_IMMDeviceCollection* pThis, UINT* pDevices);
+ HRESULT (STDMETHODCALLTYPE * Item) (ma_IMMDeviceCollection* pThis, UINT nDevice, ma_IMMDevice** ppDevice);
+ } ma_IMMDeviceCollectionVtbl;
+ struct ma_IMMDeviceCollection
+ {
+ ma_IMMDeviceCollectionVtbl* lpVtbl;
+ };
+ static MA_INLINE HRESULT ma_IMMDeviceCollection_QueryInterface(ma_IMMDeviceCollection* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
+ static MA_INLINE ULONG ma_IMMDeviceCollection_AddRef(ma_IMMDeviceCollection* pThis) { return pThis->lpVtbl->AddRef(pThis); }
+ static MA_INLINE ULONG ma_IMMDeviceCollection_Release(ma_IMMDeviceCollection* pThis) { return pThis->lpVtbl->Release(pThis); }
+ static MA_INLINE HRESULT ma_IMMDeviceCollection_GetCount(ma_IMMDeviceCollection* pThis, UINT* pDevices) { return pThis->lpVtbl->GetCount(pThis, pDevices); }
+ static MA_INLINE HRESULT ma_IMMDeviceCollection_Item(ma_IMMDeviceCollection* pThis, UINT nDevice, ma_IMMDevice** ppDevice) { return pThis->lpVtbl->Item(pThis, nDevice, ppDevice); }
+
+
+    /* IMMDevice */
+    /*
+    Hand-rolled C binding for the IMMDevice COM interface. The member order of the vtable struct is
+    ABI-critical and must match the official interface exactly; do not reorder.
+    */
+    typedef struct
+    {
+        /* IUnknown */
+        HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IMMDevice* pThis, const IID* const riid, void** ppObject);
+        ULONG   (STDMETHODCALLTYPE * AddRef)        (ma_IMMDevice* pThis);
+        ULONG   (STDMETHODCALLTYPE * Release)       (ma_IMMDevice* pThis);
+
+        /* IMMDevice */
+        HRESULT (STDMETHODCALLTYPE * Activate)         (ma_IMMDevice* pThis, const IID* const iid, DWORD dwClsCtx, PROPVARIANT* pActivationParams, void** ppInterface);
+        HRESULT (STDMETHODCALLTYPE * OpenPropertyStore)(ma_IMMDevice* pThis, DWORD stgmAccess, ma_IPropertyStore** ppProperties);
+        HRESULT (STDMETHODCALLTYPE * GetId)            (ma_IMMDevice* pThis, LPWSTR *pID);
+        HRESULT (STDMETHODCALLTYPE * GetState)         (ma_IMMDevice* pThis, DWORD *pState);
+    } ma_IMMDeviceVtbl;
+    struct ma_IMMDevice
+    {
+        ma_IMMDeviceVtbl* lpVtbl;
+    };
+    /* Call-through helpers. */
+    static MA_INLINE HRESULT ma_IMMDevice_QueryInterface(ma_IMMDevice* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
+    static MA_INLINE ULONG ma_IMMDevice_AddRef(ma_IMMDevice* pThis) { return pThis->lpVtbl->AddRef(pThis); }
+    static MA_INLINE ULONG ma_IMMDevice_Release(ma_IMMDevice* pThis) { return pThis->lpVtbl->Release(pThis); }
+    static MA_INLINE HRESULT ma_IMMDevice_Activate(ma_IMMDevice* pThis, const IID* const iid, DWORD dwClsCtx, PROPVARIANT* pActivationParams, void** ppInterface) { return pThis->lpVtbl->Activate(pThis, iid, dwClsCtx, pActivationParams, ppInterface); }
+    static MA_INLINE HRESULT ma_IMMDevice_OpenPropertyStore(ma_IMMDevice* pThis, DWORD stgmAccess, ma_IPropertyStore** ppProperties) { return pThis->lpVtbl->OpenPropertyStore(pThis, stgmAccess, ppProperties); }
+    static MA_INLINE HRESULT ma_IMMDevice_GetId(ma_IMMDevice* pThis, LPWSTR *pID) { return pThis->lpVtbl->GetId(pThis, pID); }
+    static MA_INLINE HRESULT ma_IMMDevice_GetState(ma_IMMDevice* pThis, DWORD *pState) { return pThis->lpVtbl->GetState(pThis, pState); }
+#else
+    /* IActivateAudioInterfaceAsyncOperation */
+    /*
+    Hand-rolled C binding for IActivateAudioInterfaceAsyncOperation (UWP async device activation).
+    Vtable member order is ABI-critical; do not reorder.
+    */
+    typedef struct
+    {
+        /* IUnknown */
+        HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IActivateAudioInterfaceAsyncOperation* pThis, const IID* const riid, void** ppObject);
+        ULONG   (STDMETHODCALLTYPE * AddRef)        (ma_IActivateAudioInterfaceAsyncOperation* pThis);
+        ULONG   (STDMETHODCALLTYPE * Release)       (ma_IActivateAudioInterfaceAsyncOperation* pThis);
+
+        /* IActivateAudioInterfaceAsyncOperation */
+        HRESULT (STDMETHODCALLTYPE * GetActivateResult)(ma_IActivateAudioInterfaceAsyncOperation* pThis, HRESULT *pActivateResult, ma_IUnknown** ppActivatedInterface);
+    } ma_IActivateAudioInterfaceAsyncOperationVtbl;
+    struct ma_IActivateAudioInterfaceAsyncOperation
+    {
+        ma_IActivateAudioInterfaceAsyncOperationVtbl* lpVtbl;
+    };
+    /* Call-through helpers. */
+    static MA_INLINE HRESULT ma_IActivateAudioInterfaceAsyncOperation_QueryInterface(ma_IActivateAudioInterfaceAsyncOperation* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
+    static MA_INLINE ULONG ma_IActivateAudioInterfaceAsyncOperation_AddRef(ma_IActivateAudioInterfaceAsyncOperation* pThis) { return pThis->lpVtbl->AddRef(pThis); }
+    static MA_INLINE ULONG ma_IActivateAudioInterfaceAsyncOperation_Release(ma_IActivateAudioInterfaceAsyncOperation* pThis) { return pThis->lpVtbl->Release(pThis); }
+    static MA_INLINE HRESULT ma_IActivateAudioInterfaceAsyncOperation_GetActivateResult(ma_IActivateAudioInterfaceAsyncOperation* pThis, HRESULT *pActivateResult, ma_IUnknown** ppActivatedInterface) { return pThis->lpVtbl->GetActivateResult(pThis, pActivateResult, ppActivatedInterface); }
+#endif
+
+/* IPropertyStore */
+/*
+Hand-rolled C binding for the IPropertyStore COM interface, used to query device properties
+(friendly name, default format, etc.). Vtable member order is ABI-critical; do not reorder.
+*/
+typedef struct
+{
+    /* IUnknown */
+    HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IPropertyStore* pThis, const IID* const riid, void** ppObject);
+    ULONG   (STDMETHODCALLTYPE * AddRef)        (ma_IPropertyStore* pThis);
+    ULONG   (STDMETHODCALLTYPE * Release)       (ma_IPropertyStore* pThis);
+
+    /* IPropertyStore */
+    HRESULT (STDMETHODCALLTYPE * GetCount)(ma_IPropertyStore* pThis, DWORD* pPropCount);
+    HRESULT (STDMETHODCALLTYPE * GetAt)   (ma_IPropertyStore* pThis, DWORD propIndex, PROPERTYKEY* pPropKey);
+    HRESULT (STDMETHODCALLTYPE * GetValue)(ma_IPropertyStore* pThis, const PROPERTYKEY* const pKey, PROPVARIANT* pPropVar);
+    HRESULT (STDMETHODCALLTYPE * SetValue)(ma_IPropertyStore* pThis, const PROPERTYKEY* const pKey, const PROPVARIANT* const pPropVar);
+    HRESULT (STDMETHODCALLTYPE * Commit)  (ma_IPropertyStore* pThis);
+} ma_IPropertyStoreVtbl;
+struct ma_IPropertyStore
+{
+    ma_IPropertyStoreVtbl* lpVtbl;
+};
+/* Call-through helpers. */
+static MA_INLINE HRESULT ma_IPropertyStore_QueryInterface(ma_IPropertyStore* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
+static MA_INLINE ULONG ma_IPropertyStore_AddRef(ma_IPropertyStore* pThis) { return pThis->lpVtbl->AddRef(pThis); }
+static MA_INLINE ULONG ma_IPropertyStore_Release(ma_IPropertyStore* pThis) { return pThis->lpVtbl->Release(pThis); }
+static MA_INLINE HRESULT ma_IPropertyStore_GetCount(ma_IPropertyStore* pThis, DWORD* pPropCount) { return pThis->lpVtbl->GetCount(pThis, pPropCount); }
+static MA_INLINE HRESULT ma_IPropertyStore_GetAt(ma_IPropertyStore* pThis, DWORD propIndex, PROPERTYKEY* pPropKey) { return pThis->lpVtbl->GetAt(pThis, propIndex, pPropKey); }
+static MA_INLINE HRESULT ma_IPropertyStore_GetValue(ma_IPropertyStore* pThis, const PROPERTYKEY* const pKey, PROPVARIANT* pPropVar) { return pThis->lpVtbl->GetValue(pThis, pKey, pPropVar); }
+static MA_INLINE HRESULT ma_IPropertyStore_SetValue(ma_IPropertyStore* pThis, const PROPERTYKEY* const pKey, const PROPVARIANT* const pPropVar) { return pThis->lpVtbl->SetValue(pThis, pKey, pPropVar); }
+static MA_INLINE HRESULT ma_IPropertyStore_Commit(ma_IPropertyStore* pThis) { return pThis->lpVtbl->Commit(pThis); }
+
+
+/* IAudioClient */
+/*
+Hand-rolled C binding for the IAudioClient COM interface — the core WASAPI stream object.
+Vtable member order is ABI-critical; do not reorder.
+*/
+typedef struct
+{
+    /* IUnknown */
+    HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IAudioClient* pThis, const IID* const riid, void** ppObject);
+    ULONG   (STDMETHODCALLTYPE * AddRef)        (ma_IAudioClient* pThis);
+    ULONG   (STDMETHODCALLTYPE * Release)       (ma_IAudioClient* pThis);
+
+    /* IAudioClient */
+    HRESULT (STDMETHODCALLTYPE * Initialize)       (ma_IAudioClient* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid);
+    HRESULT (STDMETHODCALLTYPE * GetBufferSize)    (ma_IAudioClient* pThis, ma_uint32* pNumBufferFrames);
+    HRESULT (STDMETHODCALLTYPE * GetStreamLatency) (ma_IAudioClient* pThis, MA_REFERENCE_TIME* pLatency);
+    HRESULT (STDMETHODCALLTYPE * GetCurrentPadding)(ma_IAudioClient* pThis, ma_uint32* pNumPaddingFrames);
+    HRESULT (STDMETHODCALLTYPE * IsFormatSupported)(ma_IAudioClient* pThis, MA_AUDCLNT_SHAREMODE shareMode, const WAVEFORMATEX* pFormat, WAVEFORMATEX** ppClosestMatch);
+    HRESULT (STDMETHODCALLTYPE * GetMixFormat)     (ma_IAudioClient* pThis, WAVEFORMATEX** ppDeviceFormat);
+    HRESULT (STDMETHODCALLTYPE * GetDevicePeriod)  (ma_IAudioClient* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod);
+    HRESULT (STDMETHODCALLTYPE * Start)            (ma_IAudioClient* pThis);
+    HRESULT (STDMETHODCALLTYPE * Stop)             (ma_IAudioClient* pThis);
+    HRESULT (STDMETHODCALLTYPE * Reset)            (ma_IAudioClient* pThis);
+    HRESULT (STDMETHODCALLTYPE * SetEventHandle)   (ma_IAudioClient* pThis, HANDLE eventHandle);
+    HRESULT (STDMETHODCALLTYPE * GetService)       (ma_IAudioClient* pThis, const IID* const riid, void** pp);
+} ma_IAudioClientVtbl;
+struct ma_IAudioClient
+{
+    ma_IAudioClientVtbl* lpVtbl;
+};
+/* Call-through helpers. */
+static MA_INLINE HRESULT ma_IAudioClient_QueryInterface(ma_IAudioClient* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
+static MA_INLINE ULONG ma_IAudioClient_AddRef(ma_IAudioClient* pThis) { return pThis->lpVtbl->AddRef(pThis); }
+static MA_INLINE ULONG ma_IAudioClient_Release(ma_IAudioClient* pThis) { return pThis->lpVtbl->Release(pThis); }
+static MA_INLINE HRESULT ma_IAudioClient_Initialize(ma_IAudioClient* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid) { return pThis->lpVtbl->Initialize(pThis, shareMode, streamFlags, bufferDuration, periodicity, pFormat, pAudioSessionGuid); }
+static MA_INLINE HRESULT ma_IAudioClient_GetBufferSize(ma_IAudioClient* pThis, ma_uint32* pNumBufferFrames) { return pThis->lpVtbl->GetBufferSize(pThis, pNumBufferFrames); }
+static MA_INLINE HRESULT ma_IAudioClient_GetStreamLatency(ma_IAudioClient* pThis, MA_REFERENCE_TIME* pLatency) { return pThis->lpVtbl->GetStreamLatency(pThis, pLatency); }
+static MA_INLINE HRESULT ma_IAudioClient_GetCurrentPadding(ma_IAudioClient* pThis, ma_uint32* pNumPaddingFrames) { return pThis->lpVtbl->GetCurrentPadding(pThis, pNumPaddingFrames); }
+static MA_INLINE HRESULT ma_IAudioClient_IsFormatSupported(ma_IAudioClient* pThis, MA_AUDCLNT_SHAREMODE shareMode, const WAVEFORMATEX* pFormat, WAVEFORMATEX** ppClosestMatch) { return pThis->lpVtbl->IsFormatSupported(pThis, shareMode, pFormat, ppClosestMatch); }
+static MA_INLINE HRESULT ma_IAudioClient_GetMixFormat(ma_IAudioClient* pThis, WAVEFORMATEX** ppDeviceFormat) { return pThis->lpVtbl->GetMixFormat(pThis, ppDeviceFormat); }
+static MA_INLINE HRESULT ma_IAudioClient_GetDevicePeriod(ma_IAudioClient* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod) { return pThis->lpVtbl->GetDevicePeriod(pThis, pDefaultDevicePeriod, pMinimumDevicePeriod); }
+static MA_INLINE HRESULT ma_IAudioClient_Start(ma_IAudioClient* pThis) { return pThis->lpVtbl->Start(pThis); }
+static MA_INLINE HRESULT ma_IAudioClient_Stop(ma_IAudioClient* pThis) { return pThis->lpVtbl->Stop(pThis); }
+static MA_INLINE HRESULT ma_IAudioClient_Reset(ma_IAudioClient* pThis) { return pThis->lpVtbl->Reset(pThis); }
+static MA_INLINE HRESULT ma_IAudioClient_SetEventHandle(ma_IAudioClient* pThis, HANDLE eventHandle) { return pThis->lpVtbl->SetEventHandle(pThis, eventHandle); }
+static MA_INLINE HRESULT ma_IAudioClient_GetService(ma_IAudioClient* pThis, const IID* const riid, void** pp) { return pThis->lpVtbl->GetService(pThis, riid, pp); }
+
+/* IAudioClient2 */
+/*
+Hand-rolled C binding for IAudioClient2 (Windows 8+). COM interfaces inherit by prefixing the
+parent's vtable, so the IAudioClient members are repeated verbatim before the IAudioClient2
+additions. Member order is ABI-critical; do not reorder.
+*/
+typedef struct
+{
+    /* IUnknown */
+    HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IAudioClient2* pThis, const IID* const riid, void** ppObject);
+    ULONG   (STDMETHODCALLTYPE * AddRef)        (ma_IAudioClient2* pThis);
+    ULONG   (STDMETHODCALLTYPE * Release)       (ma_IAudioClient2* pThis);
+
+    /* IAudioClient */
+    HRESULT (STDMETHODCALLTYPE * Initialize)       (ma_IAudioClient2* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid);
+    HRESULT (STDMETHODCALLTYPE * GetBufferSize)    (ma_IAudioClient2* pThis, ma_uint32* pNumBufferFrames);
+    HRESULT (STDMETHODCALLTYPE * GetStreamLatency) (ma_IAudioClient2* pThis, MA_REFERENCE_TIME* pLatency);
+    HRESULT (STDMETHODCALLTYPE * GetCurrentPadding)(ma_IAudioClient2* pThis, ma_uint32* pNumPaddingFrames);
+    HRESULT (STDMETHODCALLTYPE * IsFormatSupported)(ma_IAudioClient2* pThis, MA_AUDCLNT_SHAREMODE shareMode, const WAVEFORMATEX* pFormat, WAVEFORMATEX** ppClosestMatch);
+    HRESULT (STDMETHODCALLTYPE * GetMixFormat)     (ma_IAudioClient2* pThis, WAVEFORMATEX** ppDeviceFormat);
+    HRESULT (STDMETHODCALLTYPE * GetDevicePeriod)  (ma_IAudioClient2* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod);
+    HRESULT (STDMETHODCALLTYPE * Start)            (ma_IAudioClient2* pThis);
+    HRESULT (STDMETHODCALLTYPE * Stop)             (ma_IAudioClient2* pThis);
+    HRESULT (STDMETHODCALLTYPE * Reset)            (ma_IAudioClient2* pThis);
+    HRESULT (STDMETHODCALLTYPE * SetEventHandle)   (ma_IAudioClient2* pThis, HANDLE eventHandle);
+    HRESULT (STDMETHODCALLTYPE * GetService)       (ma_IAudioClient2* pThis, const IID* const riid, void** pp);
+
+    /* IAudioClient2 */
+    HRESULT (STDMETHODCALLTYPE * IsOffloadCapable)   (ma_IAudioClient2* pThis, MA_AUDIO_STREAM_CATEGORY category, BOOL* pOffloadCapable);
+    HRESULT (STDMETHODCALLTYPE * SetClientProperties)(ma_IAudioClient2* pThis, const ma_AudioClientProperties* pProperties);
+    HRESULT (STDMETHODCALLTYPE * GetBufferSizeLimits)(ma_IAudioClient2* pThis, const WAVEFORMATEX* pFormat, BOOL eventDriven, MA_REFERENCE_TIME* pMinBufferDuration, MA_REFERENCE_TIME* pMaxBufferDuration);
+} ma_IAudioClient2Vtbl;
+struct ma_IAudioClient2
+{
+    ma_IAudioClient2Vtbl* lpVtbl;
+};
+/* Call-through helpers. */
+static MA_INLINE HRESULT ma_IAudioClient2_QueryInterface(ma_IAudioClient2* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
+static MA_INLINE ULONG ma_IAudioClient2_AddRef(ma_IAudioClient2* pThis) { return pThis->lpVtbl->AddRef(pThis); }
+static MA_INLINE ULONG ma_IAudioClient2_Release(ma_IAudioClient2* pThis) { return pThis->lpVtbl->Release(pThis); }
+static MA_INLINE HRESULT ma_IAudioClient2_Initialize(ma_IAudioClient2* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid) { return pThis->lpVtbl->Initialize(pThis, shareMode, streamFlags, bufferDuration, periodicity, pFormat, pAudioSessionGuid); }
+static MA_INLINE HRESULT ma_IAudioClient2_GetBufferSize(ma_IAudioClient2* pThis, ma_uint32* pNumBufferFrames) { return pThis->lpVtbl->GetBufferSize(pThis, pNumBufferFrames); }
+static MA_INLINE HRESULT ma_IAudioClient2_GetStreamLatency(ma_IAudioClient2* pThis, MA_REFERENCE_TIME* pLatency) { return pThis->lpVtbl->GetStreamLatency(pThis, pLatency); }
+static MA_INLINE HRESULT ma_IAudioClient2_GetCurrentPadding(ma_IAudioClient2* pThis, ma_uint32* pNumPaddingFrames) { return pThis->lpVtbl->GetCurrentPadding(pThis, pNumPaddingFrames); }
+static MA_INLINE HRESULT ma_IAudioClient2_IsFormatSupported(ma_IAudioClient2* pThis, MA_AUDCLNT_SHAREMODE shareMode, const WAVEFORMATEX* pFormat, WAVEFORMATEX** ppClosestMatch) { return pThis->lpVtbl->IsFormatSupported(pThis, shareMode, pFormat, ppClosestMatch); }
+static MA_INLINE HRESULT ma_IAudioClient2_GetMixFormat(ma_IAudioClient2* pThis, WAVEFORMATEX** ppDeviceFormat) { return pThis->lpVtbl->GetMixFormat(pThis, ppDeviceFormat); }
+static MA_INLINE HRESULT ma_IAudioClient2_GetDevicePeriod(ma_IAudioClient2* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod) { return pThis->lpVtbl->GetDevicePeriod(pThis, pDefaultDevicePeriod, pMinimumDevicePeriod); }
+static MA_INLINE HRESULT ma_IAudioClient2_Start(ma_IAudioClient2* pThis) { return pThis->lpVtbl->Start(pThis); }
+static MA_INLINE HRESULT ma_IAudioClient2_Stop(ma_IAudioClient2* pThis) { return pThis->lpVtbl->Stop(pThis); }
+static MA_INLINE HRESULT ma_IAudioClient2_Reset(ma_IAudioClient2* pThis) { return pThis->lpVtbl->Reset(pThis); }
+static MA_INLINE HRESULT ma_IAudioClient2_SetEventHandle(ma_IAudioClient2* pThis, HANDLE eventHandle) { return pThis->lpVtbl->SetEventHandle(pThis, eventHandle); }
+static MA_INLINE HRESULT ma_IAudioClient2_GetService(ma_IAudioClient2* pThis, const IID* const riid, void** pp) { return pThis->lpVtbl->GetService(pThis, riid, pp); }
+static MA_INLINE HRESULT ma_IAudioClient2_IsOffloadCapable(ma_IAudioClient2* pThis, MA_AUDIO_STREAM_CATEGORY category, BOOL* pOffloadCapable) { return pThis->lpVtbl->IsOffloadCapable(pThis, category, pOffloadCapable); }
+static MA_INLINE HRESULT ma_IAudioClient2_SetClientProperties(ma_IAudioClient2* pThis, const ma_AudioClientProperties* pProperties) { return pThis->lpVtbl->SetClientProperties(pThis, pProperties); }
+static MA_INLINE HRESULT ma_IAudioClient2_GetBufferSizeLimits(ma_IAudioClient2* pThis, const WAVEFORMATEX* pFormat, BOOL eventDriven, MA_REFERENCE_TIME* pMinBufferDuration, MA_REFERENCE_TIME* pMaxBufferDuration) { return pThis->lpVtbl->GetBufferSizeLimits(pThis, pFormat, eventDriven, pMinBufferDuration, pMaxBufferDuration); }
+
+
+/* IAudioClient3 */
+/*
+Hand-rolled C binding for IAudioClient3 (Windows 10+, low-latency shared mode). The IAudioClient
+and IAudioClient2 members are repeated verbatim to mirror COM vtable inheritance. Member order is
+ABI-critical; do not reorder.
+*/
+typedef struct
+{
+    /* IUnknown */
+    HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IAudioClient3* pThis, const IID* const riid, void** ppObject);
+    ULONG   (STDMETHODCALLTYPE * AddRef)        (ma_IAudioClient3* pThis);
+    ULONG   (STDMETHODCALLTYPE * Release)       (ma_IAudioClient3* pThis);
+
+    /* IAudioClient */
+    HRESULT (STDMETHODCALLTYPE * Initialize)       (ma_IAudioClient3* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid);
+    HRESULT (STDMETHODCALLTYPE * GetBufferSize)    (ma_IAudioClient3* pThis, ma_uint32* pNumBufferFrames);
+    HRESULT (STDMETHODCALLTYPE * GetStreamLatency) (ma_IAudioClient3* pThis, MA_REFERENCE_TIME* pLatency);
+    HRESULT (STDMETHODCALLTYPE * GetCurrentPadding)(ma_IAudioClient3* pThis, ma_uint32* pNumPaddingFrames);
+    HRESULT (STDMETHODCALLTYPE * IsFormatSupported)(ma_IAudioClient3* pThis, MA_AUDCLNT_SHAREMODE shareMode, const WAVEFORMATEX* pFormat, WAVEFORMATEX** ppClosestMatch);
+    HRESULT (STDMETHODCALLTYPE * GetMixFormat)     (ma_IAudioClient3* pThis, WAVEFORMATEX** ppDeviceFormat);
+    HRESULT (STDMETHODCALLTYPE * GetDevicePeriod)  (ma_IAudioClient3* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod);
+    HRESULT (STDMETHODCALLTYPE * Start)            (ma_IAudioClient3* pThis);
+    HRESULT (STDMETHODCALLTYPE * Stop)             (ma_IAudioClient3* pThis);
+    HRESULT (STDMETHODCALLTYPE * Reset)            (ma_IAudioClient3* pThis);
+    HRESULT (STDMETHODCALLTYPE * SetEventHandle)   (ma_IAudioClient3* pThis, HANDLE eventHandle);
+    HRESULT (STDMETHODCALLTYPE * GetService)       (ma_IAudioClient3* pThis, const IID* const riid, void** pp);
+
+    /* IAudioClient2 */
+    HRESULT (STDMETHODCALLTYPE * IsOffloadCapable)   (ma_IAudioClient3* pThis, MA_AUDIO_STREAM_CATEGORY category, BOOL* pOffloadCapable);
+    HRESULT (STDMETHODCALLTYPE * SetClientProperties)(ma_IAudioClient3* pThis, const ma_AudioClientProperties* pProperties);
+    HRESULT (STDMETHODCALLTYPE * GetBufferSizeLimits)(ma_IAudioClient3* pThis, const WAVEFORMATEX* pFormat, BOOL eventDriven, MA_REFERENCE_TIME* pMinBufferDuration, MA_REFERENCE_TIME* pMaxBufferDuration);
+
+    /* IAudioClient3 */
+    HRESULT (STDMETHODCALLTYPE * GetSharedModeEnginePeriod)       (ma_IAudioClient3* pThis, const WAVEFORMATEX* pFormat, ma_uint32* pDefaultPeriodInFrames, ma_uint32* pFundamentalPeriodInFrames, ma_uint32* pMinPeriodInFrames, ma_uint32* pMaxPeriodInFrames);
+    HRESULT (STDMETHODCALLTYPE * GetCurrentSharedModeEnginePeriod)(ma_IAudioClient3* pThis, WAVEFORMATEX** ppFormat, ma_uint32* pCurrentPeriodInFrames);
+    HRESULT (STDMETHODCALLTYPE * InitializeSharedAudioStream)     (ma_IAudioClient3* pThis, DWORD streamFlags, ma_uint32 periodInFrames, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid);
+} ma_IAudioClient3Vtbl;
+struct ma_IAudioClient3
+{
+    ma_IAudioClient3Vtbl* lpVtbl;
+};
+/* Call-through helpers. */
+static MA_INLINE HRESULT ma_IAudioClient3_QueryInterface(ma_IAudioClient3* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
+static MA_INLINE ULONG ma_IAudioClient3_AddRef(ma_IAudioClient3* pThis) { return pThis->lpVtbl->AddRef(pThis); }
+static MA_INLINE ULONG ma_IAudioClient3_Release(ma_IAudioClient3* pThis) { return pThis->lpVtbl->Release(pThis); }
+static MA_INLINE HRESULT ma_IAudioClient3_Initialize(ma_IAudioClient3* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid) { return pThis->lpVtbl->Initialize(pThis, shareMode, streamFlags, bufferDuration, periodicity, pFormat, pAudioSessionGuid); }
+static MA_INLINE HRESULT ma_IAudioClient3_GetBufferSize(ma_IAudioClient3* pThis, ma_uint32* pNumBufferFrames) { return pThis->lpVtbl->GetBufferSize(pThis, pNumBufferFrames); }
+static MA_INLINE HRESULT ma_IAudioClient3_GetStreamLatency(ma_IAudioClient3* pThis, MA_REFERENCE_TIME* pLatency) { return pThis->lpVtbl->GetStreamLatency(pThis, pLatency); }
+static MA_INLINE HRESULT ma_IAudioClient3_GetCurrentPadding(ma_IAudioClient3* pThis, ma_uint32* pNumPaddingFrames) { return pThis->lpVtbl->GetCurrentPadding(pThis, pNumPaddingFrames); }
+static MA_INLINE HRESULT ma_IAudioClient3_IsFormatSupported(ma_IAudioClient3* pThis, MA_AUDCLNT_SHAREMODE shareMode, const WAVEFORMATEX* pFormat, WAVEFORMATEX** ppClosestMatch) { return pThis->lpVtbl->IsFormatSupported(pThis, shareMode, pFormat, ppClosestMatch); }
+static MA_INLINE HRESULT ma_IAudioClient3_GetMixFormat(ma_IAudioClient3* pThis, WAVEFORMATEX** ppDeviceFormat) { return pThis->lpVtbl->GetMixFormat(pThis, ppDeviceFormat); }
+static MA_INLINE HRESULT ma_IAudioClient3_GetDevicePeriod(ma_IAudioClient3* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod) { return pThis->lpVtbl->GetDevicePeriod(pThis, pDefaultDevicePeriod, pMinimumDevicePeriod); }
+static MA_INLINE HRESULT ma_IAudioClient3_Start(ma_IAudioClient3* pThis) { return pThis->lpVtbl->Start(pThis); }
+static MA_INLINE HRESULT ma_IAudioClient3_Stop(ma_IAudioClient3* pThis) { return pThis->lpVtbl->Stop(pThis); }
+static MA_INLINE HRESULT ma_IAudioClient3_Reset(ma_IAudioClient3* pThis) { return pThis->lpVtbl->Reset(pThis); }
+static MA_INLINE HRESULT ma_IAudioClient3_SetEventHandle(ma_IAudioClient3* pThis, HANDLE eventHandle) { return pThis->lpVtbl->SetEventHandle(pThis, eventHandle); }
+static MA_INLINE HRESULT ma_IAudioClient3_GetService(ma_IAudioClient3* pThis, const IID* const riid, void** pp) { return pThis->lpVtbl->GetService(pThis, riid, pp); }
+static MA_INLINE HRESULT ma_IAudioClient3_IsOffloadCapable(ma_IAudioClient3* pThis, MA_AUDIO_STREAM_CATEGORY category, BOOL* pOffloadCapable) { return pThis->lpVtbl->IsOffloadCapable(pThis, category, pOffloadCapable); }
+static MA_INLINE HRESULT ma_IAudioClient3_SetClientProperties(ma_IAudioClient3* pThis, const ma_AudioClientProperties* pProperties) { return pThis->lpVtbl->SetClientProperties(pThis, pProperties); }
+static MA_INLINE HRESULT ma_IAudioClient3_GetBufferSizeLimits(ma_IAudioClient3* pThis, const WAVEFORMATEX* pFormat, BOOL eventDriven, MA_REFERENCE_TIME* pMinBufferDuration, MA_REFERENCE_TIME* pMaxBufferDuration) { return pThis->lpVtbl->GetBufferSizeLimits(pThis, pFormat, eventDriven, pMinBufferDuration, pMaxBufferDuration); }
+static MA_INLINE HRESULT ma_IAudioClient3_GetSharedModeEnginePeriod(ma_IAudioClient3* pThis, const WAVEFORMATEX* pFormat, ma_uint32* pDefaultPeriodInFrames, ma_uint32* pFundamentalPeriodInFrames, ma_uint32* pMinPeriodInFrames, ma_uint32* pMaxPeriodInFrames) { return pThis->lpVtbl->GetSharedModeEnginePeriod(pThis, pFormat, pDefaultPeriodInFrames, pFundamentalPeriodInFrames, pMinPeriodInFrames, pMaxPeriodInFrames); }
+static MA_INLINE HRESULT ma_IAudioClient3_GetCurrentSharedModeEnginePeriod(ma_IAudioClient3* pThis, WAVEFORMATEX** ppFormat, ma_uint32* pCurrentPeriodInFrames) { return pThis->lpVtbl->GetCurrentSharedModeEnginePeriod(pThis, ppFormat, pCurrentPeriodInFrames); }
+static MA_INLINE HRESULT ma_IAudioClient3_InitializeSharedAudioStream(ma_IAudioClient3* pThis, DWORD streamFlags, ma_uint32 periodInFrames, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGUID) { return pThis->lpVtbl->InitializeSharedAudioStream(pThis, streamFlags, periodInFrames, pFormat, pAudioSessionGUID); }
+
+
+/* IAudioRenderClient */
+/*
+Hand-rolled C binding for IAudioRenderClient, used to write frames to a playback stream.
+Vtable member order is ABI-critical; do not reorder.
+*/
+typedef struct
+{
+    /* IUnknown */
+    HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IAudioRenderClient* pThis, const IID* const riid, void** ppObject);
+    ULONG   (STDMETHODCALLTYPE * AddRef)        (ma_IAudioRenderClient* pThis);
+    ULONG   (STDMETHODCALLTYPE * Release)       (ma_IAudioRenderClient* pThis);
+
+    /* IAudioRenderClient */
+    HRESULT (STDMETHODCALLTYPE * GetBuffer)    (ma_IAudioRenderClient* pThis, ma_uint32 numFramesRequested, BYTE** ppData);
+    HRESULT (STDMETHODCALLTYPE * ReleaseBuffer)(ma_IAudioRenderClient* pThis, ma_uint32 numFramesWritten, DWORD dwFlags);
+} ma_IAudioRenderClientVtbl;
+struct ma_IAudioRenderClient
+{
+    ma_IAudioRenderClientVtbl* lpVtbl;
+};
+/* Call-through helpers. */
+static MA_INLINE HRESULT ma_IAudioRenderClient_QueryInterface(ma_IAudioRenderClient* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
+static MA_INLINE ULONG ma_IAudioRenderClient_AddRef(ma_IAudioRenderClient* pThis) { return pThis->lpVtbl->AddRef(pThis); }
+static MA_INLINE ULONG ma_IAudioRenderClient_Release(ma_IAudioRenderClient* pThis) { return pThis->lpVtbl->Release(pThis); }
+static MA_INLINE HRESULT ma_IAudioRenderClient_GetBuffer(ma_IAudioRenderClient* pThis, ma_uint32 numFramesRequested, BYTE** ppData) { return pThis->lpVtbl->GetBuffer(pThis, numFramesRequested, ppData); }
+static MA_INLINE HRESULT ma_IAudioRenderClient_ReleaseBuffer(ma_IAudioRenderClient* pThis, ma_uint32 numFramesWritten, DWORD dwFlags) { return pThis->lpVtbl->ReleaseBuffer(pThis, numFramesWritten, dwFlags); }
+
+
+/* IAudioCaptureClient */
+/*
+Hand-rolled C binding for IAudioCaptureClient, used to read frames from a capture/loopback stream.
+Vtable member order is ABI-critical; do not reorder.
+*/
+typedef struct
+{
+    /* IUnknown */
+    HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IAudioCaptureClient* pThis, const IID* const riid, void** ppObject);
+    ULONG   (STDMETHODCALLTYPE * AddRef)        (ma_IAudioCaptureClient* pThis);
+    ULONG   (STDMETHODCALLTYPE * Release)       (ma_IAudioCaptureClient* pThis);
+
+    /* IAudioCaptureClient */
+    HRESULT (STDMETHODCALLTYPE * GetBuffer)        (ma_IAudioCaptureClient* pThis, BYTE** ppData, ma_uint32* pNumFramesToRead, DWORD* pFlags, ma_uint64* pDevicePosition, ma_uint64* pQPCPosition);
+    HRESULT (STDMETHODCALLTYPE * ReleaseBuffer)    (ma_IAudioCaptureClient* pThis, ma_uint32 numFramesRead);
+    HRESULT (STDMETHODCALLTYPE * GetNextPacketSize)(ma_IAudioCaptureClient* pThis, ma_uint32* pNumFramesInNextPacket);
+} ma_IAudioCaptureClientVtbl;
+struct ma_IAudioCaptureClient
+{
+    ma_IAudioCaptureClientVtbl* lpVtbl;
+};
+/* Call-through helpers. */
+static MA_INLINE HRESULT ma_IAudioCaptureClient_QueryInterface(ma_IAudioCaptureClient* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
+static MA_INLINE ULONG ma_IAudioCaptureClient_AddRef(ma_IAudioCaptureClient* pThis) { return pThis->lpVtbl->AddRef(pThis); }
+static MA_INLINE ULONG ma_IAudioCaptureClient_Release(ma_IAudioCaptureClient* pThis) { return pThis->lpVtbl->Release(pThis); }
+static MA_INLINE HRESULT ma_IAudioCaptureClient_GetBuffer(ma_IAudioCaptureClient* pThis, BYTE** ppData, ma_uint32* pNumFramesToRead, DWORD* pFlags, ma_uint64* pDevicePosition, ma_uint64* pQPCPosition) { return pThis->lpVtbl->GetBuffer(pThis, ppData, pNumFramesToRead, pFlags, pDevicePosition, pQPCPosition); }
+static MA_INLINE HRESULT ma_IAudioCaptureClient_ReleaseBuffer(ma_IAudioCaptureClient* pThis, ma_uint32 numFramesRead) { return pThis->lpVtbl->ReleaseBuffer(pThis, numFramesRead); }
+static MA_INLINE HRESULT ma_IAudioCaptureClient_GetNextPacketSize(ma_IAudioCaptureClient* pThis, ma_uint32* pNumFramesInNextPacket) { return pThis->lpVtbl->GetNextPacketSize(pThis, pNumFramesInNextPacket); }
+
+#if !defined(MA_WIN32_DESKTOP) && !defined(MA_WIN32_GDK)
+#include <mmdeviceapi.h>
+typedef struct ma_completion_handler_uwp ma_completion_handler_uwp;
+
+/*
+Minimal IActivateAudioInterfaceCompletionHandler implementation used to wait for the asynchronous
+device activation (ActivateAudioInterfaceAsync) to complete on UWP. Vtable member order is
+ABI-critical; do not reorder.
+*/
+typedef struct
+{
+    /* IUnknown */
+    HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_completion_handler_uwp* pThis, const IID* const riid, void** ppObject);
+    ULONG   (STDMETHODCALLTYPE * AddRef)        (ma_completion_handler_uwp* pThis);
+    ULONG   (STDMETHODCALLTYPE * Release)       (ma_completion_handler_uwp* pThis);
+
+    /* IActivateAudioInterfaceCompletionHandler */
+    HRESULT (STDMETHODCALLTYPE * ActivateCompleted)(ma_completion_handler_uwp* pThis, ma_IActivateAudioInterfaceAsyncOperation* pActivateOperation);
+} ma_completion_handler_uwp_vtbl;
+struct ma_completion_handler_uwp
+{
+    ma_completion_handler_uwp_vtbl* lpVtbl;
+    MA_ATOMIC(4, ma_uint32) counter;    /* COM reference count. The object lives on the stack so reaching zero frees nothing. */
+    HANDLE hEvent;                      /* Signaled by ActivateCompleted(); waited on by ma_completion_handler_uwp_wait(). */
+};
+
+static HRESULT STDMETHODCALLTYPE ma_completion_handler_uwp_QueryInterface(ma_completion_handler_uwp* pThis, const IID* const riid, void** ppObject)
+{
+    /*
+    We need to "implement" IAgileObject which is just an indicator that's used internally by WASAPI for some multithreading management. To
+    "implement" this, we just make sure we return pThis when the IAgileObject is requested.
+    */
+    if (!ma_is_guid_equal(riid, &MA_IID_IUnknown) && !ma_is_guid_equal(riid, &MA_IID_IActivateAudioInterfaceCompletionHandler) && !ma_is_guid_equal(riid, &MA_IID_IAgileObject)) {
+        *ppObject = NULL;
+        return E_NOINTERFACE;
+    }
+
+    /* Getting here means the IID is IUnknown, IActivateAudioInterfaceCompletionHandler or IAgileObject. */
+    *ppObject = (void*)pThis;
+    ((ma_completion_handler_uwp_vtbl*)pThis->lpVtbl)->AddRef(pThis);
+    return S_OK;
+}
+
+static ULONG STDMETHODCALLTYPE ma_completion_handler_uwp_AddRef(ma_completion_handler_uwp* pThis)
+{
+    /* Atomically increment the reference count and return the new value, per COM AddRef() semantics. */
+    return (ULONG)c89atomic_fetch_add_32(&pThis->counter, 1) + 1;
+}
+
+static ULONG STDMETHODCALLTYPE ma_completion_handler_uwp_Release(ma_completion_handler_uwp* pThis)
+{
+    /* Atomically decrement the reference count and return the new value, per COM Release() semantics. */
+    ma_uint32 refCount = c89atomic_fetch_sub_32(&pThis->counter, 1) - 1;
+    if (refCount > 0) {
+        return (ULONG)refCount;
+    }
+
+    /* Count hit zero. Nothing to free - the handler is always allocated on the caller's stack. */
+    return 0;
+}
+
+static HRESULT STDMETHODCALLTYPE ma_completion_handler_uwp_ActivateCompleted(ma_completion_handler_uwp* pThis, ma_IActivateAudioInterfaceAsyncOperation* pActivateOperation)
+{
+    /* Called by WASAPI when the async activation finishes. Just wake up ma_completion_handler_uwp_wait(); the result is retrieved by the caller. */
+    (void)pActivateOperation;
+    SetEvent(pThis->hEvent);
+    return S_OK;
+}
+
+
+/* Shared vtable instance used by every ma_completion_handler_uwp object. */
+static ma_completion_handler_uwp_vtbl g_maCompletionHandlerVtblInstance = {
+    ma_completion_handler_uwp_QueryInterface,
+    ma_completion_handler_uwp_AddRef,
+    ma_completion_handler_uwp_Release,
+    ma_completion_handler_uwp_ActivateCompleted
+};
+
+/*
+Initializes a stack-allocated completion handler: sets up the vtable, a starting reference count
+of 1 and an auto-reset event. Returns MA_SUCCESS, or an error translated from GetLastError() if
+the event cannot be created. Pair with ma_completion_handler_uwp_uninit().
+*/
+static ma_result ma_completion_handler_uwp_init(ma_completion_handler_uwp* pHandler)
+{
+    MA_ASSERT(pHandler != NULL);
+    MA_ZERO_OBJECT(pHandler);
+
+    pHandler->lpVtbl = &g_maCompletionHandlerVtblInstance;
+    pHandler->counter = 1;
+    pHandler->hEvent = CreateEventW(NULL, FALSE, FALSE, NULL);  /* Auto-reset, initially unsignaled. */
+    if (pHandler->hEvent == NULL) {
+        return ma_result_from_GetLastError(GetLastError());
+    }
+
+    return MA_SUCCESS;
+}
+
+/* Tears down a handler previously initialized with ma_completion_handler_uwp_init(). */
+static void ma_completion_handler_uwp_uninit(ma_completion_handler_uwp* pHandler)
+{
+    if (pHandler->hEvent == NULL) {
+        return; /* Nothing was created, or init failed. */
+    }
+
+    CloseHandle(pHandler->hEvent);
+}
+
+/* Blocks indefinitely until ActivateCompleted() signals the event. */
+static void ma_completion_handler_uwp_wait(ma_completion_handler_uwp* pHandler)
+{
+    WaitForSingleObject(pHandler->hEvent, INFINITE);
+}
+#endif /* !MA_WIN32_DESKTOP */
+
+/* We need a virtual table for our notification client object that's used for detecting changes to the default device. */
+#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK)
+static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_QueryInterface(ma_IMMNotificationClient* pThis, const IID* const riid, void** ppObject)
+{
+    /*
+    We care about two interfaces - IUnknown and IMMNotificationClient. If the requested IID is something else
+    we just return E_NOINTERFACE. Otherwise we need to increment the reference counter and return S_OK.
+    */
+    if (!ma_is_guid_equal(riid, &MA_IID_IUnknown) && !ma_is_guid_equal(riid, &MA_IID_IMMNotificationClient)) {
+        *ppObject = NULL;
+        return E_NOINTERFACE;
+    }
+
+    /* Getting here means the IID is IUnknown or IMMNotificationClient. */
+    *ppObject = (void*)pThis;
+    ((ma_IMMNotificationClientVtbl*)pThis->lpVtbl)->AddRef(pThis);
+    return S_OK;
+}
+
+static ULONG STDMETHODCALLTYPE ma_IMMNotificationClient_AddRef(ma_IMMNotificationClient* pThis)
+{
+    /* Atomically increment the reference count and return the new value, per COM AddRef() semantics. */
+    return (ULONG)c89atomic_fetch_add_32(&pThis->counter, 1) + 1;
+}
+
+static ULONG STDMETHODCALLTYPE ma_IMMNotificationClient_Release(ma_IMMNotificationClient* pThis)
+{
+    /* Atomically decrement the reference count and return the new value, per COM Release() semantics. */
+    ma_uint32 refCount = c89atomic_fetch_sub_32(&pThis->counter, 1) - 1;
+    if (refCount > 0) {
+        return (ULONG)refCount;
+    }
+
+    /* Count hit zero. Nothing to free - the notification client is never heap allocated. */
+    return 0;
+}
+
+static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDeviceStateChanged(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID, DWORD dwNewState)
+{
+    /*
+    Called by WASAPI whenever any endpoint changes state. This drives automatic stream routing: if
+    the device we're running on becomes inactive we stop it and mark it as detached; when it becomes
+    active again we reroute and restart it.
+
+    NOTE(review): the IMMNotificationClient documentation does not guarantee a non-NULL device ID in
+    every notification path, so pDeviceID is checked before being passed to wcscmp() to avoid a NULL
+    pointer dereference.
+    */
+    ma_bool32 isThisDevice = MA_FALSE;
+    ma_bool32 isCapture    = MA_FALSE;
+    ma_bool32 isPlayback   = MA_FALSE;
+
+#ifdef MA_DEBUG_OUTPUT
+    /*ma_log_postf(ma_device_get_log(pThis->pDevice), MA_LOG_LEVEL_DEBUG, "IMMNotificationClient_OnDeviceStateChanged(pDeviceID=%S, dwNewState=%u)\n", (pDeviceID != NULL) ? pDeviceID : L"(NULL)", (unsigned int)dwNewState);*/
+#endif
+
+    /*
+    There have been reports of a hang when a playback device is disconnected. The idea with this code is to explicitly stop the device if we detect
+    that the device is disabled or has been unplugged.
+    */
+    if (pThis->pDevice->wasapi.allowCaptureAutoStreamRouting && (pThis->pDevice->type == ma_device_type_capture || pThis->pDevice->type == ma_device_type_duplex || pThis->pDevice->type == ma_device_type_loopback)) {
+        isCapture = MA_TRUE;
+        if (pDeviceID != NULL && wcscmp(pThis->pDevice->capture.id.wasapi, pDeviceID) == 0) {
+            isThisDevice = MA_TRUE;
+        }
+    }
+
+    if (pThis->pDevice->wasapi.allowPlaybackAutoStreamRouting && (pThis->pDevice->type == ma_device_type_playback || pThis->pDevice->type == ma_device_type_duplex)) {
+        isPlayback = MA_TRUE;
+        if (pDeviceID != NULL && wcscmp(pThis->pDevice->playback.id.wasapi, pDeviceID) == 0) {
+            isThisDevice = MA_TRUE;
+        }
+    }
+
+
+    /*
+    If the device ID matches our device we need to mark our device as detached and stop it. When a
+    device is added in OnDeviceAdded(), we'll restart it. We only mark it as detached if the device
+    was started at the time of being removed.
+    */
+    if (isThisDevice) {
+        if ((dwNewState & MA_MM_DEVICE_STATE_ACTIVE) == 0) {
+            /*
+            Unplugged or otherwise unavailable. Mark as detached if we were in a playing state. We'll
+            use this to determine whether or not we need to automatically start the device when it's
+            plugged back in again.
+            */
+            if (ma_device_get_state(pThis->pDevice) == ma_device_state_started) {
+                if (isPlayback) {
+                    pThis->pDevice->wasapi.isDetachedPlayback = MA_TRUE;
+                }
+                if (isCapture) {
+                    pThis->pDevice->wasapi.isDetachedCapture = MA_TRUE;
+                }
+
+                ma_device_stop(pThis->pDevice);
+            }
+        }
+
+        if ((dwNewState & MA_MM_DEVICE_STATE_ACTIVE) != 0) {
+            /* The device was activated. If we were detached, we need to start it again. */
+            ma_bool8 tryRestartingDevice = MA_FALSE;
+
+            if (isPlayback) {
+                if (pThis->pDevice->wasapi.isDetachedPlayback) {
+                    pThis->pDevice->wasapi.isDetachedPlayback = MA_FALSE;
+                    ma_device_reroute__wasapi(pThis->pDevice, ma_device_type_playback);
+                    tryRestartingDevice = MA_TRUE;
+                }
+            }
+
+            if (isCapture) {
+                if (pThis->pDevice->wasapi.isDetachedCapture) {
+                    pThis->pDevice->wasapi.isDetachedCapture = MA_FALSE;
+                    ma_device_reroute__wasapi(pThis->pDevice, (pThis->pDevice->type == ma_device_type_loopback) ? ma_device_type_loopback : ma_device_type_capture);
+                    tryRestartingDevice = MA_TRUE;
+                }
+            }
+
+            /* Only restart once both sides (for duplex) have been reattached. */
+            if (tryRestartingDevice) {
+                if (pThis->pDevice->wasapi.isDetachedPlayback == MA_FALSE && pThis->pDevice->wasapi.isDetachedCapture == MA_FALSE) {
+                    ma_device_start(pThis->pDevice);
+                }
+            }
+        }
+    }
+
+    return S_OK;
+}
+
+static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDeviceAdded(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID)
+{
+#ifdef MA_DEBUG_OUTPUT
+    /*ma_log_postf(ma_device_get_log(pThis->pDevice), MA_LOG_LEVEL_DEBUG, "IMMNotificationClient_OnDeviceAdded(pDeviceID=%S)\n", (pDeviceID != NULL) ? pDeviceID : L"(NULL)");*/
+#endif
+
+    /* Device arrival is of no interest to us; rerouting is driven by state and default-device changes. */
+    (void)pThis;
+    (void)pDeviceID;
+
+    return S_OK;
+}
+
+static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDeviceRemoved(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID)
+{
+#ifdef MA_DEBUG_OUTPUT
+    /*ma_log_postf(ma_device_get_log(pThis->pDevice), MA_LOG_LEVEL_DEBUG, "IMMNotificationClient_OnDeviceRemoved(pDeviceID=%S)\n", (pDeviceID != NULL) ? pDeviceID : L"(NULL)");*/
+#endif
+
+    /* Device removal is handled via OnDeviceStateChanged(), so there's nothing to do here. */
+    (void)pThis;
+    (void)pDeviceID;
+
+    return S_OK;
+}
+
+/*
+Called by the OS when the system default device changes. Implements automatic stream routing for
+shared-mode devices: after filtering out notifications we don't care about, the device is stopped,
+rerouted to the new default endpoint, and restarted if it was previously running.
+*/
+static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDefaultDeviceChanged(ma_IMMNotificationClient* pThis, ma_EDataFlow dataFlow, ma_ERole role, LPCWSTR pDefaultDeviceID)
+{
+#ifdef MA_DEBUG_OUTPUT
+    /*ma_log_postf(ma_device_get_log(pThis->pDevice), MA_LOG_LEVEL_DEBUG, "IMMNotificationClient_OnDefaultDeviceChanged(dataFlow=%d, role=%d, pDefaultDeviceID=%S)\n", dataFlow, role, (pDefaultDeviceID != NULL) ? pDefaultDeviceID : L"(NULL)");*/
+#endif
+
+    /* We only ever use the eConsole role in miniaudio. */
+    if (role != ma_eConsole) {
+        ma_log_postf(ma_device_get_log(pThis->pDevice), MA_LOG_LEVEL_DEBUG, "[WASAPI] Stream rerouting: role != eConsole\n");
+        return S_OK;
+    }
+
+    /* We only care about devices with the same data flow and role as the current device. */
+    if ((pThis->pDevice->type == ma_device_type_playback && dataFlow != ma_eRender) ||
+        (pThis->pDevice->type == ma_device_type_capture  && dataFlow != ma_eCapture)) {
+        ma_log_postf(ma_device_get_log(pThis->pDevice), MA_LOG_LEVEL_DEBUG, "[WASAPI] Stream rerouting abandoned because dataFlow does not match device type.\n");
+        return S_OK;
+    }
+
+    /* Don't do automatic stream routing if we're not allowed. */
+    if ((dataFlow == ma_eRender  && pThis->pDevice->wasapi.allowPlaybackAutoStreamRouting == MA_FALSE) ||
+        (dataFlow == ma_eCapture && pThis->pDevice->wasapi.allowCaptureAutoStreamRouting  == MA_FALSE)) {
+        ma_log_postf(ma_device_get_log(pThis->pDevice), MA_LOG_LEVEL_DEBUG, "[WASAPI] Stream rerouting abandoned because automatic stream routing has been disabled by the device config.\n");
+        return S_OK;
+    }
+
+    /*
+    Not currently supporting automatic stream routing in exclusive mode. This is not working correctly on my machine due to
+    AUDCLNT_E_DEVICE_IN_USE errors when reinitializing the device. If this is a bug in miniaudio, we can try re-enabling this once
+    it's fixed.
+    */
+    if ((dataFlow == ma_eRender  && pThis->pDevice->playback.shareMode == ma_share_mode_exclusive) ||
+        (dataFlow == ma_eCapture && pThis->pDevice->capture.shareMode  == ma_share_mode_exclusive)) {
+        ma_log_postf(ma_device_get_log(pThis->pDevice), MA_LOG_LEVEL_DEBUG, "[WASAPI] Stream rerouting abandoned because the device shared mode is exclusive.\n");
+        return S_OK;
+    }
+
+
+
+
+    /*
+    Second attempt at device rerouting. We're going to retrieve the device's state at the time of
+    the route change. We're then going to stop the device, reinitialize the device, and then start
+    it again if the state before stopping was ma_device_state_started.
+    */
+    {
+        ma_uint32 previousState = ma_device_get_state(pThis->pDevice);
+        ma_bool8 restartDevice = MA_FALSE;
+
+        if (previousState == ma_device_state_started) {
+            ma_device_stop(pThis->pDevice);
+            restartDevice = MA_TRUE;
+        }
+
+        if (pDefaultDeviceID != NULL) { /* <-- The input device ID will be null if there's no other device available. */
+            if (dataFlow == ma_eRender) {
+                ma_device_reroute__wasapi(pThis->pDevice, ma_device_type_playback);
+
+                if (pThis->pDevice->wasapi.isDetachedPlayback) {
+                    pThis->pDevice->wasapi.isDetachedPlayback = MA_FALSE;
+
+                    if (pThis->pDevice->type == ma_device_type_duplex && pThis->pDevice->wasapi.isDetachedCapture) {
+                        restartDevice = MA_FALSE;   /* It's a duplex device and the capture side is detached. We cannot be restarting the device just yet. */
+                    } else {
+                        restartDevice = MA_TRUE;    /* It's not a duplex device, or the capture side is also attached so we can go ahead and restart the device. */
+                    }
+                }
+            } else {
+                ma_device_reroute__wasapi(pThis->pDevice, (pThis->pDevice->type == ma_device_type_loopback) ? ma_device_type_loopback : ma_device_type_capture);
+
+                if (pThis->pDevice->wasapi.isDetachedCapture) {
+                    pThis->pDevice->wasapi.isDetachedCapture = MA_FALSE;
+
+                    if (pThis->pDevice->type == ma_device_type_duplex && pThis->pDevice->wasapi.isDetachedPlayback) {
+                        restartDevice = MA_FALSE;   /* It's a duplex device and the playback side is detached. We cannot be restarting the device just yet. */
+                    } else {
+                        restartDevice = MA_TRUE;    /* It's not a duplex device, or the playback side is also attached so we can go ahead and restart the device. */
+                    }
+                }
+            }
+
+            if (restartDevice) {
+                ma_device_start(pThis->pDevice);
+            }
+        }
+    }
+
+    return S_OK;
+}
+
+static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnPropertyValueChanged(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID, const PROPERTYKEY key)
+{
+#ifdef MA_DEBUG_OUTPUT
+    /*ma_log_postf(ma_device_get_log(pThis->pDevice), MA_LOG_LEVEL_DEBUG, "IMMNotificationClient_OnPropertyValueChanged(pDeviceID=%S)\n", (pDeviceID != NULL) ? pDeviceID : L"(NULL)");*/
+#endif
+
+    /* Property changes don't affect stream routing, so this notification is ignored. */
+    (void)pThis;
+    (void)pDeviceID;
+    (void)key;
+
+    return S_OK;
+}
+
+/*
+Vtable for our IMMNotificationClient implementation. The client instance itself is embedded in the
+device object, so AddRef/Release only track a counter and never free memory.
+*/
+static ma_IMMNotificationClientVtbl g_maNotificationCientVtbl = {
+    ma_IMMNotificationClient_QueryInterface,
+    ma_IMMNotificationClient_AddRef,
+    ma_IMMNotificationClient_Release,
+    ma_IMMNotificationClient_OnDeviceStateChanged,
+    ma_IMMNotificationClient_OnDeviceAdded,
+    ma_IMMNotificationClient_OnDeviceRemoved,
+    ma_IMMNotificationClient_OnDefaultDeviceChanged,
+    ma_IMMNotificationClient_OnPropertyValueChanged
+};
+#endif /* MA_WIN32_DESKTOP */
+
+#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK)
+typedef ma_IMMDevice ma_WASAPIDeviceInterface;  /* Desktop/GDK: devices are accessed through the MMDevice API. */
+#else
+typedef ma_IUnknown ma_WASAPIDeviceInterface;   /* UWP: MMDevice is unavailable; fall back to IUnknown. */
+#endif
+
+
+/* Command codes processed by the WASAPI context's background command thread. */
+#define MA_CONTEXT_COMMAND_QUIT__WASAPI 1
+#define MA_CONTEXT_COMMAND_CREATE_IAUDIOCLIENT__WASAPI 2
+#define MA_CONTEXT_COMMAND_RELEASE_IAUDIOCLIENT__WASAPI 3
+
+static ma_context_command__wasapi ma_context_init_command__wasapi(int code)
+{
+    /* Returns a zero-initialized command with only the code field set. */
+    ma_context_command__wasapi command;
+
+    MA_ZERO_OBJECT(&command);
+    command.code = code;
+
+    return command;
+}
+
+/*
+Posts a command to the context's command thread. If the command does not carry its own event, a
+local event is created and waited on, making the call fully synchronous. If the caller supplied
+an event in pCmd->pEvent, it is the caller's responsibility to wait on it.
+*/
+static ma_result ma_context_post_command__wasapi(ma_context* pContext, const ma_context_command__wasapi* pCmd)
+{
+    /* For now we are doing everything synchronously, but I might relax this later if the need arises. */
+    ma_result result;
+    ma_bool32 isUsingLocalEvent = MA_FALSE;
+    ma_event localEvent;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pCmd != NULL);
+
+    if (pCmd->pEvent == NULL) {
+        isUsingLocalEvent = MA_TRUE;
+
+        result = ma_event_init(&localEvent);
+        if (result != MA_SUCCESS) {
+            return result;  /* Failed to create the event for this command. */
+        }
+    }
+
+    /* Here is where we add the command to the list. If there's not enough room we'll spin until there is. */
+    ma_mutex_lock(&pContext->wasapi.commandLock);
+    {
+        ma_uint32 index;
+
+        /* Spin until we've got some space available. */
+        while (pContext->wasapi.commandCount == ma_countof(pContext->wasapi.commands)) {
+            ma_yield();
+        }
+
+        /* Space is now available. Can safely add to the list. */
+        index = (pContext->wasapi.commandIndex + pContext->wasapi.commandCount) % ma_countof(pContext->wasapi.commands);
+        pContext->wasapi.commands[index] = *pCmd;
+
+        /*
+        Only point the queued command at the local event when we actually created one. Previously
+        pEvent was overwritten unconditionally, which clobbered a caller-supplied event with the
+        address of an *uninitialized* local event that would dangle once this function returned.
+        */
+        if (isUsingLocalEvent) {
+            pContext->wasapi.commands[index].pEvent = &localEvent;
+        }
+
+        pContext->wasapi.commandCount += 1;
+
+        /* Now that the command has been added, release the semaphore so ma_context_next_command__wasapi() can return. */
+        ma_semaphore_release(&pContext->wasapi.commandSem);
+    }
+    ma_mutex_unlock(&pContext->wasapi.commandLock);
+
+    if (isUsingLocalEvent) {
+        ma_event_wait(&localEvent);
+        ma_event_uninit(&localEvent);
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_context_next_command__wasapi(ma_context* pContext, ma_context_command__wasapi* pCmd)
+{
+    /*
+    Blocks on the command semaphore until a command is available, then pops the oldest command from
+    the circular buffer into *pCmd. Returns the semaphore wait result on failure.
+    */
+    ma_result result;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pCmd != NULL);
+
+    result = ma_semaphore_wait(&pContext->wasapi.commandSem);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    ma_mutex_lock(&pContext->wasapi.commandLock);
+    {
+        *pCmd = pContext->wasapi.commands[pContext->wasapi.commandIndex];
+        pContext->wasapi.commandIndex  = (pContext->wasapi.commandIndex + 1) % ma_countof(pContext->wasapi.commands);
+        pContext->wasapi.commandCount -= 1;
+    }
+    ma_mutex_unlock(&pContext->wasapi.commandLock);
+
+    return result;
+}
+
+/*
+Entry point for the WASAPI context's command thread. Pops commands off the queue and executes them
+until a quit command is received. Note the ordering: the command's event is always signalled before
+the quit check so that a poster waiting on the quit command itself is never left blocked.
+*/
+static ma_thread_result MA_THREADCALL ma_context_command_thread__wasapi(void* pUserData)
+{
+    ma_result result;
+    ma_context* pContext = (ma_context*)pUserData;
+    MA_ASSERT(pContext != NULL);
+
+    for (;;) {
+        ma_context_command__wasapi cmd;
+        result = ma_context_next_command__wasapi(pContext, &cmd);
+        if (result != MA_SUCCESS) {
+            break;
+        }
+
+        switch (cmd.code)
+        {
+            case MA_CONTEXT_COMMAND_QUIT__WASAPI:
+            {
+                /* Do nothing. Handled after the switch. */
+            } break;
+
+            case MA_CONTEXT_COMMAND_CREATE_IAUDIOCLIENT__WASAPI:
+            {
+                /* Retrieve the render or capture service from the audio client, reporting the result through the command's pResult pointer. */
+                if (cmd.data.createAudioClient.deviceType == ma_device_type_playback) {
+                    *cmd.data.createAudioClient.pResult = ma_result_from_HRESULT(ma_IAudioClient_GetService((ma_IAudioClient*)cmd.data.createAudioClient.pAudioClient, &MA_IID_IAudioRenderClient, cmd.data.createAudioClient.ppAudioClientService));
+                } else {
+                    *cmd.data.createAudioClient.pResult = ma_result_from_HRESULT(ma_IAudioClient_GetService((ma_IAudioClient*)cmd.data.createAudioClient.pAudioClient, &MA_IID_IAudioCaptureClient, cmd.data.createAudioClient.ppAudioClientService));
+                }
+            } break;
+
+            case MA_CONTEXT_COMMAND_RELEASE_IAUDIOCLIENT__WASAPI:
+            {
+                /* Release and clear the device's audio client for the requested side, if present. */
+                if (cmd.data.releaseAudioClient.deviceType == ma_device_type_playback) {
+                    if (cmd.data.releaseAudioClient.pDevice->wasapi.pAudioClientPlayback != NULL) {
+                        ma_IAudioClient_Release((ma_IAudioClient*)cmd.data.releaseAudioClient.pDevice->wasapi.pAudioClientPlayback);
+                        cmd.data.releaseAudioClient.pDevice->wasapi.pAudioClientPlayback = NULL;
+                    }
+                }
+
+                if (cmd.data.releaseAudioClient.deviceType == ma_device_type_capture) {
+                    if (cmd.data.releaseAudioClient.pDevice->wasapi.pAudioClientCapture != NULL) {
+                        ma_IAudioClient_Release((ma_IAudioClient*)cmd.data.releaseAudioClient.pDevice->wasapi.pAudioClientCapture);
+                        cmd.data.releaseAudioClient.pDevice->wasapi.pAudioClientCapture = NULL;
+                    }
+                }
+            } break;
+
+            default:
+            {
+                /* Unknown command. Ignore it, but trigger an assert in debug mode so we're aware of it. */
+                MA_ASSERT(MA_FALSE);
+            } break;
+        }
+
+        /* Signal the poster before acting on quit so nobody is left waiting. */
+        if (cmd.pEvent != NULL) {
+            ma_event_signal(cmd.pEvent);
+        }
+
+        if (cmd.code == MA_CONTEXT_COMMAND_QUIT__WASAPI) {
+            break;  /* Received a quit message. Get out of here. */
+        }
+    }
+
+    return (ma_thread_result)0;
+}
+
+/*
+Creates the IAudioRenderClient/IAudioCaptureClient service for the given audio client via the
+command thread. ma_context_post_command__wasapi() is synchronous (the command carries no event of
+its own), so taking the address of the local cmdResult is safe: the command thread is guaranteed
+to have finished with it before this function returns.
+*/
+static ma_result ma_device_create_IAudioClient_service__wasapi(ma_context* pContext, ma_device_type deviceType, ma_IAudioClient* pAudioClient, void** ppAudioClientService)
+{
+    ma_result result;
+    ma_result cmdResult;
+    ma_context_command__wasapi cmd = ma_context_init_command__wasapi(MA_CONTEXT_COMMAND_CREATE_IAUDIOCLIENT__WASAPI);
+    cmd.data.createAudioClient.deviceType = deviceType;
+    cmd.data.createAudioClient.pAudioClient = (void*)pAudioClient;
+    cmd.data.createAudioClient.ppAudioClientService = ppAudioClientService;
+    cmd.data.createAudioClient.pResult = &cmdResult;    /* Declared locally, but won't be dereferenced after this function returns since execution of the command will wait here. */
+
+    result = ma_context_post_command__wasapi(pContext, &cmd);   /* This will not return until the command has actually been run. */
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    return *cmd.data.createAudioClient.pResult;
+}
+
+#if 0 /* Not used at the moment, but leaving here for future use. */
+/*
+Releases the device's IAudioClient for the given side via the command thread. Synchronous: the
+post call does not return until the command thread has executed the release.
+*/
+static ma_result ma_device_release_IAudioClient_service__wasapi(ma_device* pDevice, ma_device_type deviceType)
+{
+    ma_result result;
+    ma_context_command__wasapi cmd = ma_context_init_command__wasapi(MA_CONTEXT_COMMAND_RELEASE_IAUDIOCLIENT__WASAPI);
+    cmd.data.releaseAudioClient.pDevice = pDevice;
+    cmd.data.releaseAudioClient.deviceType = deviceType;
+
+    result = ma_context_post_command__wasapi(pDevice->pContext, &cmd);  /* This will not return until the command has actually been run. */
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    return MA_SUCCESS;
+}
+#endif
+
+
+/*
+Appends the format described by pWF to pInfo's native data format list. Delegates to
+ma_device_info_add_native_data_format(), which performs the capacity check and silently ignores
+the format when the list is full (shouldn't ever happen with WASAPI). Previously this function
+duplicated that helper's logic inline.
+*/
+static void ma_add_native_data_format_to_device_info_from_WAVEFORMATEX(const WAVEFORMATEX* pWF, ma_share_mode shareMode, ma_device_info* pInfo)
+{
+    MA_ASSERT(pWF != NULL);
+    MA_ASSERT(pInfo != NULL);
+
+    ma_device_info_add_native_data_format(pInfo, ma_format_from_WAVEFORMATEX(pWF), pWF->nChannels, pWF->nSamplesPerSec, (shareMode == ma_share_mode_exclusive) ? MA_DATA_FORMAT_FLAG_EXCLUSIVE_MODE : 0);
+}
+
+/*
+Fills out pInfo's native data formats for the given audio client. The shared-mode format comes
+from GetMixFormat(); on desktop builds an exclusive-mode format is also probed. Failure to find
+an exclusive-mode format is not an error, so once the mix format has been retrieved this function
+always returns MA_SUCCESS.
+*/
+static ma_result ma_context_get_device_info_from_IAudioClient__wasapi(ma_context* pContext, /*ma_IMMDevice**/void* pMMDevice, ma_IAudioClient* pAudioClient, ma_device_info* pInfo)
+{
+    HRESULT hr;
+    WAVEFORMATEX* pWF = NULL;
+
+    MA_ASSERT(pAudioClient != NULL);
+    MA_ASSERT(pInfo != NULL);
+
+    /* Shared Mode. We use GetMixFormat() here. */
+    hr = ma_IAudioClient_GetMixFormat((ma_IAudioClient*)pAudioClient, (WAVEFORMATEX**)&pWF);
+    if (SUCCEEDED(hr)) {
+        ma_add_native_data_format_to_device_info_from_WAVEFORMATEX(pWF, ma_share_mode_shared, pInfo);
+    } else {
+        ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve mix format for device info retrieval.");
+        return ma_result_from_HRESULT(hr);
+    }
+
+    /*
+    Exclusive Mode. We repeatedly call IsFormatSupported() here. This is not currently supported on
+    UWP. Failure to retrieve the exclusive mode format is not considered an error, so from here on
+    out, MA_SUCCESS is guaranteed to be returned.
+    */
+    #if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK)
+    {
+        ma_IPropertyStore *pProperties;
+
+        /*
+        The first thing to do is get the format from PKEY_AudioEngine_DeviceFormat. This should give us a channel count we assume is
+        correct which will simplify our searching.
+        */
+        hr = ma_IMMDevice_OpenPropertyStore((ma_IMMDevice*)pMMDevice, STGM_READ, &pProperties);
+        if (SUCCEEDED(hr)) {
+            PROPVARIANT var;
+            ma_PropVariantInit(&var);
+
+            hr = ma_IPropertyStore_GetValue(pProperties, &MA_PKEY_AudioEngine_DeviceFormat, &var);
+            if (SUCCEEDED(hr)) {
+                pWF = (WAVEFORMATEX*)var.blob.pBlobData;
+
+                /*
+                In my testing, the format returned by PKEY_AudioEngine_DeviceFormat is suitable for exclusive mode so we check this format
+                first. If this fails, fall back to a search.
+                */
+                hr = ma_IAudioClient_IsFormatSupported((ma_IAudioClient*)pAudioClient, MA_AUDCLNT_SHAREMODE_EXCLUSIVE, pWF, NULL);
+                if (SUCCEEDED(hr)) {
+                    /* The format returned by PKEY_AudioEngine_DeviceFormat is supported. */
+                    ma_add_native_data_format_to_device_info_from_WAVEFORMATEX(pWF, ma_share_mode_exclusive, pInfo);
+                } else {
+                    /*
+                    The format returned by PKEY_AudioEngine_DeviceFormat is not supported, so fall back to a search. We assume the channel
+                    count returned by MA_PKEY_AudioEngine_DeviceFormat is valid and correct. For simplicity we're only returning one format.
+                    */
+                    ma_uint32 channels = pWF->nChannels;
+                    ma_channel defaultChannelMap[MA_MAX_CHANNELS];
+                    WAVEFORMATEXTENSIBLE wf;
+                    ma_bool32 found;
+                    ma_uint32 iFormat;
+
+                    /* Make sure we don't overflow the channel map. */
+                    if (channels > MA_MAX_CHANNELS) {
+                        channels = MA_MAX_CHANNELS;
+                    }
+
+                    ma_channel_map_init_standard(ma_standard_channel_map_microsoft, defaultChannelMap, ma_countof(defaultChannelMap), channels);
+
+                    MA_ZERO_OBJECT(&wf);
+                    wf.Format.cbSize     = sizeof(wf);
+                    wf.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
+                    wf.Format.nChannels  = (WORD)channels;
+                    wf.dwChannelMask     = ma_channel_map_to_channel_mask__win32(defaultChannelMap, channels);
+
+                    /* Search formats in priority order, and for each format search sample rates in priority order. Stop at the first supported combination. */
+                    found = MA_FALSE;
+                    for (iFormat = 0; iFormat < ma_countof(g_maFormatPriorities); ++iFormat) {
+                        ma_format format = g_maFormatPriorities[iFormat];
+                        ma_uint32 iSampleRate;
+
+                        wf.Format.wBitsPerSample       = (WORD)(ma_get_bytes_per_sample(format)*8);
+                        wf.Format.nBlockAlign          = (WORD)(wf.Format.nChannels * wf.Format.wBitsPerSample / 8);
+                        wf.Format.nAvgBytesPerSec      = wf.Format.nBlockAlign * wf.Format.nSamplesPerSec;
+                        wf.Samples.wValidBitsPerSample = /*(format == ma_format_s24_32) ? 24 :*/ wf.Format.wBitsPerSample;
+                        if (format == ma_format_f32) {
+                            wf.SubFormat = MA_GUID_KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;
+                        } else {
+                            wf.SubFormat = MA_GUID_KSDATAFORMAT_SUBTYPE_PCM;
+                        }
+
+                        for (iSampleRate = 0; iSampleRate < ma_countof(g_maStandardSampleRatePriorities); ++iSampleRate) {
+                            wf.Format.nSamplesPerSec = g_maStandardSampleRatePriorities[iSampleRate];
+
+                            hr = ma_IAudioClient_IsFormatSupported((ma_IAudioClient*)pAudioClient, MA_AUDCLNT_SHAREMODE_EXCLUSIVE, (WAVEFORMATEX*)&wf, NULL);
+                            if (SUCCEEDED(hr)) {
+                                ma_add_native_data_format_to_device_info_from_WAVEFORMATEX((WAVEFORMATEX*)&wf, ma_share_mode_exclusive, pInfo);
+                                found = MA_TRUE;
+                                break;
+                            }
+                        }
+
+                        if (found) {
+                            break;
+                        }
+                    }
+
+                    if (!found) {
+                        ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_WARNING, "[WASAPI] Failed to find suitable device format for device info retrieval.");
+                    }
+                }
+
+                /*
+                Clear the PROPVARIANT on both paths. Previously this was only done on the fallback
+                search path, which leaked the blob returned by GetValue() whenever the device format
+                was supported directly.
+                */
+                ma_PropVariantClear(pContext, &var);
+            } else {
+                ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_WARNING, "[WASAPI] Failed to retrieve device format for device info retrieval.");
+            }
+
+            ma_IPropertyStore_Release(pProperties);
+        } else {
+            ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_WARNING, "[WASAPI] Failed to open property store for device info retrieval.");
+        }
+    }
+    #endif
+
+    return MA_SUCCESS;
+}
+
+#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK)
+static ma_EDataFlow ma_device_type_to_EDataFlow(ma_device_type deviceType)
+{
+    /* Maps a miniaudio device type to the corresponding MMDevice data flow direction. Only playback and capture are valid inputs. */
+    switch (deviceType)
+    {
+        case ma_device_type_playback: return ma_eRender;
+        case ma_device_type_capture:  return ma_eCapture;
+        default:
+        {
+            MA_ASSERT(MA_FALSE);
+            return ma_eRender;  /* Should never hit this. */
+        }
+    }
+}
+
+static ma_result ma_context_create_IMMDeviceEnumerator__wasapi(ma_context* pContext, ma_IMMDeviceEnumerator** ppDeviceEnumerator)
+{
+    /* Creates an IMMDeviceEnumerator. On failure *ppDeviceEnumerator is left as NULL. */
+    HRESULT hr;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(ppDeviceEnumerator != NULL);
+
+    *ppDeviceEnumerator = NULL; /* Safety. */
+
+    hr = ma_CoCreateInstance(pContext, MA_CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, MA_IID_IMMDeviceEnumerator, (void**)ppDeviceEnumerator);
+    if (FAILED(hr)) {
+        *ppDeviceEnumerator = NULL; /* Don't hand back garbage on failure. */
+        ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create device enumerator.");
+        return ma_result_from_HRESULT(hr);
+    }
+
+    return MA_SUCCESS;
+}
+
+static LPWSTR ma_context_get_default_device_id_from_IMMDeviceEnumerator__wasapi(ma_context* pContext, ma_IMMDeviceEnumerator* pDeviceEnumerator, ma_device_type deviceType)
+{
+    /*
+    Returns the ID of the default endpoint for the given device type, or NULL on failure. The
+    caller must free the returned string with ma_CoTaskMemFree(). The role is always eConsole for
+    now, but may be made configurable later.
+    */
+    HRESULT hr;
+    ma_IMMDevice* pMMDefaultDevice = NULL;
+    LPWSTR pDefaultDeviceID = NULL;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pDeviceEnumerator != NULL);
+
+    (void)pContext;
+
+    hr = ma_IMMDeviceEnumerator_GetDefaultAudioEndpoint(pDeviceEnumerator, ma_device_type_to_EDataFlow(deviceType), ma_eConsole, &pMMDefaultDevice);
+    if (FAILED(hr)) {
+        return NULL;
+    }
+
+    /* We only need the device for long enough to grab its ID. */
+    hr = ma_IMMDevice_GetId(pMMDefaultDevice, &pDefaultDeviceID);
+
+    ma_IMMDevice_Release(pMMDefaultDevice);
+    pMMDefaultDevice = NULL;
+
+    if (FAILED(hr)) {
+        return NULL;
+    }
+
+    return pDefaultDeviceID;
+}
+
+static LPWSTR ma_context_get_default_device_id__wasapi(ma_context* pContext, ma_device_type deviceType) /* Free the returned pointer with ma_CoTaskMemFree() */
+{
+    /* Convenience wrapper: creates a temporary enumerator, queries the default device ID, then releases the enumerator. */
+    ma_IMMDeviceEnumerator* pDeviceEnumerator;
+    LPWSTR pDefaultDeviceID;
+
+    MA_ASSERT(pContext != NULL);
+
+    if (ma_context_create_IMMDeviceEnumerator__wasapi(pContext, &pDeviceEnumerator) != MA_SUCCESS) {
+        return NULL;
+    }
+
+    pDefaultDeviceID = ma_context_get_default_device_id_from_IMMDeviceEnumerator__wasapi(pContext, pDeviceEnumerator, deviceType);
+
+    ma_IMMDeviceEnumerator_Release(pDeviceEnumerator);
+    return pDefaultDeviceID;
+}
+
+static ma_result ma_context_get_MMDevice__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_IMMDevice** ppMMDevice)
+{
+    /*
+    Retrieves the IMMDevice for the given ID, or the default endpoint for the device type when
+    pDeviceID is NULL. On success the caller owns *ppMMDevice and must Release() it.
+    */
+    HRESULT hr;
+    ma_IMMDeviceEnumerator* pDeviceEnumerator;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(ppMMDevice != NULL);
+
+    hr = ma_CoCreateInstance(pContext, MA_CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, MA_IID_IMMDeviceEnumerator, (void**)&pDeviceEnumerator);
+    if (FAILED(hr)) {
+        ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create IMMDeviceEnumerator.");
+        return ma_result_from_HRESULT(hr);
+    }
+
+    if (pDeviceID != NULL) {
+        hr = ma_IMMDeviceEnumerator_GetDevice(pDeviceEnumerator, pDeviceID->wasapi, ppMMDevice);
+    } else {
+        /* No explicit device ID so fall back to the default endpoint. The role is always eConsole in miniaudio. */
+        hr = ma_IMMDeviceEnumerator_GetDefaultAudioEndpoint(pDeviceEnumerator, (deviceType == ma_device_type_capture) ? ma_eCapture : ma_eRender, ma_eConsole, ppMMDevice);
+    }
+
+    ma_IMMDeviceEnumerator_Release(pDeviceEnumerator);
+
+    if (FAILED(hr)) {
+        ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve IMMDevice.");
+        return ma_result_from_HRESULT(hr);
+    }
+
+    return MA_SUCCESS;
+}
+
+/*
+Copies the MMDevice's ID string into pDeviceID->wasapi (a fixed-size wchar_t buffer). Returns
+MA_ERROR if the ID cannot be retrieved or does not fit.
+*/
+static ma_result ma_context_get_device_id_from_MMDevice__wasapi(ma_context* pContext, ma_IMMDevice* pMMDevice, ma_device_id* pDeviceID)
+{
+    LPWSTR pDeviceIDString;
+    HRESULT hr;
+
+    MA_ASSERT(pDeviceID != NULL);
+
+    hr = ma_IMMDevice_GetId(pMMDevice, &pDeviceIDString);
+    if (SUCCEEDED(hr)) {
+        size_t idlen = wcslen(pDeviceIDString);
+        if (idlen+1 > ma_countof(pDeviceID->wasapi)) {
+            ma_CoTaskMemFree(pContext, pDeviceIDString);
+            MA_ASSERT(MA_FALSE);  /* NOTE: If this is triggered, please report it. It means the format of the ID must have changed and is too long to fit in our fixed sized buffer. */
+            return MA_ERROR;
+        }
+
+        /* Copy the characters then explicitly terminate; the string returned by GetId() is owned by COM and must be freed. */
+        MA_COPY_MEMORY(pDeviceID->wasapi, pDeviceIDString, idlen * sizeof(wchar_t));
+        pDeviceID->wasapi[idlen] = '\0';
+
+        ma_CoTaskMemFree(pContext, pDeviceIDString);
+
+        return MA_SUCCESS;
+    }
+
+    return MA_ERROR;
+}
+
+/*
+Fills out pInfo from the given MMDevice: the device ID (and default flag), the friendly name, and
+- unless onlySimpleInfo is set - the native data formats. A failure to retrieve the ID or the name
+is tolerated; only a format-retrieval failure produces an error result.
+*/
+static ma_result ma_context_get_device_info_from_MMDevice__wasapi(ma_context* pContext, ma_IMMDevice* pMMDevice, LPWSTR pDefaultDeviceID, ma_bool32 onlySimpleInfo, ma_device_info* pInfo)
+{
+    ma_result result;
+    HRESULT hr;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pMMDevice != NULL);
+    MA_ASSERT(pInfo != NULL);
+
+    /* ID. Also flag the device as the default if its ID matches the supplied default device ID. */
+    result = ma_context_get_device_id_from_MMDevice__wasapi(pContext, pMMDevice, &pInfo->id);
+    if (result == MA_SUCCESS) {
+        if (pDefaultDeviceID != NULL) {
+            if (wcscmp(pInfo->id.wasapi, pDefaultDeviceID) == 0) {
+                pInfo->isDefault = MA_TRUE;
+            }
+        }
+    }
+
+    /* Description / Friendly Name. Failures here are silently ignored; the name just stays empty. */
+    {
+        ma_IPropertyStore *pProperties;
+        hr = ma_IMMDevice_OpenPropertyStore(pMMDevice, STGM_READ, &pProperties);
+        if (SUCCEEDED(hr)) {
+            PROPVARIANT var;
+
+            ma_PropVariantInit(&var);
+            hr = ma_IPropertyStore_GetValue(pProperties, &MA_PKEY_Device_FriendlyName, &var);
+            if (SUCCEEDED(hr)) {
+                /* Convert the wide-character friendly name to UTF-8 for pInfo->name. */
+                WideCharToMultiByte(CP_UTF8, 0, var.pwszVal, -1, pInfo->name, sizeof(pInfo->name), 0, FALSE);
+                ma_PropVariantClear(pContext, &var);
+            }
+
+            ma_IPropertyStore_Release(pProperties);
+        }
+    }
+
+    /* Format. Skipped entirely for simple-info queries (e.g. during enumeration) since it requires activating an audio client. */
+    if (!onlySimpleInfo) {
+        ma_IAudioClient* pAudioClient;
+        hr = ma_IMMDevice_Activate(pMMDevice, &MA_IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&pAudioClient);
+        if (SUCCEEDED(hr)) {
+            result = ma_context_get_device_info_from_IAudioClient__wasapi(pContext, pMMDevice, pAudioClient, pInfo);
+
+            ma_IAudioClient_Release(pAudioClient);
+            return result;
+        } else {
+            ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to activate audio client for device info retrieval.");
+            return ma_result_from_HRESULT(hr);
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+/*
+Enumerates all active endpoints of the given type, invoking the callback once per device with
+simple info (ID, name, default flag - no formats). Enumeration stops early if the callback
+returns MA_FALSE.
+*/
+static ma_result ma_context_enumerate_devices_by_type__wasapi(ma_context* pContext, ma_IMMDeviceEnumerator* pDeviceEnumerator, ma_device_type deviceType, ma_enum_devices_callback_proc callback, void* pUserData)
+{
+    ma_result result = MA_SUCCESS;
+    UINT deviceCount;
+    HRESULT hr;
+    ma_uint32 iDevice;
+    LPWSTR pDefaultDeviceID = NULL;
+    ma_IMMDeviceCollection* pDeviceCollection = NULL;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(callback != NULL);
+
+    /* Grab the default device. We use this to know whether or not flag the returned device info as being the default. */
+    pDefaultDeviceID = ma_context_get_default_device_id_from_IMMDeviceEnumerator__wasapi(pContext, pDeviceEnumerator, deviceType);
+
+    /*
+    We need to enumerate the devices which returns a device collection.
+    NOTE(review): if EnumAudioEndpoints() itself fails, result stays MA_SUCCESS and no error is
+    logged - presumably intentional best-effort behavior; confirm before changing.
+    */
+    hr = ma_IMMDeviceEnumerator_EnumAudioEndpoints(pDeviceEnumerator, ma_device_type_to_EDataFlow(deviceType), MA_MM_DEVICE_STATE_ACTIVE, &pDeviceCollection);
+    if (SUCCEEDED(hr)) {
+        hr = ma_IMMDeviceCollection_GetCount(pDeviceCollection, &deviceCount);
+        if (FAILED(hr)) {
+            ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to get device count.");
+            result = ma_result_from_HRESULT(hr);
+            goto done;
+        }
+
+        for (iDevice = 0; iDevice < deviceCount; ++iDevice) {
+            ma_device_info deviceInfo;
+            ma_IMMDevice* pMMDevice;
+
+            MA_ZERO_OBJECT(&deviceInfo);
+
+            hr = ma_IMMDeviceCollection_Item(pDeviceCollection, iDevice, &pMMDevice);
+            if (SUCCEEDED(hr)) {
+                result = ma_context_get_device_info_from_MMDevice__wasapi(pContext, pMMDevice, pDefaultDeviceID, MA_TRUE, &deviceInfo);   /* MA_TRUE = onlySimpleInfo. */
+
+                ma_IMMDevice_Release(pMMDevice);
+                if (result == MA_SUCCESS) {
+                    /* The callback can abort enumeration by returning MA_FALSE. */
+                    ma_bool32 cbResult = callback(pContext, deviceType, &deviceInfo, pUserData);
+                    if (cbResult == MA_FALSE) {
+                        break;
+                    }
+                }
+            }
+        }
+    }
+
+done:
+    if (pDefaultDeviceID != NULL) {
+        ma_CoTaskMemFree(pContext, pDefaultDeviceID);
+        pDefaultDeviceID = NULL;
+    }
+
+    if (pDeviceCollection != NULL) {
+        ma_IMMDeviceCollection_Release(pDeviceCollection);
+        pDeviceCollection = NULL;
+    }
+
+    return result;
+}
+
+/*
+Desktop path for acquiring an IAudioClient: resolves the IMMDevice for the requested device and
+activates an IAudioClient on it. On success the caller owns both *ppAudioClient and *ppMMDevice.
+*/
+static ma_result ma_context_get_IAudioClient_Desktop__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_IAudioClient** ppAudioClient, ma_IMMDevice** ppMMDevice)
+{
+    ma_result result;
+    HRESULT hr;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(ppAudioClient != NULL);
+    MA_ASSERT(ppMMDevice != NULL);
+
+    result = ma_context_get_MMDevice__wasapi(pContext, deviceType, pDeviceID, ppMMDevice);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    hr = ma_IMMDevice_Activate(*ppMMDevice, &MA_IID_IAudioClient, CLSCTX_ALL, NULL, (void**)ppAudioClient);
+    if (FAILED(hr)) {
+        /* Don't leak the IMMDevice we just acquired. Previously it was left un-Released on this error path. */
+        ma_IMMDevice_Release(*ppMMDevice);
+        *ppMMDevice = NULL;
+        return ma_result_from_HRESULT(hr);
+    }
+
+    return MA_SUCCESS;
+}
+#else
+/*
+UWP path for acquiring an IAudioClient. Uses ActivateAudioInterfaceAsync() with a completion
+handler that we block on, making the overall operation synchronous. When pDeviceID is NULL the
+default render/capture device interface IID is used. If ppActivatedInterface is non-NULL the
+caller takes ownership of the activated IUnknown; otherwise it is released here.
+*/
+static ma_result ma_context_get_IAudioClient_UWP__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_IAudioClient** ppAudioClient, ma_IUnknown** ppActivatedInterface)
+{
+    ma_IActivateAudioInterfaceAsyncOperation *pAsyncOp = NULL;
+    ma_completion_handler_uwp completionHandler;
+    IID iid;
+    LPOLESTR iidStr;
+    HRESULT hr;
+    ma_result result;
+    HRESULT activateResult;
+    ma_IUnknown* pActivatedInterface;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(ppAudioClient != NULL);
+
+    /* On UWP the device ID is itself an IID. Default to the render/capture device interface IID when no ID was given. */
+    if (pDeviceID != NULL) {
+        MA_COPY_MEMORY(&iid, pDeviceID->wasapi, sizeof(iid));
+    } else {
+        if (deviceType == ma_device_type_playback) {
+            iid = MA_IID_DEVINTERFACE_AUDIO_RENDER;
+        } else {
+            iid = MA_IID_DEVINTERFACE_AUDIO_CAPTURE;
+        }
+    }
+
+/* StringFromIID() takes the IID by reference in C++ and by pointer in C. */
+#if defined(__cplusplus)
+    hr = StringFromIID(iid, &iidStr);
+#else
+    hr = StringFromIID(&iid, &iidStr);
+#endif
+    if (FAILED(hr)) {
+        ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to convert device IID to string for ActivateAudioInterfaceAsync(). Out of memory.");
+        return ma_result_from_HRESULT(hr);
+    }
+
+    result = ma_completion_handler_uwp_init(&completionHandler);
+    if (result != MA_SUCCESS) {
+        ma_CoTaskMemFree(pContext, iidStr);
+        ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create event for waiting for ActivateAudioInterfaceAsync().");
+        return result;
+    }
+
+#if defined(__cplusplus)
+    hr = ActivateAudioInterfaceAsync(iidStr, MA_IID_IAudioClient, NULL, (IActivateAudioInterfaceCompletionHandler*)&completionHandler, (IActivateAudioInterfaceAsyncOperation**)&pAsyncOp);
+#else
+    hr = ActivateAudioInterfaceAsync(iidStr, &MA_IID_IAudioClient, NULL, (IActivateAudioInterfaceCompletionHandler*)&completionHandler, (IActivateAudioInterfaceAsyncOperation**)&pAsyncOp);
+#endif
+    if (FAILED(hr)) {
+        ma_completion_handler_uwp_uninit(&completionHandler);
+        ma_CoTaskMemFree(pContext, iidStr);
+        ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] ActivateAudioInterfaceAsync() failed.");
+        return ma_result_from_HRESULT(hr);
+    }
+
+    ma_CoTaskMemFree(pContext, iidStr);
+
+    /* Wait for the async operation for finish. */
+    ma_completion_handler_uwp_wait(&completionHandler);
+    ma_completion_handler_uwp_uninit(&completionHandler);
+
+    /* The async operation itself can succeed while the activation it performed failed, so both HRESULTs need checking. */
+    hr = ma_IActivateAudioInterfaceAsyncOperation_GetActivateResult(pAsyncOp, &activateResult, &pActivatedInterface);
+    ma_IActivateAudioInterfaceAsyncOperation_Release(pAsyncOp);
+
+    if (FAILED(hr) || FAILED(activateResult)) {
+        ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to activate device.");
+        return FAILED(hr) ? ma_result_from_HRESULT(hr) : ma_result_from_HRESULT(activateResult);
+    }
+
+    /* Here is where we grab the IAudioClient interface. */
+    hr = ma_IUnknown_QueryInterface(pActivatedInterface, &MA_IID_IAudioClient, (void**)ppAudioClient);
+    if (FAILED(hr)) {
+        ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to query IAudioClient interface.");
+        return ma_result_from_HRESULT(hr);
+    }
+
+    if (ppActivatedInterface) {
+        *ppActivatedInterface = pActivatedInterface;
+    } else {
+        ma_IUnknown_Release(pActivatedInterface);
+    }
+
+    return MA_SUCCESS;
+}
+#endif
+
+/*
+Retrieves an IAudioClient for the specified device. This is a compile-time dispatcher: desktop and
+GDK builds go through the MMDevice-based path, everything else (UWP) goes through the
+ActivateAudioInterfaceAsync-based path. ppDeviceInterface receives the platform device object
+(an IMMDevice on desktop; the activated interface on UWP) which the caller must release.
+*/
+static ma_result ma_context_get_IAudioClient__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_IAudioClient** ppAudioClient, ma_WASAPIDeviceInterface** ppDeviceInterface)
+{
+#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK)
+ return ma_context_get_IAudioClient_Desktop__wasapi(pContext, deviceType, pDeviceID, ppAudioClient, ppDeviceInterface);
+#else
+ return ma_context_get_IAudioClient_UWP__wasapi(pContext, deviceType, pDeviceID, ppAudioClient, ppDeviceInterface);
+#endif
+}
+
+
+/*
+Enumerates playback and capture devices, invoking `callback` once per device. On desktop this uses
+the IMMDeviceEnumerator API; on UWP only the default playback/capture devices are reported because
+MMDevice is unavailable there. Per-device enumeration errors on desktop are not propagated; the
+function returns MA_SUCCESS once the enumerator itself was created.
+*/
+static ma_result ma_context_enumerate_devices__wasapi(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+{
+ /* Different enumeration for desktop and UWP. */
+#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK)
+ /* Desktop */
+ HRESULT hr;
+ ma_IMMDeviceEnumerator* pDeviceEnumerator;
+
+ hr = ma_CoCreateInstance(pContext, MA_CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, MA_IID_IMMDeviceEnumerator, (void**)&pDeviceEnumerator);
+ if (FAILED(hr)) {
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create device enumerator.");
+ return ma_result_from_HRESULT(hr);
+ }
+
+ /* Return values are intentionally ignored here; a failure for one device type should not abort the other. */
+ ma_context_enumerate_devices_by_type__wasapi(pContext, pDeviceEnumerator, ma_device_type_playback, callback, pUserData);
+ ma_context_enumerate_devices_by_type__wasapi(pContext, pDeviceEnumerator, ma_device_type_capture, callback, pUserData);
+
+ ma_IMMDeviceEnumerator_Release(pDeviceEnumerator);
+#else
+ /*
+ UWP
+
+ The MMDevice API is only supported on desktop applications. For now, while I'm still figuring out how to properly enumerate
+ over devices without using MMDevice, I'm restricting devices to defaults.
+
+ Hint: DeviceInformation::FindAllAsync() with DeviceClass.AudioCapture/AudioRender. https://blogs.windows.com/buildingapps/2014/05/15/real-time-audio-in-windows-store-and-windows-phone-apps/
+ */
+ if (callback) {
+ ma_bool32 cbResult = MA_TRUE;
+
+ /* Playback. */
+ if (cbResult) {
+ ma_device_info deviceInfo;
+ MA_ZERO_OBJECT(&deviceInfo);
+ ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
+ deviceInfo.isDefault = MA_TRUE;
+ cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData);
+ }
+
+ /* Capture. Skipped if the callback returned false for playback, which means "stop enumerating". */
+ if (cbResult) {
+ ma_device_info deviceInfo;
+ MA_ZERO_OBJECT(&deviceInfo);
+ ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
+ deviceInfo.isDefault = MA_TRUE;
+ cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData);
+ }
+ }
+#endif
+
+ return MA_SUCCESS;
+}
+
+/*
+Retrieves detailed information (name, native data formats, isDefault flag) for a single device.
+On desktop the info is pulled from the device's IMMDevice object; on UWP only default devices are
+supported, so the name is hard-coded and the format info is queried from a temporary IAudioClient.
+*/
+static ma_result ma_context_get_device_info__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo)
+{
+#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK)
+ ma_result result;
+ ma_IMMDevice* pMMDevice = NULL;
+ LPWSTR pDefaultDeviceID = NULL;
+
+ result = ma_context_get_MMDevice__wasapi(pContext, deviceType, pDeviceID, &pMMDevice);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ /* We need the default device ID so we can set the isDefault flag in the device info. */
+ pDefaultDeviceID = ma_context_get_default_device_id__wasapi(pContext, deviceType);
+
+ result = ma_context_get_device_info_from_MMDevice__wasapi(pContext, pMMDevice, pDefaultDeviceID, MA_FALSE, pDeviceInfo); /* MA_FALSE = !onlySimpleInfo. */
+
+ /* The default device ID string is CoTaskMem-allocated and must be freed regardless of the result above. */
+ if (pDefaultDeviceID != NULL) {
+ ma_CoTaskMemFree(pContext, pDefaultDeviceID);
+ pDefaultDeviceID = NULL;
+ }
+
+ ma_IMMDevice_Release(pMMDevice);
+
+ return result;
+#else
+ ma_IAudioClient* pAudioClient;
+ ma_result result;
+
+ /* UWP currently only uses default devices. */
+ if (deviceType == ma_device_type_playback) {
+ ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
+ } else {
+ ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
+ }
+
+ /* A temporary audio client is activated purely so the mix format can be queried below. */
+ result = ma_context_get_IAudioClient_UWP__wasapi(pContext, deviceType, pDeviceID, &pAudioClient, NULL);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ result = ma_context_get_device_info_from_IAudioClient__wasapi(pContext, NULL, pAudioClient, pDeviceInfo);
+
+ pDeviceInfo->isDefault = MA_TRUE; /* UWP only supports default devices. */
+
+ ma_IAudioClient_Release(pAudioClient);
+ return result;
+#endif
+}
+
+static ma_result ma_device_uninit__wasapi(ma_device* pDevice)
+{
+ MA_ASSERT(pDevice != NULL);
+
+#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK)
+ if (pDevice->wasapi.pDeviceEnumerator) {
+ ((ma_IMMDeviceEnumerator*)pDevice->wasapi.pDeviceEnumerator)->lpVtbl->UnregisterEndpointNotificationCallback((ma_IMMDeviceEnumerator*)pDevice->wasapi.pDeviceEnumerator, &pDevice->wasapi.notificationClient);
+ ma_IMMDeviceEnumerator_Release((ma_IMMDeviceEnumerator*)pDevice->wasapi.pDeviceEnumerator);
+ }
+#endif
+
+ if (pDevice->wasapi.pRenderClient) {
+ if (pDevice->wasapi.pMappedBufferPlayback != NULL) {
+ ma_IAudioRenderClient_ReleaseBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, pDevice->wasapi.mappedBufferPlaybackCap, 0);
+ pDevice->wasapi.pMappedBufferPlayback = NULL;
+ pDevice->wasapi.mappedBufferPlaybackCap = 0;
+ pDevice->wasapi.mappedBufferPlaybackLen = 0;
+ }
+
+ ma_IAudioRenderClient_Release((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient);
+ }
+ if (pDevice->wasapi.pCaptureClient) {
+ if (pDevice->wasapi.pMappedBufferCapture != NULL) {
+ ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, pDevice->wasapi.mappedBufferCaptureCap);
+ pDevice->wasapi.pMappedBufferCapture = NULL;
+ pDevice->wasapi.mappedBufferCaptureCap = 0;
+ pDevice->wasapi.mappedBufferCaptureLen = 0;
+ }
+
+ ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient);
+ }
+
+ if (pDevice->wasapi.pAudioClientPlayback) {
+ ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback);
+ }
+ if (pDevice->wasapi.pAudioClientCapture) {
+ ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture);
+ }
+
+ if (pDevice->wasapi.hEventPlayback) {
+ CloseHandle(pDevice->wasapi.hEventPlayback);
+ }
+ if (pDevice->wasapi.hEventCapture) {
+ CloseHandle(pDevice->wasapi.hEventCapture);
+ }
+
+ return MA_SUCCESS;
+}
+
+
+/*
+Parameter/result bundle for ma_device_init_internal__wasapi(). The "Input" members describe what
+the caller requested (0 generally means "use a default"); the "Output" members are filled in with
+what the device was actually initialized with, along with the newly created COM objects, which the
+caller takes ownership of.
+*/
+typedef struct
+{
+ /* Input. */
+ ma_format formatIn;
+ ma_uint32 channelsIn;
+ ma_uint32 sampleRateIn; /* 0 = use the device's native rate. */
+ ma_channel channelMapIn[MA_MAX_CHANNELS];
+ ma_uint32 periodSizeInFramesIn; /* Takes precedence over periodSizeInMillisecondsIn when non-zero. */
+ ma_uint32 periodSizeInMillisecondsIn;
+ ma_uint32 periodsIn;
+ ma_share_mode shareMode;
+ ma_performance_profile performanceProfile;
+ ma_bool32 noAutoConvertSRC;
+ ma_bool32 noDefaultQualitySRC;
+ ma_bool32 noHardwareOffloading;
+
+ /* Output. */
+ ma_IAudioClient* pAudioClient; /* Owned by the caller on success. */
+ ma_IAudioRenderClient* pRenderClient; /* Set for playback devices only. */
+ ma_IAudioCaptureClient* pCaptureClient; /* Set for capture/loopback devices only. */
+ ma_format formatOut;
+ ma_uint32 channelsOut;
+ ma_uint32 sampleRateOut;
+ ma_channel channelMapOut[MA_MAX_CHANNELS];
+ ma_uint32 periodSizeInFramesOut;
+ ma_uint32 periodsOut;
+ ma_bool32 usingAudioClient3; /* True if the low-latency IAudioClient3 path was used. */
+ char deviceName[256]; /* UTF-8. Only populated on desktop builds. */
+ ma_device_id id; /* Only populated on desktop builds; needed for stream routing. */
+} ma_device_init_internal_data__wasapi;
+
+/*
+Initializes a single (playback, capture or loopback - never duplex) WASAPI stream. The overall flow:
+
+ 1. Acquire an IAudioClient for the device.
+ 2. Optionally enable hardware offloading via IAudioClient2.
+ 3. Determine the stream format: the device's native format for exclusive mode (desktop only),
+ or the OS mix format for shared mode, with an optional sample-rate override when WASAPI's
+ built-in resampler (AUTOCONVERTPCM) is enabled.
+ 4. Initialize the client: exclusive mode with a retry loop for period/alignment errors, or
+ shared mode, preferring the low-latency IAudioClient3 path when possible.
+ 5. Acquire the render/capture service, then grab the device name and ID (desktop only).
+
+On failure everything that was created is released before returning; on success ownership of the
+COM objects in pData transfers to the caller.
+*/
+static ma_result ma_device_init_internal__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_init_internal_data__wasapi* pData)
+{
+ HRESULT hr;
+ ma_result result = MA_SUCCESS;
+ const char* errorMsg = "";
+ MA_AUDCLNT_SHAREMODE shareMode = MA_AUDCLNT_SHAREMODE_SHARED;
+ DWORD streamFlags = 0;
+ MA_REFERENCE_TIME periodDurationInMicroseconds;
+ ma_bool32 wasInitializedUsingIAudioClient3 = MA_FALSE;
+ WAVEFORMATEXTENSIBLE wf;
+ ma_WASAPIDeviceInterface* pDeviceInterface = NULL;
+ ma_IAudioClient2* pAudioClient2;
+ ma_uint32 nativeSampleRate;
+
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pData != NULL);
+
+ /* This function is only used to initialize one device type: either playback, capture or loopback. Never full-duplex. */
+ if (deviceType == ma_device_type_duplex) {
+ return MA_INVALID_ARGS;
+ }
+
+ pData->pAudioClient = NULL;
+ pData->pRenderClient = NULL;
+ pData->pCaptureClient = NULL;
+
+ /* Event-driven mode is always used. WASAPI's own resampler is only usable in shared mode. */
+ streamFlags = MA_AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
+ if (!pData->noAutoConvertSRC && pData->sampleRateIn != 0 && pData->shareMode != ma_share_mode_exclusive) { /* <-- Exclusive streams must use the native sample rate. */
+ streamFlags |= MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM;
+ }
+ if (!pData->noDefaultQualitySRC && pData->sampleRateIn != 0 && (streamFlags & MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM) != 0) {
+ streamFlags |= MA_AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY;
+ }
+ if (deviceType == ma_device_type_loopback) {
+ streamFlags |= MA_AUDCLNT_STREAMFLAGS_LOOPBACK;
+ }
+
+ result = ma_context_get_IAudioClient__wasapi(pContext, deviceType, pDeviceID, &pData->pAudioClient, &pDeviceInterface);
+ if (result != MA_SUCCESS) {
+ goto done;
+ }
+
+ MA_ZERO_OBJECT(&wf);
+
+ /* Try enabling hardware offloading. This is best-effort: failures here are ignored. */
+ if (!pData->noHardwareOffloading) {
+ hr = ma_IAudioClient_QueryInterface(pData->pAudioClient, &MA_IID_IAudioClient2, (void**)&pAudioClient2);
+ if (SUCCEEDED(hr)) {
+ BOOL isHardwareOffloadingSupported = 0;
+ hr = ma_IAudioClient2_IsOffloadCapable(pAudioClient2, MA_AudioCategory_Other, &isHardwareOffloadingSupported);
+ if (SUCCEEDED(hr) && isHardwareOffloadingSupported) {
+ ma_AudioClientProperties clientProperties;
+ MA_ZERO_OBJECT(&clientProperties);
+ clientProperties.cbSize = sizeof(clientProperties);
+ clientProperties.bIsOffload = 1;
+ clientProperties.eCategory = MA_AudioCategory_Other;
+ ma_IAudioClient2_SetClientProperties(pAudioClient2, &clientProperties);
+ }
+
+ pAudioClient2->lpVtbl->Release(pAudioClient2);
+ }
+ }
+
+ /* Here is where we try to determine the best format to use with the device. If the client if wanting exclusive mode, first try finding the best format for that. If this fails, fall back to shared mode. */
+ result = MA_FORMAT_NOT_SUPPORTED;
+ if (pData->shareMode == ma_share_mode_exclusive) {
+ #if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK)
+ /* In exclusive mode on desktop we always use the backend's native format. */
+ ma_IPropertyStore* pStore = NULL;
+ hr = ma_IMMDevice_OpenPropertyStore(pDeviceInterface, STGM_READ, &pStore);
+ if (SUCCEEDED(hr)) {
+ PROPVARIANT prop;
+ ma_PropVariantInit(&prop);
+ hr = ma_IPropertyStore_GetValue(pStore, &MA_PKEY_AudioEngine_DeviceFormat, &prop);
+ if (SUCCEEDED(hr)) {
+ /* NOTE(review): assumes the device format blob is at least sizeof(WAVEFORMATEXTENSIBLE) - confirm. */
+ WAVEFORMATEX* pActualFormat = (WAVEFORMATEX*)prop.blob.pBlobData;
+ hr = ma_IAudioClient_IsFormatSupported((ma_IAudioClient*)pData->pAudioClient, MA_AUDCLNT_SHAREMODE_EXCLUSIVE, pActualFormat, NULL);
+ if (SUCCEEDED(hr)) {
+ MA_COPY_MEMORY(&wf, pActualFormat, sizeof(WAVEFORMATEXTENSIBLE));
+ }
+
+ ma_PropVariantClear(pContext, &prop);
+ }
+
+ ma_IPropertyStore_Release(pStore);
+ }
+ #else
+ /*
+ I do not know how to query the device's native format on UWP so for now I'm just disabling support for
+ exclusive mode. The alternative is to enumerate over different formats and check IsFormatSupported()
+ until you find one that works.
+
+ TODO: Add support for exclusive mode to UWP.
+ */
+ hr = S_FALSE;
+ #endif
+
+ /* S_OK specifically: IsFormatSupported() can return S_FALSE ("close match") which is not good enough for exclusive mode. */
+ if (hr == S_OK) {
+ shareMode = MA_AUDCLNT_SHAREMODE_EXCLUSIVE;
+ result = MA_SUCCESS;
+ } else {
+ result = MA_SHARE_MODE_NOT_SUPPORTED;
+ }
+ } else {
+ /* In shared mode we are always using the format reported by the operating system. */
+ WAVEFORMATEXTENSIBLE* pNativeFormat = NULL;
+ hr = ma_IAudioClient_GetMixFormat((ma_IAudioClient*)pData->pAudioClient, (WAVEFORMATEX**)&pNativeFormat);
+ if (hr != S_OK) {
+ result = MA_FORMAT_NOT_SUPPORTED;
+ } else {
+ /* NOTE(review): copies sizeof(wf) bytes, which assumes GetMixFormat() returned a full WAVEFORMATEXTENSIBLE - confirm. */
+ MA_COPY_MEMORY(&wf, pNativeFormat, sizeof(wf));
+ result = MA_SUCCESS;
+ }
+
+ ma_CoTaskMemFree(pContext, pNativeFormat);
+
+ shareMode = MA_AUDCLNT_SHAREMODE_SHARED;
+ }
+
+ /* Return an error if we still haven't found a format. */
+ if (result != MA_SUCCESS) {
+ errorMsg = "[WASAPI] Failed to find best device mix format.";
+ goto done;
+ }
+
+ /*
+ Override the native sample rate with the one requested by the caller, but only if we're not using the default sample rate. We'll use
+ WASAPI to perform the sample rate conversion.
+ */
+ nativeSampleRate = wf.Format.nSamplesPerSec;
+ if (streamFlags & MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM) {
+ wf.Format.nSamplesPerSec = (pData->sampleRateIn != 0) ? pData->sampleRateIn : MA_DEFAULT_SAMPLE_RATE;
+ wf.Format.nAvgBytesPerSec = wf.Format.nSamplesPerSec * wf.Format.nBlockAlign;
+ }
+
+ pData->formatOut = ma_format_from_WAVEFORMATEX((WAVEFORMATEX*)&wf);
+ if (pData->formatOut == ma_format_unknown) {
+ /*
+ The format isn't supported. This is almost certainly because the exclusive mode format isn't supported by miniaudio. We need to return MA_SHARE_MODE_NOT_SUPPORTED
+ in this case so that the caller can detect it and fall back to shared mode if desired. We should never get here if shared mode was requested, but just for
+ completeness we'll check for it and return MA_FORMAT_NOT_SUPPORTED.
+ */
+ if (shareMode == MA_AUDCLNT_SHAREMODE_EXCLUSIVE) {
+ result = MA_SHARE_MODE_NOT_SUPPORTED;
+ } else {
+ result = MA_FORMAT_NOT_SUPPORTED;
+ }
+
+ errorMsg = "[WASAPI] Native format not supported.";
+ goto done;
+ }
+
+ pData->channelsOut = wf.Format.nChannels;
+ pData->sampleRateOut = wf.Format.nSamplesPerSec;
+
+ /* Get the internal channel map based on the channel mask. */
+ ma_channel_mask_to_channel_map__win32(wf.dwChannelMask, pData->channelsOut, pData->channelMapOut);
+
+ /* Period size. Explicit frame count wins over milliseconds; both zero falls back to profile-based defaults. */
+ pData->periodsOut = (pData->periodsIn != 0) ? pData->periodsIn : MA_DEFAULT_PERIODS;
+ pData->periodSizeInFramesOut = pData->periodSizeInFramesIn;
+ if (pData->periodSizeInFramesOut == 0) {
+ if (pData->periodSizeInMillisecondsIn == 0) {
+ if (pData->performanceProfile == ma_performance_profile_low_latency) {
+ pData->periodSizeInFramesOut = ma_calculate_buffer_size_in_frames_from_milliseconds(MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_LOW_LATENCY, wf.Format.nSamplesPerSec);
+ } else {
+ pData->periodSizeInFramesOut = ma_calculate_buffer_size_in_frames_from_milliseconds(MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_CONSERVATIVE, wf.Format.nSamplesPerSec);
+ }
+ } else {
+ pData->periodSizeInFramesOut = ma_calculate_buffer_size_in_frames_from_milliseconds(pData->periodSizeInMillisecondsIn, wf.Format.nSamplesPerSec);
+ }
+ }
+
+ periodDurationInMicroseconds = ((ma_uint64)pData->periodSizeInFramesOut * 1000 * 1000) / wf.Format.nSamplesPerSec;
+
+
+ /* Slightly different initialization for shared and exclusive modes. We try exclusive mode first, and if it fails, fall back to shared mode. */
+ if (shareMode == MA_AUDCLNT_SHAREMODE_EXCLUSIVE) {
+ MA_REFERENCE_TIME bufferDuration = periodDurationInMicroseconds * pData->periodsOut * 10; /* Microseconds to 100-nanosecond units. */
+
+ /*
+ If the periodicy is too small, Initialize() will fail with AUDCLNT_E_INVALID_DEVICE_PERIOD. In this case we should just keep increasing
+ it and trying it again.
+ */
+ hr = E_FAIL;
+ for (;;) {
+ hr = ma_IAudioClient_Initialize((ma_IAudioClient*)pData->pAudioClient, shareMode, streamFlags, bufferDuration, bufferDuration, (WAVEFORMATEX*)&wf, NULL);
+ if (hr == MA_AUDCLNT_E_INVALID_DEVICE_PERIOD) {
+ if (bufferDuration > 500*10000) { /* Give up past 500ms. */
+ break;
+ } else {
+ if (bufferDuration == 0) { /* <-- Just a sanity check to prevent an infinit loop. Should never happen, but it makes me feel better. */
+ break;
+ }
+
+ bufferDuration = bufferDuration * 2;
+ continue;
+ }
+ } else {
+ break;
+ }
+ }
+
+ /* Alignment errors require releasing and re-activating the client with an aligned buffer duration, per MSDN. */
+ if (hr == MA_AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED) {
+ ma_uint32 bufferSizeInFrames;
+ hr = ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pData->pAudioClient, &bufferSizeInFrames);
+ if (SUCCEEDED(hr)) {
+ bufferDuration = (MA_REFERENCE_TIME)((10000.0 * 1000 / wf.Format.nSamplesPerSec * bufferSizeInFrames) + 0.5);
+
+ /* Unfortunately we need to release and re-acquire the audio client according to MSDN. Seems silly - why not just call IAudioClient_Initialize() again?! */
+ ma_IAudioClient_Release((ma_IAudioClient*)pData->pAudioClient);
+
+ #if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK)
+ hr = ma_IMMDevice_Activate(pDeviceInterface, &MA_IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&pData->pAudioClient);
+ #else
+ hr = ma_IUnknown_QueryInterface(pDeviceInterface, &MA_IID_IAudioClient, (void**)&pData->pAudioClient);
+ #endif
+
+ if (SUCCEEDED(hr)) {
+ hr = ma_IAudioClient_Initialize((ma_IAudioClient*)pData->pAudioClient, shareMode, streamFlags, bufferDuration, bufferDuration, (WAVEFORMATEX*)&wf, NULL);
+ }
+ }
+ }
+
+ if (FAILED(hr)) {
+ /* Failed to initialize in exclusive mode. Don't fall back to shared mode - instead tell the client about it. They can reinitialize in shared mode if they want. */
+ if (hr == E_ACCESSDENIED) {
+ errorMsg = "[WASAPI] Failed to initialize device in exclusive mode. Access denied.", result = MA_ACCESS_DENIED;
+ } else if (hr == MA_AUDCLNT_E_DEVICE_IN_USE) {
+ errorMsg = "[WASAPI] Failed to initialize device in exclusive mode. Device in use.", result = MA_BUSY;
+ } else {
+ errorMsg = "[WASAPI] Failed to initialize device in exclusive mode."; result = ma_result_from_HRESULT(hr);
+ }
+ goto done;
+ }
+ }
+
+ if (shareMode == MA_AUDCLNT_SHAREMODE_SHARED) {
+ /*
+ Low latency shared mode via IAudioClient3.
+
+ NOTE
+ ====
+ Contrary to the documentation on MSDN (https://docs.microsoft.com/en-us/windows/win32/api/audioclient/nf-audioclient-iaudioclient3-initializesharedaudiostream), the
+ use of AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM and AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY with IAudioClient3_InitializeSharedAudioStream() absolutely does not work. Using
+ any of these flags will result in HRESULT code 0x88890021. The other problem is that calling IAudioClient3_GetSharedModeEnginePeriod() with a sample rate different to
+ that returned by IAudioClient_GetMixFormat() also results in an error. I'm therefore disabling low-latency shared mode with AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM.
+ */
+ #ifndef MA_WASAPI_NO_LOW_LATENCY_SHARED_MODE
+ {
+ /* Only attempt IAudioClient3 when no resampling is needed (see NOTE above). */
+ if ((streamFlags & MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM) == 0 || nativeSampleRate == wf.Format.nSamplesPerSec) {
+ ma_IAudioClient3* pAudioClient3 = NULL;
+ hr = ma_IAudioClient_QueryInterface(pData->pAudioClient, &MA_IID_IAudioClient3, (void**)&pAudioClient3);
+ if (SUCCEEDED(hr)) {
+ ma_uint32 defaultPeriodInFrames;
+ ma_uint32 fundamentalPeriodInFrames;
+ ma_uint32 minPeriodInFrames;
+ ma_uint32 maxPeriodInFrames;
+ hr = ma_IAudioClient3_GetSharedModeEnginePeriod(pAudioClient3, (WAVEFORMATEX*)&wf, &defaultPeriodInFrames, &fundamentalPeriodInFrames, &minPeriodInFrames, &maxPeriodInFrames);
+ if (SUCCEEDED(hr)) {
+ ma_uint32 desiredPeriodInFrames = pData->periodSizeInFramesOut;
+ ma_uint32 actualPeriodInFrames = desiredPeriodInFrames;
+
+ /* Make sure the period size is a multiple of fundamentalPeriodInFrames. */
+ actualPeriodInFrames = actualPeriodInFrames / fundamentalPeriodInFrames;
+ actualPeriodInFrames = actualPeriodInFrames * fundamentalPeriodInFrames;
+
+ /* The period needs to be clamped between minPeriodInFrames and maxPeriodInFrames. */
+ actualPeriodInFrames = ma_clamp(actualPeriodInFrames, minPeriodInFrames, maxPeriodInFrames);
+
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "[WASAPI] Trying IAudioClient3_InitializeSharedAudioStream(actualPeriodInFrames=%d)\n", actualPeriodInFrames);
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, " defaultPeriodInFrames=%d\n", defaultPeriodInFrames);
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, " fundamentalPeriodInFrames=%d\n", fundamentalPeriodInFrames);
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, " minPeriodInFrames=%d\n", minPeriodInFrames);
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, " maxPeriodInFrames=%d\n", maxPeriodInFrames);
+
+ /* If the client requested a largish buffer than we don't actually want to use low latency shared mode because it forces small buffers. */
+ if (actualPeriodInFrames >= desiredPeriodInFrames) {
+ /*
+ MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM | MA_AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY must not be in the stream flags. If either of these are specified,
+ IAudioClient3_InitializeSharedAudioStream() will fail.
+ */
+ hr = ma_IAudioClient3_InitializeSharedAudioStream(pAudioClient3, streamFlags & ~(MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM | MA_AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY), actualPeriodInFrames, (WAVEFORMATEX*)&wf, NULL);
+ if (SUCCEEDED(hr)) {
+ wasInitializedUsingIAudioClient3 = MA_TRUE;
+ pData->periodSizeInFramesOut = actualPeriodInFrames;
+
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "[WASAPI] Using IAudioClient3\n");
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, " periodSizeInFramesOut=%d\n", pData->periodSizeInFramesOut);
+ } else {
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "[WASAPI] IAudioClient3_InitializeSharedAudioStream failed. Falling back to IAudioClient.\n");
+ }
+ } else {
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "[WASAPI] Not using IAudioClient3 because the desired period size is larger than the maximum supported by IAudioClient3.\n");
+ }
+ } else {
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "[WASAPI] IAudioClient3_GetSharedModeEnginePeriod failed. Falling back to IAudioClient.\n");
+ }
+
+ ma_IAudioClient3_Release(pAudioClient3);
+ pAudioClient3 = NULL;
+ }
+ }
+ }
+ #else
+ {
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "[WASAPI] Not using IAudioClient3 because MA_WASAPI_NO_LOW_LATENCY_SHARED_MODE is enabled.\n");
+ }
+ #endif
+
+ /* If we don't have an IAudioClient3 then we need to use the normal initialization routine. */
+ if (!wasInitializedUsingIAudioClient3) {
+ MA_REFERENCE_TIME bufferDuration = periodDurationInMicroseconds * pData->periodsOut * 10; /* <-- Multiply by 10 for microseconds to 100-nanoseconds. */
+ hr = ma_IAudioClient_Initialize((ma_IAudioClient*)pData->pAudioClient, shareMode, streamFlags, bufferDuration, 0, (WAVEFORMATEX*)&wf, NULL);
+ if (FAILED(hr)) {
+ if (hr == E_ACCESSDENIED) {
+ errorMsg = "[WASAPI] Failed to initialize device. Access denied.", result = MA_ACCESS_DENIED;
+ } else if (hr == MA_AUDCLNT_E_DEVICE_IN_USE) {
+ errorMsg = "[WASAPI] Failed to initialize device. Device in use.", result = MA_BUSY;
+ } else {
+ errorMsg = "[WASAPI] Failed to initialize device.", result = ma_result_from_HRESULT(hr);
+ }
+
+ goto done;
+ }
+ }
+ }
+
+ /* With the classic path the OS may have rounded the buffer size, so query the real period size back. */
+ if (!wasInitializedUsingIAudioClient3) {
+ ma_uint32 bufferSizeInFrames;
+ hr = ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pData->pAudioClient, &bufferSizeInFrames);
+ if (FAILED(hr)) {
+ errorMsg = "[WASAPI] Failed to get audio client's actual buffer size.", result = ma_result_from_HRESULT(hr);
+ goto done;
+ }
+
+ pData->periodSizeInFramesOut = bufferSizeInFrames / pData->periodsOut;
+ }
+
+ pData->usingAudioClient3 = wasInitializedUsingIAudioClient3;
+
+
+ /* Acquire the render (playback) or capture (capture/loopback) service interface. */
+ if (deviceType == ma_device_type_playback) {
+ result = ma_device_create_IAudioClient_service__wasapi(pContext, deviceType, (ma_IAudioClient*)pData->pAudioClient, (void**)&pData->pRenderClient);
+ } else {
+ result = ma_device_create_IAudioClient_service__wasapi(pContext, deviceType, (ma_IAudioClient*)pData->pAudioClient, (void**)&pData->pCaptureClient);
+ }
+
+ /*if (FAILED(hr)) {*/
+ if (result != MA_SUCCESS) {
+ errorMsg = "[WASAPI] Failed to get audio client service.";
+ goto done;
+ }
+
+
+ /* Grab the name of the device. Best-effort; failures leave pData->deviceName untouched. */
+ #if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK)
+ {
+ ma_IPropertyStore *pProperties;
+ hr = ma_IMMDevice_OpenPropertyStore(pDeviceInterface, STGM_READ, &pProperties);
+ if (SUCCEEDED(hr)) {
+ PROPVARIANT varName;
+ ma_PropVariantInit(&varName);
+ hr = ma_IPropertyStore_GetValue(pProperties, &MA_PKEY_Device_FriendlyName, &varName);
+ if (SUCCEEDED(hr)) {
+ WideCharToMultiByte(CP_UTF8, 0, varName.pwszVal, -1, pData->deviceName, sizeof(pData->deviceName), 0, FALSE);
+ ma_PropVariantClear(pContext, &varName);
+ }
+
+ ma_IPropertyStore_Release(pProperties);
+ }
+ }
+ #endif
+
+ /*
+ For the WASAPI backend we need to know the actual IDs of the device in order to do automatic
+ stream routing so that IDs can be compared and we can determine which device has been detached
+ and whether or not it matches with our ma_device.
+ */
+ #if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK)
+ {
+ /* Desktop */
+ ma_context_get_device_id_from_MMDevice__wasapi(pContext, pDeviceInterface, &pData->id);
+ }
+ #else
+ {
+ /* UWP */
+ /* TODO: Implement me. Need to figure out how to get the ID of the default device. */
+ }
+ #endif
+
+done:
+ /* Clean up. The device interface is released unconditionally; the COM objects in pData are only released on failure. */
+#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK)
+ if (pDeviceInterface != NULL) {
+ ma_IMMDevice_Release(pDeviceInterface);
+ }
+#else
+ if (pDeviceInterface != NULL) {
+ ma_IUnknown_Release(pDeviceInterface);
+ }
+#endif
+
+ if (result != MA_SUCCESS) {
+ if (pData->pRenderClient) {
+ ma_IAudioRenderClient_Release((ma_IAudioRenderClient*)pData->pRenderClient);
+ pData->pRenderClient = NULL;
+ }
+ if (pData->pCaptureClient) {
+ ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pData->pCaptureClient);
+ pData->pCaptureClient = NULL;
+ }
+ if (pData->pAudioClient) {
+ ma_IAudioClient_Release((ma_IAudioClient*)pData->pAudioClient);
+ pData->pAudioClient = NULL;
+ }
+
+ if (errorMsg != NULL && errorMsg[0] != '\0') {
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "%s", errorMsg);
+ }
+
+ return result;
+ } else {
+ return MA_SUCCESS;
+ }
+}
+
+/*
+Re-initializes one side (playback, or capture/loopback) of a device after a routing change. The old
+render/capture service interfaces are released, the old IAudioClient is deliberately abandoned (see
+the leak comment below), a fresh set of objects is created with the device's original configuration,
+and the device's internal format/period/ID state is updated to match the new objects.
+*/
+static ma_result ma_device_reinit__wasapi(ma_device* pDevice, ma_device_type deviceType)
+{
+ ma_device_init_internal_data__wasapi data;
+ ma_result result;
+
+ MA_ASSERT(pDevice != NULL);
+
+ /* We only re-initialize the playback or capture device. Never a full-duplex device. */
+ if (deviceType == ma_device_type_duplex) {
+ return MA_INVALID_ARGS;
+ }
+
+
+ /*
+ Before reinitializing the device we need to free the previous audio clients.
+
+ There's a known memory leak here. We will be calling this from the routing change callback that
+ is fired by WASAPI. If we attempt to release the IAudioClient we will deadlock. In my opinion
+ this is a bug. I'm not sure what I need to do to handle this cleanly, but I think we'll probably
+ need some system where we post an event, but delay the execution of it until the callback has
+ returned. I'm not sure how to do this reliably, however. I have set up some infrastructure for
+ a command thread which might be useful for this.
+ */
+ if (deviceType == ma_device_type_capture || deviceType == ma_device_type_loopback) {
+ if (pDevice->wasapi.pCaptureClient) {
+ ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient);
+ pDevice->wasapi.pCaptureClient = NULL;
+ }
+
+ if (pDevice->wasapi.pAudioClientCapture) {
+ /*ma_device_release_IAudioClient_service__wasapi(pDevice, ma_device_type_capture);*/
+ pDevice->wasapi.pAudioClientCapture = NULL; /* Intentionally leaked - see comment above. */
+ }
+ }
+
+ if (deviceType == ma_device_type_playback) {
+ if (pDevice->wasapi.pRenderClient) {
+ ma_IAudioRenderClient_Release((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient);
+ pDevice->wasapi.pRenderClient = NULL;
+ }
+
+ if (pDevice->wasapi.pAudioClientPlayback) {
+ /*ma_device_release_IAudioClient_service__wasapi(pDevice, ma_device_type_playback);*/
+ pDevice->wasapi.pAudioClientPlayback = NULL; /* Intentionally leaked - see comment above. */
+ }
+ }
+
+
+ /* Rebuild the init parameters from the device's original (pre-routing-change) configuration. */
+ if (deviceType == ma_device_type_playback) {
+ data.formatIn = pDevice->playback.format;
+ data.channelsIn = pDevice->playback.channels;
+ MA_COPY_MEMORY(data.channelMapIn, pDevice->playback.channelMap, sizeof(pDevice->playback.channelMap));
+ data.shareMode = pDevice->playback.shareMode;
+ } else {
+ data.formatIn = pDevice->capture.format;
+ data.channelsIn = pDevice->capture.channels;
+ MA_COPY_MEMORY(data.channelMapIn, pDevice->capture.channelMap, sizeof(pDevice->capture.channelMap));
+ data.shareMode = pDevice->capture.shareMode;
+ }
+
+ data.sampleRateIn = pDevice->sampleRate;
+ data.periodSizeInFramesIn = pDevice->wasapi.originalPeriodSizeInFrames;
+ data.periodSizeInMillisecondsIn = pDevice->wasapi.originalPeriodSizeInMilliseconds;
+ data.periodsIn = pDevice->wasapi.originalPeriods;
+ data.performanceProfile = pDevice->wasapi.originalPerformanceProfile;
+ data.noAutoConvertSRC = pDevice->wasapi.noAutoConvertSRC;
+ data.noDefaultQualitySRC = pDevice->wasapi.noDefaultQualitySRC;
+ data.noHardwareOffloading = pDevice->wasapi.noHardwareOffloading;
+ /* NULL device ID = reinitialize against the new default device. */
+ result = ma_device_init_internal__wasapi(pDevice->pContext, deviceType, NULL, &data);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ /* At this point we have some new objects ready to go. We need to uninitialize the previous ones and then set the new ones. */
+ if (deviceType == ma_device_type_capture || deviceType == ma_device_type_loopback) {
+ pDevice->wasapi.pAudioClientCapture = data.pAudioClient;
+ pDevice->wasapi.pCaptureClient = data.pCaptureClient;
+
+ pDevice->capture.internalFormat = data.formatOut;
+ pDevice->capture.internalChannels = data.channelsOut;
+ pDevice->capture.internalSampleRate = data.sampleRateOut;
+ MA_COPY_MEMORY(pDevice->capture.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut));
+ pDevice->capture.internalPeriodSizeInFrames = data.periodSizeInFramesOut;
+ pDevice->capture.internalPeriods = data.periodsOut;
+ ma_strcpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), data.deviceName);
+
+ /* Re-attach the existing event handle to the new audio client. */
+ ma_IAudioClient_SetEventHandle((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture, pDevice->wasapi.hEventCapture);
+
+ pDevice->wasapi.periodSizeInFramesCapture = data.periodSizeInFramesOut;
+ ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture, &pDevice->wasapi.actualBufferSizeInFramesCapture);
+
+ /* We must always have a valid ID. */
+ ma_wcscpy_s(pDevice->capture.id.wasapi, sizeof(pDevice->capture.id.wasapi), data.id.wasapi);
+ }
+
+ if (deviceType == ma_device_type_playback) {
+ pDevice->wasapi.pAudioClientPlayback = data.pAudioClient;
+ pDevice->wasapi.pRenderClient = data.pRenderClient;
+
+ pDevice->playback.internalFormat = data.formatOut;
+ pDevice->playback.internalChannels = data.channelsOut;
+ pDevice->playback.internalSampleRate = data.sampleRateOut;
+ MA_COPY_MEMORY(pDevice->playback.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut));
+ pDevice->playback.internalPeriodSizeInFrames = data.periodSizeInFramesOut;
+ pDevice->playback.internalPeriods = data.periodsOut;
+ ma_strcpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), data.deviceName);
+
+ /* Re-attach the existing event handle to the new audio client. */
+ ma_IAudioClient_SetEventHandle((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, pDevice->wasapi.hEventPlayback);
+
+ pDevice->wasapi.periodSizeInFramesPlayback = data.periodSizeInFramesOut;
+ ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, &pDevice->wasapi.actualBufferSizeInFramesPlayback);
+
+ /* We must always have a valid ID because rerouting will look at it. */
+ ma_wcscpy_s(pDevice->playback.id.wasapi, sizeof(pDevice->playback.id.wasapi), data.id.wasapi);
+ }
+
+ return MA_SUCCESS;
+}
+
+/*
+Initializes the WASAPI-specific state of a device.
+
+The capture side is initialized first (for capture, duplex and loopback device types), then the
+playback side (for playback and duplex). On desktop/GDK builds an IMMNotificationClient is then
+registered so default-device changes can trigger automatic stream routing. On failure, anything
+initialized up to that point is released before returning.
+*/
+static ma_result ma_device_init__wasapi(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture)
+{
+    ma_result result = MA_SUCCESS;
+
+#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK)
+    HRESULT hr;
+    ma_IMMDeviceEnumerator* pDeviceEnumerator;
+#endif
+
+    MA_ASSERT(pDevice != NULL);
+
+    MA_ZERO_OBJECT(&pDevice->wasapi);
+    pDevice->wasapi.noAutoConvertSRC = pConfig->wasapi.noAutoConvertSRC;
+    pDevice->wasapi.noDefaultQualitySRC = pConfig->wasapi.noDefaultQualitySRC;
+    pDevice->wasapi.noHardwareOffloading = pConfig->wasapi.noHardwareOffloading;
+
+    /* Exclusive mode is not allowed with loopback. */
+    if (pConfig->deviceType == ma_device_type_loopback && pConfig->playback.shareMode == ma_share_mode_exclusive) {
+        return MA_INVALID_DEVICE_CONFIG;
+    }
+
+    if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex || pConfig->deviceType == ma_device_type_loopback) {
+        ma_device_init_internal_data__wasapi data;
+        data.formatIn = pDescriptorCapture->format;
+        data.channelsIn = pDescriptorCapture->channels;
+        data.sampleRateIn = pDescriptorCapture->sampleRate;
+        MA_COPY_MEMORY(data.channelMapIn, pDescriptorCapture->channelMap, sizeof(pDescriptorCapture->channelMap));
+        data.periodSizeInFramesIn = pDescriptorCapture->periodSizeInFrames;
+        data.periodSizeInMillisecondsIn = pDescriptorCapture->periodSizeInMilliseconds;
+        data.periodsIn = pDescriptorCapture->periodCount;
+        data.shareMode = pDescriptorCapture->shareMode;
+        data.performanceProfile = pConfig->performanceProfile;
+        data.noAutoConvertSRC = pConfig->wasapi.noAutoConvertSRC;
+        data.noDefaultQualitySRC = pConfig->wasapi.noDefaultQualitySRC;
+        data.noHardwareOffloading = pConfig->wasapi.noHardwareOffloading;
+
+        /* Loopback capture still goes through the capture path, but with a loopback device type. */
+        result = ma_device_init_internal__wasapi(pDevice->pContext, (pConfig->deviceType == ma_device_type_loopback) ? ma_device_type_loopback : ma_device_type_capture, pDescriptorCapture->pDeviceID, &data);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+
+        pDevice->wasapi.pAudioClientCapture = data.pAudioClient;
+        pDevice->wasapi.pCaptureClient = data.pCaptureClient;
+        pDevice->wasapi.originalPeriodSizeInMilliseconds = pDescriptorCapture->periodSizeInMilliseconds;
+        pDevice->wasapi.originalPeriodSizeInFrames = pDescriptorCapture->periodSizeInFrames;
+        pDevice->wasapi.originalPeriods = pDescriptorCapture->periodCount;
+        pDevice->wasapi.originalPerformanceProfile = pConfig->performanceProfile;
+
+        /*
+        NOTE(review): this comment previously claimed the capture event needs to be manual-reset,
+        but the code creates it auto-reset (bManualReset = FALSE) and the read path relies on that.
+        The event starts unsignaled because we want to block until we actually have something for
+        the first call to ma_device_read().
+        */
+        pDevice->wasapi.hEventCapture = CreateEventW(NULL, FALSE, FALSE, NULL); /* Auto reset, unsignaled by default. */
+        if (pDevice->wasapi.hEventCapture == NULL) {
+            result = ma_result_from_GetLastError(GetLastError());
+
+            /* Roll back the capture-side objects created above. */
+            if (pDevice->wasapi.pCaptureClient != NULL) {
+                ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient);
+                pDevice->wasapi.pCaptureClient = NULL;
+            }
+            if (pDevice->wasapi.pAudioClientCapture != NULL) {
+                ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture);
+                pDevice->wasapi.pAudioClientCapture = NULL;
+            }
+
+            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create event for capture.");
+            return result;
+        }
+        /* NOTE(review): return values of SetEventHandle()/GetBufferSize() are not checked here — confirm failure is acceptable before relying on the cached buffer size. */
+        ma_IAudioClient_SetEventHandle((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture, pDevice->wasapi.hEventCapture);
+
+        pDevice->wasapi.periodSizeInFramesCapture = data.periodSizeInFramesOut;
+        ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture, &pDevice->wasapi.actualBufferSizeInFramesCapture);
+
+        /* We must always have a valid ID. */
+        ma_wcscpy_s(pDevice->capture.id.wasapi, sizeof(pDevice->capture.id.wasapi), data.id.wasapi);
+
+        /* The descriptor needs to be updated with actual values. */
+        pDescriptorCapture->format = data.formatOut;
+        pDescriptorCapture->channels = data.channelsOut;
+        pDescriptorCapture->sampleRate = data.sampleRateOut;
+        MA_COPY_MEMORY(pDescriptorCapture->channelMap, data.channelMapOut, sizeof(data.channelMapOut));
+        pDescriptorCapture->periodSizeInFrames = data.periodSizeInFramesOut;
+        pDescriptorCapture->periodCount = data.periodsOut;
+    }
+
+    if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+        ma_device_init_internal_data__wasapi data;
+        data.formatIn = pDescriptorPlayback->format;
+        data.channelsIn = pDescriptorPlayback->channels;
+        data.sampleRateIn = pDescriptorPlayback->sampleRate;
+        MA_COPY_MEMORY(data.channelMapIn, pDescriptorPlayback->channelMap, sizeof(pDescriptorPlayback->channelMap));
+        data.periodSizeInFramesIn = pDescriptorPlayback->periodSizeInFrames;
+        data.periodSizeInMillisecondsIn = pDescriptorPlayback->periodSizeInMilliseconds;
+        data.periodsIn = pDescriptorPlayback->periodCount;
+        data.shareMode = pDescriptorPlayback->shareMode;
+        data.performanceProfile = pConfig->performanceProfile;
+        data.noAutoConvertSRC = pConfig->wasapi.noAutoConvertSRC;
+        data.noDefaultQualitySRC = pConfig->wasapi.noDefaultQualitySRC;
+        data.noHardwareOffloading = pConfig->wasapi.noHardwareOffloading;
+
+        result = ma_device_init_internal__wasapi(pDevice->pContext, ma_device_type_playback, pDescriptorPlayback->pDeviceID, &data);
+        if (result != MA_SUCCESS) {
+            /* In duplex mode the capture side has already been initialized above and must be rolled back. */
+            if (pConfig->deviceType == ma_device_type_duplex) {
+                if (pDevice->wasapi.pCaptureClient != NULL) {
+                    ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient);
+                    pDevice->wasapi.pCaptureClient = NULL;
+                }
+                if (pDevice->wasapi.pAudioClientCapture != NULL) {
+                    ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture);
+                    pDevice->wasapi.pAudioClientCapture = NULL;
+                }
+
+                CloseHandle(pDevice->wasapi.hEventCapture);
+                pDevice->wasapi.hEventCapture = NULL;
+            }
+            return result;
+        }
+
+        pDevice->wasapi.pAudioClientPlayback = data.pAudioClient;
+        pDevice->wasapi.pRenderClient = data.pRenderClient;
+        pDevice->wasapi.originalPeriodSizeInMilliseconds = pDescriptorPlayback->periodSizeInMilliseconds;
+        pDevice->wasapi.originalPeriodSizeInFrames = pDescriptorPlayback->periodSizeInFrames;
+        pDevice->wasapi.originalPeriods = pDescriptorPlayback->periodCount;
+        pDevice->wasapi.originalPerformanceProfile = pConfig->performanceProfile;
+
+        /*
+        NOTE(review): as with capture, this comment previously claimed the playback event needs to
+        be manual-reset, but it is created auto-reset (bManualReset = FALSE).
+
+        The playback event starts in a signaled state so that the first call to ma_device_write()
+        is able to get past WaitForMultipleObjects().
+        */
+        pDevice->wasapi.hEventPlayback = CreateEventW(NULL, FALSE, TRUE, NULL); /* Auto reset, signaled by default. */
+        if (pDevice->wasapi.hEventPlayback == NULL) {
+            result = ma_result_from_GetLastError(GetLastError());
+
+            /* Roll back the capture side first (duplex only), then the playback-side objects. */
+            if (pConfig->deviceType == ma_device_type_duplex) {
+                if (pDevice->wasapi.pCaptureClient != NULL) {
+                    ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient);
+                    pDevice->wasapi.pCaptureClient = NULL;
+                }
+                if (pDevice->wasapi.pAudioClientCapture != NULL) {
+                    ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture);
+                    pDevice->wasapi.pAudioClientCapture = NULL;
+                }
+
+                CloseHandle(pDevice->wasapi.hEventCapture);
+                pDevice->wasapi.hEventCapture = NULL;
+            }
+
+            if (pDevice->wasapi.pRenderClient != NULL) {
+                ma_IAudioRenderClient_Release((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient);
+                pDevice->wasapi.pRenderClient = NULL;
+            }
+            if (pDevice->wasapi.pAudioClientPlayback != NULL) {
+                ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback);
+                pDevice->wasapi.pAudioClientPlayback = NULL;
+            }
+
+            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create event for playback.");
+            return result;
+        }
+        ma_IAudioClient_SetEventHandle((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, pDevice->wasapi.hEventPlayback);
+
+        pDevice->wasapi.periodSizeInFramesPlayback = data.periodSizeInFramesOut;
+        ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, &pDevice->wasapi.actualBufferSizeInFramesPlayback);
+
+        /* We must always have a valid ID because rerouting will look at it. */
+        ma_wcscpy_s(pDevice->playback.id.wasapi, sizeof(pDevice->playback.id.wasapi), data.id.wasapi);
+
+        /* The descriptor needs to be updated with actual values. */
+        pDescriptorPlayback->format = data.formatOut;
+        pDescriptorPlayback->channels = data.channelsOut;
+        pDescriptorPlayback->sampleRate = data.sampleRateOut;
+        MA_COPY_MEMORY(pDescriptorPlayback->channelMap, data.channelMapOut, sizeof(data.channelMapOut));
+        pDescriptorPlayback->periodSizeInFrames = data.periodSizeInFramesOut;
+        pDescriptorPlayback->periodCount = data.periodsOut;
+    }
+
+    /*
+    We need to register a notification client to detect when the device has been disabled, unplugged or re-routed (when the default device changes). When
+    we are connecting to the default device we want to do automatic stream routing when the device is disabled or unplugged. Otherwise we want to just
+    stop the device outright and let the application handle it.
+    */
+#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK)
+    /* Auto routing is only enabled when the default device (pDeviceID == NULL) was requested. */
+    if (pConfig->wasapi.noAutoStreamRouting == MA_FALSE) {
+        if ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.pDeviceID == NULL) {
+            pDevice->wasapi.allowCaptureAutoStreamRouting = MA_TRUE;
+        }
+        if ((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.pDeviceID == NULL) {
+            pDevice->wasapi.allowPlaybackAutoStreamRouting = MA_TRUE;
+        }
+    }
+
+    hr = ma_CoCreateInstance(pDevice->pContext, MA_CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, MA_IID_IMMDeviceEnumerator, (void**)&pDeviceEnumerator);
+    if (FAILED(hr)) {
+        ma_device_uninit__wasapi(pDevice);
+        ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create device enumerator.");
+        return ma_result_from_HRESULT(hr);
+    }
+
+    pDevice->wasapi.notificationClient.lpVtbl = (void*)&g_maNotificationCientVtbl;
+    pDevice->wasapi.notificationClient.counter = 1;
+    pDevice->wasapi.notificationClient.pDevice = pDevice;
+
+    hr = pDeviceEnumerator->lpVtbl->RegisterEndpointNotificationCallback(pDeviceEnumerator, &pDevice->wasapi.notificationClient);
+    if (SUCCEEDED(hr)) {
+        pDevice->wasapi.pDeviceEnumerator = (ma_ptr)pDeviceEnumerator;
+    } else {
+        /* Not the end of the world if we fail to register the notification callback. We just won't support automatic stream routing. */
+        ma_IMMDeviceEnumerator_Release(pDeviceEnumerator);
+    }
+#endif
+
+    c89atomic_exchange_32(&pDevice->wasapi.isStartedCapture, MA_FALSE);
+    c89atomic_exchange_32(&pDevice->wasapi.isStartedPlayback, MA_FALSE);
+
+    return MA_SUCCESS;
+}
+
+/*
+Retrieves the number of frames currently available for processing on the given audio client.
+
+In shared mode this is derived from IAudioClient::GetCurrentPadding(). In exclusive mode the
+padding value is not useful for event-driven streams (per Microsoft's documentation the client
+processes the entire buffer each pass), so the full buffer size is reported instead. Callers are
+responsible for waiting on the corresponding event handle before acting on the result.
+*/
+static ma_result ma_device__get_available_frames__wasapi(ma_device* pDevice, ma_IAudioClient* pAudioClient, ma_uint32* pFrameCount)
+{
+    HRESULT hr;
+    ma_uint32 paddingInFrames;
+    ma_bool32 isPlaybackClient;
+    ma_share_mode clientShareMode;
+
+    MA_ASSERT(pDevice != NULL);
+    MA_ASSERT(pFrameCount != NULL);
+
+    *pFrameCount = 0;
+
+    /* The client must be one of the two owned by this device. */
+    isPlaybackClient = ((ma_ptr)pAudioClient == pDevice->wasapi.pAudioClientPlayback);
+    if (!isPlaybackClient && (ma_ptr)pAudioClient != pDevice->wasapi.pAudioClientCapture) {
+        return MA_INVALID_OPERATION;
+    }
+
+    clientShareMode = isPlaybackClient ? pDevice->playback.shareMode : pDevice->capture.shareMode;
+
+    if (clientShareMode != ma_share_mode_shared) {
+        /*
+        Exclusive mode. GetCurrentPadding() has been observed to report 0 here which starves the
+        higher level callers, and Microsoft's documentation states the padding value is typically
+        of no use for event-driven exclusive streams. Report the whole buffer instead.
+        */
+        *pFrameCount = isPlaybackClient ? pDevice->wasapi.actualBufferSizeInFramesPlayback : pDevice->wasapi.actualBufferSizeInFramesCapture;
+        return MA_SUCCESS;
+    }
+
+    /* Shared mode. For playback, available space is the buffer minus the padding; for capture the padding *is* the available data. */
+    hr = ma_IAudioClient_GetCurrentPadding(pAudioClient, &paddingInFrames);
+    if (FAILED(hr)) {
+        return ma_result_from_HRESULT(hr);
+    }
+
+    if (isPlaybackClient) {
+        *pFrameCount = pDevice->wasapi.actualBufferSizeInFramesPlayback - paddingInFrames;
+    } else {
+        *pFrameCount = paddingInFrames;
+    }
+
+    return MA_SUCCESS;
+}
+
+
+/*
+Reroutes one direction of the device to the (new) default endpoint by reinitializing its internal
+objects, then re-runs post-init setup and fires the rerouted notification. Duplex is rejected;
+each direction must be rerouted individually.
+*/
+static ma_result ma_device_reroute__wasapi(ma_device* pDevice, ma_device_type deviceType)
+{
+    ma_result reinitResult;
+
+    if (deviceType == ma_device_type_duplex) {
+        return MA_INVALID_ARGS;
+    }
+
+    ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "=== CHANGING DEVICE ===\n");
+
+    reinitResult = ma_device_reinit__wasapi(pDevice, deviceType);
+    if (reinitResult != MA_SUCCESS) {
+        ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_WARNING, "[WASAPI] Reinitializing device after route change failed.\n");
+        return reinitResult;
+    }
+
+    ma_device__post_init_setup(pDevice, deviceType);
+    ma_device__on_notification_rerouted(pDevice);
+
+    return MA_SUCCESS;
+}
+
+/*
+Starts the device's internal audio client(s) and flags the corresponding side(s) as started.
+Capture (including loopback) is started before playback; a failure on either side is returned
+immediately.
+*/
+static ma_result ma_device_start__wasapi(ma_device* pDevice)
+{
+    HRESULT hr;
+    ma_device_type deviceType;
+
+    MA_ASSERT(pDevice != NULL);
+
+    deviceType = pDevice->type;
+
+    if (deviceType == ma_device_type_capture || deviceType == ma_device_type_duplex || deviceType == ma_device_type_loopback) {
+        hr = ma_IAudioClient_Start((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture);
+        if (FAILED(hr)) {
+            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to start internal capture device.");
+            return ma_result_from_HRESULT(hr);
+        }
+        c89atomic_exchange_32(&pDevice->wasapi.isStartedCapture, MA_TRUE);
+    }
+
+    if (deviceType == ma_device_type_playback || deviceType == ma_device_type_duplex) {
+        hr = ma_IAudioClient_Start((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback);
+        if (FAILED(hr)) {
+            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to start internal playback device.");
+            return ma_result_from_HRESULT(hr);
+        }
+        c89atomic_exchange_32(&pDevice->wasapi.isStartedPlayback, MA_TRUE);
+    }
+
+    return MA_SUCCESS;
+}
+
+/*
+Stops the device's internal audio client(s). For playback, the buffer is first drained so short
+sounds are not cut off. Each client is Reset() after Stop() because restarting fails otherwise,
+and any outstanding mapped buffer is released.
+*/
+static ma_result ma_device_stop__wasapi(ma_device* pDevice)
+{
+    ma_result result;
+    HRESULT hr;
+
+    MA_ASSERT(pDevice != NULL);
+
+    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex || pDevice->type == ma_device_type_loopback) {
+        hr = ma_IAudioClient_Stop((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture);
+        if (FAILED(hr)) {
+            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to stop internal capture device.");
+            return ma_result_from_HRESULT(hr);
+        }
+
+        /* The audio client needs to be reset otherwise restarting will fail. */
+        hr = ma_IAudioClient_Reset((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture);
+        if (FAILED(hr)) {
+            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to reset internal capture device.");
+            return ma_result_from_HRESULT(hr);
+        }
+
+        /* If we have a mapped buffer we need to release it. */
+        if (pDevice->wasapi.pMappedBufferCapture != NULL) {
+            ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, pDevice->wasapi.mappedBufferCaptureCap);
+            pDevice->wasapi.pMappedBufferCapture = NULL;
+            pDevice->wasapi.mappedBufferCaptureCap = 0;
+            pDevice->wasapi.mappedBufferCaptureLen = 0;
+        }
+
+        c89atomic_exchange_32(&pDevice->wasapi.isStartedCapture, MA_FALSE);
+    }
+
+    if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+        /*
+        The buffer needs to be drained before stopping the device. Not doing this will result in the last few frames not getting output to
+        the speakers. This is a problem for very short sounds because it'll result in a significant portion of it not getting played.
+        */
+        if (c89atomic_load_32(&pDevice->wasapi.isStartedPlayback)) {
+            /*
+            We need to make sure we put a timeout here or else we'll risk getting stuck in a deadlock in some cases.
+
+            BUG FIX: WaitForSingleObject() takes a timeout in *milliseconds*, but this was previously
+            computed as frames/sampleRate which is whole seconds and truncates to 0 for any buffer
+            shorter than one second, making the wait return immediately. Compute milliseconds instead,
+            guarding against a zero sample rate.
+            */
+            DWORD waitTime = 1000;  /* Fallback of 1 second if the sample rate is unknown. */
+            if (pDevice->playback.internalSampleRate > 0) {
+                waitTime = (DWORD)(((ma_uint64)pDevice->wasapi.actualBufferSizeInFramesPlayback * 1000) / pDevice->playback.internalSampleRate);
+            }
+
+            if (pDevice->playback.shareMode == ma_share_mode_exclusive) {
+                WaitForSingleObject(pDevice->wasapi.hEventPlayback, waitTime);
+            } else {
+                ma_uint32 prevFramesAvailablePlayback = (ma_uint32)-1;
+                ma_uint32 framesAvailablePlayback;
+                for (;;) {
+                    result = ma_device__get_available_frames__wasapi(pDevice, (ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, &framesAvailablePlayback);
+                    if (result != MA_SUCCESS) {
+                        break;
+                    }
+
+                    /* Fully drained once the entire buffer is reported as available. */
+                    if (framesAvailablePlayback >= pDevice->wasapi.actualBufferSizeInFramesPlayback) {
+                        break;
+                    }
+
+                    /*
+                    Just a safety check to avoid an infinite loop. If this iteration results in a situation where the number of available frames
+                    has not changed, get out of the loop. I don't think this should ever happen, but I think it's nice to have just in case.
+                    */
+                    if (framesAvailablePlayback == prevFramesAvailablePlayback) {
+                        break;
+                    }
+                    prevFramesAvailablePlayback = framesAvailablePlayback;
+
+                    WaitForSingleObject(pDevice->wasapi.hEventPlayback, waitTime);
+                    ResetEvent(pDevice->wasapi.hEventPlayback); /* Defensive reset; the event is auto-reset, but clear it explicitly before re-polling. */
+                }
+            }
+        }
+
+        hr = ma_IAudioClient_Stop((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback);
+        if (FAILED(hr)) {
+            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to stop internal playback device.");
+            return ma_result_from_HRESULT(hr);
+        }
+
+        /* The audio client needs to be reset otherwise restarting will fail. */
+        hr = ma_IAudioClient_Reset((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback);
+        if (FAILED(hr)) {
+            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to reset internal playback device.");
+            return ma_result_from_HRESULT(hr);
+        }
+
+        /* Release any outstanding mapped playback buffer. */
+        if (pDevice->wasapi.pMappedBufferPlayback != NULL) {
+            ma_IAudioRenderClient_ReleaseBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, pDevice->wasapi.mappedBufferPlaybackCap, 0);
+            pDevice->wasapi.pMappedBufferPlayback = NULL;
+            pDevice->wasapi.mappedBufferPlaybackCap = 0;
+            pDevice->wasapi.mappedBufferPlaybackLen = 0;
+        }
+
+        c89atomic_exchange_32(&pDevice->wasapi.isStartedPlayback, MA_FALSE);
+    }
+
+    return MA_SUCCESS;
+}
+
+
+#ifndef MA_WASAPI_WAIT_TIMEOUT_MILLISECONDS
+#define MA_WASAPI_WAIT_TIMEOUT_MILLISECONDS 5000
+#endif
+
+/*
+Reads up to frameCount frames from the capture client into pFrames.
+
+The function alternates between two states: (1) consuming a previously mapped WASAPI buffer
+cached in pDevice->wasapi.pMappedBufferCapture, and (2) mapping a new buffer via GetBuffer(),
+waiting on hEventCapture when none is available. On a data discontinuity flag (overrun) the
+capture buffer is flushed entirely to avoid repeated glitches. The number of frames actually
+read is written to pFramesRead (may be less than frameCount if the device stops or errors).
+*/
+static ma_result ma_device_read__wasapi(ma_device* pDevice, void* pFrames, ma_uint32 frameCount, ma_uint32* pFramesRead)
+{
+    ma_result result = MA_SUCCESS;
+    ma_uint32 totalFramesProcessed = 0;
+
+    /*
+    When reading, we need to get a buffer and process all of it before releasing it. Because the
+    frame count (frameCount) can be different to the size of the buffer, we'll need to cache the
+    pointer to the buffer.
+    */
+
+    /* Keep running until we've processed the requested number of frames. */
+    while (ma_device_get_state(pDevice) == ma_device_state_started && totalFramesProcessed < frameCount) {
+        ma_uint32 framesRemaining = frameCount - totalFramesProcessed;
+
+        /* If we have a mapped data buffer, consume that first. */
+        if (pDevice->wasapi.pMappedBufferCapture != NULL) {
+            /* We have a cached data pointer so consume that before grabbing another one from WASAPI. */
+            ma_uint32 framesToProcessNow = framesRemaining;
+            if (framesToProcessNow > pDevice->wasapi.mappedBufferCaptureLen) {
+                framesToProcessNow = pDevice->wasapi.mappedBufferCaptureLen;
+            }
+
+            /* Now just copy the data over to the output buffer. The read offset within the mapped buffer is (cap - len). */
+            ma_copy_pcm_frames(
+                ma_offset_pcm_frames_ptr(pFrames, totalFramesProcessed, pDevice->capture.internalFormat, pDevice->capture.internalChannels),
+                ma_offset_pcm_frames_const_ptr(pDevice->wasapi.pMappedBufferCapture, pDevice->wasapi.mappedBufferCaptureCap - pDevice->wasapi.mappedBufferCaptureLen, pDevice->capture.internalFormat, pDevice->capture.internalChannels),
+                framesToProcessNow,
+                pDevice->capture.internalFormat, pDevice->capture.internalChannels
+            );
+
+            totalFramesProcessed += framesToProcessNow;
+            pDevice->wasapi.mappedBufferCaptureLen -= framesToProcessNow;
+
+            /* If the data buffer has been fully consumed we need to release it. */
+            if (pDevice->wasapi.mappedBufferCaptureLen == 0) {
+                ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, pDevice->wasapi.mappedBufferCaptureCap);
+                pDevice->wasapi.pMappedBufferCapture = NULL;
+                pDevice->wasapi.mappedBufferCaptureCap = 0;
+            }
+        } else {
+            /* We don't have any cached data pointer, so grab another one. */
+            HRESULT hr;
+            DWORD flags;
+
+            /* First just ask WASAPI for a data buffer. If it's not available, we'll wait for more. */
+            hr = ma_IAudioCaptureClient_GetBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, (BYTE**)&pDevice->wasapi.pMappedBufferCapture, &pDevice->wasapi.mappedBufferCaptureCap, &flags, NULL, NULL);
+            if (hr == S_OK) {
+                /* We got a data buffer. Continue to the next loop iteration which will then read from the mapped pointer. */
+
+                /* Overrun detection. */
+                if ((flags & MA_AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY) != 0) {
+                    /* Glitched. Probably due to an overrun. */
+                    ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[WASAPI] Data discontinuity (possible overrun). Attempting recovery. mappedBufferCaptureCap=%d\n", pDevice->wasapi.mappedBufferCaptureCap);
+
+                    /*
+                    If we got an overrun it probably means we're straddling the end of the buffer. In order to prevent
+                    a never-ending sequence of glitches we're going to recover by completely clearing out the capture
+                    buffer.
+                    */
+                    {
+                        ma_uint32 iterationCount = 4; /* Safety to prevent an infinite loop. */
+                        ma_uint32 i;
+
+                        /* Release/GetBuffer pairs until the client reports empty (or a hard failure). */
+                        for (i = 0; i < iterationCount; i += 1) {
+                            hr = ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, pDevice->wasapi.mappedBufferCaptureCap);
+                            if (FAILED(hr)) {
+                                break;
+                            }
+
+                            hr = ma_IAudioCaptureClient_GetBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, (BYTE**)&pDevice->wasapi.pMappedBufferCapture, &pDevice->wasapi.mappedBufferCaptureCap, &flags, NULL, NULL);
+                            if (hr == MA_AUDCLNT_S_BUFFER_EMPTY || FAILED(hr)) {
+                                break;
+                            }
+                        }
+                    }
+
+                    /* We should not have a valid buffer at this point so make sure everything is empty. */
+                    pDevice->wasapi.pMappedBufferCapture = NULL;
+                    pDevice->wasapi.mappedBufferCaptureCap = 0;
+                    pDevice->wasapi.mappedBufferCaptureLen = 0;
+                } else {
+                    /* The data is clean. */
+                    pDevice->wasapi.mappedBufferCaptureLen = pDevice->wasapi.mappedBufferCaptureCap;
+
+                    if (flags != 0) {
+                        ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[WASAPI] Capture Flags: %ld\n", flags);
+                    }
+                }
+
+                continue;
+            } else {
+                if (hr == MA_AUDCLNT_S_BUFFER_EMPTY || hr == MA_AUDCLNT_E_BUFFER_ERROR) {
+                    /*
+                    No data is available. We need to wait for more. There's two situations to consider
+                    here. The first is normal capture mode. If this times out it probably means the
+                    microphone isn't delivering data for whatever reason. In this case we'll just
+                    abort the read and return whatever we were able to get. The other situations is
+                    loopback mode, in which case a timeout probably just means the nothing is playing
+                    through the speakers.
+                    */
+                    if (WaitForSingleObject(pDevice->wasapi.hEventCapture, MA_WASAPI_WAIT_TIMEOUT_MILLISECONDS) != WAIT_OBJECT_0) {
+                        if (pDevice->type == ma_device_type_loopback) {
+                            continue;   /* Keep waiting in loopback mode. */
+                        } else {
+                            result = MA_ERROR;
+                            break;  /* Wait failed. */
+                        }
+                    }
+
+                    /* At this point we should be able to loop back to the start of the loop and try retrieving a data buffer again. */
+                } else {
+                    /* An error occurred and we need to abort. */
+                    ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve internal buffer from capture device in preparation for reading from the device. HRESULT = %d. Stopping device.\n", (int)hr);
+                    result = ma_result_from_HRESULT(hr);
+                    break;
+                }
+            }
+        }
+    }
+
+    /*
+    If we were unable to process the entire requested frame count, but we still have a mapped buffer,
+    there's a good chance either an error occurred or the device was stopped mid-read. In this case
+    we'll need to make sure the buffer is released.
+    */
+    if (totalFramesProcessed < frameCount && pDevice->wasapi.pMappedBufferCapture != NULL) {
+        ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, pDevice->wasapi.mappedBufferCaptureCap);
+        pDevice->wasapi.pMappedBufferCapture = NULL;
+        pDevice->wasapi.mappedBufferCaptureCap = 0;
+        pDevice->wasapi.mappedBufferCaptureLen = 0;
+    }
+
+    if (pFramesRead != NULL) {
+        *pFramesRead = totalFramesProcessed;
+    }
+
+    return result;
+}
+
+/*
+Writes up to frameCount frames from pFrames to the render client.
+
+Mirrors ma_device_read__wasapi(): it alternates between filling a cached mapped buffer
+(pDevice->wasapi.pMappedBufferPlayback) and requesting a new one via GetBuffer(), waiting on
+hEventPlayback when the device has no space. Exclusive mode requires an explicit wait after each
+full buffer is submitted because GetBuffer() does not report AUDCLNT_E_BUFFER_TOO_LARGE there.
+The number of frames actually written is stored in pFramesWritten.
+*/
+static ma_result ma_device_write__wasapi(ma_device* pDevice, const void* pFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten)
+{
+    ma_result result = MA_SUCCESS;
+    ma_uint32 totalFramesProcessed = 0;
+
+    /* Keep writing to the device until it's stopped or we've consumed all of our input. */
+    while (ma_device_get_state(pDevice) == ma_device_state_started && totalFramesProcessed < frameCount) {
+        ma_uint32 framesRemaining = frameCount - totalFramesProcessed;
+
+        /*
+        We're going to do this in a similar way to capture. We'll first check if the cached data pointer
+        is valid, and if so, read from that. Otherwise We will call IAudioRenderClient_GetBuffer() with
+        a requested buffer size equal to our actual period size. If it returns AUDCLNT_E_BUFFER_TOO_LARGE
+        it means we need to wait for some data to become available.
+        */
+        if (pDevice->wasapi.pMappedBufferPlayback != NULL) {
+            /* We still have some space available in the mapped data buffer. Write to it. */
+            ma_uint32 framesToProcessNow = framesRemaining;
+            if (framesToProcessNow > (pDevice->wasapi.mappedBufferPlaybackCap - pDevice->wasapi.mappedBufferPlaybackLen)) {
+                framesToProcessNow = (pDevice->wasapi.mappedBufferPlaybackCap - pDevice->wasapi.mappedBufferPlaybackLen);
+            }
+
+            /* Now just copy the data over to the output buffer. The write offset within the mapped buffer is len. */
+            ma_copy_pcm_frames(
+                ma_offset_pcm_frames_ptr(pDevice->wasapi.pMappedBufferPlayback, pDevice->wasapi.mappedBufferPlaybackLen, pDevice->playback.internalFormat, pDevice->playback.internalChannels),
+                ma_offset_pcm_frames_const_ptr(pFrames, totalFramesProcessed, pDevice->playback.internalFormat, pDevice->playback.internalChannels),
+                framesToProcessNow,
+                pDevice->playback.internalFormat, pDevice->playback.internalChannels
+            );
+
+            totalFramesProcessed += framesToProcessNow;
+            pDevice->wasapi.mappedBufferPlaybackLen += framesToProcessNow;
+
+            /* If the data buffer has been fully consumed we need to release it. */
+            if (pDevice->wasapi.mappedBufferPlaybackLen == pDevice->wasapi.mappedBufferPlaybackCap) {
+                ma_IAudioRenderClient_ReleaseBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, pDevice->wasapi.mappedBufferPlaybackCap, 0);
+                pDevice->wasapi.pMappedBufferPlayback = NULL;
+                pDevice->wasapi.mappedBufferPlaybackCap = 0;
+                pDevice->wasapi.mappedBufferPlaybackLen = 0;
+
+                /*
+                In exclusive mode we need to wait here. Exclusive mode is weird because GetBuffer() never
+                seems to return AUDCLNT_E_BUFFER_TOO_LARGE, which is what we normally use to determine
+                whether or not we need to wait for more data.
+                */
+                if (pDevice->playback.shareMode == ma_share_mode_exclusive) {
+                    if (WaitForSingleObject(pDevice->wasapi.hEventPlayback, MA_WASAPI_WAIT_TIMEOUT_MILLISECONDS) != WAIT_OBJECT_0) {
+                        result = MA_ERROR;
+                        break;   /* Wait failed. Probably timed out. */
+                    }
+                }
+            }
+        } else {
+            /* We don't have a mapped data buffer so we'll need to get one. */
+            HRESULT hr;
+            ma_uint32 bufferSizeInFrames;
+
+            /* Special rules for exclusive mode: request the whole buffer rather than a single period. */
+            if (pDevice->playback.shareMode == ma_share_mode_exclusive) {
+                bufferSizeInFrames = pDevice->wasapi.actualBufferSizeInFramesPlayback;
+            } else {
+                bufferSizeInFrames = pDevice->wasapi.periodSizeInFramesPlayback;
+            }
+
+            hr = ma_IAudioRenderClient_GetBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, bufferSizeInFrames, (BYTE**)&pDevice->wasapi.pMappedBufferPlayback);
+            if (hr == S_OK) {
+                /* We have data available. */
+                pDevice->wasapi.mappedBufferPlaybackCap = bufferSizeInFrames;
+                pDevice->wasapi.mappedBufferPlaybackLen = 0;
+            } else {
+                if (hr == MA_AUDCLNT_E_BUFFER_TOO_LARGE || hr == MA_AUDCLNT_E_BUFFER_ERROR) {
+                    /* Not enough data available. We need to wait for more. */
+                    if (WaitForSingleObject(pDevice->wasapi.hEventPlayback, MA_WASAPI_WAIT_TIMEOUT_MILLISECONDS) != WAIT_OBJECT_0) {
+                        result = MA_ERROR;
+                        break;   /* Wait failed. Probably timed out. */
+                    }
+                } else {
+                    /* Some error occurred. We'll need to abort. */
+                    ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve internal buffer from playback device in preparation for writing to the device. HRESULT = %d. Stopping device.\n", (int)hr);
+                    result = ma_result_from_HRESULT(hr);
+                    break;
+                }
+            }
+        }
+    }
+
+    if (pFramesWritten != NULL) {
+        *pFramesWritten = totalFramesProcessed;
+    }
+
+    return result;
+}
+
+/*
+Wakes up the data loop by signaling whichever event(s) it may currently be blocked on, allowing
+it to observe a state change (e.g. a stop request) promptly.
+*/
+static ma_result ma_device_data_loop_wakeup__wasapi(ma_device* pDevice)
+{
+    ma_device_type deviceType;
+
+    MA_ASSERT(pDevice != NULL);
+
+    deviceType = pDevice->type;
+
+    if (deviceType == ma_device_type_capture || deviceType == ma_device_type_duplex || deviceType == ma_device_type_loopback) {
+        SetEvent((HANDLE)pDevice->wasapi.hEventCapture);
+    }
+    if (deviceType == ma_device_type_playback || deviceType == ma_device_type_duplex) {
+        SetEvent((HANDLE)pDevice->wasapi.hEventPlayback);
+    }
+
+    return MA_SUCCESS;
+}
+
+
+/*
+Uninitializes the WASAPI context. If the IAudioClient-release worker thread is running, it is
+asked to quit and joined; its synchronization primitives are destroyed only after the join so the
+thread can never touch freed objects.
+*/
+static ma_result ma_context_uninit__wasapi(ma_context* pContext)
+{
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pContext->backend == ma_backend_wasapi);
+
+    if (pContext->wasapi.commandThread != NULL) {
+        ma_context_command__wasapi quitCmd = ma_context_init_command__wasapi(MA_CONTEXT_COMMAND_QUIT__WASAPI);
+        ma_context_post_command__wasapi(pContext, &quitCmd);
+        ma_thread_wait(&pContext->wasapi.commandThread);
+
+        /* Only after the thread has been terminated can we uninitialize the sync objects for the command thread. */
+        ma_semaphore_uninit(&pContext->wasapi.commandSem);
+        ma_mutex_uninit(&pContext->wasapi.commandLock);
+    }
+
+    return MA_SUCCESS;
+}
+
/*
Initializes the WASAPI backend and fills out the backend callback table.

On desktop Windows this first performs a runtime OS version check (Vista SP1 or newer is
required), then creates the command thread that is used to marshal certain WASAPI object
releases onto a single thread (see the large comment below for why that is necessary).
*/
static ma_result ma_context_init__wasapi(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks)
{
    ma_result result = MA_SUCCESS;

    MA_ASSERT(pContext != NULL);

    (void)pConfig;  /* Unused by this backend. */

#ifdef MA_WIN32_DESKTOP
    /*
    WASAPI is only supported in Vista SP1 and newer. The reason for SP1 and not the base version of Vista is that event-driven
    exclusive mode does not work until SP1.

    Unfortunately older compilers don't define these functions so we need to dynamically load them in order to avoid a link error.
    */
    {
        ma_OSVERSIONINFOEXW osvi;
        ma_handle kernel32DLL;
        ma_PFNVerifyVersionInfoW _VerifyVersionInfoW;
        ma_PFNVerSetConditionMask _VerSetConditionMask;

        kernel32DLL = ma_dlopen(pContext, "kernel32.dll");
        if (kernel32DLL == NULL) {
            return MA_NO_BACKEND;
        }

        _VerifyVersionInfoW = (ma_PFNVerifyVersionInfoW )ma_dlsym(pContext, kernel32DLL, "VerifyVersionInfoW");
        _VerSetConditionMask = (ma_PFNVerSetConditionMask)ma_dlsym(pContext, kernel32DLL, "VerSetConditionMask");
        if (_VerifyVersionInfoW == NULL || _VerSetConditionMask == NULL) {
            ma_dlclose(pContext, kernel32DLL);
            return MA_NO_BACKEND;
        }

        /* MA_WIN32_WINNT_VISTA packs major/minor version into a 16-bit value; unpack it for the OSVERSIONINFO struct. */
        MA_ZERO_OBJECT(&osvi);
        osvi.dwOSVersionInfoSize = sizeof(osvi);
        osvi.dwMajorVersion = ((MA_WIN32_WINNT_VISTA >> 8) & 0xFF);
        osvi.dwMinorVersion = ((MA_WIN32_WINNT_VISTA >> 0) & 0xFF);
        osvi.wServicePackMajor = 1;
        /* The nested _VerSetConditionMask() calls build a ">= on all three fields" condition mask. */
        if (_VerifyVersionInfoW(&osvi, MA_VER_MAJORVERSION | MA_VER_MINORVERSION | MA_VER_SERVICEPACKMAJOR, _VerSetConditionMask(_VerSetConditionMask(_VerSetConditionMask(0, MA_VER_MAJORVERSION, MA_VER_GREATER_EQUAL), MA_VER_MINORVERSION, MA_VER_GREATER_EQUAL), MA_VER_SERVICEPACKMAJOR, MA_VER_GREATER_EQUAL))) {
            result = MA_SUCCESS;
        } else {
            result = MA_NO_BACKEND;
        }

        ma_dlclose(pContext, kernel32DLL);
    }
#endif

    /* On non-desktop builds the version check above is compiled out and result remains MA_SUCCESS. */
    if (result != MA_SUCCESS) {
        return result;
    }

    MA_ZERO_OBJECT(&pContext->wasapi);

    /*
    Annoyingly, WASAPI does not allow you to release an IAudioClient object from a different thread
    than the one that retrieved it with GetService(). This can result in a deadlock in two
    situations:

        1) When calling ma_device_uninit() from a different thread to ma_device_init(); and
        2) When uninitializing and reinitializing the internal IAudioClient object in response to
           automatic stream routing.

    We could define ma_device_uninit() such that it must be called on the same thread as
    ma_device_init(). We could also just not release the IAudioClient when performing automatic
    stream routing to avoid the deadlock. Neither of these are acceptable solutions in my view so
    we're going to have to work around this with a worker thread. This is not ideal, but I can't
    think of a better way to do this.

    More information about this can be found here:

        https://docs.microsoft.com/en-us/windows/win32/api/audioclient/nn-audioclient-iaudiorenderclient

    Note this section:

        When releasing an IAudioRenderClient interface instance, the client must call the interface's
        Release method from the same thread as the call to IAudioClient::GetService that created the
        object.
    */
    {
        result = ma_mutex_init(&pContext->wasapi.commandLock);
        if (result != MA_SUCCESS) {
            return result;
        }

        result = ma_semaphore_init(0, &pContext->wasapi.commandSem);
        if (result != MA_SUCCESS) {
            ma_mutex_uninit(&pContext->wasapi.commandLock);
            return result;
        }

        /* The command thread owns the lifetime of WASAPI objects that must be released on a single thread. */
        result = ma_thread_create(&pContext->wasapi.commandThread, ma_thread_priority_normal, 0, ma_context_command_thread__wasapi, pContext, &pContext->allocationCallbacks);
        if (result != MA_SUCCESS) {
            ma_semaphore_uninit(&pContext->wasapi.commandSem);
            ma_mutex_uninit(&pContext->wasapi.commandLock);
            return result;
        }
    }


    /* onDeviceDataLoop is NULL because WASAPI uses the default read/write driven loop via onDeviceRead/onDeviceWrite. */
    pCallbacks->onContextInit             = ma_context_init__wasapi;
    pCallbacks->onContextUninit           = ma_context_uninit__wasapi;
    pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__wasapi;
    pCallbacks->onContextGetDeviceInfo    = ma_context_get_device_info__wasapi;
    pCallbacks->onDeviceInit              = ma_device_init__wasapi;
    pCallbacks->onDeviceUninit            = ma_device_uninit__wasapi;
    pCallbacks->onDeviceStart             = ma_device_start__wasapi;
    pCallbacks->onDeviceStop              = ma_device_stop__wasapi;
    pCallbacks->onDeviceRead              = ma_device_read__wasapi;
    pCallbacks->onDeviceWrite             = ma_device_write__wasapi;
    pCallbacks->onDeviceDataLoop          = NULL;
    pCallbacks->onDeviceDataLoopWakeup    = ma_device_data_loop_wakeup__wasapi;

    return MA_SUCCESS;
}
+#endif
+
+/******************************************************************************
+
+DirectSound Backend
+
+******************************************************************************/
+#ifdef MA_HAS_DSOUND
+/*#include <dsound.h>*/
+
+/*static const GUID MA_GUID_IID_DirectSoundNotify = {0xb0210783, 0x89cd, 0x11d0, {0xaf, 0x08, 0x00, 0xa0, 0xc9, 0x25, 0xcd, 0x16}};*/
+
/* miniaudio only uses priority or exclusive modes. */
/* Cooperative levels for IDirectSound::SetCooperativeLevel(). Mirror of the DSSCL_* constants in dsound.h. */
#define MA_DSSCL_NORMAL                 1
#define MA_DSSCL_PRIORITY               2
#define MA_DSSCL_EXCLUSIVE              3
#define MA_DSSCL_WRITEPRIMARY           4

/* Device capability flags (DSCAPS_*) reported via MA_DSCAPS.dwFlags. */
#define MA_DSCAPS_PRIMARYMONO           0x00000001
#define MA_DSCAPS_PRIMARYSTEREO         0x00000002
#define MA_DSCAPS_PRIMARY8BIT           0x00000004
#define MA_DSCAPS_PRIMARY16BIT          0x00000008
#define MA_DSCAPS_CONTINUOUSRATE        0x00000010
#define MA_DSCAPS_EMULDRIVER            0x00000020
#define MA_DSCAPS_CERTIFIED             0x00000040
#define MA_DSCAPS_SECONDARYMONO         0x00000100
#define MA_DSCAPS_SECONDARYSTEREO       0x00000200
#define MA_DSCAPS_SECONDARY8BIT         0x00000400
#define MA_DSCAPS_SECONDARY16BIT        0x00000800

/* Buffer capability/creation flags (DSBCAPS_*) used in MA_DSBUFFERDESC.dwFlags. */
#define MA_DSBCAPS_PRIMARYBUFFER        0x00000001
#define MA_DSBCAPS_STATIC               0x00000002
#define MA_DSBCAPS_LOCHARDWARE          0x00000004
#define MA_DSBCAPS_LOCSOFTWARE          0x00000008
#define MA_DSBCAPS_CTRL3D               0x00000010
#define MA_DSBCAPS_CTRLFREQUENCY        0x00000020
#define MA_DSBCAPS_CTRLPAN              0x00000040
#define MA_DSBCAPS_CTRLVOLUME           0x00000080
#define MA_DSBCAPS_CTRLPOSITIONNOTIFY   0x00000100
#define MA_DSBCAPS_CTRLFX               0x00000200
#define MA_DSBCAPS_STICKYFOCUS          0x00004000
#define MA_DSBCAPS_GLOBALFOCUS          0x00008000
#define MA_DSBCAPS_GETCURRENTPOSITION2  0x00010000
#define MA_DSBCAPS_MUTE3DATMAXDISTANCE  0x00020000
#define MA_DSBCAPS_LOCDEFER             0x00040000
#define MA_DSBCAPS_TRUEPLAYPOSITION     0x00080000

/* Playback flags (DSBPLAY_*) for IDirectSoundBuffer::Play(). */
#define MA_DSBPLAY_LOOPING              0x00000001
#define MA_DSBPLAY_LOCHARDWARE          0x00000002
#define MA_DSBPLAY_LOCSOFTWARE          0x00000004
#define MA_DSBPLAY_TERMINATEBY_TIME     0x00000008
#define MA_DSBPLAY_TERMINATEBY_DISTANCE 0x00000010
#define MA_DSBPLAY_TERMINATEBY_PRIORITY 0x00000020

/* Capture start flag (DSCBSTART_*) for IDirectSoundCaptureBuffer::Start(). */
#define MA_DSCBSTART_LOOPING            0x00000001
+
/* Mirror of DSBUFFERDESC from dsound.h, redeclared so miniaudio doesn't require the DirectSound headers. Field order and sizes must match exactly. */
typedef struct
{
    DWORD dwSize;           /* Must be set to sizeof(MA_DSBUFFERDESC) before use. */
    DWORD dwFlags;          /* MA_DSBCAPS_* flags. */
    DWORD dwBufferBytes;
    DWORD dwReserved;
    WAVEFORMATEX* lpwfxFormat;
    GUID guid3DAlgorithm;
} MA_DSBUFFERDESC;
+
/* Mirror of DSCBUFFERDESC (capture buffer description) from dsound.h. Field order and sizes must match exactly. */
typedef struct
{
    DWORD dwSize;           /* Must be set to sizeof(MA_DSCBUFFERDESC) before use. */
    DWORD dwFlags;
    DWORD dwBufferBytes;
    DWORD dwReserved;
    WAVEFORMATEX* lpwfxFormat;
    DWORD dwFXCount;
    void* lpDSCFXDesc;  /* <-- miniaudio doesn't use this, so set to void*. */
} MA_DSCBUFFERDESC;
+
/* Mirror of DSCAPS (playback device capabilities) from dsound.h. Field order and sizes must match exactly. */
typedef struct
{
    DWORD dwSize;           /* Must be set to sizeof(MA_DSCAPS) before calling GetCaps(). */
    DWORD dwFlags;          /* MA_DSCAPS_* flags. */
    DWORD dwMinSecondarySampleRate;
    DWORD dwMaxSecondarySampleRate;
    DWORD dwPrimaryBuffers;
    DWORD dwMaxHwMixingAllBuffers;
    DWORD dwMaxHwMixingStaticBuffers;
    DWORD dwMaxHwMixingStreamingBuffers;
    DWORD dwFreeHwMixingAllBuffers;
    DWORD dwFreeHwMixingStaticBuffers;
    DWORD dwFreeHwMixingStreamingBuffers;
    DWORD dwMaxHw3DAllBuffers;
    DWORD dwMaxHw3DStaticBuffers;
    DWORD dwMaxHw3DStreamingBuffers;
    DWORD dwFreeHw3DAllBuffers;
    DWORD dwFreeHw3DStaticBuffers;
    DWORD dwFreeHw3DStreamingBuffers;
    DWORD dwTotalHwMemBytes;
    DWORD dwFreeHwMemBytes;
    DWORD dwMaxContigFreeHwMemBytes;
    DWORD dwUnlockTransferRateHwBuffers;
    DWORD dwPlayCpuOverheadSwBuffers;
    DWORD dwReserved1;
    DWORD dwReserved2;
} MA_DSCAPS;
+
/* Mirror of DSBCAPS (playback buffer capabilities) from dsound.h. Field order and sizes must match exactly. */
typedef struct
{
    DWORD dwSize;           /* Must be set to sizeof(MA_DSBCAPS) before calling GetCaps(). */
    DWORD dwFlags;
    DWORD dwBufferBytes;
    DWORD dwUnlockTransferRate;
    DWORD dwPlayCpuOverhead;
} MA_DSBCAPS;
+
/* Mirror of DSCCAPS (capture device capabilities) from dsound.h. dwFormats is a WAVE_FORMAT_* bitfield. */
typedef struct
{
    DWORD dwSize;           /* Must be set to sizeof(MA_DSCCAPS) before calling GetCaps(). */
    DWORD dwFlags;
    DWORD dwFormats;
    DWORD dwChannels;
} MA_DSCCAPS;
+
/* Mirror of DSCBCAPS (capture buffer capabilities) from dsound.h. Field order and sizes must match exactly. */
typedef struct
{
    DWORD dwSize;           /* Must be set to sizeof(MA_DSCBCAPS) before calling GetCaps(). */
    DWORD dwFlags;
    DWORD dwBufferBytes;
    DWORD dwReserved;
} MA_DSCBCAPS;
+
/* Mirror of DSBPOSITIONNOTIFY from dsound.h: hEventNotify is signaled when the play cursor reaches dwOffset. */
typedef struct
{
    DWORD dwOffset;
    HANDLE hEventNotify;
} MA_DSBPOSITIONNOTIFY;
+
/* Forward declarations for the DirectSound COM interfaces declared below. */
typedef struct ma_IDirectSound              ma_IDirectSound;
typedef struct ma_IDirectSoundBuffer        ma_IDirectSoundBuffer;
typedef struct ma_IDirectSoundCapture       ma_IDirectSoundCapture;
typedef struct ma_IDirectSoundCaptureBuffer ma_IDirectSoundCaptureBuffer;
typedef struct ma_IDirectSoundNotify        ma_IDirectSoundNotify;
+
+
+/*
+COM objects. The way these work is that you have a vtable (a list of function pointers, kind of
+like how C++ works internally), and then you have a structure with a single member, which is a
+pointer to the vtable. The vtable is where the methods of the object are defined. Methods need
+to be in a specific order, and parent classes need to have their methods declared first.
+*/
+
/* IDirectSound */
/*
Hand-rolled vtable mirroring the real IDirectSound COM interface. Method order must match
dsound.h exactly (IUnknown methods first) since this is the binary layout of the real vtable.
*/
typedef struct
{
    /* IUnknown */
    HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IDirectSound* pThis, const IID* const riid, void** ppObject);
    ULONG   (STDMETHODCALLTYPE * AddRef)        (ma_IDirectSound* pThis);
    ULONG   (STDMETHODCALLTYPE * Release)       (ma_IDirectSound* pThis);

    /* IDirectSound */
    HRESULT (STDMETHODCALLTYPE * CreateSoundBuffer)   (ma_IDirectSound* pThis, const MA_DSBUFFERDESC* pDSBufferDesc, ma_IDirectSoundBuffer** ppDSBuffer, void* pUnkOuter);
    HRESULT (STDMETHODCALLTYPE * GetCaps)             (ma_IDirectSound* pThis, MA_DSCAPS* pDSCaps);
    HRESULT (STDMETHODCALLTYPE * DuplicateSoundBuffer)(ma_IDirectSound* pThis, ma_IDirectSoundBuffer* pDSBufferOriginal, ma_IDirectSoundBuffer** ppDSBufferDuplicate);
    HRESULT (STDMETHODCALLTYPE * SetCooperativeLevel) (ma_IDirectSound* pThis, HWND hwnd, DWORD dwLevel);
    HRESULT (STDMETHODCALLTYPE * Compact)             (ma_IDirectSound* pThis);
    HRESULT (STDMETHODCALLTYPE * GetSpeakerConfig)    (ma_IDirectSound* pThis, DWORD* pSpeakerConfig);
    HRESULT (STDMETHODCALLTYPE * SetSpeakerConfig)    (ma_IDirectSound* pThis, DWORD dwSpeakerConfig);
    HRESULT (STDMETHODCALLTYPE * Initialize)          (ma_IDirectSound* pThis, const GUID* pGuidDevice);
} ma_IDirectSoundVtbl;
struct ma_IDirectSound
{
    ma_IDirectSoundVtbl* lpVtbl;
};
/* Convenience wrappers so the COM methods can be called like plain C functions. */
static MA_INLINE HRESULT ma_IDirectSound_QueryInterface(ma_IDirectSound* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
static MA_INLINE ULONG   ma_IDirectSound_AddRef(ma_IDirectSound* pThis)                                                { return pThis->lpVtbl->AddRef(pThis); }
static MA_INLINE ULONG   ma_IDirectSound_Release(ma_IDirectSound* pThis)                                               { return pThis->lpVtbl->Release(pThis); }
static MA_INLINE HRESULT ma_IDirectSound_CreateSoundBuffer(ma_IDirectSound* pThis, const MA_DSBUFFERDESC* pDSBufferDesc, ma_IDirectSoundBuffer** ppDSBuffer, void* pUnkOuter) { return pThis->lpVtbl->CreateSoundBuffer(pThis, pDSBufferDesc, ppDSBuffer, pUnkOuter); }
static MA_INLINE HRESULT ma_IDirectSound_GetCaps(ma_IDirectSound* pThis, MA_DSCAPS* pDSCaps)                           { return pThis->lpVtbl->GetCaps(pThis, pDSCaps); }
static MA_INLINE HRESULT ma_IDirectSound_DuplicateSoundBuffer(ma_IDirectSound* pThis, ma_IDirectSoundBuffer* pDSBufferOriginal, ma_IDirectSoundBuffer** ppDSBufferDuplicate) { return pThis->lpVtbl->DuplicateSoundBuffer(pThis, pDSBufferOriginal, ppDSBufferDuplicate); }
static MA_INLINE HRESULT ma_IDirectSound_SetCooperativeLevel(ma_IDirectSound* pThis, HWND hwnd, DWORD dwLevel)         { return pThis->lpVtbl->SetCooperativeLevel(pThis, hwnd, dwLevel); }
static MA_INLINE HRESULT ma_IDirectSound_Compact(ma_IDirectSound* pThis)                                               { return pThis->lpVtbl->Compact(pThis); }
static MA_INLINE HRESULT ma_IDirectSound_GetSpeakerConfig(ma_IDirectSound* pThis, DWORD* pSpeakerConfig)               { return pThis->lpVtbl->GetSpeakerConfig(pThis, pSpeakerConfig); }
static MA_INLINE HRESULT ma_IDirectSound_SetSpeakerConfig(ma_IDirectSound* pThis, DWORD dwSpeakerConfig)               { return pThis->lpVtbl->SetSpeakerConfig(pThis, dwSpeakerConfig); }
static MA_INLINE HRESULT ma_IDirectSound_Initialize(ma_IDirectSound* pThis, const GUID* pGuidDevice)                   { return pThis->lpVtbl->Initialize(pThis, pGuidDevice); }
+
+
/* IDirectSoundBuffer */
/*
Hand-rolled vtable mirroring the real IDirectSoundBuffer COM interface. Method order must
match dsound.h exactly (IUnknown methods first) since this is the binary vtable layout.
*/
typedef struct
{
    /* IUnknown */
    HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IDirectSoundBuffer* pThis, const IID* const riid, void** ppObject);
    ULONG   (STDMETHODCALLTYPE * AddRef)        (ma_IDirectSoundBuffer* pThis);
    ULONG   (STDMETHODCALLTYPE * Release)       (ma_IDirectSoundBuffer* pThis);

    /* IDirectSoundBuffer */
    HRESULT (STDMETHODCALLTYPE * GetCaps)           (ma_IDirectSoundBuffer* pThis, MA_DSBCAPS* pDSBufferCaps);
    HRESULT (STDMETHODCALLTYPE * GetCurrentPosition)(ma_IDirectSoundBuffer* pThis, DWORD* pCurrentPlayCursor, DWORD* pCurrentWriteCursor);
    HRESULT (STDMETHODCALLTYPE * GetFormat)         (ma_IDirectSoundBuffer* pThis, WAVEFORMATEX* pFormat, DWORD dwSizeAllocated, DWORD* pSizeWritten);
    HRESULT (STDMETHODCALLTYPE * GetVolume)         (ma_IDirectSoundBuffer* pThis, LONG* pVolume);
    HRESULT (STDMETHODCALLTYPE * GetPan)            (ma_IDirectSoundBuffer* pThis, LONG* pPan);
    HRESULT (STDMETHODCALLTYPE * GetFrequency)      (ma_IDirectSoundBuffer* pThis, DWORD* pFrequency);
    HRESULT (STDMETHODCALLTYPE * GetStatus)         (ma_IDirectSoundBuffer* pThis, DWORD* pStatus);
    HRESULT (STDMETHODCALLTYPE * Initialize)        (ma_IDirectSoundBuffer* pThis, ma_IDirectSound* pDirectSound, const MA_DSBUFFERDESC* pDSBufferDesc);
    HRESULT (STDMETHODCALLTYPE * Lock)              (ma_IDirectSoundBuffer* pThis, DWORD dwOffset, DWORD dwBytes, void** ppAudioPtr1, DWORD* pAudioBytes1, void** ppAudioPtr2, DWORD* pAudioBytes2, DWORD dwFlags);
    HRESULT (STDMETHODCALLTYPE * Play)              (ma_IDirectSoundBuffer* pThis, DWORD dwReserved1, DWORD dwPriority, DWORD dwFlags);
    HRESULT (STDMETHODCALLTYPE * SetCurrentPosition)(ma_IDirectSoundBuffer* pThis, DWORD dwNewPosition);
    HRESULT (STDMETHODCALLTYPE * SetFormat)         (ma_IDirectSoundBuffer* pThis, const WAVEFORMATEX* pFormat);
    HRESULT (STDMETHODCALLTYPE * SetVolume)         (ma_IDirectSoundBuffer* pThis, LONG volume);
    HRESULT (STDMETHODCALLTYPE * SetPan)            (ma_IDirectSoundBuffer* pThis, LONG pan);
    HRESULT (STDMETHODCALLTYPE * SetFrequency)      (ma_IDirectSoundBuffer* pThis, DWORD dwFrequency);
    HRESULT (STDMETHODCALLTYPE * Stop)              (ma_IDirectSoundBuffer* pThis);
    HRESULT (STDMETHODCALLTYPE * Unlock)            (ma_IDirectSoundBuffer* pThis, void* pAudioPtr1, DWORD dwAudioBytes1, void* pAudioPtr2, DWORD dwAudioBytes2);
    HRESULT (STDMETHODCALLTYPE * Restore)           (ma_IDirectSoundBuffer* pThis);
} ma_IDirectSoundBufferVtbl;
struct ma_IDirectSoundBuffer
{
    ma_IDirectSoundBufferVtbl* lpVtbl;
};
/* Convenience wrappers so the COM methods can be called like plain C functions. */
static MA_INLINE HRESULT ma_IDirectSoundBuffer_QueryInterface(ma_IDirectSoundBuffer* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
static MA_INLINE ULONG   ma_IDirectSoundBuffer_AddRef(ma_IDirectSoundBuffer* pThis)                                    { return pThis->lpVtbl->AddRef(pThis); }
static MA_INLINE ULONG   ma_IDirectSoundBuffer_Release(ma_IDirectSoundBuffer* pThis)                                   { return pThis->lpVtbl->Release(pThis); }
static MA_INLINE HRESULT ma_IDirectSoundBuffer_GetCaps(ma_IDirectSoundBuffer* pThis, MA_DSBCAPS* pDSBufferCaps)        { return pThis->lpVtbl->GetCaps(pThis, pDSBufferCaps); }
static MA_INLINE HRESULT ma_IDirectSoundBuffer_GetCurrentPosition(ma_IDirectSoundBuffer* pThis, DWORD* pCurrentPlayCursor, DWORD* pCurrentWriteCursor) { return pThis->lpVtbl->GetCurrentPosition(pThis, pCurrentPlayCursor, pCurrentWriteCursor); }
static MA_INLINE HRESULT ma_IDirectSoundBuffer_GetFormat(ma_IDirectSoundBuffer* pThis, WAVEFORMATEX* pFormat, DWORD dwSizeAllocated, DWORD* pSizeWritten) { return pThis->lpVtbl->GetFormat(pThis, pFormat, dwSizeAllocated, pSizeWritten); }
static MA_INLINE HRESULT ma_IDirectSoundBuffer_GetVolume(ma_IDirectSoundBuffer* pThis, LONG* pVolume)                  { return pThis->lpVtbl->GetVolume(pThis, pVolume); }
static MA_INLINE HRESULT ma_IDirectSoundBuffer_GetPan(ma_IDirectSoundBuffer* pThis, LONG* pPan)                        { return pThis->lpVtbl->GetPan(pThis, pPan); }
static MA_INLINE HRESULT ma_IDirectSoundBuffer_GetFrequency(ma_IDirectSoundBuffer* pThis, DWORD* pFrequency)           { return pThis->lpVtbl->GetFrequency(pThis, pFrequency); }
static MA_INLINE HRESULT ma_IDirectSoundBuffer_GetStatus(ma_IDirectSoundBuffer* pThis, DWORD* pStatus)                 { return pThis->lpVtbl->GetStatus(pThis, pStatus); }
static MA_INLINE HRESULT ma_IDirectSoundBuffer_Initialize(ma_IDirectSoundBuffer* pThis, ma_IDirectSound* pDirectSound, const MA_DSBUFFERDESC* pDSBufferDesc) { return pThis->lpVtbl->Initialize(pThis, pDirectSound, pDSBufferDesc); }
static MA_INLINE HRESULT ma_IDirectSoundBuffer_Lock(ma_IDirectSoundBuffer* pThis, DWORD dwOffset, DWORD dwBytes, void** ppAudioPtr1, DWORD* pAudioBytes1, void** ppAudioPtr2, DWORD* pAudioBytes2, DWORD dwFlags) { return pThis->lpVtbl->Lock(pThis, dwOffset, dwBytes, ppAudioPtr1, pAudioBytes1, ppAudioPtr2, pAudioBytes2, dwFlags); }
static MA_INLINE HRESULT ma_IDirectSoundBuffer_Play(ma_IDirectSoundBuffer* pThis, DWORD dwReserved1, DWORD dwPriority, DWORD dwFlags) { return pThis->lpVtbl->Play(pThis, dwReserved1, dwPriority, dwFlags); }
static MA_INLINE HRESULT ma_IDirectSoundBuffer_SetCurrentPosition(ma_IDirectSoundBuffer* pThis, DWORD dwNewPosition)   { return pThis->lpVtbl->SetCurrentPosition(pThis, dwNewPosition); }
static MA_INLINE HRESULT ma_IDirectSoundBuffer_SetFormat(ma_IDirectSoundBuffer* pThis, const WAVEFORMATEX* pFormat)    { return pThis->lpVtbl->SetFormat(pThis, pFormat); }
static MA_INLINE HRESULT ma_IDirectSoundBuffer_SetVolume(ma_IDirectSoundBuffer* pThis, LONG volume)                    { return pThis->lpVtbl->SetVolume(pThis, volume); }
static MA_INLINE HRESULT ma_IDirectSoundBuffer_SetPan(ma_IDirectSoundBuffer* pThis, LONG pan)                          { return pThis->lpVtbl->SetPan(pThis, pan); }
static MA_INLINE HRESULT ma_IDirectSoundBuffer_SetFrequency(ma_IDirectSoundBuffer* pThis, DWORD dwFrequency)           { return pThis->lpVtbl->SetFrequency(pThis, dwFrequency); }
static MA_INLINE HRESULT ma_IDirectSoundBuffer_Stop(ma_IDirectSoundBuffer* pThis)                                      { return pThis->lpVtbl->Stop(pThis); }
static MA_INLINE HRESULT ma_IDirectSoundBuffer_Unlock(ma_IDirectSoundBuffer* pThis, void* pAudioPtr1, DWORD dwAudioBytes1, void* pAudioPtr2, DWORD dwAudioBytes2) { return pThis->lpVtbl->Unlock(pThis, pAudioPtr1, dwAudioBytes1, pAudioPtr2, dwAudioBytes2); }
static MA_INLINE HRESULT ma_IDirectSoundBuffer_Restore(ma_IDirectSoundBuffer* pThis)                                   { return pThis->lpVtbl->Restore(pThis); }
+
+
/* IDirectSoundCapture */
/*
Hand-rolled vtable mirroring the real IDirectSoundCapture COM interface. Method order must
match dsound.h exactly (IUnknown methods first).
*/
typedef struct
{
    /* IUnknown */
    HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IDirectSoundCapture* pThis, const IID* const riid, void** ppObject);
    ULONG   (STDMETHODCALLTYPE * AddRef)        (ma_IDirectSoundCapture* pThis);
    ULONG   (STDMETHODCALLTYPE * Release)       (ma_IDirectSoundCapture* pThis);

    /* IDirectSoundCapture */
    HRESULT (STDMETHODCALLTYPE * CreateCaptureBuffer)(ma_IDirectSoundCapture* pThis, const MA_DSCBUFFERDESC* pDSCBufferDesc, ma_IDirectSoundCaptureBuffer** ppDSCBuffer, void* pUnkOuter);
    HRESULT (STDMETHODCALLTYPE * GetCaps)            (ma_IDirectSoundCapture* pThis, MA_DSCCAPS* pDSCCaps);
    HRESULT (STDMETHODCALLTYPE * Initialize)         (ma_IDirectSoundCapture* pThis, const GUID* pGuidDevice);
} ma_IDirectSoundCaptureVtbl;
struct ma_IDirectSoundCapture
{
    ma_IDirectSoundCaptureVtbl* lpVtbl;
};
/* Convenience wrappers so the COM methods can be called like plain C functions. */
static MA_INLINE HRESULT ma_IDirectSoundCapture_QueryInterface     (ma_IDirectSoundCapture* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
static MA_INLINE ULONG   ma_IDirectSoundCapture_AddRef             (ma_IDirectSoundCapture* pThis)                          { return pThis->lpVtbl->AddRef(pThis); }
static MA_INLINE ULONG   ma_IDirectSoundCapture_Release            (ma_IDirectSoundCapture* pThis)                          { return pThis->lpVtbl->Release(pThis); }
static MA_INLINE HRESULT ma_IDirectSoundCapture_CreateCaptureBuffer(ma_IDirectSoundCapture* pThis, const MA_DSCBUFFERDESC* pDSCBufferDesc, ma_IDirectSoundCaptureBuffer** ppDSCBuffer, void* pUnkOuter) { return pThis->lpVtbl->CreateCaptureBuffer(pThis, pDSCBufferDesc, ppDSCBuffer, pUnkOuter); }
static MA_INLINE HRESULT ma_IDirectSoundCapture_GetCaps            (ma_IDirectSoundCapture* pThis, MA_DSCCAPS* pDSCCaps)    { return pThis->lpVtbl->GetCaps(pThis, pDSCCaps); }
static MA_INLINE HRESULT ma_IDirectSoundCapture_Initialize         (ma_IDirectSoundCapture* pThis, const GUID* pGuidDevice) { return pThis->lpVtbl->Initialize(pThis, pGuidDevice); }
+
+
/* IDirectSoundCaptureBuffer */
/*
Hand-rolled vtable mirroring the real IDirectSoundCaptureBuffer COM interface. Method order
must match dsound.h exactly (IUnknown methods first).
*/
typedef struct
{
    /* IUnknown */
    HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IDirectSoundCaptureBuffer* pThis, const IID* const riid, void** ppObject);
    ULONG   (STDMETHODCALLTYPE * AddRef)        (ma_IDirectSoundCaptureBuffer* pThis);
    ULONG   (STDMETHODCALLTYPE * Release)       (ma_IDirectSoundCaptureBuffer* pThis);

    /* IDirectSoundCaptureBuffer */
    HRESULT (STDMETHODCALLTYPE * GetCaps)           (ma_IDirectSoundCaptureBuffer* pThis, MA_DSCBCAPS* pDSCBCaps);
    HRESULT (STDMETHODCALLTYPE * GetCurrentPosition)(ma_IDirectSoundCaptureBuffer* pThis, DWORD* pCapturePosition, DWORD* pReadPosition);
    HRESULT (STDMETHODCALLTYPE * GetFormat)         (ma_IDirectSoundCaptureBuffer* pThis, WAVEFORMATEX* pFormat, DWORD dwSizeAllocated, DWORD* pSizeWritten);
    HRESULT (STDMETHODCALLTYPE * GetStatus)         (ma_IDirectSoundCaptureBuffer* pThis, DWORD* pStatus);
    HRESULT (STDMETHODCALLTYPE * Initialize)        (ma_IDirectSoundCaptureBuffer* pThis, ma_IDirectSoundCapture* pDirectSoundCapture, const MA_DSCBUFFERDESC* pDSCBufferDesc);
    HRESULT (STDMETHODCALLTYPE * Lock)              (ma_IDirectSoundCaptureBuffer* pThis, DWORD dwOffset, DWORD dwBytes, void** ppAudioPtr1, DWORD* pAudioBytes1, void** ppAudioPtr2, DWORD* pAudioBytes2, DWORD dwFlags);
    HRESULT (STDMETHODCALLTYPE * Start)             (ma_IDirectSoundCaptureBuffer* pThis, DWORD dwFlags);
    HRESULT (STDMETHODCALLTYPE * Stop)              (ma_IDirectSoundCaptureBuffer* pThis);
    HRESULT (STDMETHODCALLTYPE * Unlock)            (ma_IDirectSoundCaptureBuffer* pThis, void* pAudioPtr1, DWORD dwAudioBytes1, void* pAudioPtr2, DWORD dwAudioBytes2);
} ma_IDirectSoundCaptureBufferVtbl;
struct ma_IDirectSoundCaptureBuffer
{
    ma_IDirectSoundCaptureBufferVtbl* lpVtbl;
};
/* Convenience wrappers so the COM methods can be called like plain C functions. */
static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_QueryInterface(ma_IDirectSoundCaptureBuffer* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
static MA_INLINE ULONG   ma_IDirectSoundCaptureBuffer_AddRef(ma_IDirectSoundCaptureBuffer* pThis)                      { return pThis->lpVtbl->AddRef(pThis); }
static MA_INLINE ULONG   ma_IDirectSoundCaptureBuffer_Release(ma_IDirectSoundCaptureBuffer* pThis)                     { return pThis->lpVtbl->Release(pThis); }
static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_GetCaps(ma_IDirectSoundCaptureBuffer* pThis, MA_DSCBCAPS* pDSCBCaps) { return pThis->lpVtbl->GetCaps(pThis, pDSCBCaps); }
static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_GetCurrentPosition(ma_IDirectSoundCaptureBuffer* pThis, DWORD* pCapturePosition, DWORD* pReadPosition) { return pThis->lpVtbl->GetCurrentPosition(pThis, pCapturePosition, pReadPosition); }
static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_GetFormat(ma_IDirectSoundCaptureBuffer* pThis, WAVEFORMATEX* pFormat, DWORD dwSizeAllocated, DWORD* pSizeWritten) { return pThis->lpVtbl->GetFormat(pThis, pFormat, dwSizeAllocated, pSizeWritten); }
static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_GetStatus(ma_IDirectSoundCaptureBuffer* pThis, DWORD* pStatus)   { return pThis->lpVtbl->GetStatus(pThis, pStatus); }
static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_Initialize(ma_IDirectSoundCaptureBuffer* pThis, ma_IDirectSoundCapture* pDirectSoundCapture, const MA_DSCBUFFERDESC* pDSCBufferDesc) { return pThis->lpVtbl->Initialize(pThis, pDirectSoundCapture, pDSCBufferDesc); }
static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_Lock(ma_IDirectSoundCaptureBuffer* pThis, DWORD dwOffset, DWORD dwBytes, void** ppAudioPtr1, DWORD* pAudioBytes1, void** ppAudioPtr2, DWORD* pAudioBytes2, DWORD dwFlags) { return pThis->lpVtbl->Lock(pThis, dwOffset, dwBytes, ppAudioPtr1, pAudioBytes1, ppAudioPtr2, pAudioBytes2, dwFlags); }
static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_Start(ma_IDirectSoundCaptureBuffer* pThis, DWORD dwFlags)        { return pThis->lpVtbl->Start(pThis, dwFlags); }
static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_Stop(ma_IDirectSoundCaptureBuffer* pThis)                        { return pThis->lpVtbl->Stop(pThis); }
static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_Unlock(ma_IDirectSoundCaptureBuffer* pThis, void* pAudioPtr1, DWORD dwAudioBytes1, void* pAudioPtr2, DWORD dwAudioBytes2) { return pThis->lpVtbl->Unlock(pThis, pAudioPtr1, dwAudioBytes1, pAudioPtr2, dwAudioBytes2); }
+
+
/* IDirectSoundNotify */
/*
Hand-rolled vtable mirroring the real IDirectSoundNotify COM interface (obtained via
QueryInterface on a buffer). Method order must match dsound.h exactly.
*/
typedef struct
{
    /* IUnknown */
    HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IDirectSoundNotify* pThis, const IID* const riid, void** ppObject);
    ULONG   (STDMETHODCALLTYPE * AddRef)        (ma_IDirectSoundNotify* pThis);
    ULONG   (STDMETHODCALLTYPE * Release)       (ma_IDirectSoundNotify* pThis);

    /* IDirectSoundNotify */
    HRESULT (STDMETHODCALLTYPE * SetNotificationPositions)(ma_IDirectSoundNotify* pThis, DWORD dwPositionNotifies, const MA_DSBPOSITIONNOTIFY* pPositionNotifies);
} ma_IDirectSoundNotifyVtbl;
struct ma_IDirectSoundNotify
{
    ma_IDirectSoundNotifyVtbl* lpVtbl;
};
/* Convenience wrappers so the COM methods can be called like plain C functions. */
static MA_INLINE HRESULT ma_IDirectSoundNotify_QueryInterface(ma_IDirectSoundNotify* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
static MA_INLINE ULONG   ma_IDirectSoundNotify_AddRef(ma_IDirectSoundNotify* pThis)                                    { return pThis->lpVtbl->AddRef(pThis); }
static MA_INLINE ULONG   ma_IDirectSoundNotify_Release(ma_IDirectSoundNotify* pThis)                                   { return pThis->lpVtbl->Release(pThis); }
static MA_INLINE HRESULT ma_IDirectSoundNotify_SetNotificationPositions(ma_IDirectSoundNotify* pThis, DWORD dwPositionNotifies, const MA_DSBPOSITIONNOTIFY* pPositionNotifies) { return pThis->lpVtbl->SetNotificationPositions(pThis, dwPositionNotifies, pPositionNotifies); }
+
+
/* Function pointer types for the dsound.dll entry points that are loaded at runtime via ma_dlsym(). */
typedef BOOL    (CALLBACK * ma_DSEnumCallbackAProc)             (LPGUID pDeviceGUID, LPCSTR pDeviceDescription, LPCSTR pModule, LPVOID pContext);
typedef HRESULT (WINAPI   * ma_DirectSoundCreateProc)           (const GUID* pcGuidDevice, ma_IDirectSound** ppDS8, LPUNKNOWN pUnkOuter);
typedef HRESULT (WINAPI   * ma_DirectSoundEnumerateAProc)       (ma_DSEnumCallbackAProc pDSEnumCallback, LPVOID pContext);
typedef HRESULT (WINAPI   * ma_DirectSoundCaptureCreateProc)    (const GUID* pcGuidDevice, ma_IDirectSoundCapture** ppDSC8, LPUNKNOWN pUnkOuter);
typedef HRESULT (WINAPI   * ma_DirectSoundCaptureEnumerateAProc)(ma_DSEnumCallbackAProc pDSEnumCallback, LPVOID pContext);
+
+static ma_uint32 ma_get_best_sample_rate_within_range(ma_uint32 sampleRateMin, ma_uint32 sampleRateMax)
+{
+ /* Normalize the range in case we were given something stupid. */
+ if (sampleRateMin < (ma_uint32)ma_standard_sample_rate_min) {
+ sampleRateMin = (ma_uint32)ma_standard_sample_rate_min;
+ }
+ if (sampleRateMax > (ma_uint32)ma_standard_sample_rate_max) {
+ sampleRateMax = (ma_uint32)ma_standard_sample_rate_max;
+ }
+ if (sampleRateMin > sampleRateMax) {
+ sampleRateMin = sampleRateMax;
+ }
+
+ if (sampleRateMin == sampleRateMax) {
+ return sampleRateMax;
+ } else {
+ size_t iStandardRate;
+ for (iStandardRate = 0; iStandardRate < ma_countof(g_maStandardSampleRatePriorities); ++iStandardRate) {
+ ma_uint32 standardRate = g_maStandardSampleRatePriorities[iStandardRate];
+ if (standardRate >= sampleRateMin && standardRate <= sampleRateMax) {
+ return standardRate;
+ }
+ }
+ }
+
+ /* Should never get here. */
+ MA_ASSERT(MA_FALSE);
+ return 0;
+}
+
+/*
+Retrieves the channel count and channel map for the given speaker configuration. If the speaker configuration is unknown,
+the channel count and channel map will be left unmodified.
+*/
+static void ma_get_channels_from_speaker_config__dsound(DWORD speakerConfig, WORD* pChannelsOut, DWORD* pChannelMapOut)
+{
+ WORD channels;
+ DWORD channelMap;
+
+ channels = 0;
+ if (pChannelsOut != NULL) {
+ channels = *pChannelsOut;
+ }
+
+ channelMap = 0;
+ if (pChannelMapOut != NULL) {
+ channelMap = *pChannelMapOut;
+ }
+
+ /*
+ The speaker configuration is a combination of speaker config and speaker geometry. The lower 8 bits is what we care about. The upper
+ 16 bits is for the geometry.
+ */
+ switch ((BYTE)(speakerConfig)) {
+ case 1 /*DSSPEAKER_HEADPHONE*/: channels = 2; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT; break;
+ case 2 /*DSSPEAKER_MONO*/: channels = 1; channelMap = SPEAKER_FRONT_CENTER; break;
+ case 3 /*DSSPEAKER_QUAD*/: channels = 4; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT; break;
+ case 4 /*DSSPEAKER_STEREO*/: channels = 2; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT; break;
+ case 5 /*DSSPEAKER_SURROUND*/: channels = 4; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_BACK_CENTER; break;
+ case 6 /*DSSPEAKER_5POINT1_BACK*/ /*DSSPEAKER_5POINT1*/: channels = 6; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT; break;
+ case 7 /*DSSPEAKER_7POINT1_WIDE*/ /*DSSPEAKER_7POINT1*/: channels = 8; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT | SPEAKER_FRONT_LEFT_OF_CENTER | SPEAKER_FRONT_RIGHT_OF_CENTER; break;
+ case 8 /*DSSPEAKER_7POINT1_SURROUND*/: channels = 8; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT | SPEAKER_SIDE_LEFT | SPEAKER_SIDE_RIGHT; break;
+ case 9 /*DSSPEAKER_5POINT1_SURROUND*/: channels = 6; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY | SPEAKER_SIDE_LEFT | SPEAKER_SIDE_RIGHT; break;
+ default: break;
+ }
+
+ if (pChannelsOut != NULL) {
+ *pChannelsOut = channels;
+ }
+
+ if (pChannelMapOut != NULL) {
+ *pChannelMapOut = channelMap;
+ }
+}
+
+
+static ma_result ma_context_create_IDirectSound__dsound(ma_context* pContext, ma_share_mode shareMode, const ma_device_id* pDeviceID, ma_IDirectSound** ppDirectSound)
+{
+ ma_IDirectSound* pDirectSound;
+ HWND hWnd;
+ HRESULT hr;
+
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(ppDirectSound != NULL);
+
+ *ppDirectSound = NULL;
+ pDirectSound = NULL;
+
+ if (FAILED(((ma_DirectSoundCreateProc)pContext->dsound.DirectSoundCreate)((pDeviceID == NULL) ? NULL : (const GUID*)pDeviceID->dsound, &pDirectSound, NULL))) {
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[DirectSound] DirectSoundCreate() failed for playback device.");
+ return MA_FAILED_TO_OPEN_BACKEND_DEVICE;
+ }
+
+ /* The cooperative level must be set before doing anything else. */
+ hWnd = ((MA_PFN_GetForegroundWindow)pContext->win32.GetForegroundWindow)();
+ if (hWnd == NULL) {
+ hWnd = ((MA_PFN_GetDesktopWindow)pContext->win32.GetDesktopWindow)();
+ }
+
+ hr = ma_IDirectSound_SetCooperativeLevel(pDirectSound, hWnd, (shareMode == ma_share_mode_exclusive) ? MA_DSSCL_EXCLUSIVE : MA_DSSCL_PRIORITY);
+ if (FAILED(hr)) {
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSound_SetCooperateiveLevel() failed for playback device.");
+ return ma_result_from_HRESULT(hr);
+ }
+
+ *ppDirectSound = pDirectSound;
+ return MA_SUCCESS;
+}
+
+static ma_result ma_context_create_IDirectSoundCapture__dsound(ma_context* pContext, ma_share_mode shareMode, const ma_device_id* pDeviceID, ma_IDirectSoundCapture** ppDirectSoundCapture)
+{
+ ma_IDirectSoundCapture* pDirectSoundCapture;
+ HRESULT hr;
+
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(ppDirectSoundCapture != NULL);
+
+ /* DirectSound does not support exclusive mode for capture. */
+ if (shareMode == ma_share_mode_exclusive) {
+ return MA_SHARE_MODE_NOT_SUPPORTED;
+ }
+
+ *ppDirectSoundCapture = NULL;
+ pDirectSoundCapture = NULL;
+
+ hr = ((ma_DirectSoundCaptureCreateProc)pContext->dsound.DirectSoundCaptureCreate)((pDeviceID == NULL) ? NULL : (const GUID*)pDeviceID->dsound, &pDirectSoundCapture, NULL);
+ if (FAILED(hr)) {
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[DirectSound] DirectSoundCaptureCreate() failed for capture device.");
+ return ma_result_from_HRESULT(hr);
+ }
+
+ *ppDirectSoundCapture = pDirectSoundCapture;
+ return MA_SUCCESS;
+}
+
+static ma_result ma_context_get_format_info_for_IDirectSoundCapture__dsound(ma_context* pContext, ma_IDirectSoundCapture* pDirectSoundCapture, WORD* pChannels, WORD* pBitsPerSample, DWORD* pSampleRate)
+{
+ HRESULT hr;
+ MA_DSCCAPS caps;
+ WORD bitsPerSample;
+ DWORD sampleRate;
+
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pDirectSoundCapture != NULL);
+
+ if (pChannels) {
+ *pChannels = 0;
+ }
+ if (pBitsPerSample) {
+ *pBitsPerSample = 0;
+ }
+ if (pSampleRate) {
+ *pSampleRate = 0;
+ }
+
+ MA_ZERO_OBJECT(&caps);
+ caps.dwSize = sizeof(caps);
+ hr = ma_IDirectSoundCapture_GetCaps(pDirectSoundCapture, &caps);
+ if (FAILED(hr)) {
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundCapture_GetCaps() failed for capture device.");
+ return ma_result_from_HRESULT(hr);
+ }
+
+ if (pChannels) {
+ *pChannels = (WORD)caps.dwChannels;
+ }
+
+ /* The device can support multiple formats. We just go through the different formats in order of priority and pick the first one. This the same type of system as the WinMM backend. */
+ bitsPerSample = 16;
+ sampleRate = 48000;
+
+ if (caps.dwChannels == 1) {
+ if ((caps.dwFormats & WAVE_FORMAT_48M16) != 0) {
+ sampleRate = 48000;
+ } else if ((caps.dwFormats & WAVE_FORMAT_44M16) != 0) {
+ sampleRate = 44100;
+ } else if ((caps.dwFormats & WAVE_FORMAT_2M16) != 0) {
+ sampleRate = 22050;
+ } else if ((caps.dwFormats & WAVE_FORMAT_1M16) != 0) {
+ sampleRate = 11025;
+ } else if ((caps.dwFormats & WAVE_FORMAT_96M16) != 0) {
+ sampleRate = 96000;
+ } else {
+ bitsPerSample = 8;
+ if ((caps.dwFormats & WAVE_FORMAT_48M08) != 0) {
+ sampleRate = 48000;
+ } else if ((caps.dwFormats & WAVE_FORMAT_44M08) != 0) {
+ sampleRate = 44100;
+ } else if ((caps.dwFormats & WAVE_FORMAT_2M08) != 0) {
+ sampleRate = 22050;
+ } else if ((caps.dwFormats & WAVE_FORMAT_1M08) != 0) {
+ sampleRate = 11025;
+ } else if ((caps.dwFormats & WAVE_FORMAT_96M08) != 0) {
+ sampleRate = 96000;
+ } else {
+ bitsPerSample = 16; /* Didn't find it. Just fall back to 16-bit. */
+ }
+ }
+ } else if (caps.dwChannels == 2) {
+ if ((caps.dwFormats & WAVE_FORMAT_48S16) != 0) {
+ sampleRate = 48000;
+ } else if ((caps.dwFormats & WAVE_FORMAT_44S16) != 0) {
+ sampleRate = 44100;
+ } else if ((caps.dwFormats & WAVE_FORMAT_2S16) != 0) {
+ sampleRate = 22050;
+ } else if ((caps.dwFormats & WAVE_FORMAT_1S16) != 0) {
+ sampleRate = 11025;
+ } else if ((caps.dwFormats & WAVE_FORMAT_96S16) != 0) {
+ sampleRate = 96000;
+ } else {
+ bitsPerSample = 8;
+ if ((caps.dwFormats & WAVE_FORMAT_48S08) != 0) {
+ sampleRate = 48000;
+ } else if ((caps.dwFormats & WAVE_FORMAT_44S08) != 0) {
+ sampleRate = 44100;
+ } else if ((caps.dwFormats & WAVE_FORMAT_2S08) != 0) {
+ sampleRate = 22050;
+ } else if ((caps.dwFormats & WAVE_FORMAT_1S08) != 0) {
+ sampleRate = 11025;
+ } else if ((caps.dwFormats & WAVE_FORMAT_96S08) != 0) {
+ sampleRate = 96000;
+ } else {
+ bitsPerSample = 16; /* Didn't find it. Just fall back to 16-bit. */
+ }
+ }
+ }
+
+ if (pBitsPerSample) {
+ *pBitsPerSample = bitsPerSample;
+ }
+ if (pSampleRate) {
+ *pSampleRate = sampleRate;
+ }
+
+ return MA_SUCCESS;
+}
+
+
/* State shared with the DirectSound enumeration callback during ma_context_enumerate_devices__dsound(). */
typedef struct
{
    ma_context* pContext;                   /* Context performing the enumeration. */
    ma_device_type deviceType;              /* Set to playback or capture before each enumeration pass. */
    ma_enum_devices_callback_proc callback; /* User callback fired once per discovered device. */
    void* pUserData;                        /* Opaque pointer forwarded to the callback. */
    ma_bool32 terminated;                   /* Set to true when the callback requests enumeration to stop. */
} ma_context_enumerate_devices_callback_data__dsound;
+
+static BOOL CALLBACK ma_context_enumerate_devices_callback__dsound(LPGUID lpGuid, LPCSTR lpcstrDescription, LPCSTR lpcstrModule, LPVOID lpContext)
+{
+ ma_context_enumerate_devices_callback_data__dsound* pData = (ma_context_enumerate_devices_callback_data__dsound*)lpContext;
+ ma_device_info deviceInfo;
+
+ (void)lpcstrModule;
+
+ MA_ZERO_OBJECT(&deviceInfo);
+
+ /* ID. */
+ if (lpGuid != NULL) {
+ MA_COPY_MEMORY(deviceInfo.id.dsound, lpGuid, 16);
+ } else {
+ MA_ZERO_MEMORY(deviceInfo.id.dsound, 16);
+ deviceInfo.isDefault = MA_TRUE;
+ }
+
+ /* Name / Description */
+ ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), lpcstrDescription, (size_t)-1);
+
+
+ /* Call the callback function, but make sure we stop enumerating if the callee requested so. */
+ MA_ASSERT(pData != NULL);
+ pData->terminated = !pData->callback(pData->pContext, pData->deviceType, &deviceInfo, pData->pUserData);
+ if (pData->terminated) {
+ return FALSE; /* Stop enumeration. */
+ } else {
+ return TRUE; /* Continue enumeration. */
+ }
+}
+
+static ma_result ma_context_enumerate_devices__dsound(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+{
+ ma_context_enumerate_devices_callback_data__dsound data;
+
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(callback != NULL);
+
+ data.pContext = pContext;
+ data.callback = callback;
+ data.pUserData = pUserData;
+ data.terminated = MA_FALSE;
+
+ /* Playback. */
+ if (!data.terminated) {
+ data.deviceType = ma_device_type_playback;
+ ((ma_DirectSoundEnumerateAProc)pContext->dsound.DirectSoundEnumerateA)(ma_context_enumerate_devices_callback__dsound, &data);
+ }
+
+ /* Capture. */
+ if (!data.terminated) {
+ data.deviceType = ma_device_type_capture;
+ ((ma_DirectSoundCaptureEnumerateAProc)pContext->dsound.DirectSoundCaptureEnumerateA)(ma_context_enumerate_devices_callback__dsound, &data);
+ }
+
+ return MA_SUCCESS;
+}
+
+
/* State shared with the enumeration callback used to look up a device's name by ID. */
typedef struct
{
    const ma_device_id* pDeviceID;  /* The ID being searched for. NULL or a null GUID means the default device. */
    ma_device_info* pDeviceInfo;    /* Output. The name (and default flag) is written here when a match is found. */
    ma_bool32 found;                /* Set to true when a matching device was found. */
} ma_context_get_device_info_callback_data__dsound;
+
+static BOOL CALLBACK ma_context_get_device_info_callback__dsound(LPGUID lpGuid, LPCSTR lpcstrDescription, LPCSTR lpcstrModule, LPVOID lpContext)
+{
+ ma_context_get_device_info_callback_data__dsound* pData = (ma_context_get_device_info_callback_data__dsound*)lpContext;
+ MA_ASSERT(pData != NULL);
+
+ if ((pData->pDeviceID == NULL || ma_is_guid_null(pData->pDeviceID->dsound)) && (lpGuid == NULL || ma_is_guid_null(lpGuid))) {
+ /* Default device. */
+ ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), lpcstrDescription, (size_t)-1);
+ pData->pDeviceInfo->isDefault = MA_TRUE;
+ pData->found = MA_TRUE;
+ return FALSE; /* Stop enumeration. */
+ } else {
+ /* Not the default device. */
+ if (lpGuid != NULL && pData->pDeviceID != NULL) {
+ if (memcmp(pData->pDeviceID->dsound, lpGuid, sizeof(pData->pDeviceID->dsound)) == 0) {
+ ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), lpcstrDescription, (size_t)-1);
+ pData->found = MA_TRUE;
+ return FALSE; /* Stop enumeration. */
+ }
+ }
+ }
+
+ (void)lpcstrModule;
+ return TRUE;
+}
+
+static ma_result ma_context_get_device_info__dsound(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo)
+{
+ ma_result result;
+ HRESULT hr;
+
+ if (pDeviceID != NULL) {
+ ma_context_get_device_info_callback_data__dsound data;
+
+ /* ID. */
+ MA_COPY_MEMORY(pDeviceInfo->id.dsound, pDeviceID->dsound, 16);
+
+ /* Name / Description. This is retrieved by enumerating over each device until we find that one that matches the input ID. */
+ data.pDeviceID = pDeviceID;
+ data.pDeviceInfo = pDeviceInfo;
+ data.found = MA_FALSE;
+ if (deviceType == ma_device_type_playback) {
+ ((ma_DirectSoundEnumerateAProc)pContext->dsound.DirectSoundEnumerateA)(ma_context_get_device_info_callback__dsound, &data);
+ } else {
+ ((ma_DirectSoundCaptureEnumerateAProc)pContext->dsound.DirectSoundCaptureEnumerateA)(ma_context_get_device_info_callback__dsound, &data);
+ }
+
+ if (!data.found) {
+ return MA_NO_DEVICE;
+ }
+ } else {
+ /* I don't think there's a way to get the name of the default device with DirectSound. In this case we just need to use defaults. */
+
+ /* ID */
+ MA_ZERO_MEMORY(pDeviceInfo->id.dsound, 16);
+
+ /* Name / Description */
+ if (deviceType == ma_device_type_playback) {
+ ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
+ } else {
+ ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
+ }
+
+ pDeviceInfo->isDefault = MA_TRUE;
+ }
+
+ /* Retrieving detailed information is slightly different depending on the device type. */
+ if (deviceType == ma_device_type_playback) {
+ /* Playback. */
+ ma_IDirectSound* pDirectSound;
+ MA_DSCAPS caps;
+ WORD channels;
+
+ result = ma_context_create_IDirectSound__dsound(pContext, ma_share_mode_shared, pDeviceID, &pDirectSound);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ MA_ZERO_OBJECT(&caps);
+ caps.dwSize = sizeof(caps);
+ hr = ma_IDirectSound_GetCaps(pDirectSound, &caps);
+ if (FAILED(hr)) {
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSound_GetCaps() failed for playback device.");
+ return ma_result_from_HRESULT(hr);
+ }
+
+
+ /* Channels. Only a single channel count is reported for DirectSound. */
+ if ((caps.dwFlags & MA_DSCAPS_PRIMARYSTEREO) != 0) {
+ /* It supports at least stereo, but could support more. */
+ DWORD speakerConfig;
+
+ channels = 2;
+
+ /* Look at the speaker configuration to get a better idea on the channel count. */
+ hr = ma_IDirectSound_GetSpeakerConfig(pDirectSound, &speakerConfig);
+ if (SUCCEEDED(hr)) {
+ ma_get_channels_from_speaker_config__dsound(speakerConfig, &channels, NULL);
+ }
+ } else {
+ /* It does not support stereo, which means we are stuck with mono. */
+ channels = 1;
+ }
+
+
+ /*
+ In DirectSound, our native formats are centered around sample rates. All formats are supported, and we're only reporting a single channel
+ count. However, DirectSound can report a range of supported sample rates. We're only going to include standard rates known by miniaudio
+ in order to keep the size of this within reason.
+ */
+ if ((caps.dwFlags & MA_DSCAPS_CONTINUOUSRATE) != 0) {
+ /* Multiple sample rates are supported. We'll report in order of our preferred sample rates. */
+ size_t iStandardSampleRate;
+ for (iStandardSampleRate = 0; iStandardSampleRate < ma_countof(g_maStandardSampleRatePriorities); iStandardSampleRate += 1) {
+ ma_uint32 sampleRate = g_maStandardSampleRatePriorities[iStandardSampleRate];
+ if (sampleRate >= caps.dwMinSecondarySampleRate && sampleRate <= caps.dwMaxSecondarySampleRate) {
+ pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].format = ma_format_unknown;
+ pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].channels = channels;
+ pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].sampleRate = sampleRate;
+ pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].flags = 0;
+ pDeviceInfo->nativeDataFormatCount += 1;
+ }
+ }
+ } else {
+ /* Only a single sample rate is supported. */
+ pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].format = ma_format_unknown;
+ pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].channels = channels;
+ pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].sampleRate = caps.dwMaxSecondarySampleRate;
+ pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].flags = 0;
+ pDeviceInfo->nativeDataFormatCount += 1;
+ }
+
+ ma_IDirectSound_Release(pDirectSound);
+ } else {
+ /*
+ Capture. This is a little different to playback due to the say the supported formats are reported. Technically capture
+ devices can support a number of different formats, but for simplicity and consistency with ma_device_init() I'm just
+ reporting the best format.
+ */
+ ma_IDirectSoundCapture* pDirectSoundCapture;
+ WORD channels;
+ WORD bitsPerSample;
+ DWORD sampleRate;
+
+ result = ma_context_create_IDirectSoundCapture__dsound(pContext, ma_share_mode_shared, pDeviceID, &pDirectSoundCapture);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ result = ma_context_get_format_info_for_IDirectSoundCapture__dsound(pContext, pDirectSoundCapture, &channels, &bitsPerSample, &sampleRate);
+ if (result != MA_SUCCESS) {
+ ma_IDirectSoundCapture_Release(pDirectSoundCapture);
+ return result;
+ }
+
+ ma_IDirectSoundCapture_Release(pDirectSoundCapture);
+
+ /* The format is always an integer format and is based on the bits per sample. */
+ if (bitsPerSample == 8) {
+ pDeviceInfo->nativeDataFormats[0].format = ma_format_u8;
+ } else if (bitsPerSample == 16) {
+ pDeviceInfo->nativeDataFormats[0].format = ma_format_s16;
+ } else if (bitsPerSample == 24) {
+ pDeviceInfo->nativeDataFormats[0].format = ma_format_s24;
+ } else if (bitsPerSample == 32) {
+ pDeviceInfo->nativeDataFormats[0].format = ma_format_s32;
+ } else {
+ return MA_FORMAT_NOT_SUPPORTED;
+ }
+
+ pDeviceInfo->nativeDataFormats[0].channels = channels;
+ pDeviceInfo->nativeDataFormats[0].sampleRate = sampleRate;
+ pDeviceInfo->nativeDataFormats[0].flags = 0;
+ pDeviceInfo->nativeDataFormatCount = 1;
+ }
+
+ return MA_SUCCESS;
+}
+
+
+
+static ma_result ma_device_uninit__dsound(ma_device* pDevice)
+{
+ MA_ASSERT(pDevice != NULL);
+
+ if (pDevice->dsound.pCaptureBuffer != NULL) {
+ ma_IDirectSoundCaptureBuffer_Release((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer);
+ }
+ if (pDevice->dsound.pCapture != NULL) {
+ ma_IDirectSoundCapture_Release((ma_IDirectSoundCapture*)pDevice->dsound.pCapture);
+ }
+
+ if (pDevice->dsound.pPlaybackBuffer != NULL) {
+ ma_IDirectSoundBuffer_Release((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer);
+ }
+ if (pDevice->dsound.pPlaybackPrimaryBuffer != NULL) {
+ ma_IDirectSoundBuffer_Release((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackPrimaryBuffer);
+ }
+ if (pDevice->dsound.pPlayback != NULL) {
+ ma_IDirectSound_Release((ma_IDirectSound*)pDevice->dsound.pPlayback);
+ }
+
+ return MA_SUCCESS;
+}
+
+static ma_result ma_config_to_WAVEFORMATEXTENSIBLE(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, const ma_channel* pChannelMap, WAVEFORMATEXTENSIBLE* pWF)
+{
+ GUID subformat;
+
+ if (format == ma_format_unknown) {
+ format = MA_DEFAULT_FORMAT;
+ }
+
+ if (channels == 0) {
+ channels = MA_DEFAULT_CHANNELS;
+ }
+
+ if (sampleRate == 0) {
+ sampleRate = MA_DEFAULT_SAMPLE_RATE;
+ }
+
+ switch (format)
+ {
+ case ma_format_u8:
+ case ma_format_s16:
+ case ma_format_s24:
+ /*case ma_format_s24_32:*/
+ case ma_format_s32:
+ {
+ subformat = MA_GUID_KSDATAFORMAT_SUBTYPE_PCM;
+ } break;
+
+ case ma_format_f32:
+ {
+ subformat = MA_GUID_KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;
+ } break;
+
+ default:
+ return MA_FORMAT_NOT_SUPPORTED;
+ }
+
+ MA_ZERO_OBJECT(pWF);
+ pWF->Format.cbSize = sizeof(*pWF);
+ pWF->Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
+ pWF->Format.nChannels = (WORD)channels;
+ pWF->Format.nSamplesPerSec = (DWORD)sampleRate;
+ pWF->Format.wBitsPerSample = (WORD)(ma_get_bytes_per_sample(format)*8);
+ pWF->Format.nBlockAlign = (WORD)(pWF->Format.nChannels * pWF->Format.wBitsPerSample / 8);
+ pWF->Format.nAvgBytesPerSec = pWF->Format.nBlockAlign * pWF->Format.nSamplesPerSec;
+ pWF->Samples.wValidBitsPerSample = pWF->Format.wBitsPerSample;
+ pWF->dwChannelMask = ma_channel_map_to_channel_mask__win32(pChannelMap, channels);
+ pWF->SubFormat = subformat;
+
+ return MA_SUCCESS;
+}
+
+static ma_uint32 ma_calculate_period_size_in_frames_from_descriptor__dsound(const ma_device_descriptor* pDescriptor, ma_uint32 nativeSampleRate, ma_performance_profile performanceProfile)
+{
+ /* DirectSound has a minimum period size of 20ms. */
+ ma_uint32 minPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(20, nativeSampleRate);
+ ma_uint32 periodSizeInFrames;
+
+ periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_descriptor(pDescriptor, nativeSampleRate, performanceProfile);
+ if (periodSizeInFrames < minPeriodSizeInFrames) {
+ periodSizeInFrames = minPeriodSizeInFrames;
+ }
+
+ return periodSizeInFrames;
+}
+
/*
Initializes the DirectSound playback and/or capture side of a device. The capture side is set up
first because a full-duplex device wants to match the capture buffer size and period count on the
playback side. On any failure the partially-initialized state is torn down via
ma_device_uninit__dsound() before returning. Loopback devices are not supported by this backend.
*/
static ma_result ma_device_init__dsound(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture)
{
    ma_result result;
    HRESULT hr;

    MA_ASSERT(pDevice != NULL);

    MA_ZERO_OBJECT(&pDevice->dsound);

    if (pConfig->deviceType == ma_device_type_loopback) {
        return MA_DEVICE_TYPE_NOT_SUPPORTED;
    }

    /*
    Unfortunately DirectSound uses different APIs and data structures for playback and capture
    devices. We need to initialize the capture device first because we'll want to match its buffer
    size and period count on the playback side if we're using full-duplex mode.
    */
    if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
        WAVEFORMATEXTENSIBLE wf;
        MA_DSCBUFFERDESC descDS;
        ma_uint32 periodSizeInFrames;
        ma_uint32 periodCount;
        char rawdata[1024]; /* <-- Ugly hack to avoid a malloc() due to a crappy DirectSound API. */
        WAVEFORMATEXTENSIBLE* pActualFormat;

        result = ma_config_to_WAVEFORMATEXTENSIBLE(pDescriptorCapture->format, pDescriptorCapture->channels, pDescriptorCapture->sampleRate, pDescriptorCapture->channelMap, &wf);
        if (result != MA_SUCCESS) {
            return result;
        }

        result = ma_context_create_IDirectSoundCapture__dsound(pDevice->pContext, pDescriptorCapture->shareMode, pDescriptorCapture->pDeviceID, (ma_IDirectSoundCapture**)&pDevice->dsound.pCapture);
        if (result != MA_SUCCESS) {
            ma_device_uninit__dsound(pDevice);
            return result;
        }

        /* Overwrite the requested channels/bits/rate with what the device actually prefers. */
        result = ma_context_get_format_info_for_IDirectSoundCapture__dsound(pDevice->pContext, (ma_IDirectSoundCapture*)pDevice->dsound.pCapture, &wf.Format.nChannels, &wf.Format.wBitsPerSample, &wf.Format.nSamplesPerSec);
        if (result != MA_SUCCESS) {
            ma_device_uninit__dsound(pDevice);
            return result;
        }

        /* Recompute the derived fields now that channels/bits/rate may have changed. Capture is always integer PCM here. */
        wf.Format.nBlockAlign          = (WORD)(wf.Format.nChannels * wf.Format.wBitsPerSample / 8);
        wf.Format.nAvgBytesPerSec      = wf.Format.nBlockAlign * wf.Format.nSamplesPerSec;
        wf.Samples.wValidBitsPerSample = wf.Format.wBitsPerSample;
        wf.SubFormat                   = MA_GUID_KSDATAFORMAT_SUBTYPE_PCM;

        /* The size of the buffer must be a clean multiple of the period count. */
        periodSizeInFrames = ma_calculate_period_size_in_frames_from_descriptor__dsound(pDescriptorCapture, wf.Format.nSamplesPerSec, pConfig->performanceProfile);
        periodCount = (pDescriptorCapture->periodCount > 0) ? pDescriptorCapture->periodCount : MA_DEFAULT_PERIODS;

        MA_ZERO_OBJECT(&descDS);
        descDS.dwSize        = sizeof(descDS);
        descDS.dwFlags       = 0;
        descDS.dwBufferBytes = periodSizeInFrames * periodCount * wf.Format.nBlockAlign;
        descDS.lpwfxFormat   = (WAVEFORMATEX*)&wf;
        hr = ma_IDirectSoundCapture_CreateCaptureBuffer((ma_IDirectSoundCapture*)pDevice->dsound.pCapture, &descDS, (ma_IDirectSoundCaptureBuffer**)&pDevice->dsound.pCaptureBuffer, NULL);
        if (FAILED(hr)) {
            ma_device_uninit__dsound(pDevice);
            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundCapture_CreateCaptureBuffer() failed for capture device.");
            return ma_result_from_HRESULT(hr);
        }

        /* Get the _actual_ properties of the buffer. */
        pActualFormat = (WAVEFORMATEXTENSIBLE*)rawdata;
        hr = ma_IDirectSoundCaptureBuffer_GetFormat((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, (WAVEFORMATEX*)pActualFormat, sizeof(rawdata), NULL);
        if (FAILED(hr)) {
            ma_device_uninit__dsound(pDevice);
            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to retrieve the actual format of the capture device's buffer.");
            return ma_result_from_HRESULT(hr);
        }

        /* We can now start setting the output data formats. */
        pDescriptorCapture->format     = ma_format_from_WAVEFORMATEX((WAVEFORMATEX*)pActualFormat);
        pDescriptorCapture->channels   = pActualFormat->Format.nChannels;
        pDescriptorCapture->sampleRate = pActualFormat->Format.nSamplesPerSec;

        /* Get the native channel map based on the channel mask. Non-EXTENSIBLE formats carry no mask, so fall back to the one we requested. */
        if (pActualFormat->Format.wFormatTag == WAVE_FORMAT_EXTENSIBLE) {
            ma_channel_mask_to_channel_map__win32(pActualFormat->dwChannelMask, pDescriptorCapture->channels, pDescriptorCapture->channelMap);
        } else {
            ma_channel_mask_to_channel_map__win32(wf.dwChannelMask, pDescriptorCapture->channels, pDescriptorCapture->channelMap);
        }

        /*
        After getting the actual format the size of the buffer in frames may have actually changed. However, we want this to be as close to what the
        user has asked for as possible, so let's go ahead and release the old capture buffer and create a new one in this case.
        */
        if (periodSizeInFrames != (descDS.dwBufferBytes / ma_get_bytes_per_frame(pDescriptorCapture->format, pDescriptorCapture->channels) / periodCount)) {
            descDS.dwBufferBytes = periodSizeInFrames * ma_get_bytes_per_frame(pDescriptorCapture->format, pDescriptorCapture->channels) * periodCount;
            ma_IDirectSoundCaptureBuffer_Release((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer);

            hr = ma_IDirectSoundCapture_CreateCaptureBuffer((ma_IDirectSoundCapture*)pDevice->dsound.pCapture, &descDS, (ma_IDirectSoundCaptureBuffer**)&pDevice->dsound.pCaptureBuffer, NULL);
            if (FAILED(hr)) {
                ma_device_uninit__dsound(pDevice);
                ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Second attempt at IDirectSoundCapture_CreateCaptureBuffer() failed for capture device.");
                return ma_result_from_HRESULT(hr);
            }
        }

        /* DirectSound should give us a buffer exactly the size we asked for. */
        pDescriptorCapture->periodSizeInFrames = periodSizeInFrames;
        pDescriptorCapture->periodCount        = periodCount;
    }

    if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
        WAVEFORMATEXTENSIBLE wf;
        MA_DSBUFFERDESC descDSPrimary;
        MA_DSCAPS caps;
        char rawdata[1024]; /* <-- Ugly hack to avoid a malloc() due to a crappy DirectSound API. */
        WAVEFORMATEXTENSIBLE* pActualFormat;
        ma_uint32 periodSizeInFrames;
        ma_uint32 periodCount;
        MA_DSBUFFERDESC descDS;

        result = ma_config_to_WAVEFORMATEXTENSIBLE(pDescriptorPlayback->format, pDescriptorPlayback->channels, pDescriptorPlayback->sampleRate, pDescriptorPlayback->channelMap, &wf);
        if (result != MA_SUCCESS) {
            return result;
        }

        result = ma_context_create_IDirectSound__dsound(pDevice->pContext, pDescriptorPlayback->shareMode, pDescriptorPlayback->pDeviceID, (ma_IDirectSound**)&pDevice->dsound.pPlayback);
        if (result != MA_SUCCESS) {
            ma_device_uninit__dsound(pDevice);
            return result;
        }

        /* The primary buffer is needed so the device format can be set via SetFormat() below. */
        MA_ZERO_OBJECT(&descDSPrimary);
        descDSPrimary.dwSize  = sizeof(MA_DSBUFFERDESC);
        descDSPrimary.dwFlags = MA_DSBCAPS_PRIMARYBUFFER | MA_DSBCAPS_CTRLVOLUME;
        hr = ma_IDirectSound_CreateSoundBuffer((ma_IDirectSound*)pDevice->dsound.pPlayback, &descDSPrimary, (ma_IDirectSoundBuffer**)&pDevice->dsound.pPlaybackPrimaryBuffer, NULL);
        if (FAILED(hr)) {
            ma_device_uninit__dsound(pDevice);
            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSound_CreateSoundBuffer() failed for playback device's primary buffer.");
            return ma_result_from_HRESULT(hr);
        }


        /* We may want to make some adjustments to the format if we are using defaults. */
        MA_ZERO_OBJECT(&caps);
        caps.dwSize = sizeof(caps);
        hr = ma_IDirectSound_GetCaps((ma_IDirectSound*)pDevice->dsound.pPlayback, &caps);
        if (FAILED(hr)) {
            ma_device_uninit__dsound(pDevice);
            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSound_GetCaps() failed for playback device.");
            return ma_result_from_HRESULT(hr);
        }

        if (pDescriptorPlayback->channels == 0) {
            if ((caps.dwFlags & MA_DSCAPS_PRIMARYSTEREO) != 0) {
                DWORD speakerConfig;

                /* It supports at least stereo, but could support more. */
                wf.Format.nChannels = 2;

                /* Look at the speaker configuration to get a better idea on the channel count. */
                if (SUCCEEDED(ma_IDirectSound_GetSpeakerConfig((ma_IDirectSound*)pDevice->dsound.pPlayback, &speakerConfig))) {
                    ma_get_channels_from_speaker_config__dsound(speakerConfig, &wf.Format.nChannels, &wf.dwChannelMask);
                }
            } else {
                /* It does not support stereo, which means we are stuck with mono. */
                wf.Format.nChannels = 1;
            }
        }

        if (pDescriptorPlayback->sampleRate == 0) {
            /* We base the sample rate on the values returned by GetCaps(). */
            if ((caps.dwFlags & MA_DSCAPS_CONTINUOUSRATE) != 0) {
                wf.Format.nSamplesPerSec = ma_get_best_sample_rate_within_range(caps.dwMinSecondarySampleRate, caps.dwMaxSecondarySampleRate);
            } else {
                wf.Format.nSamplesPerSec = caps.dwMaxSecondarySampleRate;
            }
        }

        /* Recompute the derived fields in case channels or sample rate were adjusted above. */
        wf.Format.nBlockAlign     = (WORD)(wf.Format.nChannels * wf.Format.wBitsPerSample / 8);
        wf.Format.nAvgBytesPerSec = wf.Format.nBlockAlign * wf.Format.nSamplesPerSec;

        /*
        From MSDN:

        The method succeeds even if the hardware does not support the requested format; DirectSound sets the buffer to the closest
        supported format. To determine whether this has happened, an application can call the GetFormat method for the primary buffer
        and compare the result with the format that was requested with the SetFormat method.
        */
        hr = ma_IDirectSoundBuffer_SetFormat((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackPrimaryBuffer, (WAVEFORMATEX*)&wf);
        if (FAILED(hr)) {
            ma_device_uninit__dsound(pDevice);
            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to set format of playback device's primary buffer.");
            return ma_result_from_HRESULT(hr);
        }

        /* Get the _actual_ properties of the buffer. */
        pActualFormat = (WAVEFORMATEXTENSIBLE*)rawdata;
        hr = ma_IDirectSoundBuffer_GetFormat((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackPrimaryBuffer, (WAVEFORMATEX*)pActualFormat, sizeof(rawdata), NULL);
        if (FAILED(hr)) {
            ma_device_uninit__dsound(pDevice);
            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to retrieve the actual format of the playback device's primary buffer.");
            return ma_result_from_HRESULT(hr);
        }

        /* We now have enough information to start setting some output properties. */
        pDescriptorPlayback->format     = ma_format_from_WAVEFORMATEX((WAVEFORMATEX*)pActualFormat);
        pDescriptorPlayback->channels   = pActualFormat->Format.nChannels;
        pDescriptorPlayback->sampleRate = pActualFormat->Format.nSamplesPerSec;

        /* Get the internal channel map based on the channel mask. Non-EXTENSIBLE formats carry no mask, so fall back to the one we requested. */
        if (pActualFormat->Format.wFormatTag == WAVE_FORMAT_EXTENSIBLE) {
            ma_channel_mask_to_channel_map__win32(pActualFormat->dwChannelMask, pDescriptorPlayback->channels, pDescriptorPlayback->channelMap);
        } else {
            ma_channel_mask_to_channel_map__win32(wf.dwChannelMask, pDescriptorPlayback->channels, pDescriptorPlayback->channelMap);
        }

        /* The size of the buffer must be a clean multiple of the period count. */
        periodSizeInFrames = ma_calculate_period_size_in_frames_from_descriptor__dsound(pDescriptorPlayback, pDescriptorPlayback->sampleRate, pConfig->performanceProfile);
        periodCount = (pDescriptorPlayback->periodCount > 0) ? pDescriptorPlayback->periodCount : MA_DEFAULT_PERIODS;

        /*
        Meaning of dwFlags (from MSDN):

        DSBCAPS_CTRLPOSITIONNOTIFY
            The buffer has position notification capability.

        DSBCAPS_GLOBALFOCUS
            With this flag set, an application using DirectSound can continue to play its buffers if the user switches focus to
            another application, even if the new application uses DirectSound.

        DSBCAPS_GETCURRENTPOSITION2
            In the first version of DirectSound, the play cursor was significantly ahead of the actual playing sound on emulated
            sound cards; it was directly behind the write cursor. Now, if the DSBCAPS_GETCURRENTPOSITION2 flag is specified, the
            application can get a more accurate play cursor.
        */
        MA_ZERO_OBJECT(&descDS);
        descDS.dwSize        = sizeof(descDS);
        descDS.dwFlags       = MA_DSBCAPS_CTRLPOSITIONNOTIFY | MA_DSBCAPS_GLOBALFOCUS | MA_DSBCAPS_GETCURRENTPOSITION2;
        descDS.dwBufferBytes = periodSizeInFrames * periodCount * ma_get_bytes_per_frame(pDescriptorPlayback->format, pDescriptorPlayback->channels);
        descDS.lpwfxFormat   = (WAVEFORMATEX*)&wf;
        hr = ma_IDirectSound_CreateSoundBuffer((ma_IDirectSound*)pDevice->dsound.pPlayback, &descDS, (ma_IDirectSoundBuffer**)&pDevice->dsound.pPlaybackBuffer, NULL);
        if (FAILED(hr)) {
            ma_device_uninit__dsound(pDevice);
            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSound_CreateSoundBuffer() failed for playback device's secondary buffer.");
            return ma_result_from_HRESULT(hr);
        }

        /* DirectSound should give us a buffer exactly the size we asked for. */
        pDescriptorPlayback->periodSizeInFrames = periodSizeInFrames;
        pDescriptorPlayback->periodCount        = periodCount;
    }

    return MA_SUCCESS;
}
+
+
+static ma_result ma_device_data_loop__dsound(ma_device* pDevice)
+{
+ ma_result result = MA_SUCCESS;
+ ma_uint32 bpfDeviceCapture = ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+ ma_uint32 bpfDevicePlayback = ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+ HRESULT hr;
+ DWORD lockOffsetInBytesCapture;
+ DWORD lockSizeInBytesCapture;
+ DWORD mappedSizeInBytesCapture;
+ DWORD mappedDeviceFramesProcessedCapture;
+ void* pMappedDeviceBufferCapture;
+ DWORD lockOffsetInBytesPlayback;
+ DWORD lockSizeInBytesPlayback;
+ DWORD mappedSizeInBytesPlayback;
+ void* pMappedDeviceBufferPlayback;
+ DWORD prevReadCursorInBytesCapture = 0;
+ DWORD prevPlayCursorInBytesPlayback = 0;
+ ma_bool32 physicalPlayCursorLoopFlagPlayback = 0;
+ DWORD virtualWriteCursorInBytesPlayback = 0;
+ ma_bool32 virtualWriteCursorLoopFlagPlayback = 0;
+ ma_bool32 isPlaybackDeviceStarted = MA_FALSE;
+ ma_uint32 framesWrittenToPlaybackDevice = 0; /* For knowing whether or not the playback device needs to be started. */
+ ma_uint32 waitTimeInMilliseconds = 1;
+
+ MA_ASSERT(pDevice != NULL);
+
+ /* The first thing to do is start the capture device. The playback device is only started after the first period is written. */
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ hr = ma_IDirectSoundCaptureBuffer_Start((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, MA_DSCBSTART_LOOPING);
+ if (FAILED(hr)) {
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundCaptureBuffer_Start() failed.");
+ return ma_result_from_HRESULT(hr);
+ }
+ }
+
+ while (ma_device_get_state(pDevice) == ma_device_state_started) {
+ switch (pDevice->type)
+ {
+ case ma_device_type_duplex:
+ {
+ DWORD physicalCaptureCursorInBytes;
+ DWORD physicalReadCursorInBytes;
+ hr = ma_IDirectSoundCaptureBuffer_GetCurrentPosition((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, &physicalCaptureCursorInBytes, &physicalReadCursorInBytes);
+ if (FAILED(hr)) {
+ return ma_result_from_HRESULT(hr);
+ }
+
+ /* If nothing is available we just sleep for a bit and return from this iteration. */
+ if (physicalReadCursorInBytes == prevReadCursorInBytesCapture) {
+ ma_sleep(waitTimeInMilliseconds);
+ continue; /* Nothing is available in the capture buffer. */
+ }
+
+ /*
+ The current position has moved. We need to map all of the captured samples and write them to the playback device, making sure
+ we don't return until every frame has been copied over.
+ */
+ if (prevReadCursorInBytesCapture < physicalReadCursorInBytes) {
+ /* The capture position has not looped. This is the simple case. */
+ lockOffsetInBytesCapture = prevReadCursorInBytesCapture;
+ lockSizeInBytesCapture = (physicalReadCursorInBytes - prevReadCursorInBytesCapture);
+ } else {
+ /*
+ The capture position has looped. This is the more complex case. Map to the end of the buffer. If this does not return anything,
+ do it again from the start.
+ */
+ if (prevReadCursorInBytesCapture < pDevice->capture.internalPeriodSizeInFrames*pDevice->capture.internalPeriods*bpfDeviceCapture) {
+ /* Lock up to the end of the buffer. */
+ lockOffsetInBytesCapture = prevReadCursorInBytesCapture;
+ lockSizeInBytesCapture = (pDevice->capture.internalPeriodSizeInFrames*pDevice->capture.internalPeriods*bpfDeviceCapture) - prevReadCursorInBytesCapture;
+ } else {
+ /* Lock starting from the start of the buffer. */
+ lockOffsetInBytesCapture = 0;
+ lockSizeInBytesCapture = physicalReadCursorInBytes;
+ }
+ }
+
+ if (lockSizeInBytesCapture == 0) {
+ ma_sleep(waitTimeInMilliseconds);
+ continue; /* Nothing is available in the capture buffer. */
+ }
+
+ hr = ma_IDirectSoundCaptureBuffer_Lock((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, lockOffsetInBytesCapture, lockSizeInBytesCapture, &pMappedDeviceBufferCapture, &mappedSizeInBytesCapture, NULL, NULL, 0);
+ if (FAILED(hr)) {
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to map buffer from capture device in preparation for writing to the device.");
+ return ma_result_from_HRESULT(hr);
+ }
+
+
+ /* At this point we have some input data that we need to output. We do not return until every mapped frame of the input data is written to the playback device. */
+ mappedDeviceFramesProcessedCapture = 0;
+
+ for (;;) { /* Keep writing to the playback device. */
+ ma_uint8 inputFramesInClientFormat[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint32 inputFramesInClientFormatCap = sizeof(inputFramesInClientFormat) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels);
+ ma_uint8 outputFramesInClientFormat[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint32 outputFramesInClientFormatCap = sizeof(outputFramesInClientFormat) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels);
+ ma_uint32 outputFramesInClientFormatCount;
+ ma_uint32 outputFramesInClientFormatConsumed = 0;
+ ma_uint64 clientCapturedFramesToProcess = ma_min(inputFramesInClientFormatCap, outputFramesInClientFormatCap);
+ ma_uint64 deviceCapturedFramesToProcess = (mappedSizeInBytesCapture / bpfDeviceCapture) - mappedDeviceFramesProcessedCapture;
+ void* pRunningMappedDeviceBufferCapture = ma_offset_ptr(pMappedDeviceBufferCapture, mappedDeviceFramesProcessedCapture * bpfDeviceCapture);
+
+ result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningMappedDeviceBufferCapture, &deviceCapturedFramesToProcess, inputFramesInClientFormat, &clientCapturedFramesToProcess);
+ if (result != MA_SUCCESS) {
+ break;
+ }
+
+ outputFramesInClientFormatCount = (ma_uint32)clientCapturedFramesToProcess;
+ mappedDeviceFramesProcessedCapture += (ma_uint32)deviceCapturedFramesToProcess;
+
+ ma_device__handle_data_callback(pDevice, outputFramesInClientFormat, inputFramesInClientFormat, (ma_uint32)clientCapturedFramesToProcess);
+
+ /* At this point we have input and output data in client format. All we need to do now is convert it to the output device format. This may take a few passes. */
+ for (;;) {
+ ma_uint32 framesWrittenThisIteration;
+ DWORD physicalPlayCursorInBytes;
+ DWORD physicalWriteCursorInBytes;
+ DWORD availableBytesPlayback;
+ DWORD silentPaddingInBytes = 0; /* <-- Must be initialized to 0. */
+
+ /* We need the physical play and write cursors. */
+ if (FAILED(ma_IDirectSoundBuffer_GetCurrentPosition((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, &physicalPlayCursorInBytes, &physicalWriteCursorInBytes))) {
+ break;
+ }
+
+ if (physicalPlayCursorInBytes < prevPlayCursorInBytesPlayback) {
+ physicalPlayCursorLoopFlagPlayback = !physicalPlayCursorLoopFlagPlayback;
+ }
+ prevPlayCursorInBytesPlayback = physicalPlayCursorInBytes;
+
+ /* If there's any bytes available for writing we can do that now. The space between the virtual cursor position and play cursor. */
+ if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) {
+ /* Same loop iteration. The available bytes wraps all the way around from the virtual write cursor to the physical play cursor. */
+ if (physicalPlayCursorInBytes <= virtualWriteCursorInBytesPlayback) {
+ availableBytesPlayback = (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) - virtualWriteCursorInBytesPlayback;
+ availableBytesPlayback += physicalPlayCursorInBytes; /* Wrap around. */
+ } else {
+ /* This is an error. */
+ ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_WARNING, "[DirectSound] (Duplex/Playback): Play cursor has moved in front of the write cursor (same loop iteration). physicalPlayCursorInBytes=%ld, virtualWriteCursorInBytes=%ld.\n", physicalPlayCursorInBytes, virtualWriteCursorInBytesPlayback);
+ availableBytesPlayback = 0;
+ }
+ } else {
+ /* Different loop iterations. The available bytes only goes from the virtual write cursor to the physical play cursor. */
+ if (physicalPlayCursorInBytes >= virtualWriteCursorInBytesPlayback) {
+ availableBytesPlayback = physicalPlayCursorInBytes - virtualWriteCursorInBytesPlayback;
+ } else {
+ /* This is an error. */
+ ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_WARNING, "[DirectSound] (Duplex/Playback): Write cursor has moved behind the play cursor (different loop iterations). physicalPlayCursorInBytes=%ld, virtualWriteCursorInBytes=%ld.\n", physicalPlayCursorInBytes, virtualWriteCursorInBytesPlayback);
+ availableBytesPlayback = 0;
+ }
+ }
+
+ /* If there's no room available for writing we need to wait for more. */
+ if (availableBytesPlayback == 0) {
+ /* If we haven't started the device yet, this will never get beyond 0. In this case we need to get the device started. */
+ if (!isPlaybackDeviceStarted) {
+ hr = ma_IDirectSoundBuffer_Play((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, 0, 0, MA_DSBPLAY_LOOPING);
+ if (FAILED(hr)) {
+ ma_IDirectSoundCaptureBuffer_Stop((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer);
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundBuffer_Play() failed.");
+ return ma_result_from_HRESULT(hr);
+ }
+ isPlaybackDeviceStarted = MA_TRUE;
+ } else {
+ ma_sleep(waitTimeInMilliseconds);
+ continue;
+ }
+ }
+
+
+ /* Getting here means there room available somewhere. We limit this to either the end of the buffer or the physical play cursor, whichever is closest. */
+ lockOffsetInBytesPlayback = virtualWriteCursorInBytesPlayback;
+ if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) {
+ /* Same loop iteration. Go up to the end of the buffer. */
+ lockSizeInBytesPlayback = (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) - virtualWriteCursorInBytesPlayback;
+ } else {
+ /* Different loop iterations. Go up to the physical play cursor. */
+ lockSizeInBytesPlayback = physicalPlayCursorInBytes - virtualWriteCursorInBytesPlayback;
+ }
+
+ hr = ma_IDirectSoundBuffer_Lock((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, lockOffsetInBytesPlayback, lockSizeInBytesPlayback, &pMappedDeviceBufferPlayback, &mappedSizeInBytesPlayback, NULL, NULL, 0);
+ if (FAILED(hr)) {
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to map buffer from playback device in preparation for writing to the device.");
+ result = ma_result_from_HRESULT(hr);
+ break;
+ }
+
+ /*
+ Experiment: If the playback buffer is being starved, pad it with some silence to get it back in sync. This will cause a glitch, but it may prevent
+ endless glitching due to it constantly running out of data.
+ */
+ if (isPlaybackDeviceStarted) {
+ DWORD bytesQueuedForPlayback = (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) - availableBytesPlayback;
+ if (bytesQueuedForPlayback < (pDevice->playback.internalPeriodSizeInFrames*bpfDevicePlayback)) {
+ silentPaddingInBytes = (pDevice->playback.internalPeriodSizeInFrames*2*bpfDevicePlayback) - bytesQueuedForPlayback;
+ if (silentPaddingInBytes > lockSizeInBytesPlayback) {
+ silentPaddingInBytes = lockSizeInBytesPlayback;
+ }
+
+ ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_WARNING, "[DirectSound] (Duplex/Playback) Playback buffer starved. availableBytesPlayback=%ld, silentPaddingInBytes=%ld\n", availableBytesPlayback, silentPaddingInBytes);
+ }
+ }
+
+ /* At this point we have a buffer for output. */
+ if (silentPaddingInBytes > 0) {
+ MA_ZERO_MEMORY(pMappedDeviceBufferPlayback, silentPaddingInBytes);
+ framesWrittenThisIteration = silentPaddingInBytes/bpfDevicePlayback;
+ } else {
+ ma_uint64 convertedFrameCountIn = (outputFramesInClientFormatCount - outputFramesInClientFormatConsumed);
+ ma_uint64 convertedFrameCountOut = mappedSizeInBytesPlayback/bpfDevicePlayback;
+ void* pConvertedFramesIn = ma_offset_ptr(outputFramesInClientFormat, outputFramesInClientFormatConsumed * bpfDevicePlayback);
+ void* pConvertedFramesOut = pMappedDeviceBufferPlayback;
+
+ result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, pConvertedFramesIn, &convertedFrameCountIn, pConvertedFramesOut, &convertedFrameCountOut);
+ if (result != MA_SUCCESS) {
+ break;
+ }
+
+ outputFramesInClientFormatConsumed += (ma_uint32)convertedFrameCountOut;
+ framesWrittenThisIteration = (ma_uint32)convertedFrameCountOut;
+ }
+
+
+ hr = ma_IDirectSoundBuffer_Unlock((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, pMappedDeviceBufferPlayback, framesWrittenThisIteration*bpfDevicePlayback, NULL, 0);
+ if (FAILED(hr)) {
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to unlock internal buffer from playback device after writing to the device.");
+ result = ma_result_from_HRESULT(hr);
+ break;
+ }
+
+ virtualWriteCursorInBytesPlayback += framesWrittenThisIteration*bpfDevicePlayback;
+ if ((virtualWriteCursorInBytesPlayback/bpfDevicePlayback) == pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods) {
+ virtualWriteCursorInBytesPlayback = 0;
+ virtualWriteCursorLoopFlagPlayback = !virtualWriteCursorLoopFlagPlayback;
+ }
+
+ /*
+ We may need to start the device. We want two full periods to be written before starting the playback device. Having an extra period adds
+ a bit of a buffer to prevent the playback buffer from getting starved.
+ */
+ framesWrittenToPlaybackDevice += framesWrittenThisIteration;
+ if (!isPlaybackDeviceStarted && framesWrittenToPlaybackDevice >= (pDevice->playback.internalPeriodSizeInFrames*2)) {
+ hr = ma_IDirectSoundBuffer_Play((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, 0, 0, MA_DSBPLAY_LOOPING);
+ if (FAILED(hr)) {
+ ma_IDirectSoundCaptureBuffer_Stop((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer);
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundBuffer_Play() failed.");
+ return ma_result_from_HRESULT(hr);
+ }
+ isPlaybackDeviceStarted = MA_TRUE;
+ }
+
+ if (framesWrittenThisIteration < mappedSizeInBytesPlayback/bpfDevicePlayback) {
+ break; /* We're finished with the output data.*/
+ }
+ }
+
+ if (clientCapturedFramesToProcess == 0) {
+ break; /* We just consumed every input sample. */
+ }
+ }
+
+
+ /* At this point we're done with the mapped portion of the capture buffer. */
+ hr = ma_IDirectSoundCaptureBuffer_Unlock((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, pMappedDeviceBufferCapture, mappedSizeInBytesCapture, NULL, 0);
+ if (FAILED(hr)) {
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to unlock internal buffer from capture device after reading from the device.");
+ return ma_result_from_HRESULT(hr);
+ }
+ prevReadCursorInBytesCapture = (lockOffsetInBytesCapture + mappedSizeInBytesCapture);
+ } break;
+
+
+
+ case ma_device_type_capture:
+ {
+ DWORD physicalCaptureCursorInBytes;
+ DWORD physicalReadCursorInBytes;
+ hr = ma_IDirectSoundCaptureBuffer_GetCurrentPosition((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, &physicalCaptureCursorInBytes, &physicalReadCursorInBytes);
+ if (FAILED(hr)) {
+ return MA_ERROR;
+ }
+
+ /* If the previous capture position is the same as the current position we need to wait a bit longer. */
+ if (prevReadCursorInBytesCapture == physicalReadCursorInBytes) {
+ ma_sleep(waitTimeInMilliseconds);
+ continue;
+ }
+
+ /* Getting here means we have capture data available. */
+ if (prevReadCursorInBytesCapture < physicalReadCursorInBytes) {
+ /* The capture position has not looped. This is the simple case. */
+ lockOffsetInBytesCapture = prevReadCursorInBytesCapture;
+ lockSizeInBytesCapture = (physicalReadCursorInBytes - prevReadCursorInBytesCapture);
+ } else {
+ /*
+ The capture position has looped. This is the more complex case. Map to the end of the buffer. If this does not return anything,
+ do it again from the start.
+ */
+ if (prevReadCursorInBytesCapture < pDevice->capture.internalPeriodSizeInFrames*pDevice->capture.internalPeriods*bpfDeviceCapture) {
+ /* Lock up to the end of the buffer. */
+ lockOffsetInBytesCapture = prevReadCursorInBytesCapture;
+ lockSizeInBytesCapture = (pDevice->capture.internalPeriodSizeInFrames*pDevice->capture.internalPeriods*bpfDeviceCapture) - prevReadCursorInBytesCapture;
+ } else {
+ /* Lock starting from the start of the buffer. */
+ lockOffsetInBytesCapture = 0;
+ lockSizeInBytesCapture = physicalReadCursorInBytes;
+ }
+ }
+
+ if (lockSizeInBytesCapture < pDevice->capture.internalPeriodSizeInFrames) {
+ ma_sleep(waitTimeInMilliseconds);
+ continue; /* Nothing is available in the capture buffer. */
+ }
+
+ hr = ma_IDirectSoundCaptureBuffer_Lock((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, lockOffsetInBytesCapture, lockSizeInBytesCapture, &pMappedDeviceBufferCapture, &mappedSizeInBytesCapture, NULL, NULL, 0);
+ if (FAILED(hr)) {
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to map buffer from capture device in preparation for writing to the device.");
+ result = ma_result_from_HRESULT(hr);
+ }
+
+ if (lockSizeInBytesCapture != mappedSizeInBytesCapture) {
+ ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[DirectSound] (Capture) lockSizeInBytesCapture=%ld != mappedSizeInBytesCapture=%ld\n", lockSizeInBytesCapture, mappedSizeInBytesCapture);
+ }
+
+ ma_device__send_frames_to_client(pDevice, mappedSizeInBytesCapture/bpfDeviceCapture, pMappedDeviceBufferCapture);
+
+ hr = ma_IDirectSoundCaptureBuffer_Unlock((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, pMappedDeviceBufferCapture, mappedSizeInBytesCapture, NULL, 0);
+ if (FAILED(hr)) {
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to unlock internal buffer from capture device after reading from the device.");
+ return ma_result_from_HRESULT(hr);
+ }
+ prevReadCursorInBytesCapture = lockOffsetInBytesCapture + mappedSizeInBytesCapture;
+
+ if (prevReadCursorInBytesCapture == (pDevice->capture.internalPeriodSizeInFrames*pDevice->capture.internalPeriods*bpfDeviceCapture)) {
+ prevReadCursorInBytesCapture = 0;
+ }
+ } break;
+
+
+
+ case ma_device_type_playback:
+ {
+ DWORD availableBytesPlayback;
+ DWORD physicalPlayCursorInBytes;
+ DWORD physicalWriteCursorInBytes;
+ hr = ma_IDirectSoundBuffer_GetCurrentPosition((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, &physicalPlayCursorInBytes, &physicalWriteCursorInBytes);
+ if (FAILED(hr)) {
+ break;
+ }
+
+ if (physicalPlayCursorInBytes < prevPlayCursorInBytesPlayback) {
+ physicalPlayCursorLoopFlagPlayback = !physicalPlayCursorLoopFlagPlayback;
+ }
+ prevPlayCursorInBytesPlayback = physicalPlayCursorInBytes;
+
+ /* If there's any bytes available for writing we can do that now. The space between the virtual cursor position and play cursor. */
+ if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) {
+ /* Same loop iteration. The available bytes wraps all the way around from the virtual write cursor to the physical play cursor. */
+ if (physicalPlayCursorInBytes <= virtualWriteCursorInBytesPlayback) {
+ availableBytesPlayback = (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) - virtualWriteCursorInBytesPlayback;
+ availableBytesPlayback += physicalPlayCursorInBytes; /* Wrap around. */
+ } else {
+ /* This is an error. */
+ ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_WARNING, "[DirectSound] (Playback): Play cursor has moved in front of the write cursor (same loop iterations). physicalPlayCursorInBytes=%ld, virtualWriteCursorInBytes=%ld.\n", physicalPlayCursorInBytes, virtualWriteCursorInBytesPlayback);
+ availableBytesPlayback = 0;
+ }
+ } else {
+ /* Different loop iterations. The available bytes only goes from the virtual write cursor to the physical play cursor. */
+ if (physicalPlayCursorInBytes >= virtualWriteCursorInBytesPlayback) {
+ availableBytesPlayback = physicalPlayCursorInBytes - virtualWriteCursorInBytesPlayback;
+ } else {
+ /* This is an error. */
+ ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_WARNING, "[DirectSound] (Playback): Write cursor has moved behind the play cursor (different loop iterations). physicalPlayCursorInBytes=%ld, virtualWriteCursorInBytes=%ld.\n", physicalPlayCursorInBytes, virtualWriteCursorInBytesPlayback);
+ availableBytesPlayback = 0;
+ }
+ }
+
+ /* If there's no room available for writing we need to wait for more. */
+ if (availableBytesPlayback < pDevice->playback.internalPeriodSizeInFrames) {
+ /* If we haven't started the device yet, this will never get beyond 0. In this case we need to get the device started. */
+ if (availableBytesPlayback == 0 && !isPlaybackDeviceStarted) {
+ hr = ma_IDirectSoundBuffer_Play((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, 0, 0, MA_DSBPLAY_LOOPING);
+ if (FAILED(hr)) {
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundBuffer_Play() failed.");
+ return ma_result_from_HRESULT(hr);
+ }
+ isPlaybackDeviceStarted = MA_TRUE;
+ } else {
+ ma_sleep(waitTimeInMilliseconds);
+ continue;
+ }
+ }
+
+ /* Getting here means there room available somewhere. We limit this to either the end of the buffer or the physical play cursor, whichever is closest. */
+ lockOffsetInBytesPlayback = virtualWriteCursorInBytesPlayback;
+ if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) {
+ /* Same loop iteration. Go up to the end of the buffer. */
+ lockSizeInBytesPlayback = (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) - virtualWriteCursorInBytesPlayback;
+ } else {
+ /* Different loop iterations. Go up to the physical play cursor. */
+ lockSizeInBytesPlayback = physicalPlayCursorInBytes - virtualWriteCursorInBytesPlayback;
+ }
+
+ hr = ma_IDirectSoundBuffer_Lock((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, lockOffsetInBytesPlayback, lockSizeInBytesPlayback, &pMappedDeviceBufferPlayback, &mappedSizeInBytesPlayback, NULL, NULL, 0);
+ if (FAILED(hr)) {
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to map buffer from playback device in preparation for writing to the device.");
+ result = ma_result_from_HRESULT(hr);
+ break;
+ }
+
+ /* At this point we have a buffer for output. */
+ ma_device__read_frames_from_client(pDevice, (mappedSizeInBytesPlayback/bpfDevicePlayback), pMappedDeviceBufferPlayback);
+
+ hr = ma_IDirectSoundBuffer_Unlock((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, pMappedDeviceBufferPlayback, mappedSizeInBytesPlayback, NULL, 0);
+ if (FAILED(hr)) {
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to unlock internal buffer from playback device after writing to the device.");
+ result = ma_result_from_HRESULT(hr);
+ break;
+ }
+
+ virtualWriteCursorInBytesPlayback += mappedSizeInBytesPlayback;
+ if (virtualWriteCursorInBytesPlayback == pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) {
+ virtualWriteCursorInBytesPlayback = 0;
+ virtualWriteCursorLoopFlagPlayback = !virtualWriteCursorLoopFlagPlayback;
+ }
+
+ /*
+ We may need to start the device. We want two full periods to be written before starting the playback device. Having an extra period adds
+ a bit of a buffer to prevent the playback buffer from getting starved.
+ */
+ framesWrittenToPlaybackDevice += mappedSizeInBytesPlayback/bpfDevicePlayback;
+ if (!isPlaybackDeviceStarted && framesWrittenToPlaybackDevice >= pDevice->playback.internalPeriodSizeInFrames) {
+ hr = ma_IDirectSoundBuffer_Play((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, 0, 0, MA_DSBPLAY_LOOPING);
+ if (FAILED(hr)) {
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundBuffer_Play() failed.");
+ return ma_result_from_HRESULT(hr);
+ }
+ isPlaybackDeviceStarted = MA_TRUE;
+ }
+ } break;
+
+
+ default: return MA_INVALID_ARGS; /* Invalid device type. */
+ }
+
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+ }
+
+ /* Getting here means the device is being stopped. */
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ hr = ma_IDirectSoundCaptureBuffer_Stop((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer);
+ if (FAILED(hr)) {
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundCaptureBuffer_Stop() failed.");
+ return ma_result_from_HRESULT(hr);
+ }
+ }
+
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ /* The playback device should be drained before stopping. All we do is wait until the available bytes is equal to the size of the buffer. */
+ if (isPlaybackDeviceStarted) {
+ for (;;) {
+ DWORD availableBytesPlayback = 0;
+ DWORD physicalPlayCursorInBytes;
+ DWORD physicalWriteCursorInBytes;
+ hr = ma_IDirectSoundBuffer_GetCurrentPosition((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, &physicalPlayCursorInBytes, &physicalWriteCursorInBytes);
+ if (FAILED(hr)) {
+ break;
+ }
+
+ if (physicalPlayCursorInBytes < prevPlayCursorInBytesPlayback) {
+ physicalPlayCursorLoopFlagPlayback = !physicalPlayCursorLoopFlagPlayback;
+ }
+ prevPlayCursorInBytesPlayback = physicalPlayCursorInBytes;
+
+ if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) {
+ /* Same loop iteration. The available bytes wraps all the way around from the virtual write cursor to the physical play cursor. */
+ if (physicalPlayCursorInBytes <= virtualWriteCursorInBytesPlayback) {
+ availableBytesPlayback = (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) - virtualWriteCursorInBytesPlayback;
+ availableBytesPlayback += physicalPlayCursorInBytes; /* Wrap around. */
+ } else {
+ break;
+ }
+ } else {
+ /* Different loop iterations. The available bytes only goes from the virtual write cursor to the physical play cursor. */
+ if (physicalPlayCursorInBytes >= virtualWriteCursorInBytesPlayback) {
+ availableBytesPlayback = physicalPlayCursorInBytes - virtualWriteCursorInBytesPlayback;
+ } else {
+ break;
+ }
+ }
+
+ if (availableBytesPlayback >= (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback)) {
+ break;
+ }
+
+ ma_sleep(waitTimeInMilliseconds);
+ }
+ }
+
+ hr = ma_IDirectSoundBuffer_Stop((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer);
+ if (FAILED(hr)) {
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundBuffer_Stop() failed.");
+ return ma_result_from_HRESULT(hr);
+ }
+
+ ma_IDirectSoundBuffer_SetCurrentPosition((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, 0);
+ }
+
+ return MA_SUCCESS;
+}
+
/* Uninitializes the DirectSound backend context. Inverse of ma_context_init__dsound(). */
static ma_result ma_context_uninit__dsound(ma_context* pContext)
{
    MA_ASSERT(pContext != NULL);
    MA_ASSERT(pContext->backend == ma_backend_dsound);

    /* The only resource the context holds is the dsound.dll handle opened at init time. */
    ma_dlclose(pContext, pContext->dsound.hDSoundDLL);

    return MA_SUCCESS;
}
+
/*
Initializes the DirectSound backend context and wires up the backend callbacks. DirectSound is
linked at runtime via dsound.dll; if the DLL cannot be loaded this backend is unavailable.
*/
static ma_result ma_context_init__dsound(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks)
{
    MA_ASSERT(pContext != NULL);

    (void)pConfig;  /* No DirectSound-specific configuration options. */

    pContext->dsound.hDSoundDLL = ma_dlopen(pContext, "dsound.dll");
    if (pContext->dsound.hDSoundDLL == NULL) {
        return MA_API_NOT_FOUND;
    }

    /* NOTE(review): ma_dlsym() results are not NULL-checked here — presumably a missing symbol surfaces when first invoked; confirm. */
    pContext->dsound.DirectSoundCreate = ma_dlsym(pContext, pContext->dsound.hDSoundDLL, "DirectSoundCreate");
    pContext->dsound.DirectSoundEnumerateA = ma_dlsym(pContext, pContext->dsound.hDSoundDLL, "DirectSoundEnumerateA");
    pContext->dsound.DirectSoundCaptureCreate = ma_dlsym(pContext, pContext->dsound.hDSoundDLL, "DirectSoundCaptureCreate");
    pContext->dsound.DirectSoundCaptureEnumerateA = ma_dlsym(pContext, pContext->dsound.hDSoundDLL, "DirectSoundCaptureEnumerateA");

    /* This backend drives everything from onDeviceDataLoop; the other device callbacks are intentionally unused. */
    pCallbacks->onContextInit = ma_context_init__dsound;
    pCallbacks->onContextUninit = ma_context_uninit__dsound;
    pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__dsound;
    pCallbacks->onContextGetDeviceInfo = ma_context_get_device_info__dsound;
    pCallbacks->onDeviceInit = ma_device_init__dsound;
    pCallbacks->onDeviceUninit = ma_device_uninit__dsound;
    pCallbacks->onDeviceStart = NULL; /* Not used. Started in onDeviceDataLoop. */
    pCallbacks->onDeviceStop = NULL; /* Not used. Stopped in onDeviceDataLoop. */
    pCallbacks->onDeviceRead = NULL; /* Not used. Data is read directly in onDeviceDataLoop. */
    pCallbacks->onDeviceWrite = NULL; /* Not used. Data is written directly in onDeviceDataLoop. */
    pCallbacks->onDeviceDataLoop = ma_device_data_loop__dsound;

    return MA_SUCCESS;
}
+#endif
+
+
+
+/******************************************************************************
+
+WinMM Backend
+
+******************************************************************************/
+#ifdef MA_HAS_WINMM
+
+/*
+Some older compilers don't have WAVEOUTCAPS2A and WAVEINCAPS2A, so we'll need to write this ourselves. These structures
+are exactly the same as the older ones but they have a few GUIDs for manufacturer/product/name identification. I'm keeping
+the names the same as the Win32 library for consistency, but namespaced to avoid naming conflicts with the Win32 version.
+*/
/* Namespaced clone of WAVEOUTCAPS2A for compilers whose Windows headers predate it. */
typedef struct
{
    WORD wMid;                      /* Manufacturer ID. */
    WORD wPid;                      /* Product ID. */
    MMVERSION vDriverVersion;
    CHAR szPname[MAXPNAMELEN];      /* Product name. Limited to 31 characters. */
    DWORD dwFormats;                /* Bitfield of supported WAVE_FORMAT_* combinations. */
    WORD wChannels;
    WORD wReserved1;
    DWORD dwSupport;
    GUID ManufacturerGuid;
    GUID ProductGuid;
    GUID NameGuid;                  /* Used for looking up the full device name in the registry. */
} MA_WAVEOUTCAPS2A;
/* Namespaced clone of WAVEINCAPS2A. Same as MA_WAVEOUTCAPS2A but without dwSupport. */
typedef struct
{
    WORD wMid;
    WORD wPid;
    MMVERSION vDriverVersion;
    CHAR szPname[MAXPNAMELEN];
    DWORD dwFormats;
    WORD wChannels;
    WORD wReserved1;
    GUID ManufacturerGuid;
    GUID ProductGuid;
    GUID NameGuid;
} MA_WAVEINCAPS2A;

/* Function pointer types for the WinMM APIs used by this backend. */
typedef UINT (WINAPI * MA_PFN_waveOutGetNumDevs)(void);
typedef MMRESULT (WINAPI * MA_PFN_waveOutGetDevCapsA)(ma_uintptr uDeviceID, LPWAVEOUTCAPSA pwoc, UINT cbwoc);
typedef MMRESULT (WINAPI * MA_PFN_waveOutOpen)(LPHWAVEOUT phwo, UINT uDeviceID, LPCWAVEFORMATEX pwfx, DWORD_PTR dwCallback, DWORD_PTR dwInstance, DWORD fdwOpen);
typedef MMRESULT (WINAPI * MA_PFN_waveOutClose)(HWAVEOUT hwo);
typedef MMRESULT (WINAPI * MA_PFN_waveOutPrepareHeader)(HWAVEOUT hwo, LPWAVEHDR pwh, UINT cbwh);
typedef MMRESULT (WINAPI * MA_PFN_waveOutUnprepareHeader)(HWAVEOUT hwo, LPWAVEHDR pwh, UINT cbwh);
typedef MMRESULT (WINAPI * MA_PFN_waveOutWrite)(HWAVEOUT hwo, LPWAVEHDR pwh, UINT cbwh);
typedef MMRESULT (WINAPI * MA_PFN_waveOutReset)(HWAVEOUT hwo);
typedef UINT (WINAPI * MA_PFN_waveInGetNumDevs)(void);
typedef MMRESULT (WINAPI * MA_PFN_waveInGetDevCapsA)(ma_uintptr uDeviceID, LPWAVEINCAPSA pwic, UINT cbwic);
typedef MMRESULT (WINAPI * MA_PFN_waveInOpen)(LPHWAVEIN phwi, UINT uDeviceID, LPCWAVEFORMATEX pwfx, DWORD_PTR dwCallback, DWORD_PTR dwInstance, DWORD fdwOpen);
typedef MMRESULT (WINAPI * MA_PFN_waveInClose)(HWAVEIN hwi);
typedef MMRESULT (WINAPI * MA_PFN_waveInPrepareHeader)(HWAVEIN hwi, LPWAVEHDR pwh, UINT cbwh);
typedef MMRESULT (WINAPI * MA_PFN_waveInUnprepareHeader)(HWAVEIN hwi, LPWAVEHDR pwh, UINT cbwh);
typedef MMRESULT (WINAPI * MA_PFN_waveInAddBuffer)(HWAVEIN hwi, LPWAVEHDR pwh, UINT cbwh);
typedef MMRESULT (WINAPI * MA_PFN_waveInStart)(HWAVEIN hwi);
typedef MMRESULT (WINAPI * MA_PFN_waveInReset)(HWAVEIN hwi);
+
+static ma_result ma_result_from_MMRESULT(MMRESULT resultMM)
+{
+ switch (resultMM) {
+ case MMSYSERR_NOERROR: return MA_SUCCESS;
+ case MMSYSERR_BADDEVICEID: return MA_INVALID_ARGS;
+ case MMSYSERR_INVALHANDLE: return MA_INVALID_ARGS;
+ case MMSYSERR_NOMEM: return MA_OUT_OF_MEMORY;
+ case MMSYSERR_INVALFLAG: return MA_INVALID_ARGS;
+ case MMSYSERR_INVALPARAM: return MA_INVALID_ARGS;
+ case MMSYSERR_HANDLEBUSY: return MA_BUSY;
+ case MMSYSERR_ERROR: return MA_ERROR;
+ default: return MA_ERROR;
+ }
+}
+
+static char* ma_find_last_character(char* str, char ch)
+{
+ char* last;
+
+ if (str == NULL) {
+ return NULL;
+ }
+
+ last = NULL;
+ while (*str != '\0') {
+ if (*str == ch) {
+ last = str;
+ }
+
+ str += 1;
+ }
+
+ return last;
+}
+
+static ma_uint32 ma_get_period_size_in_bytes(ma_uint32 periodSizeInFrames, ma_format format, ma_uint32 channels)
+{
+ return periodSizeInFrames * ma_get_bytes_per_frame(format, channels);
+}
+
+
+/*
+Our own "WAVECAPS" structure that contains generic information shared between WAVEOUTCAPS2 and WAVEINCAPS2 so
+we can do things generically and typesafely. Names are being kept the same for consistency.
+*/
typedef struct
{
    CHAR szPname[MAXPNAMELEN];  /* Product name, truncated to 31 characters by WinMM. */
    DWORD dwFormats;            /* Bitfield of supported WAVE_FORMAT_* combinations. */
    WORD wChannels;
    GUID NameGuid;              /* Used to recover the full, untruncated name from the registry. */
} MA_WAVECAPSA;
+
+static ma_result ma_get_best_info_from_formats_flags__winmm(DWORD dwFormats, WORD channels, WORD* pBitsPerSample, DWORD* pSampleRate)
+{
+ WORD bitsPerSample = 0;
+ DWORD sampleRate = 0;
+
+ if (pBitsPerSample) {
+ *pBitsPerSample = 0;
+ }
+ if (pSampleRate) {
+ *pSampleRate = 0;
+ }
+
+ if (channels == 1) {
+ bitsPerSample = 16;
+ if ((dwFormats & WAVE_FORMAT_48M16) != 0) {
+ sampleRate = 48000;
+ } else if ((dwFormats & WAVE_FORMAT_44M16) != 0) {
+ sampleRate = 44100;
+ } else if ((dwFormats & WAVE_FORMAT_2M16) != 0) {
+ sampleRate = 22050;
+ } else if ((dwFormats & WAVE_FORMAT_1M16) != 0) {
+ sampleRate = 11025;
+ } else if ((dwFormats & WAVE_FORMAT_96M16) != 0) {
+ sampleRate = 96000;
+ } else {
+ bitsPerSample = 8;
+ if ((dwFormats & WAVE_FORMAT_48M08) != 0) {
+ sampleRate = 48000;
+ } else if ((dwFormats & WAVE_FORMAT_44M08) != 0) {
+ sampleRate = 44100;
+ } else if ((dwFormats & WAVE_FORMAT_2M08) != 0) {
+ sampleRate = 22050;
+ } else if ((dwFormats & WAVE_FORMAT_1M08) != 0) {
+ sampleRate = 11025;
+ } else if ((dwFormats & WAVE_FORMAT_96M08) != 0) {
+ sampleRate = 96000;
+ } else {
+ return MA_FORMAT_NOT_SUPPORTED;
+ }
+ }
+ } else {
+ bitsPerSample = 16;
+ if ((dwFormats & WAVE_FORMAT_48S16) != 0) {
+ sampleRate = 48000;
+ } else if ((dwFormats & WAVE_FORMAT_44S16) != 0) {
+ sampleRate = 44100;
+ } else if ((dwFormats & WAVE_FORMAT_2S16) != 0) {
+ sampleRate = 22050;
+ } else if ((dwFormats & WAVE_FORMAT_1S16) != 0) {
+ sampleRate = 11025;
+ } else if ((dwFormats & WAVE_FORMAT_96S16) != 0) {
+ sampleRate = 96000;
+ } else {
+ bitsPerSample = 8;
+ if ((dwFormats & WAVE_FORMAT_48S08) != 0) {
+ sampleRate = 48000;
+ } else if ((dwFormats & WAVE_FORMAT_44S08) != 0) {
+ sampleRate = 44100;
+ } else if ((dwFormats & WAVE_FORMAT_2S08) != 0) {
+ sampleRate = 22050;
+ } else if ((dwFormats & WAVE_FORMAT_1S08) != 0) {
+ sampleRate = 11025;
+ } else if ((dwFormats & WAVE_FORMAT_96S08) != 0) {
+ sampleRate = 96000;
+ } else {
+ return MA_FORMAT_NOT_SUPPORTED;
+ }
+ }
+ }
+
+ if (pBitsPerSample) {
+ *pBitsPerSample = bitsPerSample;
+ }
+ if (pSampleRate) {
+ *pSampleRate = sampleRate;
+ }
+
+ return MA_SUCCESS;
+}
+
+static ma_result ma_formats_flags_to_WAVEFORMATEX__winmm(DWORD dwFormats, WORD channels, WAVEFORMATEX* pWF)
+{
+ ma_result result;
+
+ MA_ASSERT(pWF != NULL);
+
+ MA_ZERO_OBJECT(pWF);
+ pWF->cbSize = sizeof(*pWF);
+ pWF->wFormatTag = WAVE_FORMAT_PCM;
+ pWF->nChannels = (WORD)channels;
+ if (pWF->nChannels > 2) {
+ pWF->nChannels = 2;
+ }
+
+ result = ma_get_best_info_from_formats_flags__winmm(dwFormats, channels, &pWF->wBitsPerSample, &pWF->nSamplesPerSec);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ pWF->nBlockAlign = (WORD)(pWF->nChannels * pWF->wBitsPerSample / 8);
+ pWF->nAvgBytesPerSec = pWF->nBlockAlign * pWF->nSamplesPerSec;
+
+ return MA_SUCCESS;
+}
+
+/*
+Fills out a ma_device_info from a common WAVECAPS structure: the device name (with an attempt at
+retrieving the full, untruncated name from the registry) and a single native data format derived
+from the caps' format flags. Returns MA_FORMAT_NOT_SUPPORTED if no usable format can be derived.
+*/
+static ma_result ma_context_get_device_info_from_WAVECAPS(ma_context* pContext, MA_WAVECAPSA* pCaps, ma_device_info* pDeviceInfo)
+{
+    WORD bitsPerSample;
+    DWORD sampleRate;
+    ma_result result;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pCaps != NULL);
+    MA_ASSERT(pDeviceInfo != NULL);
+
+    /*
+    Name / Description
+
+    Unfortunately the name specified in WAVE(OUT/IN)CAPS2 is limited to 31 characters. This results in an unprofessional looking
+    situation where the names of the devices are truncated. To help work around this, we need to look at the name GUID and try
+    looking in the registry for the full name. If we can't find it there, we need to just fall back to the default name.
+    */
+
+    /* Set the default to begin with. */
+    ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), pCaps->szPname, (size_t)-1);
+
+    /*
+    Now try the registry. There's a few things to consider here:
+    - The name GUID can be null, in which we case we just need to stick to the original 31 characters.
+    - If the name GUID is not present in the registry we'll also need to stick to the original 31 characters.
+    - I like consistency, so I want the returned device names to be consistent with those returned by WASAPI and DirectSound. The
+      problem, however is that WASAPI and DirectSound use "<component> (<name>)" format (such as "Speakers (High Definition Audio)"),
+      but WinMM does not specificy the component name. From my admittedly limited testing, I've notice the component name seems to
+      usually fit within the 31 characters of the fixed sized buffer, so what I'm going to do is parse that string for the component
+      name, and then concatenate the name from the registry.
+    */
+    if (!ma_is_guid_null(&pCaps->NameGuid)) {
+        wchar_t guidStrW[256];
+        if (((MA_PFN_StringFromGUID2)pContext->win32.StringFromGUID2)(&pCaps->NameGuid, guidStrW, ma_countof(guidStrW)) > 0) {
+            char guidStr[256];
+            char keyStr[1024];
+            HKEY hKey;
+
+            /* NOTE(review): the return value of WideCharToMultiByte() is not checked here; on failure guidStr would be used uninitialized — confirm this can't happen for a GUID string. */
+            WideCharToMultiByte(CP_UTF8, 0, guidStrW, -1, guidStr, sizeof(guidStr), 0, FALSE);
+
+            /* The full device name lives under HKLM\SYSTEM\CurrentControlSet\Control\MediaCategories\<GUID>\Name. */
+            ma_strcpy_s(keyStr, sizeof(keyStr), "SYSTEM\\CurrentControlSet\\Control\\MediaCategories\\");
+            ma_strcat_s(keyStr, sizeof(keyStr), guidStr);
+
+            if (((MA_PFN_RegOpenKeyExA)pContext->win32.RegOpenKeyExA)(HKEY_LOCAL_MACHINE, keyStr, 0, KEY_READ, &hKey) == ERROR_SUCCESS) {
+                BYTE nameFromReg[512];
+                DWORD nameFromRegSize = sizeof(nameFromReg);
+                LONG resultWin32 = ((MA_PFN_RegQueryValueExA)pContext->win32.RegQueryValueExA)(hKey, "Name", 0, NULL, (LPBYTE)nameFromReg, (LPDWORD)&nameFromRegSize);
+                ((MA_PFN_RegCloseKey)pContext->win32.RegCloseKey)(hKey);
+
+                if (resultWin32 == ERROR_SUCCESS) {
+                    /* We have the value from the registry, so now we need to construct the name string. */
+                    char name[1024];
+                    if (ma_strcpy_s(name, sizeof(name), pDeviceInfo->name) == 0) {
+                        /* Replace everything after the last "(" with the registry name to form "<component> (<full name>)". */
+                        char* nameBeg = ma_find_last_character(name, '(');
+                        if (nameBeg != NULL) {
+                            size_t leadingLen = (nameBeg - name);
+                            ma_strncpy_s(nameBeg + 1, sizeof(name) - leadingLen, (const char*)nameFromReg, (size_t)-1);
+
+                            /* The closing ")", if it can fit. */
+                            if (leadingLen + nameFromRegSize < sizeof(name)-1) {
+                                ma_strcat_s(name, sizeof(name), ")");
+                            }
+
+                            ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), name, (size_t)-1);
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+
+    /* Pick the best format WinMM reports for this device and expose it as the single native data format. */
+    result = ma_get_best_info_from_formats_flags__winmm(pCaps->dwFormats, pCaps->wChannels, &bitsPerSample, &sampleRate);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    if (bitsPerSample == 8) {
+        pDeviceInfo->nativeDataFormats[0].format = ma_format_u8;
+    } else if (bitsPerSample == 16) {
+        pDeviceInfo->nativeDataFormats[0].format = ma_format_s16;
+    } else if (bitsPerSample == 24) {
+        pDeviceInfo->nativeDataFormats[0].format = ma_format_s24;
+    } else if (bitsPerSample == 32) {
+        pDeviceInfo->nativeDataFormats[0].format = ma_format_s32;
+    } else {
+        return MA_FORMAT_NOT_SUPPORTED;
+    }
+    pDeviceInfo->nativeDataFormats[0].channels = pCaps->wChannels;
+    pDeviceInfo->nativeDataFormats[0].sampleRate = sampleRate;
+    pDeviceInfo->nativeDataFormats[0].flags = 0;
+    pDeviceInfo->nativeDataFormatCount = 1;
+
+    return MA_SUCCESS;
+}
+
+/* Repackages playback-specific WAVEOUTCAPS2 into the common WAVECAPS structure and defers to the shared converter. */
+static ma_result ma_context_get_device_info_from_WAVEOUTCAPS2(ma_context* pContext, MA_WAVEOUTCAPS2A* pCaps, ma_device_info* pDeviceInfo)
+{
+    MA_WAVECAPSA commonCaps;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pCaps != NULL);
+    MA_ASSERT(pDeviceInfo != NULL);
+
+    /* Only the members the shared converter actually reads need to be copied across. */
+    commonCaps.dwFormats = pCaps->dwFormats;
+    commonCaps.wChannels = pCaps->wChannels;
+    commonCaps.NameGuid  = pCaps->NameGuid;
+    MA_COPY_MEMORY(commonCaps.szPname, pCaps->szPname, sizeof(commonCaps.szPname));
+
+    return ma_context_get_device_info_from_WAVECAPS(pContext, &commonCaps, pDeviceInfo);
+}
+
+/* Repackages capture-specific WAVEINCAPS2 into the common WAVECAPS structure and defers to the shared converter. */
+static ma_result ma_context_get_device_info_from_WAVEINCAPS2(ma_context* pContext, MA_WAVEINCAPS2A* pCaps, ma_device_info* pDeviceInfo)
+{
+    MA_WAVECAPSA commonCaps;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pCaps != NULL);
+    MA_ASSERT(pDeviceInfo != NULL);
+
+    /* Only the members the shared converter actually reads need to be copied across. */
+    commonCaps.dwFormats = pCaps->dwFormats;
+    commonCaps.wChannels = pCaps->wChannels;
+    commonCaps.NameGuid  = pCaps->NameGuid;
+    MA_COPY_MEMORY(commonCaps.szPname, pCaps->szPname, sizeof(commonCaps.szPname));
+
+    return ma_context_get_device_info_from_WAVECAPS(pContext, &commonCaps, pDeviceInfo);
+}
+
+
+/*
+Enumerates playback devices first, then capture devices, invoking `callback` for each one that can
+be converted to a ma_device_info. Devices whose caps cannot be retrieved or converted are silently
+skipped. Enumeration stops early (still returning MA_SUCCESS) when the callback returns MA_FALSE.
+*/
+static ma_result ma_context_enumerate_devices__winmm(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+{
+    UINT playbackDeviceCount;
+    UINT captureDeviceCount;
+    UINT iPlaybackDevice;
+    UINT iCaptureDevice;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(callback != NULL);
+
+    /* Playback. */
+    playbackDeviceCount = ((MA_PFN_waveOutGetNumDevs)pContext->winmm.waveOutGetNumDevs)();
+    for (iPlaybackDevice = 0; iPlaybackDevice < playbackDeviceCount; ++iPlaybackDevice) {
+        MMRESULT result;
+        MA_WAVEOUTCAPS2A caps;
+
+        MA_ZERO_OBJECT(&caps);
+
+        result = ((MA_PFN_waveOutGetDevCapsA)pContext->winmm.waveOutGetDevCapsA)(iPlaybackDevice, (WAVEOUTCAPSA*)&caps, sizeof(caps));
+        if (result == MMSYSERR_NOERROR) {
+            ma_device_info deviceInfo;
+
+            MA_ZERO_OBJECT(&deviceInfo);
+            deviceInfo.id.winmm = iPlaybackDevice;
+
+            /* The first enumerated device is the default device. */
+            if (iPlaybackDevice == 0) {
+                deviceInfo.isDefault = MA_TRUE;
+            }
+
+            if (ma_context_get_device_info_from_WAVEOUTCAPS2(pContext, &caps, &deviceInfo) == MA_SUCCESS) {
+                ma_bool32 cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData);
+                if (cbResult == MA_FALSE) {
+                    return MA_SUCCESS; /* Enumeration was stopped. */
+                }
+            }
+        }
+    }
+
+    /* Capture. */
+    captureDeviceCount = ((MA_PFN_waveInGetNumDevs)pContext->winmm.waveInGetNumDevs)();
+    for (iCaptureDevice = 0; iCaptureDevice < captureDeviceCount; ++iCaptureDevice) {
+        MMRESULT result;
+        MA_WAVEINCAPS2A caps;
+
+        MA_ZERO_OBJECT(&caps);
+
+        result = ((MA_PFN_waveInGetDevCapsA)pContext->winmm.waveInGetDevCapsA)(iCaptureDevice, (WAVEINCAPSA*)&caps, sizeof(caps));
+        if (result == MMSYSERR_NOERROR) {
+            ma_device_info deviceInfo;
+
+            MA_ZERO_OBJECT(&deviceInfo);
+            deviceInfo.id.winmm = iCaptureDevice;
+
+            /* The first enumerated device is the default device. */
+            if (iCaptureDevice == 0) {
+                deviceInfo.isDefault = MA_TRUE;
+            }
+
+            if (ma_context_get_device_info_from_WAVEINCAPS2(pContext, &caps, &deviceInfo) == MA_SUCCESS) {
+                ma_bool32 cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData);
+                if (cbResult == MA_FALSE) {
+                    return MA_SUCCESS; /* Enumeration was stopped. */
+                }
+            }
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+/*
+Retrieves info for a single device. A null device ID selects the default device, which in WinMM is
+always index 0. Returns MA_NO_DEVICE if the caps query fails for the given index.
+*/
+static ma_result ma_context_get_device_info__winmm(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo)
+{
+    UINT deviceIndex;
+
+    MA_ASSERT(pContext != NULL);
+
+    deviceIndex = (pDeviceID != NULL) ? (UINT)pDeviceID->winmm : 0;
+
+    pDeviceInfo->id.winmm = deviceIndex;
+
+    /* The first ID is the default device. */
+    if (deviceIndex == 0) {
+        pDeviceInfo->isDefault = MA_TRUE;
+    }
+
+    if (deviceType == ma_device_type_playback) {
+        MA_WAVEOUTCAPS2A caps;
+
+        MA_ZERO_OBJECT(&caps);
+
+        if (((MA_PFN_waveOutGetDevCapsA)pContext->winmm.waveOutGetDevCapsA)(deviceIndex, (WAVEOUTCAPSA*)&caps, sizeof(caps)) == MMSYSERR_NOERROR) {
+            return ma_context_get_device_info_from_WAVEOUTCAPS2(pContext, &caps, pDeviceInfo);
+        }
+    } else {
+        MA_WAVEINCAPS2A caps;
+
+        MA_ZERO_OBJECT(&caps);
+
+        if (((MA_PFN_waveInGetDevCapsA)pContext->winmm.waveInGetDevCapsA)(deviceIndex, (WAVEINCAPSA*)&caps, sizeof(caps)) == MMSYSERR_NOERROR) {
+            return ma_context_get_device_info_from_WAVEINCAPS2(pContext, &caps, pDeviceInfo);
+        }
+    }
+
+    /* Getting here means the caps query failed, so assume there's no such device. */
+    return MA_NO_DEVICE;
+}
+
+
+/* Tears down the WinMM device: closes handles, releases events, and frees the shared heap allocation. */
+static ma_result ma_device_uninit__winmm(ma_device* pDevice)
+{
+    ma_context* pContext;
+
+    MA_ASSERT(pDevice != NULL);
+
+    pContext = pDevice->pContext;
+
+    /* Capture side: close the device and release its notification event. */
+    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+        ((MA_PFN_waveInClose)pContext->winmm.waveInClose)((HWAVEIN)pDevice->winmm.hDeviceCapture);
+        CloseHandle((HANDLE)pDevice->winmm.hEventCapture);
+    }
+
+    /* Playback side: reset first so any in-flight buffers are returned, then close. */
+    if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+        ((MA_PFN_waveOutReset)pContext->winmm.waveOutReset)((HWAVEOUT)pDevice->winmm.hDevicePlayback);
+        ((MA_PFN_waveOutClose)pContext->winmm.waveOutClose)((HWAVEOUT)pDevice->winmm.hDevicePlayback);
+        CloseHandle((HANDLE)pDevice->winmm.hEventPlayback);
+    }
+
+    /* Both directions share a single heap allocation for headers and intermediary buffers. */
+    ma_free(pDevice->winmm._pHeapData, &pContext->allocationCallbacks);
+
+    MA_ZERO_OBJECT(&pDevice->winmm); /* Safety. */
+
+    return MA_SUCCESS;
+}
+
+/* Computes the period size for a descriptor, clamped to WinMM's practical minimum of 40ms. */
+static ma_uint32 ma_calculate_period_size_in_frames_from_descriptor__winmm(const ma_device_descriptor* pDescriptor, ma_uint32 nativeSampleRate, ma_performance_profile performanceProfile)
+{
+    ma_uint32 requestedSizeInFrames = ma_calculate_buffer_size_in_frames_from_descriptor(pDescriptor, nativeSampleRate, performanceProfile);
+    ma_uint32 floorSizeInFrames     = ma_calculate_buffer_size_in_frames_from_milliseconds(40, nativeSampleRate);   /* WinMM has a minimum period size of 40ms. */
+
+    return (requestedSizeInFrames < floorSizeInFrames) ? floorSizeInFrames : requestedSizeInFrames;
+}
+
+/*
+Initializes the WinMM device(s) for the requested device type. Capture is initialized first, then
+playback, and finally a single heap allocation is made which is laid out as:
+
+    [Capture WAVEHDRs][Playback WAVEHDRs][Capture Intermediary Buffer][Playback Intermediary Buffer]
+
+On any failure everything created up to that point (events, device handles, prepared headers, heap
+memory) is torn down before returning.
+
+Fixes relative to the previous revision:
+  - The playback error path previously checked pWAVEHDRCapture (copy-paste bug) before unpreparing
+    the *playback* headers, so playback headers could be left prepared (or a NULL playback header
+    array dereferenced) on failure. It now checks pWAVEHDRPlayback.
+  - The event handles were leaked on the error path; they are now closed.
+  - The error path keys off pConfig->deviceType, consistent with the init path above it.
+*/
+static ma_result ma_device_init__winmm(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture)
+{
+    const char* errorMsg = "";
+    ma_result errorCode = MA_ERROR;
+    ma_result result = MA_SUCCESS;
+    ma_uint32 heapSize;
+    UINT winMMDeviceIDPlayback = 0;
+    UINT winMMDeviceIDCapture = 0;
+
+    MA_ASSERT(pDevice != NULL);
+
+    MA_ZERO_OBJECT(&pDevice->winmm);
+
+    /* WinMM has no loopback capture support. */
+    if (pConfig->deviceType == ma_device_type_loopback) {
+        return MA_DEVICE_TYPE_NOT_SUPPORTED;
+    }
+
+    /* No exclusive mode with WinMM. */
+    if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pDescriptorPlayback->shareMode == ma_share_mode_exclusive) ||
+        ((pConfig->deviceType == ma_device_type_capture  || pConfig->deviceType == ma_device_type_duplex) && pDescriptorCapture->shareMode == ma_share_mode_exclusive)) {
+        return MA_SHARE_MODE_NOT_SUPPORTED;
+    }
+
+    /* A null device ID means the default device, which in WinMM is index 0. */
+    if (pDescriptorPlayback->pDeviceID != NULL) {
+        winMMDeviceIDPlayback = (UINT)pDescriptorPlayback->pDeviceID->winmm;
+    }
+    if (pDescriptorCapture->pDeviceID != NULL) {
+        winMMDeviceIDCapture = (UINT)pDescriptorCapture->pDeviceID->winmm;
+    }
+
+    /* The capture device needs to be initialized first. */
+    if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
+        WAVEINCAPSA caps;
+        WAVEFORMATEX wf;
+        MMRESULT resultMM;
+
+        /* We use an event to know when a new fragment needs to be enqueued. */
+        pDevice->winmm.hEventCapture = (ma_handle)CreateEventW(NULL, TRUE, TRUE, NULL);
+        if (pDevice->winmm.hEventCapture == NULL) {
+            errorMsg = "[WinMM] Failed to create event for fragment enqueing for the capture device.", errorCode = ma_result_from_GetLastError(GetLastError());
+            goto on_error;
+        }
+
+        /* The format should be based on the device's actual format. */
+        if (((MA_PFN_waveInGetDevCapsA)pDevice->pContext->winmm.waveInGetDevCapsA)(winMMDeviceIDCapture, &caps, sizeof(caps)) != MMSYSERR_NOERROR) {
+            errorMsg = "[WinMM] Failed to retrieve internal device caps.", errorCode = MA_FORMAT_NOT_SUPPORTED;
+            goto on_error;
+        }
+
+        result = ma_formats_flags_to_WAVEFORMATEX__winmm(caps.dwFormats, caps.wChannels, &wf);
+        if (result != MA_SUCCESS) {
+            errorMsg = "[WinMM] Could not find appropriate format for internal device.", errorCode = result;
+            goto on_error;
+        }
+
+        resultMM = ((MA_PFN_waveInOpen)pDevice->pContext->winmm.waveInOpen)((LPHWAVEIN)&pDevice->winmm.hDeviceCapture, winMMDeviceIDCapture, &wf, (DWORD_PTR)pDevice->winmm.hEventCapture, (DWORD_PTR)pDevice, CALLBACK_EVENT | WAVE_ALLOWSYNC);
+        if (resultMM != MMSYSERR_NOERROR) {
+            errorMsg = "[WinMM] Failed to open capture device.", errorCode = MA_FAILED_TO_OPEN_BACKEND_DEVICE;
+            goto on_error;
+        }
+
+        /* Report the device's native format back to the caller. The period count is taken from the descriptor as-is. */
+        pDescriptorCapture->format             = ma_format_from_WAVEFORMATEX(&wf);
+        pDescriptorCapture->channels           = wf.nChannels;
+        pDescriptorCapture->sampleRate         = wf.nSamplesPerSec;
+        ma_channel_map_init_standard(ma_standard_channel_map_microsoft, pDescriptorCapture->channelMap, ma_countof(pDescriptorCapture->channelMap), pDescriptorCapture->channels);
+        pDescriptorCapture->periodSizeInFrames = ma_calculate_period_size_in_frames_from_descriptor__winmm(pDescriptorCapture, pDescriptorCapture->sampleRate, pConfig->performanceProfile);
+    }
+
+    if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+        WAVEOUTCAPSA caps;
+        WAVEFORMATEX wf;
+        MMRESULT resultMM;
+
+        /* We use an event to know when a new fragment needs to be enqueued. */
+        pDevice->winmm.hEventPlayback = (ma_handle)CreateEventW(NULL, TRUE, TRUE, NULL);
+        if (pDevice->winmm.hEventPlayback == NULL) {
+            errorMsg = "[WinMM] Failed to create event for fragment enqueing for the playback device.", errorCode = ma_result_from_GetLastError(GetLastError());
+            goto on_error;
+        }
+
+        /* The format should be based on the device's actual format. */
+        if (((MA_PFN_waveOutGetDevCapsA)pDevice->pContext->winmm.waveOutGetDevCapsA)(winMMDeviceIDPlayback, &caps, sizeof(caps)) != MMSYSERR_NOERROR) {
+            errorMsg = "[WinMM] Failed to retrieve internal device caps.", errorCode = MA_FORMAT_NOT_SUPPORTED;
+            goto on_error;
+        }
+
+        result = ma_formats_flags_to_WAVEFORMATEX__winmm(caps.dwFormats, caps.wChannels, &wf);
+        if (result != MA_SUCCESS) {
+            errorMsg = "[WinMM] Could not find appropriate format for internal device.", errorCode = result;
+            goto on_error;
+        }
+
+        resultMM = ((MA_PFN_waveOutOpen)pDevice->pContext->winmm.waveOutOpen)((LPHWAVEOUT)&pDevice->winmm.hDevicePlayback, winMMDeviceIDPlayback, &wf, (DWORD_PTR)pDevice->winmm.hEventPlayback, (DWORD_PTR)pDevice, CALLBACK_EVENT | WAVE_ALLOWSYNC);
+        if (resultMM != MMSYSERR_NOERROR) {
+            errorMsg = "[WinMM] Failed to open playback device.", errorCode = MA_FAILED_TO_OPEN_BACKEND_DEVICE;
+            goto on_error;
+        }
+
+        /* Report the device's native format back to the caller. The period count is taken from the descriptor as-is. */
+        pDescriptorPlayback->format             = ma_format_from_WAVEFORMATEX(&wf);
+        pDescriptorPlayback->channels           = wf.nChannels;
+        pDescriptorPlayback->sampleRate         = wf.nSamplesPerSec;
+        ma_channel_map_init_standard(ma_standard_channel_map_microsoft, pDescriptorPlayback->channelMap, ma_countof(pDescriptorPlayback->channelMap), pDescriptorPlayback->channels);
+        pDescriptorPlayback->periodSizeInFrames = ma_calculate_period_size_in_frames_from_descriptor__winmm(pDescriptorPlayback, pDescriptorPlayback->sampleRate, pConfig->performanceProfile);
+    }
+
+    /*
+    The heap allocated data is allocated like so:
+
+    [Capture WAVEHDRs][Playback WAVEHDRs][Capture Intermediary Buffer][Playback Intermediary Buffer]
+    */
+    heapSize = 0;
+    if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
+        heapSize += sizeof(WAVEHDR)*pDescriptorCapture->periodCount + (pDescriptorCapture->periodSizeInFrames * pDescriptorCapture->periodCount * ma_get_bytes_per_frame(pDescriptorCapture->format, pDescriptorCapture->channels));
+    }
+    if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+        heapSize += sizeof(WAVEHDR)*pDescriptorPlayback->periodCount + (pDescriptorPlayback->periodSizeInFrames * pDescriptorPlayback->periodCount * ma_get_bytes_per_frame(pDescriptorPlayback->format, pDescriptorPlayback->channels));
+    }
+
+    pDevice->winmm._pHeapData = (ma_uint8*)ma_calloc(heapSize, &pDevice->pContext->allocationCallbacks);
+    if (pDevice->winmm._pHeapData == NULL) {
+        errorMsg = "[WinMM] Failed to allocate memory for the intermediary buffer.", errorCode = MA_OUT_OF_MEMORY;
+        goto on_error;
+    }
+
+    MA_ZERO_MEMORY(pDevice->winmm._pHeapData, heapSize);
+
+    if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
+        ma_uint32 iPeriod;
+
+        /* In duplex mode the capture intermediary buffer sits after *both* header arrays. */
+        if (pConfig->deviceType == ma_device_type_capture) {
+            pDevice->winmm.pWAVEHDRCapture            = pDevice->winmm._pHeapData;
+            pDevice->winmm.pIntermediaryBufferCapture = pDevice->winmm._pHeapData + (sizeof(WAVEHDR)*(pDescriptorCapture->periodCount));
+        } else {
+            pDevice->winmm.pWAVEHDRCapture            = pDevice->winmm._pHeapData;
+            pDevice->winmm.pIntermediaryBufferCapture = pDevice->winmm._pHeapData + (sizeof(WAVEHDR)*(pDescriptorCapture->periodCount + pDescriptorPlayback->periodCount));
+        }
+
+        /* Prepare headers. */
+        for (iPeriod = 0; iPeriod < pDescriptorCapture->periodCount; ++iPeriod) {
+            ma_uint32 periodSizeInBytes = ma_get_period_size_in_bytes(pDescriptorCapture->periodSizeInFrames, pDescriptorCapture->format, pDescriptorCapture->channels);
+
+            ((WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod].lpData         = (LPSTR)(pDevice->winmm.pIntermediaryBufferCapture + (periodSizeInBytes*iPeriod));
+            ((WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod].dwBufferLength = periodSizeInBytes;
+            ((WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod].dwFlags        = 0L;
+            ((WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod].dwLoops        = 0L;
+            ((MA_PFN_waveInPrepareHeader)pDevice->pContext->winmm.waveInPrepareHeader)((HWAVEIN)pDevice->winmm.hDeviceCapture, &((WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod], sizeof(WAVEHDR));
+
+            /*
+            The user data of the WAVEHDR structure is a single flag the controls whether or not it is ready for writing. Consider it to be named "isLocked". A value of 0 means
+            it's unlocked and available for writing. A value of 1 means it's locked.
+            */
+            ((WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod].dwUser = 0;
+        }
+    }
+
+    if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+        ma_uint32 iPeriod;
+
+        /* In duplex mode the playback header array sits after the capture headers, and its intermediary buffer after the capture buffer. */
+        if (pConfig->deviceType == ma_device_type_playback) {
+            pDevice->winmm.pWAVEHDRPlayback            = pDevice->winmm._pHeapData;
+            pDevice->winmm.pIntermediaryBufferPlayback = pDevice->winmm._pHeapData + (sizeof(WAVEHDR)*pDescriptorPlayback->periodCount);
+        } else {
+            pDevice->winmm.pWAVEHDRPlayback            = pDevice->winmm._pHeapData + (sizeof(WAVEHDR)*(pDescriptorCapture->periodCount));
+            pDevice->winmm.pIntermediaryBufferPlayback = pDevice->winmm._pHeapData + (sizeof(WAVEHDR)*(pDescriptorCapture->periodCount + pDescriptorPlayback->periodCount)) + (pDescriptorCapture->periodSizeInFrames*pDescriptorCapture->periodCount*ma_get_bytes_per_frame(pDescriptorCapture->format, pDescriptorCapture->channels));
+        }
+
+        /* Prepare headers. */
+        for (iPeriod = 0; iPeriod < pDescriptorPlayback->periodCount; ++iPeriod) {
+            ma_uint32 periodSizeInBytes = ma_get_period_size_in_bytes(pDescriptorPlayback->periodSizeInFrames, pDescriptorPlayback->format, pDescriptorPlayback->channels);
+
+            ((WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod].lpData         = (LPSTR)(pDevice->winmm.pIntermediaryBufferPlayback + (periodSizeInBytes*iPeriod));
+            ((WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod].dwBufferLength = periodSizeInBytes;
+            ((WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod].dwFlags        = 0L;
+            ((WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod].dwLoops        = 0L;
+            ((MA_PFN_waveOutPrepareHeader)pDevice->pContext->winmm.waveOutPrepareHeader)((HWAVEOUT)pDevice->winmm.hDevicePlayback, &((WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod], sizeof(WAVEHDR));
+
+            /*
+            The user data of the WAVEHDR structure is a single flag the controls whether or not it is ready for writing. Consider it to be named "isLocked". A value of 0 means
+            it's unlocked and available for writing. A value of 1 means it's locked.
+            */
+            ((WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod].dwUser = 0;
+        }
+    }
+
+    return MA_SUCCESS;
+
+on_error:
+    if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
+        if (pDevice->winmm.pWAVEHDRCapture != NULL) {
+            ma_uint32 iPeriod;
+            for (iPeriod = 0; iPeriod < pDescriptorCapture->periodCount; ++iPeriod) {
+                ((MA_PFN_waveInUnprepareHeader)pDevice->pContext->winmm.waveInUnprepareHeader)((HWAVEIN)pDevice->winmm.hDeviceCapture, &((WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod], sizeof(WAVEHDR));
+            }
+        }
+
+        ((MA_PFN_waveInClose)pDevice->pContext->winmm.waveInClose)((HWAVEIN)pDevice->winmm.hDeviceCapture);
+
+        if (pDevice->winmm.hEventCapture != NULL) {
+            CloseHandle((HANDLE)pDevice->winmm.hEventCapture);  /* Previously leaked on error. */
+        }
+    }
+
+    if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+        if (pDevice->winmm.pWAVEHDRPlayback != NULL) {  /* <-- Was incorrectly checking pWAVEHDRCapture. */
+            ma_uint32 iPeriod;
+            for (iPeriod = 0; iPeriod < pDescriptorPlayback->periodCount; ++iPeriod) {
+                ((MA_PFN_waveOutUnprepareHeader)pDevice->pContext->winmm.waveOutUnprepareHeader)((HWAVEOUT)pDevice->winmm.hDevicePlayback, &((WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod], sizeof(WAVEHDR));
+            }
+        }
+
+        ((MA_PFN_waveOutClose)pDevice->pContext->winmm.waveOutClose)((HWAVEOUT)pDevice->winmm.hDevicePlayback);
+
+        if (pDevice->winmm.hEventPlayback != NULL) {
+            CloseHandle((HANDLE)pDevice->winmm.hEventPlayback); /* Previously leaked on error. */
+        }
+    }
+
+    ma_free(pDevice->winmm._pHeapData, &pDevice->pContext->allocationCallbacks);
+
+    if (errorMsg != NULL && errorMsg[0] != '\0') {
+        ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "%s", errorMsg);
+    }
+
+    return errorCode;
+}
+
+/*
+Starts the device. For capture this means attaching all buffers and calling waveInStart(). Playback
+needs no explicit start here — it begins on the first waveOutWrite() in ma_device_write__winmm().
+*/
+static ma_result ma_device_start__winmm(ma_device* pDevice)
+{
+    MA_ASSERT(pDevice != NULL);
+
+    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+        WAVEHDR* pHeaders = (WAVEHDR*)pDevice->winmm.pWAVEHDRCapture;
+        MMRESULT resultMM;
+        ma_uint32 iBuffer;
+
+        /* Make sure the event is reset to a non-signaled state to ensure we don't prematurely return from WaitForSingleObject(). */
+        ResetEvent((HANDLE)pDevice->winmm.hEventCapture);
+
+        /* Attach every buffer up front; notifications will arrive as the backend fills them. */
+        for (iBuffer = 0; iBuffer < pDevice->capture.internalPeriods; iBuffer += 1) {
+            resultMM = ((MA_PFN_waveInAddBuffer)pDevice->pContext->winmm.waveInAddBuffer)((HWAVEIN)pDevice->winmm.hDeviceCapture, &pHeaders[iBuffer], sizeof(WAVEHDR));
+            if (resultMM != MMSYSERR_NOERROR) {
+                ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WinMM] Failed to attach input buffers to capture device in preparation for capture.");
+                return ma_result_from_MMRESULT(resultMM);
+            }
+
+            /* Make sure all of the buffers start out locked. We don't want to access them until the backend tells us we can. */
+            pHeaders[iBuffer].dwUser = 1; /* 1 = locked. */
+        }
+
+        /* Capture devices need to be explicitly started, unlike playback devices. */
+        resultMM = ((MA_PFN_waveInStart)pDevice->pContext->winmm.waveInStart)((HWAVEIN)pDevice->winmm.hDeviceCapture);
+        if (resultMM != MMSYSERR_NOERROR) {
+            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WinMM] Failed to start backend device.");
+            return ma_result_from_MMRESULT(resultMM);
+        }
+    }
+
+    /* Nothing to do for playback here; it starts automatically when the first buffer is submitted with waveOutWrite(). */
+
+    return MA_SUCCESS;
+}
+
+/*
+Stops the device. Capture is reset immediately. Playback is drained first: each still-locked header
+is waited on via the playback event before waveOutReset() is issued. Reset failures are logged as
+warnings but do not fail the call.
+*/
+static ma_result ma_device_stop__winmm(ma_device* pDevice)
+{
+    MMRESULT resultMM;
+
+    MA_ASSERT(pDevice != NULL);
+
+    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+        if (pDevice->winmm.hDeviceCapture == NULL) {
+            return MA_INVALID_ARGS;
+        }
+
+        resultMM = ((MA_PFN_waveInReset)pDevice->pContext->winmm.waveInReset)((HWAVEIN)pDevice->winmm.hDeviceCapture);
+        if (resultMM != MMSYSERR_NOERROR) {
+            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_WARNING, "[WinMM] WARNING: Failed to reset capture device.");
+        }
+    }
+
+    if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+        ma_uint32 iPeriod;
+        WAVEHDR* pWAVEHDR;
+
+        if (pDevice->winmm.hDevicePlayback == NULL) {
+            return MA_INVALID_ARGS;
+        }
+
+        /* We need to drain the device. To do this we just loop over each header and if it's locked just wait for the event. */
+        pWAVEHDR = (WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback;
+        for (iPeriod = 0; iPeriod < pDevice->playback.internalPeriods; iPeriod += 1) {
+            if (pWAVEHDR[iPeriod].dwUser == 1) { /* 1 = locked. */
+                if (WaitForSingleObject((HANDLE)pDevice->winmm.hEventPlayback, INFINITE) != WAIT_OBJECT_0) {
+                    break; /* An error occurred so just abandon ship and stop the device without draining. */
+                }
+
+                pWAVEHDR[iPeriod].dwUser = 0;
+            }
+        }
+
+        resultMM = ((MA_PFN_waveOutReset)pDevice->pContext->winmm.waveOutReset)((HWAVEOUT)pDevice->winmm.hDevicePlayback);
+        if (resultMM != MMSYSERR_NOERROR) {
+            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_WARNING, "[WinMM] WARNING: Failed to reset playback device.");
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+/*
+Writes frameCount PCM frames to the playback device, copying into the WAVEHDR ring. Unlocked headers
+(dwUser == 0) are filled; a fully consumed header is locked, submitted with waveOutWrite() (which
+also starts the device on the first submission) and the ring index advances. When no header is
+available the call blocks on hEventPlayback. Returns the frames actually written via pFramesWritten,
+which can be short if the device stops or an error occurs mid-write.
+*/
+static ma_result ma_device_write__winmm(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten)
+{
+    ma_result result = MA_SUCCESS;
+    MMRESULT resultMM;
+    ma_uint32 totalFramesWritten;
+    WAVEHDR* pWAVEHDR;
+
+    MA_ASSERT(pDevice != NULL);
+    MA_ASSERT(pPCMFrames != NULL);
+
+    if (pFramesWritten != NULL) {
+        *pFramesWritten = 0;
+    }
+
+    pWAVEHDR = (WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback;
+
+    /* Keep processing as much data as possible. */
+    totalFramesWritten = 0;
+    while (totalFramesWritten < frameCount) {
+        /* If the current header has some space available we need to write part of it. */
+        if (pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwUser == 0) { /* 0 = unlocked. */
+            /*
+            This header has room in it. We copy as much of it as we can. If we end up fully consuming the buffer we need to
+            write it out and move on to the next iteration.
+            */
+            ma_uint32 bpf = ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+            ma_uint32 framesRemainingInHeader = (pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwBufferLength/bpf) - pDevice->winmm.headerFramesConsumedPlayback;
+
+            ma_uint32 framesToCopy = ma_min(framesRemainingInHeader, (frameCount - totalFramesWritten));
+            const void* pSrc = ma_offset_ptr(pPCMFrames, totalFramesWritten*bpf);
+            void* pDst = ma_offset_ptr(pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].lpData, pDevice->winmm.headerFramesConsumedPlayback*bpf);
+            MA_COPY_MEMORY(pDst, pSrc, framesToCopy*bpf);
+
+            pDevice->winmm.headerFramesConsumedPlayback += framesToCopy;
+            totalFramesWritten += framesToCopy;
+
+            /* If we've consumed the buffer entirely we need to write it out to the device. */
+            if (pDevice->winmm.headerFramesConsumedPlayback == (pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwBufferLength/bpf)) {
+                pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwUser = 1; /* 1 = locked. */
+                pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwFlags &= ~WHDR_DONE; /* <-- Need to make sure the WHDR_DONE flag is unset. */
+
+                /* Make sure the event is reset to a non-signaled state to ensure we don't prematurely return from WaitForSingleObject(). */
+                ResetEvent((HANDLE)pDevice->winmm.hEventPlayback);
+
+                /* The device will be started here. */
+                resultMM = ((MA_PFN_waveOutWrite)pDevice->pContext->winmm.waveOutWrite)((HWAVEOUT)pDevice->winmm.hDevicePlayback, &pWAVEHDR[pDevice->winmm.iNextHeaderPlayback], sizeof(WAVEHDR));
+                if (resultMM != MMSYSERR_NOERROR) {
+                    result = ma_result_from_MMRESULT(resultMM);
+                    ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WinMM] waveOutWrite() failed.");
+                    break;
+                }
+
+                /* Make sure we move to the next header. */
+                pDevice->winmm.iNextHeaderPlayback = (pDevice->winmm.iNextHeaderPlayback + 1) % pDevice->playback.internalPeriods;
+                pDevice->winmm.headerFramesConsumedPlayback = 0;
+            }
+
+            /* If at this point we have consumed the entire input buffer we can return. */
+            MA_ASSERT(totalFramesWritten <= frameCount);
+            if (totalFramesWritten == frameCount) {
+                break;
+            }
+
+            /* Getting here means there's more to process. */
+            continue;
+        }
+
+        /* Getting here means there isn't enough room in the buffer and we need to wait for one to become available. */
+        if (WaitForSingleObject((HANDLE)pDevice->winmm.hEventPlayback, INFINITE) != WAIT_OBJECT_0) {
+            result = MA_ERROR;
+            break;
+        }
+
+        /* Something happened. If the next buffer has been marked as done we need to reset a bit of state. */
+        if ((pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwFlags & WHDR_DONE) != 0) {
+            pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwUser = 0; /* 0 = unlocked (make it available for writing). */
+            pDevice->winmm.headerFramesConsumedPlayback = 0;
+        }
+
+        /* If the device has been stopped we need to break. */
+        if (ma_device_get_state(pDevice) != ma_device_state_started) {
+            break;
+        }
+    }
+
+    if (pFramesWritten != NULL) {
+        *pFramesWritten = totalFramesWritten;
+    }
+
+    return result;
+}
+
+/*
+Reads up to frameCount PCM frames from the capture device's WAVEHDR ring into pPCMFrames. Unlocked
+headers (dwUser == 0) are copied out; a fully drained header is locked and re-queued with
+waveInAddBuffer(), and the ring index advances. When no data is available the call blocks on
+hEventCapture. Returns the frames actually read via pFramesRead, which can be short if the device
+stops or an error occurs mid-read.
+*/
+static ma_result ma_device_read__winmm(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead)
+{
+    ma_result result = MA_SUCCESS;
+    MMRESULT resultMM;
+    ma_uint32 totalFramesRead;
+    WAVEHDR* pWAVEHDR;
+
+    MA_ASSERT(pDevice != NULL);
+    MA_ASSERT(pPCMFrames != NULL);
+
+    if (pFramesRead != NULL) {
+        *pFramesRead = 0;
+    }
+
+    pWAVEHDR = (WAVEHDR*)pDevice->winmm.pWAVEHDRCapture;
+
+    /* Keep processing as much data as possible. */
+    totalFramesRead = 0;
+    while (totalFramesRead < frameCount) {
+        /* If the current header has some space available we need to write part of it. */
+        if (pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwUser == 0) { /* 0 = unlocked. */
+            /* The buffer is available for reading. If we fully consume it we need to add it back to the buffer. */
+            ma_uint32 bpf = ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+            ma_uint32 framesRemainingInHeader = (pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwBufferLength/bpf) - pDevice->winmm.headerFramesConsumedCapture;
+
+            ma_uint32 framesToCopy = ma_min(framesRemainingInHeader, (frameCount - totalFramesRead));
+            const void* pSrc = ma_offset_ptr(pWAVEHDR[pDevice->winmm.iNextHeaderCapture].lpData, pDevice->winmm.headerFramesConsumedCapture*bpf);
+            void* pDst = ma_offset_ptr(pPCMFrames, totalFramesRead*bpf);
+            MA_COPY_MEMORY(pDst, pSrc, framesToCopy*bpf);
+
+            pDevice->winmm.headerFramesConsumedCapture += framesToCopy;
+            totalFramesRead += framesToCopy;
+
+            /* If we've consumed the buffer entirely we need to add it back to the device. */
+            if (pDevice->winmm.headerFramesConsumedCapture == (pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwBufferLength/bpf)) {
+                pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwUser = 1; /* 1 = locked. */
+                pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwFlags &= ~WHDR_DONE; /* <-- Need to make sure the WHDR_DONE flag is unset. */
+
+                /* Make sure the event is reset to a non-signaled state to ensure we don't prematurely return from WaitForSingleObject(). */
+                ResetEvent((HANDLE)pDevice->winmm.hEventCapture);
+
+                /* The device will be started here. */
+                resultMM = ((MA_PFN_waveInAddBuffer)pDevice->pContext->winmm.waveInAddBuffer)((HWAVEIN)pDevice->winmm.hDeviceCapture, &((LPWAVEHDR)pDevice->winmm.pWAVEHDRCapture)[pDevice->winmm.iNextHeaderCapture], sizeof(WAVEHDR));
+                if (resultMM != MMSYSERR_NOERROR) {
+                    result = ma_result_from_MMRESULT(resultMM);
+                    ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WinMM] waveInAddBuffer() failed.");
+                    break;
+                }
+
+                /* Make sure we move to the next header. */
+                pDevice->winmm.iNextHeaderCapture = (pDevice->winmm.iNextHeaderCapture + 1) % pDevice->capture.internalPeriods;
+                pDevice->winmm.headerFramesConsumedCapture = 0;
+            }
+
+            /* If at this point we have filled the entire input buffer we can return. */
+            MA_ASSERT(totalFramesRead <= frameCount);
+            if (totalFramesRead == frameCount) {
+                break;
+            }
+
+            /* Getting here means there's more to process. */
+            continue;
+        }
+
+        /* Getting here means there isn't enough any data left to send to the client which means we need to wait for more. */
+        if (WaitForSingleObject((HANDLE)pDevice->winmm.hEventCapture, INFINITE) != WAIT_OBJECT_0) {
+            result = MA_ERROR;
+            break;
+        }
+
+        /* Something happened. If the next buffer has been marked as done we need to reset a bit of state. */
+        if ((pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwFlags & WHDR_DONE) != 0) {
+            pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwUser = 0; /* 0 = unlocked (make it available for reading). */
+            pDevice->winmm.headerFramesConsumedCapture = 0;
+        }
+
+        /* If the device has been stopped we need to break. */
+        if (ma_device_get_state(pDevice) != ma_device_state_started) {
+            break;
+        }
+    }
+
+    if (pFramesRead != NULL) {
+        *pFramesRead = totalFramesRead;
+    }
+
+    return result;
+}
+
+/* Uninitializes the WinMM context. The only resource this backend's context holds is the winmm.dll handle. */
+static ma_result ma_context_uninit__winmm(ma_context* pContext)
+{
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pContext->backend == ma_backend_winmm);
+
+    ma_dlclose(pContext, pContext->winmm.hWinMM);
+
+    return MA_SUCCESS;
+}
+
+/*
+Initializes the WinMM backend context: loads winmm.dll at runtime, resolves every waveOut*/
+/*waveIn* entry point used by this backend, and installs the backend callback table.
+Returns MA_NO_BACKEND if winmm.dll cannot be loaded (i.e. WinMM is unavailable).
+*/
+static ma_result ma_context_init__winmm(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks)
+{
+    MA_ASSERT(pContext != NULL);
+
+    (void)pConfig;  /* No WinMM-specific config options. */
+
+    pContext->winmm.hWinMM = ma_dlopen(pContext, "winmm.dll");
+    if (pContext->winmm.hWinMM == NULL) {
+        return MA_NO_BACKEND;
+    }
+
+    /*
+    NOTE(review): the ma_dlsym() results below are not individually validated. These symbols have
+    shipped with winmm.dll on every supported Windows version, so a failure here is presumably not
+    expected in practice.
+    */
+    pContext->winmm.waveOutGetNumDevs      = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveOutGetNumDevs");
+    pContext->winmm.waveOutGetDevCapsA     = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveOutGetDevCapsA");
+    pContext->winmm.waveOutOpen            = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveOutOpen");
+    pContext->winmm.waveOutClose           = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveOutClose");
+    pContext->winmm.waveOutPrepareHeader   = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveOutPrepareHeader");
+    pContext->winmm.waveOutUnprepareHeader = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveOutUnprepareHeader");
+    pContext->winmm.waveOutWrite           = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveOutWrite");
+    pContext->winmm.waveOutReset           = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveOutReset");
+    pContext->winmm.waveInGetNumDevs       = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInGetNumDevs");
+    pContext->winmm.waveInGetDevCapsA      = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInGetDevCapsA");
+    pContext->winmm.waveInOpen             = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInOpen");
+    pContext->winmm.waveInClose            = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInClose");
+    pContext->winmm.waveInPrepareHeader    = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInPrepareHeader");
+    pContext->winmm.waveInUnprepareHeader  = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInUnprepareHeader");
+    pContext->winmm.waveInAddBuffer        = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInAddBuffer");
+    pContext->winmm.waveInStart            = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInStart");
+    pContext->winmm.waveInReset            = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInReset");
+
+    /* Wire up the backend vtable. Start/stop/read/write are all implemented by this backend. */
+    pCallbacks->onContextInit             = ma_context_init__winmm;
+    pCallbacks->onContextUninit           = ma_context_uninit__winmm;
+    pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__winmm;
+    pCallbacks->onContextGetDeviceInfo    = ma_context_get_device_info__winmm;
+    pCallbacks->onDeviceInit              = ma_device_init__winmm;
+    pCallbacks->onDeviceUninit            = ma_device_uninit__winmm;
+    pCallbacks->onDeviceStart             = ma_device_start__winmm;
+    pCallbacks->onDeviceStop              = ma_device_stop__winmm;
+    pCallbacks->onDeviceRead              = ma_device_read__winmm;
+    pCallbacks->onDeviceWrite             = ma_device_write__winmm;
+    pCallbacks->onDeviceDataLoop          = NULL;   /* This is a blocking read-write API, so this can be NULL since miniaudio will manage the audio thread for us. */
+
+    return MA_SUCCESS;
+}
+#endif
+
+
+
+
+/******************************************************************************
+
+ALSA Backend
+
+******************************************************************************/
+#ifdef MA_HAS_ALSA
+
+#include <poll.h> /* poll(), struct pollfd */
+#include <sys/eventfd.h> /* eventfd() */
+
+#ifdef MA_NO_RUNTIME_LINKING
+
+/* asoundlib.h marks some functions with "inline" which isn't always supported. Need to emulate it. */
+#if !defined(__cplusplus)
+ #if defined(__STRICT_ANSI__)
+ #if !defined(inline)
+ #define inline __inline__ __attribute__((always_inline))
+ #define MA_INLINE_DEFINED
+ #endif
+ #endif
+#endif
+#include <alsa/asoundlib.h>
+#if defined(MA_INLINE_DEFINED)
+ #undef inline
+ #undef MA_INLINE_DEFINED
+#endif
+
+typedef snd_pcm_uframes_t ma_snd_pcm_uframes_t;
+typedef snd_pcm_sframes_t ma_snd_pcm_sframes_t;
+typedef snd_pcm_stream_t ma_snd_pcm_stream_t;
+typedef snd_pcm_format_t ma_snd_pcm_format_t;
+typedef snd_pcm_access_t ma_snd_pcm_access_t;
+typedef snd_pcm_t ma_snd_pcm_t;
+typedef snd_pcm_hw_params_t ma_snd_pcm_hw_params_t;
+typedef snd_pcm_sw_params_t ma_snd_pcm_sw_params_t;
+typedef snd_pcm_format_mask_t ma_snd_pcm_format_mask_t;
+typedef snd_pcm_info_t ma_snd_pcm_info_t;
+typedef snd_pcm_channel_area_t ma_snd_pcm_channel_area_t;
+typedef snd_pcm_chmap_t ma_snd_pcm_chmap_t;
+typedef snd_pcm_state_t ma_snd_pcm_state_t;
+
+/* snd_pcm_stream_t */
+#define MA_SND_PCM_STREAM_PLAYBACK SND_PCM_STREAM_PLAYBACK
+#define MA_SND_PCM_STREAM_CAPTURE SND_PCM_STREAM_CAPTURE
+
+/* snd_pcm_format_t */
+#define MA_SND_PCM_FORMAT_UNKNOWN SND_PCM_FORMAT_UNKNOWN
+#define MA_SND_PCM_FORMAT_U8 SND_PCM_FORMAT_U8
+#define MA_SND_PCM_FORMAT_S16_LE SND_PCM_FORMAT_S16_LE
+#define MA_SND_PCM_FORMAT_S16_BE SND_PCM_FORMAT_S16_BE
+#define MA_SND_PCM_FORMAT_S24_LE SND_PCM_FORMAT_S24_LE
+#define MA_SND_PCM_FORMAT_S24_BE SND_PCM_FORMAT_S24_BE
+#define MA_SND_PCM_FORMAT_S32_LE SND_PCM_FORMAT_S32_LE
+#define MA_SND_PCM_FORMAT_S32_BE SND_PCM_FORMAT_S32_BE
+#define MA_SND_PCM_FORMAT_FLOAT_LE SND_PCM_FORMAT_FLOAT_LE
+#define MA_SND_PCM_FORMAT_FLOAT_BE SND_PCM_FORMAT_FLOAT_BE
+#define MA_SND_PCM_FORMAT_FLOAT64_LE SND_PCM_FORMAT_FLOAT64_LE
+#define MA_SND_PCM_FORMAT_FLOAT64_BE SND_PCM_FORMAT_FLOAT64_BE
+#define MA_SND_PCM_FORMAT_MU_LAW SND_PCM_FORMAT_MU_LAW
+#define MA_SND_PCM_FORMAT_A_LAW SND_PCM_FORMAT_A_LAW
+#define MA_SND_PCM_FORMAT_S24_3LE SND_PCM_FORMAT_S24_3LE
+#define MA_SND_PCM_FORMAT_S24_3BE SND_PCM_FORMAT_S24_3BE
+
+/* ma_snd_pcm_access_t */
+#define MA_SND_PCM_ACCESS_MMAP_INTERLEAVED SND_PCM_ACCESS_MMAP_INTERLEAVED
+#define MA_SND_PCM_ACCESS_MMAP_NONINTERLEAVED SND_PCM_ACCESS_MMAP_NONINTERLEAVED
+#define MA_SND_PCM_ACCESS_MMAP_COMPLEX SND_PCM_ACCESS_MMAP_COMPLEX
+#define MA_SND_PCM_ACCESS_RW_INTERLEAVED SND_PCM_ACCESS_RW_INTERLEAVED
+#define MA_SND_PCM_ACCESS_RW_NONINTERLEAVED SND_PCM_ACCESS_RW_NONINTERLEAVED
+
+/* Channel positions. */
+#define MA_SND_CHMAP_UNKNOWN SND_CHMAP_UNKNOWN
+#define MA_SND_CHMAP_NA SND_CHMAP_NA
+#define MA_SND_CHMAP_MONO SND_CHMAP_MONO
+#define MA_SND_CHMAP_FL SND_CHMAP_FL
+#define MA_SND_CHMAP_FR SND_CHMAP_FR
+#define MA_SND_CHMAP_RL SND_CHMAP_RL
+#define MA_SND_CHMAP_RR SND_CHMAP_RR
+#define MA_SND_CHMAP_FC SND_CHMAP_FC
+#define MA_SND_CHMAP_LFE SND_CHMAP_LFE
+#define MA_SND_CHMAP_SL SND_CHMAP_SL
+#define MA_SND_CHMAP_SR SND_CHMAP_SR
+#define MA_SND_CHMAP_RC SND_CHMAP_RC
+#define MA_SND_CHMAP_FLC SND_CHMAP_FLC
+#define MA_SND_CHMAP_FRC SND_CHMAP_FRC
+#define MA_SND_CHMAP_RLC SND_CHMAP_RLC
+#define MA_SND_CHMAP_RRC SND_CHMAP_RRC
+#define MA_SND_CHMAP_FLW SND_CHMAP_FLW
+#define MA_SND_CHMAP_FRW SND_CHMAP_FRW
+#define MA_SND_CHMAP_FLH SND_CHMAP_FLH
+#define MA_SND_CHMAP_FCH SND_CHMAP_FCH
+#define MA_SND_CHMAP_FRH SND_CHMAP_FRH
+#define MA_SND_CHMAP_TC SND_CHMAP_TC
+#define MA_SND_CHMAP_TFL SND_CHMAP_TFL
+#define MA_SND_CHMAP_TFR SND_CHMAP_TFR
+#define MA_SND_CHMAP_TFC SND_CHMAP_TFC
+#define MA_SND_CHMAP_TRL SND_CHMAP_TRL
+#define MA_SND_CHMAP_TRR SND_CHMAP_TRR
+#define MA_SND_CHMAP_TRC SND_CHMAP_TRC
+#define MA_SND_CHMAP_TFLC SND_CHMAP_TFLC
+#define MA_SND_CHMAP_TFRC SND_CHMAP_TFRC
+#define MA_SND_CHMAP_TSL SND_CHMAP_TSL
+#define MA_SND_CHMAP_TSR SND_CHMAP_TSR
+#define MA_SND_CHMAP_LLFE SND_CHMAP_LLFE
+#define MA_SND_CHMAP_RLFE SND_CHMAP_RLFE
+#define MA_SND_CHMAP_BC SND_CHMAP_BC
+#define MA_SND_CHMAP_BLC SND_CHMAP_BLC
+#define MA_SND_CHMAP_BRC SND_CHMAP_BRC
+
+/* Open mode flags. */
+#define MA_SND_PCM_NO_AUTO_RESAMPLE SND_PCM_NO_AUTO_RESAMPLE
+#define MA_SND_PCM_NO_AUTO_CHANNELS SND_PCM_NO_AUTO_CHANNELS
+#define MA_SND_PCM_NO_AUTO_FORMAT SND_PCM_NO_AUTO_FORMAT
+#else
+#include <errno.h> /* For EPIPE, etc. */
+typedef unsigned long ma_snd_pcm_uframes_t;
+typedef long ma_snd_pcm_sframes_t;
+typedef int ma_snd_pcm_stream_t;
+typedef int ma_snd_pcm_format_t;
+typedef int ma_snd_pcm_access_t;
+typedef int ma_snd_pcm_state_t;
+typedef struct ma_snd_pcm_t ma_snd_pcm_t;
+typedef struct ma_snd_pcm_hw_params_t ma_snd_pcm_hw_params_t;
+typedef struct ma_snd_pcm_sw_params_t ma_snd_pcm_sw_params_t;
+typedef struct ma_snd_pcm_format_mask_t ma_snd_pcm_format_mask_t;
+typedef struct ma_snd_pcm_info_t ma_snd_pcm_info_t;
+typedef struct
+{
+ void* addr;
+ unsigned int first;
+ unsigned int step;
+} ma_snd_pcm_channel_area_t;
+typedef struct
+{
+ unsigned int channels;
+ unsigned int pos[1];
+} ma_snd_pcm_chmap_t;
+
+/* snd_pcm_state_t */
+#define MA_SND_PCM_STATE_OPEN 0
+#define MA_SND_PCM_STATE_SETUP 1
+#define MA_SND_PCM_STATE_PREPARED 2
+#define MA_SND_PCM_STATE_RUNNING 3
+#define MA_SND_PCM_STATE_XRUN 4
+#define MA_SND_PCM_STATE_DRAINING 5
+#define MA_SND_PCM_STATE_PAUSED 6
+#define MA_SND_PCM_STATE_SUSPENDED 7
+#define MA_SND_PCM_STATE_DISCONNECTED 8
+
+/* snd_pcm_stream_t */
+#define MA_SND_PCM_STREAM_PLAYBACK 0
+#define MA_SND_PCM_STREAM_CAPTURE 1
+
+/* snd_pcm_format_t */
+#define MA_SND_PCM_FORMAT_UNKNOWN -1
+#define MA_SND_PCM_FORMAT_U8 1
+#define MA_SND_PCM_FORMAT_S16_LE 2
+#define MA_SND_PCM_FORMAT_S16_BE 3
+#define MA_SND_PCM_FORMAT_S24_LE 6
+#define MA_SND_PCM_FORMAT_S24_BE 7
+#define MA_SND_PCM_FORMAT_S32_LE 10
+#define MA_SND_PCM_FORMAT_S32_BE 11
+#define MA_SND_PCM_FORMAT_FLOAT_LE 14
+#define MA_SND_PCM_FORMAT_FLOAT_BE 15
+#define MA_SND_PCM_FORMAT_FLOAT64_LE 16
+#define MA_SND_PCM_FORMAT_FLOAT64_BE 17
+#define MA_SND_PCM_FORMAT_MU_LAW 20
+#define MA_SND_PCM_FORMAT_A_LAW 21
+#define MA_SND_PCM_FORMAT_S24_3LE 32
+#define MA_SND_PCM_FORMAT_S24_3BE 33
+
+/* snd_pcm_access_t */
+#define MA_SND_PCM_ACCESS_MMAP_INTERLEAVED 0
+#define MA_SND_PCM_ACCESS_MMAP_NONINTERLEAVED 1
+#define MA_SND_PCM_ACCESS_MMAP_COMPLEX 2
+#define MA_SND_PCM_ACCESS_RW_INTERLEAVED 3
+#define MA_SND_PCM_ACCESS_RW_NONINTERLEAVED 4
+
+/* Channel positions. */
+#define MA_SND_CHMAP_UNKNOWN 0
+#define MA_SND_CHMAP_NA 1
+#define MA_SND_CHMAP_MONO 2
+#define MA_SND_CHMAP_FL 3
+#define MA_SND_CHMAP_FR 4
+#define MA_SND_CHMAP_RL 5
+#define MA_SND_CHMAP_RR 6
+#define MA_SND_CHMAP_FC 7
+#define MA_SND_CHMAP_LFE 8
+#define MA_SND_CHMAP_SL 9
+#define MA_SND_CHMAP_SR 10
+#define MA_SND_CHMAP_RC 11
+#define MA_SND_CHMAP_FLC 12
+#define MA_SND_CHMAP_FRC 13
+#define MA_SND_CHMAP_RLC 14
+#define MA_SND_CHMAP_RRC 15
+#define MA_SND_CHMAP_FLW 16
+#define MA_SND_CHMAP_FRW 17
+#define MA_SND_CHMAP_FLH 18
+#define MA_SND_CHMAP_FCH 19
+#define MA_SND_CHMAP_FRH 20
+#define MA_SND_CHMAP_TC 21
+#define MA_SND_CHMAP_TFL 22
+#define MA_SND_CHMAP_TFR 23
+#define MA_SND_CHMAP_TFC 24
+#define MA_SND_CHMAP_TRL 25
+#define MA_SND_CHMAP_TRR 26
+#define MA_SND_CHMAP_TRC 27
+#define MA_SND_CHMAP_TFLC 28
+#define MA_SND_CHMAP_TFRC 29
+#define MA_SND_CHMAP_TSL 30
+#define MA_SND_CHMAP_TSR 31
+#define MA_SND_CHMAP_LLFE 32
+#define MA_SND_CHMAP_RLFE 33
+#define MA_SND_CHMAP_BC 34
+#define MA_SND_CHMAP_BLC 35
+#define MA_SND_CHMAP_BRC 36
+
+/* Open mode flags. */
+#define MA_SND_PCM_NO_AUTO_RESAMPLE 0x00010000
+#define MA_SND_PCM_NO_AUTO_CHANNELS 0x00020000
+#define MA_SND_PCM_NO_AUTO_FORMAT 0x00040000
+#endif
+
+typedef int (* ma_snd_pcm_open_proc) (ma_snd_pcm_t **pcm, const char *name, ma_snd_pcm_stream_t stream, int mode);
+typedef int (* ma_snd_pcm_close_proc) (ma_snd_pcm_t *pcm);
+typedef size_t (* ma_snd_pcm_hw_params_sizeof_proc) (void);
+typedef int (* ma_snd_pcm_hw_params_any_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params);
+typedef int (* ma_snd_pcm_hw_params_set_format_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, ma_snd_pcm_format_t val);
+typedef int (* ma_snd_pcm_hw_params_set_format_first_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, ma_snd_pcm_format_t *format);
+typedef void (* ma_snd_pcm_hw_params_get_format_mask_proc) (ma_snd_pcm_hw_params_t *params, ma_snd_pcm_format_mask_t *mask);
+typedef int (* ma_snd_pcm_hw_params_set_channels_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int val);
+typedef int (* ma_snd_pcm_hw_params_set_channels_near_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int *val);
+typedef int (* ma_snd_pcm_hw_params_set_channels_minmax_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int *minimum, unsigned int *maximum);
+typedef int (* ma_snd_pcm_hw_params_set_rate_resample_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int val);
+typedef int (* ma_snd_pcm_hw_params_set_rate_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int val, int dir);
+typedef int (* ma_snd_pcm_hw_params_set_rate_near_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int *val, int *dir);
+typedef int (* ma_snd_pcm_hw_params_set_buffer_size_near_proc)(ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, ma_snd_pcm_uframes_t *val);
+typedef int (* ma_snd_pcm_hw_params_set_periods_near_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int *val, int *dir);
+typedef int (* ma_snd_pcm_hw_params_set_access_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, ma_snd_pcm_access_t _access);
+typedef int (* ma_snd_pcm_hw_params_get_format_proc) (const ma_snd_pcm_hw_params_t *params, ma_snd_pcm_format_t *format);
+typedef int (* ma_snd_pcm_hw_params_get_channels_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *val);
+typedef int (* ma_snd_pcm_hw_params_get_channels_min_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *val);
+typedef int (* ma_snd_pcm_hw_params_get_channels_max_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *val);
+typedef int (* ma_snd_pcm_hw_params_get_rate_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *rate, int *dir);
+typedef int (* ma_snd_pcm_hw_params_get_rate_min_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *rate, int *dir);
+typedef int (* ma_snd_pcm_hw_params_get_rate_max_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *rate, int *dir);
+typedef int (* ma_snd_pcm_hw_params_get_buffer_size_proc) (const ma_snd_pcm_hw_params_t *params, ma_snd_pcm_uframes_t *val);
+typedef int (* ma_snd_pcm_hw_params_get_periods_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *val, int *dir);
+typedef int (* ma_snd_pcm_hw_params_get_access_proc) (const ma_snd_pcm_hw_params_t *params, ma_snd_pcm_access_t *_access);
+typedef int (* ma_snd_pcm_hw_params_test_format_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, ma_snd_pcm_format_t val);
+typedef int (* ma_snd_pcm_hw_params_test_channels_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int val);
+typedef int (* ma_snd_pcm_hw_params_test_rate_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int val, int dir);
+typedef int (* ma_snd_pcm_hw_params_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params);
+typedef size_t (* ma_snd_pcm_sw_params_sizeof_proc) (void);
+typedef int (* ma_snd_pcm_sw_params_current_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_sw_params_t *params);
+typedef int (* ma_snd_pcm_sw_params_get_boundary_proc) (const ma_snd_pcm_sw_params_t *params, ma_snd_pcm_uframes_t* val);
+typedef int (* ma_snd_pcm_sw_params_set_avail_min_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_sw_params_t *params, ma_snd_pcm_uframes_t val);
+typedef int (* ma_snd_pcm_sw_params_set_start_threshold_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_sw_params_t *params, ma_snd_pcm_uframes_t val);
+typedef int (* ma_snd_pcm_sw_params_set_stop_threshold_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_sw_params_t *params, ma_snd_pcm_uframes_t val);
+typedef int (* ma_snd_pcm_sw_params_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_sw_params_t *params);
+typedef size_t (* ma_snd_pcm_format_mask_sizeof_proc) (void);
+typedef int (* ma_snd_pcm_format_mask_test_proc) (const ma_snd_pcm_format_mask_t *mask, ma_snd_pcm_format_t val);
+typedef ma_snd_pcm_chmap_t * (* ma_snd_pcm_get_chmap_proc) (ma_snd_pcm_t *pcm);
+typedef ma_snd_pcm_state_t (* ma_snd_pcm_state_proc) (ma_snd_pcm_t *pcm);
+typedef int (* ma_snd_pcm_prepare_proc) (ma_snd_pcm_t *pcm);
+typedef int (* ma_snd_pcm_start_proc) (ma_snd_pcm_t *pcm);
+typedef int (* ma_snd_pcm_drop_proc) (ma_snd_pcm_t *pcm);
+typedef int (* ma_snd_pcm_drain_proc) (ma_snd_pcm_t *pcm);
+typedef int (* ma_snd_pcm_reset_proc) (ma_snd_pcm_t *pcm);
+typedef int (* ma_snd_device_name_hint_proc) (int card, const char *iface, void ***hints);
+typedef char * (* ma_snd_device_name_get_hint_proc) (const void *hint, const char *id);
+typedef int (* ma_snd_card_get_index_proc) (const char *name);
+typedef int (* ma_snd_device_name_free_hint_proc) (void **hints);
+typedef int (* ma_snd_pcm_mmap_begin_proc) (ma_snd_pcm_t *pcm, const ma_snd_pcm_channel_area_t **areas, ma_snd_pcm_uframes_t *offset, ma_snd_pcm_uframes_t *frames);
+typedef ma_snd_pcm_sframes_t (* ma_snd_pcm_mmap_commit_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_uframes_t offset, ma_snd_pcm_uframes_t frames);
+typedef int (* ma_snd_pcm_recover_proc) (ma_snd_pcm_t *pcm, int err, int silent);
+typedef ma_snd_pcm_sframes_t (* ma_snd_pcm_readi_proc) (ma_snd_pcm_t *pcm, void *buffer, ma_snd_pcm_uframes_t size);
+typedef ma_snd_pcm_sframes_t (* ma_snd_pcm_writei_proc) (ma_snd_pcm_t *pcm, const void *buffer, ma_snd_pcm_uframes_t size);
+typedef ma_snd_pcm_sframes_t (* ma_snd_pcm_avail_proc) (ma_snd_pcm_t *pcm);
+typedef ma_snd_pcm_sframes_t (* ma_snd_pcm_avail_update_proc) (ma_snd_pcm_t *pcm);
+typedef int (* ma_snd_pcm_wait_proc) (ma_snd_pcm_t *pcm, int timeout);
+typedef int (* ma_snd_pcm_nonblock_proc) (ma_snd_pcm_t *pcm, int nonblock);
+typedef int (* ma_snd_pcm_info_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_info_t* info);
+typedef size_t (* ma_snd_pcm_info_sizeof_proc) (void);
+typedef const char* (* ma_snd_pcm_info_get_name_proc) (const ma_snd_pcm_info_t* info);
+typedef int (* ma_snd_pcm_poll_descriptors_proc) (ma_snd_pcm_t *pcm, struct pollfd *pfds, unsigned int space);
+typedef int (* ma_snd_pcm_poll_descriptors_count_proc) (ma_snd_pcm_t *pcm);
+typedef int (* ma_snd_pcm_poll_descriptors_revents_proc) (ma_snd_pcm_t *pcm, struct pollfd *pfds, unsigned int nfds, unsigned short *revents);
+typedef int (* ma_snd_config_update_free_global_proc) (void);
+
+/*
+This array specifies each of the common devices that can be used for both playback and capture.
+Names in this list are matched by ma_is_common_device_name__alsa() during device enumeration.
+*/
+static const char* g_maCommonDeviceNamesALSA[] = {
+    "default",
+    "null",
+    "pulse",
+    "jack"
+};
+
+/*
+This array allows us to blacklist specific playback devices. It is consulted via
+ma_is_playback_device_blacklisted__alsa(). The single empty string means nothing is
+blacklisted by default (only an empty device name would ever compare equal).
+*/
+static const char* g_maBlacklistedPlaybackDeviceNamesALSA[] = {
+    ""
+};
+
+/*
+This array allows us to blacklist specific capture devices. Same semantics as the
+playback blacklist above: empty by default.
+*/
+static const char* g_maBlacklistedCaptureDeviceNamesALSA[] = {
+    ""
+};
+
+
+static ma_snd_pcm_format_t ma_convert_ma_format_to_alsa_format(ma_format format)
+{
+    /*
+    Maps a miniaudio sample format to the corresponding native-endian ALSA format. The tables
+    are indexed by ma_format; the caller is expected to pass a valid ma_format value.
+    */
+    static const ma_snd_pcm_format_t ALSAFormatsLE[] = {
+        MA_SND_PCM_FORMAT_UNKNOWN,      /* ma_format_unknown */
+        MA_SND_PCM_FORMAT_U8,           /* ma_format_u8 */
+        MA_SND_PCM_FORMAT_S16_LE,       /* ma_format_s16 */
+        MA_SND_PCM_FORMAT_S24_3LE,      /* ma_format_s24 */
+        MA_SND_PCM_FORMAT_S32_LE,       /* ma_format_s32 */
+        MA_SND_PCM_FORMAT_FLOAT_LE      /* ma_format_f32 */
+    };
+    static const ma_snd_pcm_format_t ALSAFormatsBE[] = {
+        MA_SND_PCM_FORMAT_UNKNOWN,      /* ma_format_unknown */
+        MA_SND_PCM_FORMAT_U8,           /* ma_format_u8 */
+        MA_SND_PCM_FORMAT_S16_BE,       /* ma_format_s16 */
+        MA_SND_PCM_FORMAT_S24_3BE,      /* ma_format_s24 */
+        MA_SND_PCM_FORMAT_S32_BE,       /* ma_format_s32 */
+        MA_SND_PCM_FORMAT_FLOAT_BE      /* ma_format_f32 */
+    };
+
+    if (ma_is_big_endian()) {
+        return ALSAFormatsBE[format];
+    } else {
+        return ALSAFormatsLE[format];
+    }
+}
+
+static ma_format ma_format_from_alsa(ma_snd_pcm_format_t formatALSA)
+{
+    /*
+    Translates an ALSA sample format into the equivalent miniaudio format, taking the host's
+    endianness into account. Formats miniaudio has no equivalent for map to ma_format_unknown.
+    */
+
+    /* U8 has no endianness, so handle it up front. */
+    if (formatALSA == MA_SND_PCM_FORMAT_U8) {
+        return ma_format_u8;
+    }
+
+    if (ma_is_little_endian()) {
+        switch (formatALSA) {
+            case MA_SND_PCM_FORMAT_S16_LE:   return ma_format_s16;
+            case MA_SND_PCM_FORMAT_S24_3LE:  return ma_format_s24;
+            case MA_SND_PCM_FORMAT_S32_LE:   return ma_format_s32;
+            case MA_SND_PCM_FORMAT_FLOAT_LE: return ma_format_f32;
+            default:                         return ma_format_unknown;
+        }
+    } else {
+        switch (formatALSA) {
+            case MA_SND_PCM_FORMAT_S16_BE:   return ma_format_s16;
+            case MA_SND_PCM_FORMAT_S24_3BE:  return ma_format_s24;
+            case MA_SND_PCM_FORMAT_S32_BE:   return ma_format_s32;
+            case MA_SND_PCM_FORMAT_FLOAT_BE: return ma_format_f32;
+            default:                         return ma_format_unknown;
+        }
+    }
+}
+
+static ma_channel ma_convert_alsa_channel_position_to_ma_channel(unsigned int alsaChannelPos)
+{
+    /*
+    Translates an ALSA chmap channel position into a miniaudio channel. Positions with no
+    miniaudio equivalent (RLC/RRC and the wide/high front positions, plus anything unknown)
+    map to 0.
+    */
+    switch (alsaChannelPos)
+    {
+        case MA_SND_CHMAP_MONO: return MA_CHANNEL_MONO;
+        case MA_SND_CHMAP_FL:   return MA_CHANNEL_FRONT_LEFT;
+        case MA_SND_CHMAP_FR:   return MA_CHANNEL_FRONT_RIGHT;
+        case MA_SND_CHMAP_RL:   return MA_CHANNEL_BACK_LEFT;
+        case MA_SND_CHMAP_RR:   return MA_CHANNEL_BACK_RIGHT;
+        case MA_SND_CHMAP_FC:   return MA_CHANNEL_FRONT_CENTER;
+        case MA_SND_CHMAP_LFE:  return MA_CHANNEL_LFE;
+        case MA_SND_CHMAP_SL:   return MA_CHANNEL_SIDE_LEFT;
+        case MA_SND_CHMAP_SR:   return MA_CHANNEL_SIDE_RIGHT;
+        case MA_SND_CHMAP_RC:   return MA_CHANNEL_BACK_CENTER;
+        case MA_SND_CHMAP_FLC:  return MA_CHANNEL_FRONT_LEFT_CENTER;
+        case MA_SND_CHMAP_FRC:  return MA_CHANNEL_FRONT_RIGHT_CENTER;
+        case MA_SND_CHMAP_TC:   return MA_CHANNEL_TOP_CENTER;
+        case MA_SND_CHMAP_TFL:  return MA_CHANNEL_TOP_FRONT_LEFT;
+        case MA_SND_CHMAP_TFR:  return MA_CHANNEL_TOP_FRONT_RIGHT;
+        case MA_SND_CHMAP_TFC:  return MA_CHANNEL_TOP_FRONT_CENTER;
+        case MA_SND_CHMAP_TRL:  return MA_CHANNEL_TOP_BACK_LEFT;
+        case MA_SND_CHMAP_TRR:  return MA_CHANNEL_TOP_BACK_RIGHT;
+        case MA_SND_CHMAP_TRC:  return MA_CHANNEL_TOP_BACK_CENTER;
+        default:                return 0;   /* RLC, RRC, FLW, FRW, FLH, FCH, FRH and anything unrecognized. */
+    }
+}
+
+static ma_bool32 ma_is_common_device_name__alsa(const char* name)
+{
+    /* Returns true when `name` is one of the device names usable for both playback and capture ("default", "null", etc.). */
+    size_t i;
+
+    for (i = 0; i < ma_countof(g_maCommonDeviceNamesALSA); i += 1) {
+        if (ma_strcmp(g_maCommonDeviceNamesALSA[i], name) == 0) {
+            return MA_TRUE;
+        }
+    }
+
+    return MA_FALSE;
+}
+
+
+static ma_bool32 ma_is_playback_device_blacklisted__alsa(const char* name)
+{
+    /* Returns true when `name` appears in the playback blacklist. */
+    size_t i;
+
+    for (i = 0; i < ma_countof(g_maBlacklistedPlaybackDeviceNamesALSA); i += 1) {
+        if (ma_strcmp(g_maBlacklistedPlaybackDeviceNamesALSA[i], name) == 0) {
+            return MA_TRUE;
+        }
+    }
+
+    return MA_FALSE;
+}
+
+static ma_bool32 ma_is_capture_device_blacklisted__alsa(const char* name)
+{
+    /* Returns true when `name` appears in the capture blacklist. */
+    size_t i;
+
+    for (i = 0; i < ma_countof(g_maBlacklistedCaptureDeviceNamesALSA); i += 1) {
+        if (ma_strcmp(g_maBlacklistedCaptureDeviceNamesALSA[i], name) == 0) {
+            return MA_TRUE;
+        }
+    }
+
+    return MA_FALSE;
+}
+
+static ma_bool32 ma_is_device_blacklisted__alsa(ma_device_type deviceType, const char* name)
+{
+    /* Dispatches to the playback or capture blacklist depending on the device type. Anything that isn't playback is treated as capture. */
+    return (deviceType == ma_device_type_playback)
+        ? ma_is_playback_device_blacklisted__alsa(name)
+        : ma_is_capture_device_blacklisted__alsa(name);
+}
+
+
+static const char* ma_find_char(const char* str, char c, int* index)
+{
+    /*
+    Finds the first occurrence of `c` in the null-terminated string `str`. On success returns a
+    pointer to the matching character and, when `index` is non-NULL, writes its zero-based offset.
+    When `c` is not present, returns NULL and writes -1 to `index` (if non-NULL).
+    */
+    int i;
+
+    for (i = 0; str[i] != '\0'; i += 1) {
+        if (str[i] == c) {
+            if (index != NULL) {
+                *index = i;
+            }
+            return str + i;
+        }
+    }
+
+    if (index != NULL) {
+        *index = -1;
+    }
+    return NULL;
+}
+
+static ma_bool32 ma_is_device_name_in_hw_format__alsa(const char* hwid)
+{
+    /*
+    This function is just checking whether or not hwid is in "hw:%d,%d" format.
+
+    Fix: previously strings with an empty card or device component, such as "hw:," or "hw:1,",
+    were accepted because the digit-check loops were simply skipped for zero-length parts. Both
+    components must now contain at least one digit.
+    */
+
+    int commaPos;
+    const char* dev;
+    int i;
+
+    if (hwid == NULL) {
+        return MA_FALSE;
+    }
+
+    if (hwid[0] != 'h' || hwid[1] != 'w' || hwid[2] != ':') {
+        return MA_FALSE;
+    }
+
+    hwid += 3;
+
+    dev = ma_find_char(hwid, ',', &commaPos);
+    if (dev == NULL) {
+        return MA_FALSE;
+    } else {
+        dev += 1;   /* Skip past the ",". */
+    }
+
+    /* Both the card part (before the ",") and the device part (after it) must be non-empty. */
+    if (commaPos == 0 || dev[0] == '\0') {
+        return MA_FALSE;
+    }
+
+    /* Check if the part between the ":" and the "," contains only numbers. If not, return false. */
+    for (i = 0; i < commaPos; ++i) {
+        if (hwid[i] < '0' || hwid[i] > '9') {
+            return MA_FALSE;
+        }
+    }
+
+    /* Check if everything after the "," is numeric. If not, return false. */
+    i = 0;
+    while (dev[i] != '\0') {
+        if (dev[i] < '0' || dev[i] > '9') {
+            return MA_FALSE;
+        }
+        i += 1;
+    }
+
+    return MA_TRUE;
+}
+
+static int ma_convert_device_name_to_hw_format__alsa(ma_context* pContext, char* dst, size_t dstSize, const char* src) /* Returns 0 on success, non-0 on error. */
+{
+    /*
+    Converts a verbose ALSA device name into "hw:%d,%d" format by resolving the card name to a
+    card index via snd_card_get_index(). src should look something like this: "hw:CARD=I82801AAICH,DEV=0"
+
+    NOTE(review): the +6/+5 offsets below assume the text following the colon is exactly
+    ":CARD=..." and the text following the comma is exactly ",DEV=..." - inputs in any other
+    shape would produce a garbage card name. Presumably the hint names from ALSA always follow
+    this shape; verify against the enumeration caller.
+    */
+
+    int colonPos;
+    int commaPos;
+    char card[256];
+    const char* dev;
+    int cardIndex;
+
+    if (dst == NULL) {
+        return -1;
+    }
+    if (dstSize < 7) {
+        return -1; /* Absolute minimum size of the output buffer is 7 bytes. */
+    }
+
+    *dst = '\0';    /* Safety. */
+    if (src == NULL) {
+        return -1;
+    }
+
+    /* If the input name is already in "hw:%d,%d" format, just return that verbatim. */
+    if (ma_is_device_name_in_hw_format__alsa(src)) {
+        return ma_strcpy_s(dst, dstSize, src);
+    }
+
+    src = ma_find_char(src, ':', &colonPos);
+    if (src == NULL) {
+        return -1;  /* Couldn't find a colon */
+    }
+
+    /* src now points at the ':', so commaPos below is relative to that position. */
+    dev = ma_find_char(src, ',', &commaPos);
+    if (dev == NULL) {
+        dev = "0";  /* No ",DEV=..." part; default to device 0. */
+        ma_strncpy_s(card, sizeof(card), src+6, (size_t)-1);    /* +6 = ":CARD=" */
+    } else {
+        dev = dev + 5;  /* +5 = ",DEV=" */
+        ma_strncpy_s(card, sizeof(card), src+6, commaPos-6);    /* +6 = ":CARD=" */
+    }
+
+    cardIndex = ((ma_snd_card_get_index_proc)pContext->alsa.snd_card_get_index)(card);
+    if (cardIndex < 0) {
+        return -2;  /* Failed to retrieve the card index. */
+    }
+
+
+    /* Construction. Build "hw:<cardIndex>,<dev>" into dst. */
+    dst[0] = 'h'; dst[1] = 'w'; dst[2] = ':';
+    if (ma_itoa_s(cardIndex, dst+3, dstSize-3, 10) != 0) {
+        return -3;
+    }
+    if (ma_strcat_s(dst, dstSize, ",") != 0) {
+        return -3;
+    }
+    if (ma_strcat_s(dst, dstSize, dev) != 0) {
+        return -3;
+    }
+
+    return 0;
+}
+
+static ma_bool32 ma_does_id_exist_in_list__alsa(ma_device_id* pUniqueIDs, ma_uint32 count, const char* pHWID)
+{
+    /* Linear scan of the already-enumerated device IDs, used to avoid reporting duplicates. */
+    ma_uint32 iID;
+
+    MA_ASSERT(pHWID != NULL);
+
+    for (iID = 0; iID < count; iID += 1) {
+        if (ma_strcmp(pHWID, pUniqueIDs[iID].alsa) == 0) {
+            return MA_TRUE;
+        }
+    }
+
+    return MA_FALSE;
+}
+
+
+/*
+Opens an ALSA PCM handle for the given device type and share mode. When pDeviceID is NULL a
+series of default device names is tried in order; otherwise the given ID is used, with special
+handling for IDs in ":%d,%d" format (see comment below). On success *ppPCM receives the opened
+handle, which the caller owns and must close with snd_pcm_close().
+*/
+static ma_result ma_context_open_pcm__alsa(ma_context* pContext, ma_share_mode shareMode, ma_device_type deviceType, const ma_device_id* pDeviceID, int openMode, ma_snd_pcm_t** ppPCM)
+{
+    ma_snd_pcm_t* pPCM;
+    ma_snd_pcm_stream_t stream;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(ppPCM != NULL);
+
+    *ppPCM = NULL;
+    pPCM = NULL;
+
+    stream = (deviceType == ma_device_type_playback) ? MA_SND_PCM_STREAM_PLAYBACK : MA_SND_PCM_STREAM_CAPTURE;
+
+    if (pDeviceID == NULL) {
+        ma_bool32 isDeviceOpen;
+        size_t i;
+
+        /*
+        We're opening the default device. I don't know if trying anything other than "default" is necessary, but it makes
+        me feel better to try as hard as we can get to get _something_ working.
+        */
+        const char* defaultDeviceNames[] = {
+            "default",
+            NULL,
+            NULL,
+            NULL,
+            NULL,
+            NULL,
+            NULL
+        };
+
+        /* Exclusive mode prefers raw "hw" devices; shared mode prefers "dmix" (playback) or "dsnoop" (capture), falling back to "hw". */
+        if (shareMode == ma_share_mode_exclusive) {
+            defaultDeviceNames[1] = "hw";
+            defaultDeviceNames[2] = "hw:0";
+            defaultDeviceNames[3] = "hw:0,0";
+        } else {
+            if (deviceType == ma_device_type_playback) {
+                defaultDeviceNames[1] = "dmix";
+                defaultDeviceNames[2] = "dmix:0";
+                defaultDeviceNames[3] = "dmix:0,0";
+            } else {
+                defaultDeviceNames[1] = "dsnoop";
+                defaultDeviceNames[2] = "dsnoop:0";
+                defaultDeviceNames[3] = "dsnoop:0,0";
+            }
+            defaultDeviceNames[4] = "hw";
+            defaultDeviceNames[5] = "hw:0";
+            defaultDeviceNames[6] = "hw:0,0";
+        }
+
+        /* Try each candidate name in order and stop at the first that opens. */
+        isDeviceOpen = MA_FALSE;
+        for (i = 0; i < ma_countof(defaultDeviceNames); ++i) {
+            if (defaultDeviceNames[i] != NULL && defaultDeviceNames[i][0] != '\0') {
+                if (((ma_snd_pcm_open_proc)pContext->alsa.snd_pcm_open)(&pPCM, defaultDeviceNames[i], stream, openMode) == 0) {
+                    isDeviceOpen = MA_TRUE;
+                    break;
+                }
+            }
+        }
+
+        if (!isDeviceOpen) {
+            ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[ALSA] snd_pcm_open() failed when trying to open an appropriate default device.");
+            return MA_FAILED_TO_OPEN_BACKEND_DEVICE;
+        }
+    } else {
+        /*
+        We're trying to open a specific device. There's a few things to consider here:
+
+        miniaudio recognizes a special format of device id that excludes the "hw", "dmix", etc. prefix. It looks like this: ":0,0", ":0,1", etc. When
+        an ID of this format is specified, it indicates to miniaudio that it can try different combinations of plugins ("hw", "dmix", etc.) until it
+        finds an appropriate one that works. This comes in very handy when trying to open a device in shared mode ("dmix"), vs exclusive mode ("hw").
+        */
+
+        /* May end up needing to make small adjustments to the ID, so make a copy. */
+        ma_device_id deviceID = *pDeviceID;
+        int resultALSA = -ENODEV;
+
+        if (deviceID.alsa[0] != ':') {
+            /* The ID is not in ":0,0" format. Use the ID exactly as-is. */
+            resultALSA = ((ma_snd_pcm_open_proc)pContext->alsa.snd_pcm_open)(&pPCM, deviceID.alsa, stream, openMode);
+        } else {
+            char hwid[256];
+
+            /* The ID is in ":0,0" format. Try different plugins depending on the shared mode. */
+            if (deviceID.alsa[1] == '\0') {
+                deviceID.alsa[0] = '\0';  /* An ID of ":" should be converted to "". */
+            }
+
+            if (shareMode == ma_share_mode_shared) {
+                if (deviceType == ma_device_type_playback) {
+                    ma_strcpy_s(hwid, sizeof(hwid), "dmix");
+                } else {
+                    ma_strcpy_s(hwid, sizeof(hwid), "dsnoop");
+                }
+
+                if (ma_strcat_s(hwid, sizeof(hwid), deviceID.alsa) == 0) {
+                    resultALSA = ((ma_snd_pcm_open_proc)pContext->alsa.snd_pcm_open)(&pPCM, hwid, stream, openMode);
+                }
+            }
+
+            /* If at this point we still don't have an open device it means we're either preferencing exclusive mode or opening with "dmix"/"dsnoop" failed. */
+            if (resultALSA != 0) {
+                ma_strcpy_s(hwid, sizeof(hwid), "hw");
+                if (ma_strcat_s(hwid, sizeof(hwid), deviceID.alsa) == 0) {
+                    resultALSA = ((ma_snd_pcm_open_proc)pContext->alsa.snd_pcm_open)(&pPCM, hwid, stream, openMode);
+                }
+            }
+        }
+
+        if (resultALSA < 0) {
+            ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[ALSA] snd_pcm_open() failed.");
+            return ma_result_from_errno(-resultALSA);
+        }
+    }
+
+    *ppPCM = pPCM;
+    return MA_SUCCESS;
+}
+
+
+static ma_result ma_context_enumerate_devices__alsa(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+{
+ int resultALSA;
+ ma_bool32 cbResult = MA_TRUE;
+ char** ppDeviceHints;
+ ma_device_id* pUniqueIDs = NULL;
+ ma_uint32 uniqueIDCount = 0;
+ char** ppNextDeviceHint;
+
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(callback != NULL);
+
+ ma_mutex_lock(&pContext->alsa.internalDeviceEnumLock);
+
+ resultALSA = ((ma_snd_device_name_hint_proc)pContext->alsa.snd_device_name_hint)(-1, "pcm", (void***)&ppDeviceHints);
+ if (resultALSA < 0) {
+ ma_mutex_unlock(&pContext->alsa.internalDeviceEnumLock);
+ return ma_result_from_errno(-resultALSA);
+ }
+
+ ppNextDeviceHint = ppDeviceHints;
+ while (*ppNextDeviceHint != NULL) {
+ char* NAME = ((ma_snd_device_name_get_hint_proc)pContext->alsa.snd_device_name_get_hint)(*ppNextDeviceHint, "NAME");
+ char* DESC = ((ma_snd_device_name_get_hint_proc)pContext->alsa.snd_device_name_get_hint)(*ppNextDeviceHint, "DESC");
+ char* IOID = ((ma_snd_device_name_get_hint_proc)pContext->alsa.snd_device_name_get_hint)(*ppNextDeviceHint, "IOID");
+ ma_device_type deviceType = ma_device_type_playback;
+ ma_bool32 stopEnumeration = MA_FALSE;
+ char hwid[sizeof(pUniqueIDs->alsa)];
+ ma_device_info deviceInfo;
+
+ if ((IOID == NULL || ma_strcmp(IOID, "Output") == 0)) {
+ deviceType = ma_device_type_playback;
+ }
+ if ((IOID != NULL && ma_strcmp(IOID, "Input" ) == 0)) {
+ deviceType = ma_device_type_capture;
+ }
+
+ if (NAME != NULL) {
+ if (pContext->alsa.useVerboseDeviceEnumeration) {
+ /* Verbose mode. Use the name exactly as-is. */
+ ma_strncpy_s(hwid, sizeof(hwid), NAME, (size_t)-1);
+ } else {
+ /* Simplified mode. Use ":%d,%d" format. */
+ if (ma_convert_device_name_to_hw_format__alsa(pContext, hwid, sizeof(hwid), NAME) == 0) {
+ /*
+ At this point, hwid looks like "hw:0,0". In simplified enumeration mode, we actually want to strip off the
+ plugin name so it looks like ":0,0". The reason for this is that this special format is detected at device
+ initialization time and is used as an indicator to try and use the most appropriate plugin depending on the
+ device type and sharing mode.
+ */
+ char* dst = hwid;
+ char* src = hwid+2;
+ while ((*dst++ = *src++));
+ } else {
+ /* Conversion to "hw:%d,%d" failed. Just use the name as-is. */
+ ma_strncpy_s(hwid, sizeof(hwid), NAME, (size_t)-1);
+ }
+
+ if (ma_does_id_exist_in_list__alsa(pUniqueIDs, uniqueIDCount, hwid)) {
+ goto next_device; /* The device has already been enumerated. Move on to the next one. */
+ } else {
+ /* The device has not yet been enumerated. Make sure it's added to our list so that it's not enumerated again. */
+ size_t newCapacity = sizeof(*pUniqueIDs) * (uniqueIDCount + 1);
+ ma_device_id* pNewUniqueIDs = (ma_device_id*)ma_realloc(pUniqueIDs, newCapacity, &pContext->allocationCallbacks);
+ if (pNewUniqueIDs == NULL) {
+ goto next_device; /* Failed to allocate memory. */
+ }
+
+ pUniqueIDs = pNewUniqueIDs;
+ MA_COPY_MEMORY(pUniqueIDs[uniqueIDCount].alsa, hwid, sizeof(hwid));
+ uniqueIDCount += 1;
+ }
+ }
+ } else {
+ MA_ZERO_MEMORY(hwid, sizeof(hwid));
+ }
+
+ MA_ZERO_OBJECT(&deviceInfo);
+ ma_strncpy_s(deviceInfo.id.alsa, sizeof(deviceInfo.id.alsa), hwid, (size_t)-1);
+
+ /*
+ There's no good way to determine whether or not a device is the default on Linux. We're just going to do something simple and
+ just use the name of "default" as the indicator.
+ */
+ if (ma_strcmp(deviceInfo.id.alsa, "default") == 0) {
+ deviceInfo.isDefault = MA_TRUE;
+ }
+
+
+ /*
+ DESC is the friendly name. We treat this slightly differently depending on whether or not we are using verbose
+ device enumeration. In verbose mode we want to take the entire description so that the end-user can distinguish
+ between the subdevices of each card/dev pair. In simplified mode, however, we only want the first part of the
+ description.
+
+ The value in DESC seems to be split into two lines, with the first line being the name of the device and the
+ second line being a description of the device. I don't like having the description be across two lines because
+ it makes formatting ugly and annoying. I'm therefore deciding to put it all on a single line with the second line
+ being put into parentheses. In simplified mode I'm just stripping the second line entirely.
+ */
+ if (DESC != NULL) {
+ int lfPos;
+ const char* line2 = ma_find_char(DESC, '\n', &lfPos);
+ if (line2 != NULL) {
+ line2 += 1; /* Skip past the new-line character. */
+
+ if (pContext->alsa.useVerboseDeviceEnumeration) {
+ /* Verbose mode. Put the second line in brackets. */
+ ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), DESC, lfPos);
+ ma_strcat_s (deviceInfo.name, sizeof(deviceInfo.name), " (");
+ ma_strcat_s (deviceInfo.name, sizeof(deviceInfo.name), line2);
+ ma_strcat_s (deviceInfo.name, sizeof(deviceInfo.name), ")");
+ } else {
+ /* Simplified mode. Strip the second line entirely. */
+ ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), DESC, lfPos);
+ }
+ } else {
+ /* There's no second line. Just copy the whole description. */
+ ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), DESC, (size_t)-1);
+ }
+ }
+
+ if (!ma_is_device_blacklisted__alsa(deviceType, NAME)) {
+ cbResult = callback(pContext, deviceType, &deviceInfo, pUserData);
+ }
+
+ /*
+ Some devices are both playback and capture, but they are only enumerated by ALSA once. We need to fire the callback
+ again for the other device type in this case. We do this for known devices and where the IOID hint is NULL, which
+ means both Input and Output.
+ */
+ if (cbResult) {
+ if (ma_is_common_device_name__alsa(NAME) || IOID == NULL) {
+ if (deviceType == ma_device_type_playback) {
+ if (!ma_is_capture_device_blacklisted__alsa(NAME)) {
+ cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData);
+ }
+ } else {
+ if (!ma_is_playback_device_blacklisted__alsa(NAME)) {
+ cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData);
+ }
+ }
+ }
+ }
+
+ if (cbResult == MA_FALSE) {
+ stopEnumeration = MA_TRUE;
+ }
+
+ next_device:
+ free(NAME);
+ free(DESC);
+ free(IOID);
+ ppNextDeviceHint += 1;
+
+ /* We need to stop enumeration if the callback returned false. */
+ if (stopEnumeration) {
+ break;
+ }
+ }
+
+ ma_free(pUniqueIDs, &pContext->allocationCallbacks);
+ ((ma_snd_device_name_free_hint_proc)pContext->alsa.snd_device_name_free_hint)((void**)ppDeviceHints);
+
+ ma_mutex_unlock(&pContext->alsa.internalDeviceEnumLock);
+
+ return MA_SUCCESS;
+}
+
+
+/* Data passed through the enumeration callback when looking up a single device by ID. */
+typedef struct
+{
+    ma_device_type deviceType;      /* The device type being searched for (playback or capture). */
+    const ma_device_id* pDeviceID;  /* The ID to search for. NULL means match the "default" device. */
+    ma_share_mode shareMode;        /* Not referenced by the callback in this file — presumably reserved; verify against other callers. */
+    ma_device_info* pDeviceInfo;    /* [out] Receives the device name when a match is found. */
+    ma_bool32 foundDevice;          /* [out] Set to MA_TRUE once the device has been found; stops enumeration. */
+} ma_context_get_device_info_enum_callback_data__alsa;
+
+/*
+Enumeration callback used by ma_context_get_device_info__alsa() to locate a single device. When
+no explicit ID was requested, the device named "default" is taken as the match; otherwise both
+the device type and the ID must match. On a match the device's name is copied into the output
+info structure and foundDevice is raised.
+*/
+static ma_bool32 ma_context_get_device_info_enum_callback__alsa(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pDeviceInfo, void* pUserData)
+{
+    ma_context_get_device_info_enum_callback_data__alsa* pData = (ma_context_get_device_info_enum_callback_data__alsa*)pUserData;
+    ma_bool32 isMatch;
+
+    MA_ASSERT(pData != NULL);
+
+    (void)pContext;
+
+    if (pData->pDeviceID == NULL) {
+        /* No explicit ID requested. Treat the device named "default" as the one we're after. */
+        isMatch = (ma_strcmp(pDeviceInfo->id.alsa, "default") == 0);
+    } else {
+        /* An explicit ID was requested. The device type and the ID must both match. */
+        isMatch = (pData->deviceType == deviceType && ma_strcmp(pData->pDeviceID->alsa, pDeviceInfo->id.alsa) == 0);
+    }
+
+    if (isMatch) {
+        ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), pDeviceInfo->name, (size_t)-1);
+        pData->foundDevice = MA_TRUE;
+    }
+
+    /* Keep enumerating until we have found the device. */
+    return !pData->foundDevice;
+}
+
+/*
+Tests whether the device supports the given sample rate within the current hardware parameter
+configuration space and, if so, appends a native data format entry to pDeviceInfo.
+
+Fix: the append logic previously duplicated ma_device_info_add_native_data_format() field by
+field; it now calls the shared helper so the two cannot drift apart. The capacity check is kept
+in front of the ALSA rate test so a full list still short-circuits and skips the
+snd_pcm_hw_params_test_rate() call, exactly as before.
+*/
+static void ma_context_test_rate_and_add_native_data_format__alsa(ma_context* pContext, ma_snd_pcm_t* pPCM, ma_snd_pcm_hw_params_t* pHWParams, ma_format format, ma_uint32 channels, ma_uint32 sampleRate, ma_uint32 flags, ma_device_info* pDeviceInfo)
+{
+    MA_ASSERT(pPCM != NULL);
+    MA_ASSERT(pHWParams != NULL);
+    MA_ASSERT(pDeviceInfo != NULL);
+
+    if (pDeviceInfo->nativeDataFormatCount < ma_countof(pDeviceInfo->nativeDataFormats) && ((ma_snd_pcm_hw_params_test_rate_proc)pContext->alsa.snd_pcm_hw_params_test_rate)(pPCM, pHWParams, sampleRate, 0) == 0) {
+        /* The rate is supported. Append it via the shared helper (it re-checks capacity internally, which is harmless here). */
+        ma_device_info_add_native_data_format(pDeviceInfo, format, channels, sampleRate, flags);
+    }
+}
+
+/*
+Iterates over the standard sample rates, plus the device's reported minimum and maximum rates,
+and adds a native data format entry to pDeviceInfo for each rate the device supports with the
+given format and channel count. Standard rates are tested in priority order so the most
+favorable rates appear first in the list.
+*/
+static void ma_context_iterate_rates_and_add_native_data_format__alsa(ma_context* pContext, ma_snd_pcm_t* pPCM, ma_snd_pcm_hw_params_t* pHWParams, ma_format format, ma_uint32 channels, ma_uint32 flags, ma_device_info* pDeviceInfo)
+{
+    ma_uint32 iSampleRate;
+    unsigned int minSampleRate;
+    unsigned int maxSampleRate;
+    int sampleRateDir;  /* Not used. Just passed into snd_pcm_hw_params_get_rate_min/max(). */
+
+    /* There could be a range. */
+    ((ma_snd_pcm_hw_params_get_rate_min_proc)pContext->alsa.snd_pcm_hw_params_get_rate_min)(pHWParams, &minSampleRate, &sampleRateDir);
+    ((ma_snd_pcm_hw_params_get_rate_max_proc)pContext->alsa.snd_pcm_hw_params_get_rate_max)(pHWParams, &maxSampleRate, &sampleRateDir);
+
+    /* Make sure our sample rates are clamped to sane values. Stupid devices like "pulse" will report rates like "1" which is ridiculous. */
+    minSampleRate = ma_clamp(minSampleRate, (unsigned int)ma_standard_sample_rate_min, (unsigned int)ma_standard_sample_rate_max);
+    maxSampleRate = ma_clamp(maxSampleRate, (unsigned int)ma_standard_sample_rate_min, (unsigned int)ma_standard_sample_rate_max);
+
+    /* Test each standard rate that falls within the device's supported range, in priority order. */
+    for (iSampleRate = 0; iSampleRate < ma_countof(g_maStandardSampleRatePriorities); iSampleRate += 1) {
+        ma_uint32 standardSampleRate = g_maStandardSampleRatePriorities[iSampleRate];
+
+        if (standardSampleRate >= minSampleRate && standardSampleRate <= maxSampleRate) {
+            ma_context_test_rate_and_add_native_data_format__alsa(pContext, pPCM, pHWParams, format, channels, standardSampleRate, flags, pDeviceInfo);
+        }
+    }
+
+    /* Now make sure our min and max rates are included just in case they aren't in the range of our standard rates. */
+    if (!ma_is_standard_sample_rate(minSampleRate)) {
+        ma_context_test_rate_and_add_native_data_format__alsa(pContext, pPCM, pHWParams, format, channels, minSampleRate, flags, pDeviceInfo);
+    }
+
+    if (!ma_is_standard_sample_rate(maxSampleRate) && maxSampleRate != minSampleRate) {
+        ma_context_test_rate_and_add_native_data_format__alsa(pContext, pPCM, pHWParams, format, channels, maxSampleRate, flags, pDeviceInfo);
+    }
+}
+
+/*
+Retrieves detailed information about an ALSA device. Basic information (the name) is obtained
+via enumeration; detailed information (native data formats) requires opening the device and
+probing the hardware parameter configuration space for supported format/channel/rate
+permutations. Returns MA_NO_DEVICE if enumeration does not find the device.
+*/
+static ma_result ma_context_get_device_info__alsa(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo)
+{
+    ma_context_get_device_info_enum_callback_data__alsa data;
+    ma_result result;
+    int resultALSA;
+    ma_snd_pcm_t* pPCM;
+    ma_snd_pcm_hw_params_t* pHWParams;
+    ma_uint32 iFormat;
+    ma_uint32 iChannel;
+
+    MA_ASSERT(pContext != NULL);
+
+    /* We just enumerate to find basic information about the device. */
+    data.deviceType  = deviceType;
+    data.pDeviceID   = pDeviceID;
+    data.pDeviceInfo = pDeviceInfo;
+    data.foundDevice = MA_FALSE;
+    result = ma_context_enumerate_devices__alsa(pContext, ma_context_get_device_info_enum_callback__alsa, &data);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    if (!data.foundDevice) {
+        return MA_NO_DEVICE;
+    }
+
+    /* There's no reliable way to detect the default device on Linux; the "default" name is used as the indicator. */
+    if (ma_strcmp(pDeviceInfo->id.alsa, "default") == 0) {
+        pDeviceInfo->isDefault = MA_TRUE;
+    }
+
+    /* For detailed info we need to open the device. */
+    result = ma_context_open_pcm__alsa(pContext, ma_share_mode_shared, deviceType, pDeviceID, 0, &pPCM);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    /* We need to initialize a HW parameters object in order to know what formats are supported. */
+    pHWParams = (ma_snd_pcm_hw_params_t*)ma_calloc(((ma_snd_pcm_hw_params_sizeof_proc)pContext->alsa.snd_pcm_hw_params_sizeof)(), &pContext->allocationCallbacks);
+    if (pHWParams == NULL) {
+        ((ma_snd_pcm_close_proc)pContext->alsa.snd_pcm_close)(pPCM);
+        return MA_OUT_OF_MEMORY;
+    }
+
+    resultALSA = ((ma_snd_pcm_hw_params_any_proc)pContext->alsa.snd_pcm_hw_params_any)(pPCM, pHWParams);
+    if (resultALSA < 0) {
+        ma_free(pHWParams, &pContext->allocationCallbacks);
+        ((ma_snd_pcm_close_proc)pContext->alsa.snd_pcm_close)(pPCM);
+        ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to initialize hardware parameters. snd_pcm_hw_params_any() failed.");
+        return ma_result_from_errno(-resultALSA);
+    }
+
+    /*
+    Some ALSA devices can support many permutations of formats, channels and rates. We only support
+    a fixed number of permutations which means we need to employ some strategies to ensure the best
+    combinations are returned. An example is the "pulse" device which can do its own data conversion
+    in software and as a result can support any combination of format, channels and rate.
+
+    We want to ensure that the first data formats are the best. We have a list of favored sample
+    formats and sample rates, so these will be the basis of our iteration.
+    */
+
+    /* Formats. We just iterate over our standard formats and test them, making sure we reset the configuration space each iteration. */
+    for (iFormat = 0; iFormat < ma_countof(g_maFormatPriorities); iFormat += 1) {
+        ma_format format = g_maFormatPriorities[iFormat];
+
+        /*
+        For each format we need to make sure we reset the configuration space so we don't return
+        channel counts and rates that aren't compatible with a format.
+        */
+        ((ma_snd_pcm_hw_params_any_proc)pContext->alsa.snd_pcm_hw_params_any)(pPCM, pHWParams);
+
+        /* Test the format first. If this fails it means the format is not supported and we can skip it. */
+        if (((ma_snd_pcm_hw_params_test_format_proc)pContext->alsa.snd_pcm_hw_params_test_format)(pPCM, pHWParams, ma_convert_ma_format_to_alsa_format(format)) == 0) {
+            /* The format is supported. */
+            unsigned int minChannels;
+            unsigned int maxChannels;
+
+            /*
+            The configuration space needs to be restricted to this format so we can get an accurate
+            picture of which sample rates and channel counts are supported with this format.
+            */
+            ((ma_snd_pcm_hw_params_set_format_proc)pContext->alsa.snd_pcm_hw_params_set_format)(pPCM, pHWParams, ma_convert_ma_format_to_alsa_format(format));
+
+            /* Now we need to check for supported channels. */
+            ((ma_snd_pcm_hw_params_get_channels_min_proc)pContext->alsa.snd_pcm_hw_params_get_channels_min)(pHWParams, &minChannels);
+            ((ma_snd_pcm_hw_params_get_channels_max_proc)pContext->alsa.snd_pcm_hw_params_get_channels_max)(pHWParams, &maxChannels);
+
+            if (minChannels > MA_MAX_CHANNELS) {
+                continue;   /* Too many channels. */
+            }
+            if (maxChannels < MA_MIN_CHANNELS) {
+                continue;   /* Not enough channels. */
+            }
+
+            /*
+            Make sure the channel count is clamped. This is mainly intended for the max channels
+            because some devices can report an unbound maximum.
+            */
+            minChannels = ma_clamp(minChannels, MA_MIN_CHANNELS, MA_MAX_CHANNELS);
+            maxChannels = ma_clamp(maxChannels, MA_MIN_CHANNELS, MA_MAX_CHANNELS);
+
+            if (minChannels == MA_MIN_CHANNELS && maxChannels == MA_MAX_CHANNELS) {
+                /* The device supports all channels. Don't iterate over every single one. Instead just set the channels to 0 which means all channels are supported. */
+                ma_context_iterate_rates_and_add_native_data_format__alsa(pContext, pPCM, pHWParams, format, 0, 0, pDeviceInfo);    /* Intentionally setting the channel count to 0 as that means all channels are supported. */
+            } else {
+                /* The device only supports a specific set of channels. We need to iterate over all of them. */
+                for (iChannel = minChannels; iChannel <= maxChannels; iChannel += 1) {
+                    /* Test the channel before applying it to the configuration space. */
+                    unsigned int channels = iChannel;
+
+                    /* Make sure our channel range is reset before testing again or else we'll always fail the test. */
+                    ((ma_snd_pcm_hw_params_any_proc)pContext->alsa.snd_pcm_hw_params_any)(pPCM, pHWParams);
+                    ((ma_snd_pcm_hw_params_set_format_proc)pContext->alsa.snd_pcm_hw_params_set_format)(pPCM, pHWParams, ma_convert_ma_format_to_alsa_format(format));
+
+                    if (((ma_snd_pcm_hw_params_test_channels_proc)pContext->alsa.snd_pcm_hw_params_test_channels)(pPCM, pHWParams, channels) == 0) {
+                        /* The channel count is supported. */
+
+                        /* The configuration space now needs to be restricted to the channel count before extracting the sample rate. */
+                        ((ma_snd_pcm_hw_params_set_channels_proc)pContext->alsa.snd_pcm_hw_params_set_channels)(pPCM, pHWParams, channels);
+
+                        /* Only after the configuration space has been restricted to the specific channel count should we iterate over our sample rates. */
+                        ma_context_iterate_rates_and_add_native_data_format__alsa(pContext, pPCM, pHWParams, format, channels, 0, pDeviceInfo);
+                    } else {
+                        /* The channel count is not supported. Skip. */
+                    }
+                }
+            }
+        } else {
+            /* The format is not supported. Skip. */
+        }
+    }
+
+    ma_free(pHWParams, &pContext->allocationCallbacks);
+
+    ((ma_snd_pcm_close_proc)pContext->alsa.snd_pcm_close)(pPCM);
+    return MA_SUCCESS;
+}
+
+/*
+Uninitializes an ALSA device. For each side (capture and playback) that was initialized this
+closes the PCM handle, closes the wakeup eventfd and frees the poll descriptor buffer.
+*/
+static ma_result ma_device_uninit__alsa(ma_device* pDevice)
+{
+    ma_context* pContext;
+
+    MA_ASSERT(pDevice != NULL);
+
+    pContext = pDevice->pContext;
+
+    /* Capture side. */
+    if ((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture != NULL) {
+        ((ma_snd_pcm_close_proc)pContext->alsa.snd_pcm_close)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture);
+        close(pDevice->alsa.wakeupfdCapture);
+        ma_free(pDevice->alsa.pPollDescriptorsCapture, &pContext->allocationCallbacks);
+    }
+
+    /* Playback side. Same cleanup steps as capture. */
+    if ((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback != NULL) {
+        ((ma_snd_pcm_close_proc)pContext->alsa.snd_pcm_close)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback);
+        close(pDevice->alsa.wakeupfdPlayback);
+        ma_free(pDevice->alsa.pPollDescriptorsPlayback, &pContext->allocationCallbacks);
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_device_init_by_type__alsa(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptor, ma_device_type deviceType)
+{
+ ma_result result;
+ int resultALSA;
+ ma_snd_pcm_t* pPCM;
+ ma_bool32 isUsingMMap;
+ ma_snd_pcm_format_t formatALSA;
+ ma_format internalFormat;
+ ma_uint32 internalChannels;
+ ma_uint32 internalSampleRate;
+ ma_channel internalChannelMap[MA_MAX_CHANNELS];
+ ma_uint32 internalPeriodSizeInFrames;
+ ma_uint32 internalPeriods;
+ int openMode;
+ ma_snd_pcm_hw_params_t* pHWParams;
+ ma_snd_pcm_sw_params_t* pSWParams;
+ ma_snd_pcm_uframes_t bufferBoundary;
+ int pollDescriptorCount;
+ struct pollfd* pPollDescriptors;
+ int wakeupfd;
+
+ MA_ASSERT(pConfig != NULL);
+ MA_ASSERT(deviceType != ma_device_type_duplex); /* This function should only be called for playback _or_ capture, never duplex. */
+ MA_ASSERT(pDevice != NULL);
+
+ formatALSA = ma_convert_ma_format_to_alsa_format(pDescriptor->format);
+
+ openMode = 0;
+ if (pConfig->alsa.noAutoResample) {
+ openMode |= MA_SND_PCM_NO_AUTO_RESAMPLE;
+ }
+ if (pConfig->alsa.noAutoChannels) {
+ openMode |= MA_SND_PCM_NO_AUTO_CHANNELS;
+ }
+ if (pConfig->alsa.noAutoFormat) {
+ openMode |= MA_SND_PCM_NO_AUTO_FORMAT;
+ }
+
+ result = ma_context_open_pcm__alsa(pDevice->pContext, pDescriptor->shareMode, deviceType, pDescriptor->pDeviceID, openMode, &pPCM);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+
+ /* Hardware parameters. */
+ pHWParams = (ma_snd_pcm_hw_params_t*)ma_calloc(((ma_snd_pcm_hw_params_sizeof_proc)pDevice->pContext->alsa.snd_pcm_hw_params_sizeof)(), &pDevice->pContext->allocationCallbacks);
+ if (pHWParams == NULL) {
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to allocate memory for hardware parameters.");
+ return MA_OUT_OF_MEMORY;
+ }
+
+ resultALSA = ((ma_snd_pcm_hw_params_any_proc)pDevice->pContext->alsa.snd_pcm_hw_params_any)(pPCM, pHWParams);
+ if (resultALSA < 0) {
+ ma_free(pHWParams, &pDevice->pContext->allocationCallbacks);
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to initialize hardware parameters. snd_pcm_hw_params_any() failed.");
+ return ma_result_from_errno(-resultALSA);
+ }
+
+ /* MMAP Mode. Try using interleaved MMAP access. If this fails, fall back to standard readi/writei. */
+ isUsingMMap = MA_FALSE;
+#if 0 /* NOTE: MMAP mode temporarily disabled. */
+ if (deviceType != ma_device_type_capture) { /* <-- Disabling MMAP mode for capture devices because I apparently do not have a device that supports it which means I can't test it... Contributions welcome. */
+ if (!pConfig->alsa.noMMap && ma_device__is_async(pDevice)) {
+ if (((ma_snd_pcm_hw_params_set_access_proc)pDevice->pContext->alsa.snd_pcm_hw_params_set_access)(pPCM, pHWParams, MA_SND_PCM_ACCESS_MMAP_INTERLEAVED) == 0) {
+ pDevice->alsa.isUsingMMap = MA_TRUE;
+ }
+ }
+ }
+#endif
+
+ if (!isUsingMMap) {
+ resultALSA = ((ma_snd_pcm_hw_params_set_access_proc)pDevice->pContext->alsa.snd_pcm_hw_params_set_access)(pPCM, pHWParams, MA_SND_PCM_ACCESS_RW_INTERLEAVED);
+ if (resultALSA < 0) {
+ ma_free(pHWParams, &pDevice->pContext->allocationCallbacks);
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set access mode to neither SND_PCM_ACCESS_MMAP_INTERLEAVED nor SND_PCM_ACCESS_RW_INTERLEAVED. snd_pcm_hw_params_set_access() failed.");
+ return ma_result_from_errno(-resultALSA);
+ }
+ }
+
+ /*
+ Most important properties first. The documentation for OSS (yes, I know this is ALSA!) recommends format, channels, then sample rate. I can't
+ find any documentation for ALSA specifically, so I'm going to copy the recommendation for OSS.
+ */
+
+ /* Format. */
+ {
+ /*
+ At this point we should have a list of supported formats, so now we need to find the best one. We first check if the requested format is
+ supported, and if so, use that one. If it's not supported, we just run though a list of formats and try to find the best one.
+ */
+ if (formatALSA == MA_SND_PCM_FORMAT_UNKNOWN || ((ma_snd_pcm_hw_params_test_format_proc)pDevice->pContext->alsa.snd_pcm_hw_params_test_format)(pPCM, pHWParams, formatALSA) != 0) {
+ /* We're either requesting the native format or the specified format is not supported. */
+ size_t iFormat;
+
+ formatALSA = MA_SND_PCM_FORMAT_UNKNOWN;
+ for (iFormat = 0; iFormat < ma_countof(g_maFormatPriorities); ++iFormat) {
+ if (((ma_snd_pcm_hw_params_test_format_proc)pDevice->pContext->alsa.snd_pcm_hw_params_test_format)(pPCM, pHWParams, ma_convert_ma_format_to_alsa_format(g_maFormatPriorities[iFormat])) == 0) {
+ formatALSA = ma_convert_ma_format_to_alsa_format(g_maFormatPriorities[iFormat]);
+ break;
+ }
+ }
+
+ if (formatALSA == MA_SND_PCM_FORMAT_UNKNOWN) {
+ ma_free(pHWParams, &pDevice->pContext->allocationCallbacks);
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Format not supported. The device does not support any miniaudio formats.");
+ return MA_FORMAT_NOT_SUPPORTED;
+ }
+ }
+
+ resultALSA = ((ma_snd_pcm_hw_params_set_format_proc)pDevice->pContext->alsa.snd_pcm_hw_params_set_format)(pPCM, pHWParams, formatALSA);
+ if (resultALSA < 0) {
+ ma_free(pHWParams, &pDevice->pContext->allocationCallbacks);
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Format not supported. snd_pcm_hw_params_set_format() failed.");
+ return ma_result_from_errno(-resultALSA);
+ }
+
+ internalFormat = ma_format_from_alsa(formatALSA);
+ if (internalFormat == ma_format_unknown) {
+ ma_free(pHWParams, &pDevice->pContext->allocationCallbacks);
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] The chosen format is not supported by miniaudio.");
+ return MA_FORMAT_NOT_SUPPORTED;
+ }
+ }
+
+ /* Channels. */
+ {
+ unsigned int channels = pDescriptor->channels;
+ if (channels == 0) {
+ channels = MA_DEFAULT_CHANNELS;
+ }
+
+ resultALSA = ((ma_snd_pcm_hw_params_set_channels_near_proc)pDevice->pContext->alsa.snd_pcm_hw_params_set_channels_near)(pPCM, pHWParams, &channels);
+ if (resultALSA < 0) {
+ ma_free(pHWParams, &pDevice->pContext->allocationCallbacks);
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set channel count. snd_pcm_hw_params_set_channels_near() failed.");
+ return ma_result_from_errno(-resultALSA);
+ }
+
+ internalChannels = (ma_uint32)channels;
+ }
+
+ /* Sample Rate */
+ {
+ unsigned int sampleRate;
+
+ /*
+ It appears there's either a bug in ALSA, a bug in some drivers, or I'm doing something silly; but having resampling enabled causes
+ problems with some device configurations when used in conjunction with MMAP access mode. To fix this problem we need to disable
+ resampling.
+
+ To reproduce this problem, open the "plug:dmix" device, and set the sample rate to 44100. Internally, it looks like dmix uses a
+ sample rate of 48000. The hardware parameters will get set correctly with no errors, but it looks like the 44100 -> 48000 resampling
+ doesn't work properly - but only with MMAP access mode. You will notice skipping/crackling in the audio, and it'll run at a slightly
+ faster rate.
+
+ miniaudio has built-in support for sample rate conversion (albeit low quality at the moment), so disabling resampling should be fine
+ for us. The only problem is that it won't be taking advantage of any kind of hardware-accelerated resampling and it won't be very
+ good quality until I get a chance to improve the quality of miniaudio's software sample rate conversion.
+
+ I don't currently know if the dmix plugin is the only one with this error. Indeed, this is the only one I've been able to reproduce
+ this error with. In the future, we may want to restrict the disabling of resampling to only known bad plugins.
+ */
+ ((ma_snd_pcm_hw_params_set_rate_resample_proc)pDevice->pContext->alsa.snd_pcm_hw_params_set_rate_resample)(pPCM, pHWParams, 0);
+
+ sampleRate = pDescriptor->sampleRate;
+ if (sampleRate == 0) {
+ sampleRate = MA_DEFAULT_SAMPLE_RATE;
+ }
+
+ resultALSA = ((ma_snd_pcm_hw_params_set_rate_near_proc)pDevice->pContext->alsa.snd_pcm_hw_params_set_rate_near)(pPCM, pHWParams, &sampleRate, 0);
+ if (resultALSA < 0) {
+ ma_free(pHWParams, &pDevice->pContext->allocationCallbacks);
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Sample rate not supported. snd_pcm_hw_params_set_rate_near() failed.");
+ return ma_result_from_errno(-resultALSA);
+ }
+
+ internalSampleRate = (ma_uint32)sampleRate;
+ }
+
+ /* Periods. */
+ {
+ ma_uint32 periods = pDescriptor->periodCount;
+
+ resultALSA = ((ma_snd_pcm_hw_params_set_periods_near_proc)pDevice->pContext->alsa.snd_pcm_hw_params_set_periods_near)(pPCM, pHWParams, &periods, NULL);
+ if (resultALSA < 0) {
+ ma_free(pHWParams, &pDevice->pContext->allocationCallbacks);
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set period count. snd_pcm_hw_params_set_periods_near() failed.");
+ return ma_result_from_errno(-resultALSA);
+ }
+
+ internalPeriods = periods;
+ }
+
+ /* Buffer Size */
+ {
+ ma_snd_pcm_uframes_t actualBufferSizeInFrames = ma_calculate_buffer_size_in_frames_from_descriptor(pDescriptor, internalSampleRate, pConfig->performanceProfile) * internalPeriods;
+
+ resultALSA = ((ma_snd_pcm_hw_params_set_buffer_size_near_proc)pDevice->pContext->alsa.snd_pcm_hw_params_set_buffer_size_near)(pPCM, pHWParams, &actualBufferSizeInFrames);
+ if (resultALSA < 0) {
+ ma_free(pHWParams, &pDevice->pContext->allocationCallbacks);
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set buffer size for device. snd_pcm_hw_params_set_buffer_size() failed.");
+ return ma_result_from_errno(-resultALSA);
+ }
+
+ internalPeriodSizeInFrames = actualBufferSizeInFrames / internalPeriods;
+ }
+
+ /* Apply hardware parameters. */
+ resultALSA = ((ma_snd_pcm_hw_params_proc)pDevice->pContext->alsa.snd_pcm_hw_params)(pPCM, pHWParams);
+ if (resultALSA < 0) {
+ ma_free(pHWParams, &pDevice->pContext->allocationCallbacks);
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set hardware parameters. snd_pcm_hw_params() failed.");
+ return ma_result_from_errno(-resultALSA);
+ }
+
+ ma_free(pHWParams, &pDevice->pContext->allocationCallbacks);
+ pHWParams = NULL;
+
+
+ /* Software parameters. */
+ pSWParams = (ma_snd_pcm_sw_params_t*)ma_calloc(((ma_snd_pcm_sw_params_sizeof_proc)pDevice->pContext->alsa.snd_pcm_sw_params_sizeof)(), &pDevice->pContext->allocationCallbacks);
+ if (pSWParams == NULL) {
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to allocate memory for software parameters.");
+ return MA_OUT_OF_MEMORY;
+ }
+
+ resultALSA = ((ma_snd_pcm_sw_params_current_proc)pDevice->pContext->alsa.snd_pcm_sw_params_current)(pPCM, pSWParams);
+ if (resultALSA < 0) {
+ ma_free(pSWParams, &pDevice->pContext->allocationCallbacks);
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to initialize software parameters. snd_pcm_sw_params_current() failed.");
+ return ma_result_from_errno(-resultALSA);
+ }
+
+ resultALSA = ((ma_snd_pcm_sw_params_set_avail_min_proc)pDevice->pContext->alsa.snd_pcm_sw_params_set_avail_min)(pPCM, pSWParams, ma_prev_power_of_2(internalPeriodSizeInFrames));
+ if (resultALSA < 0) {
+ ma_free(pSWParams, &pDevice->pContext->allocationCallbacks);
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] snd_pcm_sw_params_set_avail_min() failed.");
+ return ma_result_from_errno(-resultALSA);
+ }
+
+ resultALSA = ((ma_snd_pcm_sw_params_get_boundary_proc)pDevice->pContext->alsa.snd_pcm_sw_params_get_boundary)(pSWParams, &bufferBoundary);
+ if (resultALSA < 0) {
+ bufferBoundary = internalPeriodSizeInFrames * internalPeriods;
+ }
+
+ if (deviceType == ma_device_type_playback && !isUsingMMap) { /* Only playback devices in writei/readi mode need a start threshold. */
+ /*
+ Subtle detail here with the start threshold. When in playback-only mode (no full-duplex) we can set the start threshold to
+ the size of a period. But for full-duplex we need to set it such that it is at least two periods.
+ */
+ resultALSA = ((ma_snd_pcm_sw_params_set_start_threshold_proc)pDevice->pContext->alsa.snd_pcm_sw_params_set_start_threshold)(pPCM, pSWParams, internalPeriodSizeInFrames*2);
+ if (resultALSA < 0) {
+ ma_free(pSWParams, &pDevice->pContext->allocationCallbacks);
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set start threshold for playback device. snd_pcm_sw_params_set_start_threshold() failed.");
+ return ma_result_from_errno(-resultALSA);
+ }
+
+ resultALSA = ((ma_snd_pcm_sw_params_set_stop_threshold_proc)pDevice->pContext->alsa.snd_pcm_sw_params_set_stop_threshold)(pPCM, pSWParams, bufferBoundary);
+ if (resultALSA < 0) { /* Set to boundary to loop instead of stop in the event of an xrun. */
+ ma_free(pSWParams, &pDevice->pContext->allocationCallbacks);
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set stop threshold for playback device. snd_pcm_sw_params_set_stop_threshold() failed.");
+ return ma_result_from_errno(-resultALSA);
+ }
+ }
+
+ resultALSA = ((ma_snd_pcm_sw_params_proc)pDevice->pContext->alsa.snd_pcm_sw_params)(pPCM, pSWParams);
+ if (resultALSA < 0) {
+ ma_free(pSWParams, &pDevice->pContext->allocationCallbacks);
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set software parameters. snd_pcm_sw_params() failed.");
+ return ma_result_from_errno(-resultALSA);
+ }
+
+ ma_free(pSWParams, &pDevice->pContext->allocationCallbacks);
+ pSWParams = NULL;
+
+
+ /* Grab the internal channel map. For now we're not going to bother trying to change the channel map and instead just do it ourselves. */
+ {
+ ma_snd_pcm_chmap_t* pChmap = ((ma_snd_pcm_get_chmap_proc)pDevice->pContext->alsa.snd_pcm_get_chmap)(pPCM);
+ if (pChmap != NULL) {
+ ma_uint32 iChannel;
+
+ /* There are cases where the returned channel map can have a different channel count than was returned by snd_pcm_hw_params_set_channels_near(). */
+ if (pChmap->channels >= internalChannels) {
+ /* Drop excess channels. */
+ for (iChannel = 0; iChannel < internalChannels; ++iChannel) {
+ internalChannelMap[iChannel] = ma_convert_alsa_channel_position_to_ma_channel(pChmap->pos[iChannel]);
+ }
+ } else {
+ ma_uint32 i;
+
+ /*
+ Excess channels use defaults. Do an initial fill with defaults, overwrite the first pChmap->channels, validate to ensure there are no duplicate
+ channels. If validation fails, fall back to defaults.
+ */
+ ma_bool32 isValid = MA_TRUE;
+
+ /* Fill with defaults. */
+ ma_channel_map_init_standard(ma_standard_channel_map_alsa, internalChannelMap, ma_countof(internalChannelMap), internalChannels);
+
+ /* Overwrite first pChmap->channels channels. */
+ for (iChannel = 0; iChannel < pChmap->channels; ++iChannel) {
+ internalChannelMap[iChannel] = ma_convert_alsa_channel_position_to_ma_channel(pChmap->pos[iChannel]);
+ }
+
+ /* Validate. */
+ for (i = 0; i < internalChannels && isValid; ++i) {
+ ma_uint32 j;
+ for (j = i+1; j < internalChannels; ++j) {
+ if (internalChannelMap[i] == internalChannelMap[j]) {
+ isValid = MA_FALSE;
+ break;
+ }
+ }
+ }
+
+ /* If our channel map is invalid, fall back to defaults. */
+ if (!isValid) {
+ ma_channel_map_init_standard(ma_standard_channel_map_alsa, internalChannelMap, ma_countof(internalChannelMap), internalChannels);
+ }
+ }
+
+ free(pChmap);
+ pChmap = NULL;
+ } else {
+ /* Could not retrieve the channel map. Fall back to a hard-coded assumption. */
+ ma_channel_map_init_standard(ma_standard_channel_map_alsa, internalChannelMap, ma_countof(internalChannelMap), internalChannels);
+ }
+ }
+
+
+ /*
+ We need to retrieve the poll descriptors so we can use poll() to wait for data to become
+ available for reading or writing. There's no well defined maximum for this so we're just going
+ to allocate this on the heap.
+ */
+ pollDescriptorCount = ((ma_snd_pcm_poll_descriptors_count_proc)pDevice->pContext->alsa.snd_pcm_poll_descriptors_count)(pPCM);
+ if (pollDescriptorCount <= 0) {
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to retrieve poll descriptors count.");
+ return MA_ERROR;
+ }
+
+ pPollDescriptors = (struct pollfd*)ma_malloc(sizeof(*pPollDescriptors) * (pollDescriptorCount + 1), &pDevice->pContext->allocationCallbacks); /* +1 because we want room for the wakeup descriptor. */
+ if (pPollDescriptors == NULL) {
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to allocate memory for poll descriptors.");
+ return MA_OUT_OF_MEMORY;
+ }
+
+ /*
+ We need an eventfd to wakeup from poll() and avoid a deadlock in situations where the driver
+ never returns from writei() and readi(). This has been observed with the "pulse" device.
+ */
+ wakeupfd = eventfd(0, 0);
+ if (wakeupfd < 0) {
+ ma_free(pPollDescriptors, &pDevice->pContext->allocationCallbacks);
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to create eventfd for poll wakeup.");
+ return ma_result_from_errno(errno);
+ }
+
+ /* We'll place the wakeup fd at the start of the buffer. */
+ pPollDescriptors[0].fd = wakeupfd;
+ pPollDescriptors[0].events = POLLIN; /* We only care about waiting to read from the wakeup file descriptor. */
+ pPollDescriptors[0].revents = 0;
+
+ /* We can now extract the PCM poll descriptors which we place after the wakeup descriptor. */
+ pollDescriptorCount = ((ma_snd_pcm_poll_descriptors_proc)pDevice->pContext->alsa.snd_pcm_poll_descriptors)(pPCM, pPollDescriptors + 1, pollDescriptorCount); /* +1 because we want to place these descriptors after the wakeup descriptor. */
+ if (pollDescriptorCount <= 0) {
+ close(wakeupfd);
+ ma_free(pPollDescriptors, &pDevice->pContext->allocationCallbacks);
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to retrieve poll descriptors.");
+ return MA_ERROR;
+ }
+
+ if (deviceType == ma_device_type_capture) {
+ pDevice->alsa.pollDescriptorCountCapture = pollDescriptorCount;
+ pDevice->alsa.pPollDescriptorsCapture = pPollDescriptors;
+ pDevice->alsa.wakeupfdCapture = wakeupfd;
+ } else {
+ pDevice->alsa.pollDescriptorCountPlayback = pollDescriptorCount;
+ pDevice->alsa.pPollDescriptorsPlayback = pPollDescriptors;
+ pDevice->alsa.wakeupfdPlayback = wakeupfd;
+ }
+
+
+ /* We're done. Prepare the device. */
+ resultALSA = ((ma_snd_pcm_prepare_proc)pDevice->pContext->alsa.snd_pcm_prepare)(pPCM);
+ if (resultALSA < 0) {
+ close(wakeupfd);
+ ma_free(pPollDescriptors, &pDevice->pContext->allocationCallbacks);
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to prepare device.");
+ return ma_result_from_errno(-resultALSA);
+ }
+
+
+ if (deviceType == ma_device_type_capture) {
+ pDevice->alsa.pPCMCapture = (ma_ptr)pPCM;
+ pDevice->alsa.isUsingMMapCapture = isUsingMMap;
+ } else {
+ pDevice->alsa.pPCMPlayback = (ma_ptr)pPCM;
+ pDevice->alsa.isUsingMMapPlayback = isUsingMMap;
+ }
+
+ pDescriptor->format = internalFormat;
+ pDescriptor->channels = internalChannels;
+ pDescriptor->sampleRate = internalSampleRate;
+ ma_channel_map_copy(pDescriptor->channelMap, internalChannelMap, ma_min(internalChannels, MA_MAX_CHANNELS));
+ pDescriptor->periodSizeInFrames = internalPeriodSizeInFrames;
+ pDescriptor->periodCount = internalPeriods;
+
+ return MA_SUCCESS;
+}
+
+static ma_result ma_device_init__alsa(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture)
+{
+ MA_ASSERT(pDevice != NULL);
+
+ MA_ZERO_OBJECT(&pDevice->alsa);
+
+ if (pConfig->deviceType == ma_device_type_loopback) {
+ return MA_DEVICE_TYPE_NOT_SUPPORTED;
+ }
+
+ if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
+ ma_result result = ma_device_init_by_type__alsa(pDevice, pConfig, pDescriptorCapture, ma_device_type_capture);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+ }
+
+ if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+ ma_result result = ma_device_init_by_type__alsa(pDevice, pConfig, pDescriptorPlayback, ma_device_type_playback);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+ }
+
+ return MA_SUCCESS;
+}
+
+static ma_result ma_device_start__alsa(ma_device* pDevice)
+{
+ int resultALSA;
+
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ resultALSA = ((ma_snd_pcm_start_proc)pDevice->pContext->alsa.snd_pcm_start)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture);
+ if (resultALSA < 0) {
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to start capture device.");
+ return ma_result_from_errno(-resultALSA);
+ }
+ }
+
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ /* Don't need to do anything for playback because it'll be started automatically when enough data has been written. */
+ }
+
+ return MA_SUCCESS;
+}
+
+static ma_result ma_device_stop__alsa(ma_device* pDevice)
+{
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Dropping capture device...\n");
+ ((ma_snd_pcm_drop_proc)pDevice->pContext->alsa.snd_pcm_drop)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture);
+ ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Dropping capture device successful.\n");
+
+ /* We need to prepare the device again, otherwise we won't be able to restart the device. */
+ ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Preparing capture device...\n");
+ if (((ma_snd_pcm_prepare_proc)pDevice->pContext->alsa.snd_pcm_prepare)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture) < 0) {
+ ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Preparing capture device failed.\n");
+ } else {
+ ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Preparing capture device successful.\n");
+ }
+ }
+
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Dropping playback device...\n");
+ ((ma_snd_pcm_drop_proc)pDevice->pContext->alsa.snd_pcm_drop)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback);
+ ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Dropping playback device successful.\n");
+
+ /* We need to prepare the device again, otherwise we won't be able to restart the device. */
+ ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Preparing playback device...\n");
+ if (((ma_snd_pcm_prepare_proc)pDevice->pContext->alsa.snd_pcm_prepare)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback) < 0) {
+ ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Preparing playback device failed.\n");
+ } else {
+ ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Preparing playback device successful.\n");
+ }
+ }
+
+ return MA_SUCCESS;
+}
+
+static ma_result ma_device_wait__alsa(ma_device* pDevice, ma_snd_pcm_t* pPCM, struct pollfd* pPollDescriptors, int pollDescriptorCount, short requiredEvent)
+{
+ for (;;) {
+ unsigned short revents;
+ int resultALSA;
+ int resultPoll = poll(pPollDescriptors, pollDescriptorCount, -1);
+ if (resultPoll < 0) {
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] poll() failed.");
+ return ma_result_from_errno(errno);
+ }
+
+ /*
+ Before checking the ALSA poll descriptor flag we need to check if the wakeup descriptor
+ has had it's POLLIN flag set. If so, we need to actually read the data and then exit
+ function. The wakeup descriptor will be the first item in the descriptors buffer.
+ */
+ if ((pPollDescriptors[0].revents & POLLIN) != 0) {
+ ma_uint64 t;
+ int resultRead = read(pPollDescriptors[0].fd, &t, sizeof(t)); /* <-- Important that we read here so that the next write() does not block. */
+ if (resultRead < 0) {
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] read() failed.");
+ return ma_result_from_errno(errno);
+ }
+
+ ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] POLLIN set for wakeupfd\n");
+ return MA_DEVICE_NOT_STARTED;
+ }
+
+ /*
+ Getting here means that some data should be able to be read. We need to use ALSA to
+ translate the revents flags for us.
+ */
+ resultALSA = ((ma_snd_pcm_poll_descriptors_revents_proc)pDevice->pContext->alsa.snd_pcm_poll_descriptors_revents)(pPCM, pPollDescriptors + 1, pollDescriptorCount - 1, &revents); /* +1, -1 to ignore the wakeup descriptor. */
+ if (resultALSA < 0) {
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] snd_pcm_poll_descriptors_revents() failed.");
+ return ma_result_from_errno(-resultALSA);
+ }
+
+ if ((revents & POLLERR) != 0) {
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] POLLERR detected.");
+ return ma_result_from_errno(errno);
+ }
+
+ if ((revents & requiredEvent) == requiredEvent) {
+ break; /* We're done. Data available for reading or writing. */
+ }
+ }
+
+ return MA_SUCCESS;
+}
+
+static ma_result ma_device_wait_read__alsa(ma_device* pDevice)
+{
+ return ma_device_wait__alsa(pDevice, (ma_snd_pcm_t*)pDevice->alsa.pPCMCapture, (struct pollfd*)pDevice->alsa.pPollDescriptorsCapture, pDevice->alsa.pollDescriptorCountCapture + 1, POLLIN); /* +1 to account for the wakeup descriptor. */
+}
+
+static ma_result ma_device_wait_write__alsa(ma_device* pDevice)
+{
+ return ma_device_wait__alsa(pDevice, (ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback, (struct pollfd*)pDevice->alsa.pPollDescriptorsPlayback, pDevice->alsa.pollDescriptorCountPlayback + 1, POLLOUT); /* +1 to account for the wakeup descriptor. */
+}
+
+static ma_result ma_device_read__alsa(ma_device* pDevice, void* pFramesOut, ma_uint32 frameCount, ma_uint32* pFramesRead)
+{
+ ma_snd_pcm_sframes_t resultALSA = 0;
+
+ MA_ASSERT(pDevice != NULL);
+ MA_ASSERT(pFramesOut != NULL);
+
+ if (pFramesRead != NULL) {
+ *pFramesRead = 0;
+ }
+
+ while (ma_device_get_state(pDevice) == ma_device_state_started) {
+ ma_result result;
+
+ /* The first thing to do is wait for data to become available for reading. This will return an error code if the device has been stopped. */
+ result = ma_device_wait_read__alsa(pDevice);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ /* Getting here means we should have data available. */
+ resultALSA = ((ma_snd_pcm_readi_proc)pDevice->pContext->alsa.snd_pcm_readi)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture, pFramesOut, frameCount);
+ if (resultALSA >= 0) {
+ break; /* Success. */
+ } else {
+ if (resultALSA == -EAGAIN) {
+ /*ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "EGAIN (read)\n");*/
+ continue; /* Try again. */
+ } else if (resultALSA == -EPIPE) {
+ ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "EPIPE (read)\n");
+
+ /* Overrun. Recover and try again. If this fails we need to return an error. */
+ resultALSA = ((ma_snd_pcm_recover_proc)pDevice->pContext->alsa.snd_pcm_recover)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture, resultALSA, MA_TRUE);
+ if (resultALSA < 0) {
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to recover device after overrun.");
+ return ma_result_from_errno((int)-resultALSA);
+ }
+
+ resultALSA = ((ma_snd_pcm_start_proc)pDevice->pContext->alsa.snd_pcm_start)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture);
+ if (resultALSA < 0) {
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to start device after underrun.");
+ return ma_result_from_errno((int)-resultALSA);
+ }
+
+ continue; /* Try reading again. */
+ }
+ }
+ }
+
+ if (pFramesRead != NULL) {
+ *pFramesRead = resultALSA;
+ }
+
+ return MA_SUCCESS;
+}
+
+static ma_result ma_device_write__alsa(ma_device* pDevice, const void* pFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten)
+{
+ ma_snd_pcm_sframes_t resultALSA = 0;
+
+ MA_ASSERT(pDevice != NULL);
+ MA_ASSERT(pFrames != NULL);
+
+ if (pFramesWritten != NULL) {
+ *pFramesWritten = 0;
+ }
+
+ while (ma_device_get_state(pDevice) == ma_device_state_started) {
+ ma_result result;
+
+ /* The first thing to do is wait for space to become available for writing. This will return an error code if the device has been stopped. */
+ result = ma_device_wait_write__alsa(pDevice);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ resultALSA = ((ma_snd_pcm_writei_proc)pDevice->pContext->alsa.snd_pcm_writei)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback, pFrames, frameCount);
+ if (resultALSA >= 0) {
+ break; /* Success. */
+ } else {
+ if (resultALSA == -EAGAIN) {
+ /*ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "EGAIN (write)\n");*/
+ continue; /* Try again. */
+ } else if (resultALSA == -EPIPE) {
+ ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "EPIPE (write)\n");
+
+ /* Underrun. Recover and try again. If this fails we need to return an error. */
+ resultALSA = ((ma_snd_pcm_recover_proc)pDevice->pContext->alsa.snd_pcm_recover)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback, resultALSA, MA_TRUE); /* MA_TRUE=silent (don't print anything on error). */
+ if (resultALSA < 0) {
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to recover device after underrun.");
+ return ma_result_from_errno((int)-resultALSA);
+ }
+
+ /*
+ In my testing I have had a situation where writei() does not automatically restart the device even though I've set it
+ up as such in the software parameters. What will happen is writei() will block indefinitely even though the number of
+ frames is well beyond the auto-start threshold. To work around this I've needed to add an explicit start here. Not sure
+ if this is me just being stupid and not recovering the device properly, but this definitely feels like something isn't
+ quite right here.
+ */
+ resultALSA = ((ma_snd_pcm_start_proc)pDevice->pContext->alsa.snd_pcm_start)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback);
+ if (resultALSA < 0) {
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to start device after underrun.");
+ return ma_result_from_errno((int)-resultALSA);
+ }
+
+ continue; /* Try writing again. */
+ }
+ }
+ }
+
+ if (pFramesWritten != NULL) {
+ *pFramesWritten = resultALSA;
+ }
+
+ return MA_SUCCESS;
+}
+
+static ma_result ma_device_data_loop_wakeup__alsa(ma_device* pDevice)
+{
+ ma_uint64 t = 1;
+ int resultWrite = 0;
+
+ MA_ASSERT(pDevice != NULL);
+
+ ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Waking up...\n");
+
+ /* Write to an eventfd to trigger a wakeup from poll() and abort any reading or writing. */
+ if (pDevice->alsa.pPollDescriptorsCapture != NULL) {
+ resultWrite = write(pDevice->alsa.wakeupfdCapture, &t, sizeof(t));
+ }
+ if (pDevice->alsa.pPollDescriptorsPlayback != NULL) {
+ resultWrite = write(pDevice->alsa.wakeupfdPlayback, &t, sizeof(t));
+ }
+
+ if (resultWrite < 0) {
+ ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] write() failed.\n");
+ return ma_result_from_errno(errno);
+ }
+
+ ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Waking up completed successfully.\n");
+
+ return MA_SUCCESS;
+}
+
+static ma_result ma_context_uninit__alsa(ma_context* pContext)
+{
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pContext->backend == ma_backend_alsa);
+
+ /* Clean up memory for memory leak checkers. */
+ ((ma_snd_config_update_free_global_proc)pContext->alsa.snd_config_update_free_global)();
+
+#ifndef MA_NO_RUNTIME_LINKING
+ ma_dlclose(pContext, pContext->alsa.asoundSO);
+#endif
+
+ ma_mutex_uninit(&pContext->alsa.internalDeviceEnumLock);
+
+ return MA_SUCCESS;
+}
+
+static ma_result ma_context_init__alsa(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks)
+{
+ ma_result result;
+#ifndef MA_NO_RUNTIME_LINKING
+ const char* libasoundNames[] = {
+ "libasound.so.2",
+ "libasound.so"
+ };
+ size_t i;
+
+ for (i = 0; i < ma_countof(libasoundNames); ++i) {
+ pContext->alsa.asoundSO = ma_dlopen(pContext, libasoundNames[i]);
+ if (pContext->alsa.asoundSO != NULL) {
+ break;
+ }
+ }
+
+ if (pContext->alsa.asoundSO == NULL) {
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "[ALSA] Failed to open shared object.\n");
+ return MA_NO_BACKEND;
+ }
+
+ pContext->alsa.snd_pcm_open = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_open");
+ pContext->alsa.snd_pcm_close = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_close");
+ pContext->alsa.snd_pcm_hw_params_sizeof = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_sizeof");
+ pContext->alsa.snd_pcm_hw_params_any = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_any");
+ pContext->alsa.snd_pcm_hw_params_set_format = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_format");
+ pContext->alsa.snd_pcm_hw_params_set_format_first = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_format_first");
+ pContext->alsa.snd_pcm_hw_params_get_format_mask = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_format_mask");
+ pContext->alsa.snd_pcm_hw_params_set_channels = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_channels");
+ pContext->alsa.snd_pcm_hw_params_set_channels_near = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_channels_near");
+ pContext->alsa.snd_pcm_hw_params_set_channels_minmax = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_channels_minmax");
+ pContext->alsa.snd_pcm_hw_params_set_rate_resample = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_rate_resample");
+ pContext->alsa.snd_pcm_hw_params_set_rate = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_rate");
+ pContext->alsa.snd_pcm_hw_params_set_rate_near = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_rate_near");
+ pContext->alsa.snd_pcm_hw_params_set_buffer_size_near = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_buffer_size_near");
+ pContext->alsa.snd_pcm_hw_params_set_periods_near = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_periods_near");
+ pContext->alsa.snd_pcm_hw_params_set_access = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_access");
+ pContext->alsa.snd_pcm_hw_params_get_format = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_format");
+ pContext->alsa.snd_pcm_hw_params_get_channels = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_channels");
+ pContext->alsa.snd_pcm_hw_params_get_channels_min = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_channels_min");
+ pContext->alsa.snd_pcm_hw_params_get_channels_max = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_channels_max");
+ pContext->alsa.snd_pcm_hw_params_get_rate = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_rate");
+ pContext->alsa.snd_pcm_hw_params_get_rate_min = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_rate_min");
+ pContext->alsa.snd_pcm_hw_params_get_rate_max = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_rate_max");
+ pContext->alsa.snd_pcm_hw_params_get_buffer_size = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_buffer_size");
+ pContext->alsa.snd_pcm_hw_params_get_periods = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_periods");
+ pContext->alsa.snd_pcm_hw_params_get_access = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_access");
+ pContext->alsa.snd_pcm_hw_params_test_format = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_test_format");
+ pContext->alsa.snd_pcm_hw_params_test_channels = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_test_channels");
+ pContext->alsa.snd_pcm_hw_params_test_rate = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_test_rate");
+ pContext->alsa.snd_pcm_hw_params = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params");
+ pContext->alsa.snd_pcm_sw_params_sizeof = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_sw_params_sizeof");
+ pContext->alsa.snd_pcm_sw_params_current = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_sw_params_current");
+ pContext->alsa.snd_pcm_sw_params_get_boundary = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_sw_params_get_boundary");
+ pContext->alsa.snd_pcm_sw_params_set_avail_min = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_sw_params_set_avail_min");
+ pContext->alsa.snd_pcm_sw_params_set_start_threshold = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_sw_params_set_start_threshold");
+ pContext->alsa.snd_pcm_sw_params_set_stop_threshold = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_sw_params_set_stop_threshold");
+ pContext->alsa.snd_pcm_sw_params = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_sw_params");
+ pContext->alsa.snd_pcm_format_mask_sizeof = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_format_mask_sizeof");
+ pContext->alsa.snd_pcm_format_mask_test = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_format_mask_test");
+ pContext->alsa.snd_pcm_get_chmap = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_get_chmap");
+ pContext->alsa.snd_pcm_state = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_state");
+ pContext->alsa.snd_pcm_prepare = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_prepare");
+ pContext->alsa.snd_pcm_start = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_start");
+ pContext->alsa.snd_pcm_drop = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_drop");
+ pContext->alsa.snd_pcm_drain = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_drain");
+ pContext->alsa.snd_pcm_reset = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_reset");
+ pContext->alsa.snd_device_name_hint = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_device_name_hint");
+ pContext->alsa.snd_device_name_get_hint = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_device_name_get_hint");
+ pContext->alsa.snd_card_get_index = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_card_get_index");
+ pContext->alsa.snd_device_name_free_hint = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_device_name_free_hint");
+ pContext->alsa.snd_pcm_mmap_begin = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_mmap_begin");
+ pContext->alsa.snd_pcm_mmap_commit = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_mmap_commit");
+ pContext->alsa.snd_pcm_recover = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_recover");
+ pContext->alsa.snd_pcm_readi = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_readi");
+ pContext->alsa.snd_pcm_writei = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_writei");
+ pContext->alsa.snd_pcm_avail = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_avail");
+ pContext->alsa.snd_pcm_avail_update = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_avail_update");
+ pContext->alsa.snd_pcm_wait = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_wait");
+ pContext->alsa.snd_pcm_nonblock = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_nonblock");
+ pContext->alsa.snd_pcm_info = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_info");
+ pContext->alsa.snd_pcm_info_sizeof = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_info_sizeof");
+ pContext->alsa.snd_pcm_info_get_name = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_info_get_name");
+ pContext->alsa.snd_pcm_poll_descriptors = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_poll_descriptors");
+ pContext->alsa.snd_pcm_poll_descriptors_count = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_poll_descriptors_count");
+ pContext->alsa.snd_pcm_poll_descriptors_revents = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_poll_descriptors_revents");
+ pContext->alsa.snd_config_update_free_global = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_config_update_free_global");
+#else
+ /* The system below is just for type safety. */
+ ma_snd_pcm_open_proc _snd_pcm_open = snd_pcm_open;
+ ma_snd_pcm_close_proc _snd_pcm_close = snd_pcm_close;
+ ma_snd_pcm_hw_params_sizeof_proc _snd_pcm_hw_params_sizeof = snd_pcm_hw_params_sizeof;
+ ma_snd_pcm_hw_params_any_proc _snd_pcm_hw_params_any = snd_pcm_hw_params_any;
+ ma_snd_pcm_hw_params_set_format_proc _snd_pcm_hw_params_set_format = snd_pcm_hw_params_set_format;
+ ma_snd_pcm_hw_params_set_format_first_proc _snd_pcm_hw_params_set_format_first = snd_pcm_hw_params_set_format_first;
+ ma_snd_pcm_hw_params_get_format_mask_proc _snd_pcm_hw_params_get_format_mask = snd_pcm_hw_params_get_format_mask;
+ ma_snd_pcm_hw_params_set_channels_proc _snd_pcm_hw_params_set_channels = snd_pcm_hw_params_set_channels;
+ ma_snd_pcm_hw_params_set_channels_near_proc _snd_pcm_hw_params_set_channels_near = snd_pcm_hw_params_set_channels_near;
+ ma_snd_pcm_hw_params_set_rate_resample_proc _snd_pcm_hw_params_set_rate_resample = snd_pcm_hw_params_set_rate_resample;
+ ma_snd_pcm_hw_params_set_rate_near _snd_pcm_hw_params_set_rate = snd_pcm_hw_params_set_rate;
+ ma_snd_pcm_hw_params_set_rate_near_proc _snd_pcm_hw_params_set_rate_near = snd_pcm_hw_params_set_rate_near;
+ ma_snd_pcm_hw_params_set_rate_minmax_proc _snd_pcm_hw_params_set_rate_minmax = snd_pcm_hw_params_set_rate_minmax;
+ ma_snd_pcm_hw_params_set_buffer_size_near_proc _snd_pcm_hw_params_set_buffer_size_near = snd_pcm_hw_params_set_buffer_size_near;
+ ma_snd_pcm_hw_params_set_periods_near_proc _snd_pcm_hw_params_set_periods_near = snd_pcm_hw_params_set_periods_near;
+ ma_snd_pcm_hw_params_set_access_proc _snd_pcm_hw_params_set_access = snd_pcm_hw_params_set_access;
+ ma_snd_pcm_hw_params_get_format_proc _snd_pcm_hw_params_get_format = snd_pcm_hw_params_get_format;
+ ma_snd_pcm_hw_params_get_channels_proc _snd_pcm_hw_params_get_channels = snd_pcm_hw_params_get_channels;
+ ma_snd_pcm_hw_params_get_channels_min_proc _snd_pcm_hw_params_get_channels_min = snd_pcm_hw_params_get_channels_min;
+ ma_snd_pcm_hw_params_get_channels_max_proc _snd_pcm_hw_params_get_channels_max = snd_pcm_hw_params_get_channels_max;
+ ma_snd_pcm_hw_params_get_rate_proc _snd_pcm_hw_params_get_rate = snd_pcm_hw_params_get_rate;
+ ma_snd_pcm_hw_params_get_rate_min_proc _snd_pcm_hw_params_get_rate_min = snd_pcm_hw_params_get_rate_min;
+ ma_snd_pcm_hw_params_get_rate_max_proc _snd_pcm_hw_params_get_rate_max = snd_pcm_hw_params_get_rate_max;
+ ma_snd_pcm_hw_params_get_buffer_size_proc _snd_pcm_hw_params_get_buffer_size = snd_pcm_hw_params_get_buffer_size;
+ ma_snd_pcm_hw_params_get_periods_proc _snd_pcm_hw_params_get_periods = snd_pcm_hw_params_get_periods;
+ ma_snd_pcm_hw_params_get_access_proc _snd_pcm_hw_params_get_access = snd_pcm_hw_params_get_access;
+ ma_snd_pcm_hw_params_test_format_proc _snd_pcm_hw_params_test_format = snd_pcm_hw_params_test_format;
+ ma_snd_pcm_hw_params_test_channels_proc _snd_pcm_hw_params_test_channels = snd_pcm_hw_params_test_channels;
+ ma_snd_pcm_hw_params_test_rate_proc _snd_pcm_hw_params_test_rate = snd_pcm_hw_params_test_rate;
+ ma_snd_pcm_hw_params_proc _snd_pcm_hw_params = snd_pcm_hw_params;
+ ma_snd_pcm_sw_params_sizeof_proc _snd_pcm_sw_params_sizeof = snd_pcm_sw_params_sizeof;
+ ma_snd_pcm_sw_params_current_proc _snd_pcm_sw_params_current = snd_pcm_sw_params_current;
+ ma_snd_pcm_sw_params_get_boundary_proc _snd_pcm_sw_params_get_boundary = snd_pcm_sw_params_get_boundary;
+ ma_snd_pcm_sw_params_set_avail_min_proc _snd_pcm_sw_params_set_avail_min = snd_pcm_sw_params_set_avail_min;
+ ma_snd_pcm_sw_params_set_start_threshold_proc _snd_pcm_sw_params_set_start_threshold = snd_pcm_sw_params_set_start_threshold;
+ ma_snd_pcm_sw_params_set_stop_threshold_proc _snd_pcm_sw_params_set_stop_threshold = snd_pcm_sw_params_set_stop_threshold;
+ ma_snd_pcm_sw_params_proc _snd_pcm_sw_params = snd_pcm_sw_params;
+ ma_snd_pcm_format_mask_sizeof_proc _snd_pcm_format_mask_sizeof = snd_pcm_format_mask_sizeof;
+ ma_snd_pcm_format_mask_test_proc _snd_pcm_format_mask_test = snd_pcm_format_mask_test;
+ ma_snd_pcm_get_chmap_proc _snd_pcm_get_chmap = snd_pcm_get_chmap;
+ ma_snd_pcm_state_proc _snd_pcm_state = snd_pcm_state;
+ ma_snd_pcm_prepare_proc _snd_pcm_prepare = snd_pcm_prepare;
+ ma_snd_pcm_start_proc _snd_pcm_start = snd_pcm_start;
+ ma_snd_pcm_drop_proc _snd_pcm_drop = snd_pcm_drop;
+ ma_snd_pcm_drain_proc _snd_pcm_drain = snd_pcm_drain;
+ ma_snd_pcm_reset_proc _snd_pcm_reset = snd_pcm_reset;
+ ma_snd_device_name_hint_proc _snd_device_name_hint = snd_device_name_hint;
+ ma_snd_device_name_get_hint_proc _snd_device_name_get_hint = snd_device_name_get_hint;
+ ma_snd_card_get_index_proc _snd_card_get_index = snd_card_get_index;
+ ma_snd_device_name_free_hint_proc _snd_device_name_free_hint = snd_device_name_free_hint;
+ ma_snd_pcm_mmap_begin_proc _snd_pcm_mmap_begin = snd_pcm_mmap_begin;
+ ma_snd_pcm_mmap_commit_proc _snd_pcm_mmap_commit = snd_pcm_mmap_commit;
+ ma_snd_pcm_recover_proc _snd_pcm_recover = snd_pcm_recover;
+ ma_snd_pcm_readi_proc _snd_pcm_readi = snd_pcm_readi;
+ ma_snd_pcm_writei_proc _snd_pcm_writei = snd_pcm_writei;
+ ma_snd_pcm_avail_proc _snd_pcm_avail = snd_pcm_avail;
+ ma_snd_pcm_avail_update_proc _snd_pcm_avail_update = snd_pcm_avail_update;
+ ma_snd_pcm_wait_proc _snd_pcm_wait = snd_pcm_wait;
+ ma_snd_pcm_nonblock_proc _snd_pcm_nonblock = snd_pcm_nonblock;
+ ma_snd_pcm_info_proc _snd_pcm_info = snd_pcm_info;
+ ma_snd_pcm_info_sizeof_proc _snd_pcm_info_sizeof = snd_pcm_info_sizeof;
+ ma_snd_pcm_info_get_name_proc _snd_pcm_info_get_name = snd_pcm_info_get_name;
+ ma_snd_pcm_poll_descriptors _snd_pcm_poll_descriptors = snd_pcm_poll_descriptors;
+ ma_snd_pcm_poll_descriptors_count _snd_pcm_poll_descriptors_count = snd_pcm_poll_descriptors_count;
+ ma_snd_pcm_poll_descriptors_revents _snd_pcm_poll_descriptors_revents = snd_pcm_poll_descriptors_revents;
+ ma_snd_config_update_free_global_proc _snd_config_update_free_global = snd_config_update_free_global;
+
+ pContext->alsa.snd_pcm_open = (ma_proc)_snd_pcm_open;
+ pContext->alsa.snd_pcm_close = (ma_proc)_snd_pcm_close;
+ pContext->alsa.snd_pcm_hw_params_sizeof = (ma_proc)_snd_pcm_hw_params_sizeof;
+ pContext->alsa.snd_pcm_hw_params_any = (ma_proc)_snd_pcm_hw_params_any;
+ pContext->alsa.snd_pcm_hw_params_set_format = (ma_proc)_snd_pcm_hw_params_set_format;
+ pContext->alsa.snd_pcm_hw_params_set_format_first = (ma_proc)_snd_pcm_hw_params_set_format_first;
+ pContext->alsa.snd_pcm_hw_params_get_format_mask = (ma_proc)_snd_pcm_hw_params_get_format_mask;
+ pContext->alsa.snd_pcm_hw_params_set_channels = (ma_proc)_snd_pcm_hw_params_set_channels;
+ pContext->alsa.snd_pcm_hw_params_set_channels_near = (ma_proc)_snd_pcm_hw_params_set_channels_near;
+ pContext->alsa.snd_pcm_hw_params_set_channels_minmax = (ma_proc)_snd_pcm_hw_params_set_channels_minmax;
+ pContext->alsa.snd_pcm_hw_params_set_rate_resample = (ma_proc)_snd_pcm_hw_params_set_rate_resample;
+ pContext->alsa.snd_pcm_hw_params_set_rate = (ma_proc)_snd_pcm_hw_params_set_rate;
+ pContext->alsa.snd_pcm_hw_params_set_rate_near = (ma_proc)_snd_pcm_hw_params_set_rate_near;
+ pContext->alsa.snd_pcm_hw_params_set_buffer_size_near = (ma_proc)_snd_pcm_hw_params_set_buffer_size_near;
+ pContext->alsa.snd_pcm_hw_params_set_periods_near = (ma_proc)_snd_pcm_hw_params_set_periods_near;
+ pContext->alsa.snd_pcm_hw_params_set_access = (ma_proc)_snd_pcm_hw_params_set_access;
+ pContext->alsa.snd_pcm_hw_params_get_format = (ma_proc)_snd_pcm_hw_params_get_format;
+ pContext->alsa.snd_pcm_hw_params_get_channels = (ma_proc)_snd_pcm_hw_params_get_channels;
+ pContext->alsa.snd_pcm_hw_params_get_channels_min = (ma_proc)_snd_pcm_hw_params_get_channels_min;
+ pContext->alsa.snd_pcm_hw_params_get_channels_max = (ma_proc)_snd_pcm_hw_params_get_channels_max;
+ pContext->alsa.snd_pcm_hw_params_get_rate = (ma_proc)_snd_pcm_hw_params_get_rate;
+ pContext->alsa.snd_pcm_hw_params_get_rate_min = (ma_proc)_snd_pcm_hw_params_get_rate_min;
+ pContext->alsa.snd_pcm_hw_params_get_rate_max = (ma_proc)_snd_pcm_hw_params_get_rate_max;
+ pContext->alsa.snd_pcm_hw_params_get_buffer_size = (ma_proc)_snd_pcm_hw_params_get_buffer_size;
+ pContext->alsa.snd_pcm_hw_params_get_periods = (ma_proc)_snd_pcm_hw_params_get_periods;
+ pContext->alsa.snd_pcm_hw_params_get_access = (ma_proc)_snd_pcm_hw_params_get_access;
+ pContext->alsa.snd_pcm_hw_params_test_format = (ma_proc)_snd_pcm_hw_params_test_format;
+ pContext->alsa.snd_pcm_hw_params_test_channels = (ma_proc)_snd_pcm_hw_params_test_channels;
+ pContext->alsa.snd_pcm_hw_params_test_rate = (ma_proc)_snd_pcm_hw_params_test_rate;
+ pContext->alsa.snd_pcm_hw_params = (ma_proc)_snd_pcm_hw_params;
+ pContext->alsa.snd_pcm_sw_params_sizeof = (ma_proc)_snd_pcm_sw_params_sizeof;
+ pContext->alsa.snd_pcm_sw_params_current = (ma_proc)_snd_pcm_sw_params_current;
+ pContext->alsa.snd_pcm_sw_params_get_boundary = (ma_proc)_snd_pcm_sw_params_get_boundary;
+ pContext->alsa.snd_pcm_sw_params_set_avail_min = (ma_proc)_snd_pcm_sw_params_set_avail_min;
+ pContext->alsa.snd_pcm_sw_params_set_start_threshold = (ma_proc)_snd_pcm_sw_params_set_start_threshold;
+ pContext->alsa.snd_pcm_sw_params_set_stop_threshold = (ma_proc)_snd_pcm_sw_params_set_stop_threshold;
+ pContext->alsa.snd_pcm_sw_params = (ma_proc)_snd_pcm_sw_params;
+ pContext->alsa.snd_pcm_format_mask_sizeof = (ma_proc)_snd_pcm_format_mask_sizeof;
+ pContext->alsa.snd_pcm_format_mask_test = (ma_proc)_snd_pcm_format_mask_test;
+ pContext->alsa.snd_pcm_get_chmap = (ma_proc)_snd_pcm_get_chmap;
+ pContext->alsa.snd_pcm_state = (ma_proc)_snd_pcm_state;
+ pContext->alsa.snd_pcm_prepare = (ma_proc)_snd_pcm_prepare;
+ pContext->alsa.snd_pcm_start = (ma_proc)_snd_pcm_start;
+ pContext->alsa.snd_pcm_drop = (ma_proc)_snd_pcm_drop;
+ pContext->alsa.snd_pcm_drain = (ma_proc)_snd_pcm_drain;
+ pContext->alsa.snd_pcm_reset = (ma_proc)_snd_pcm_reset;
+ pContext->alsa.snd_device_name_hint = (ma_proc)_snd_device_name_hint;
+ pContext->alsa.snd_device_name_get_hint = (ma_proc)_snd_device_name_get_hint;
+ pContext->alsa.snd_card_get_index = (ma_proc)_snd_card_get_index;
+ pContext->alsa.snd_device_name_free_hint = (ma_proc)_snd_device_name_free_hint;
+ pContext->alsa.snd_pcm_mmap_begin = (ma_proc)_snd_pcm_mmap_begin;
+ pContext->alsa.snd_pcm_mmap_commit = (ma_proc)_snd_pcm_mmap_commit;
+ pContext->alsa.snd_pcm_recover = (ma_proc)_snd_pcm_recover;
+ pContext->alsa.snd_pcm_readi = (ma_proc)_snd_pcm_readi;
+ pContext->alsa.snd_pcm_writei = (ma_proc)_snd_pcm_writei;
+ pContext->alsa.snd_pcm_avail = (ma_proc)_snd_pcm_avail;
+ pContext->alsa.snd_pcm_avail_update = (ma_proc)_snd_pcm_avail_update;
+ pContext->alsa.snd_pcm_wait = (ma_proc)_snd_pcm_wait;
+ pContext->alsa.snd_pcm_nonblock = (ma_proc)_snd_pcm_nonblock;
+ pContext->alsa.snd_pcm_info = (ma_proc)_snd_pcm_info;
+ pContext->alsa.snd_pcm_info_sizeof = (ma_proc)_snd_pcm_info_sizeof;
+ pContext->alsa.snd_pcm_info_get_name = (ma_proc)_snd_pcm_info_get_name;
+ pContext->alsa.snd_pcm_poll_descriptors = (ma_proc)_snd_pcm_poll_descriptors;
+ pContext->alsa.snd_pcm_poll_descriptors_count = (ma_proc)_snd_pcm_poll_descriptors_count;
+ pContext->alsa.snd_pcm_poll_descriptors_revents = (ma_proc)_snd_pcm_poll_descriptors_revents;
+ pContext->alsa.snd_config_update_free_global = (ma_proc)_snd_config_update_free_global;
+#endif
+
+ pContext->alsa.useVerboseDeviceEnumeration = pConfig->alsa.useVerboseDeviceEnumeration;
+
+ result = ma_mutex_init(&pContext->alsa.internalDeviceEnumLock);
+ if (result != MA_SUCCESS) {
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[ALSA] WARNING: Failed to initialize mutex for internal device enumeration.");
+ return result;
+ }
+
+ pCallbacks->onContextInit = ma_context_init__alsa;
+ pCallbacks->onContextUninit = ma_context_uninit__alsa;
+ pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__alsa;
+ pCallbacks->onContextGetDeviceInfo = ma_context_get_device_info__alsa;
+ pCallbacks->onDeviceInit = ma_device_init__alsa;
+ pCallbacks->onDeviceUninit = ma_device_uninit__alsa;
+ pCallbacks->onDeviceStart = ma_device_start__alsa;
+ pCallbacks->onDeviceStop = ma_device_stop__alsa;
+ pCallbacks->onDeviceRead = ma_device_read__alsa;
+ pCallbacks->onDeviceWrite = ma_device_write__alsa;
+ pCallbacks->onDeviceDataLoop = NULL;
+ pCallbacks->onDeviceDataLoopWakeup = ma_device_data_loop_wakeup__alsa;
+
+ return MA_SUCCESS;
+}
+#endif /* ALSA */
+
+
+
+/******************************************************************************
+
+PulseAudio Backend
+
+******************************************************************************/
+#ifdef MA_HAS_PULSEAUDIO
+/*
+The PulseAudio API, along with Apple's Core Audio, is the worst of the mainstream audio APIs. This is a brief description of what's going on
+in the PulseAudio backend. I apologize if this gets a bit ranty for your liking - you might want to skip this discussion.
+
+PulseAudio has something they call the "Simple API", which unfortunately isn't suitable for miniaudio. I've not seen anywhere where it
+allows you to enumerate over devices, nor does it seem to support the ability to stop and start streams. Looking at the documentation, it
+appears as though the stream is constantly running and you prevent sound from being emitted or captured by simply not calling the read or
+write functions. This is not a professional solution as it would be much better to *actually* stop the underlying stream. Perhaps the
+simple API has some smarts to do this automatically, but I'm not sure. Another limitation with the simple API is that it seems inefficient
+when you want to have multiple streams to a single context. For these reasons, miniaudio is not using the simple API.
+
+Since we're not using the simple API, we're left with the asynchronous API as our only other option. And boy, is this where it starts to
+get fun, and I don't mean that in a good way...
+
+The problems start with the very name of the API - "asynchronous". Yes, this is an asynchronous oriented API which means your commands
+don't immediately take effect. You instead need to issue your commands, and then wait for them to complete. The waiting mechanism is
+enabled through the use of a "main loop". In the asynchronous API you cannot get away from the main loop, and the main loop is where almost
+all of PulseAudio's problems stem from.
+
+When you first initialize PulseAudio you need an object referred to as "main loop". You can implement this yourself by defining your own
+vtable, but it's much easier to just use one of the built-in main loop implementations. There's two generic implementations called
+pa_mainloop and pa_threaded_mainloop, and another implementation specific to GLib called pa_glib_mainloop. We're using pa_threaded_mainloop
+because it simplifies management of the worker thread. The idea of the main loop object is pretty self explanatory - you're supposed to use
+it to implement a worker thread which runs in a loop. The main loop is where operations are actually executed.
+
+To initialize the main loop, you just use `pa_threaded_mainloop_new()`. This is the first function you'll call. You can then get a pointer
+to the vtable with `pa_threaded_mainloop_get_api()` (the main loop vtable is called `pa_mainloop_api`). Again, you can bypass the threaded
+main loop object entirely and just implement `pa_mainloop_api` directly, but there's no need for it unless you're doing something extremely
+specialized such as if you want to integrate it into your application's existing main loop infrastructure.
+
+(EDIT 2021-01-26: miniaudio is no longer using `pa_threaded_mainloop` due to this issue: https://github.com/mackron/miniaudio/issues/262.
+It is now using `pa_mainloop` which turns out to be a simpler solution anyway. The rest of this rant still applies, however.)
+
+Once you have your main loop vtable (the `pa_mainloop_api` object) you can create the PulseAudio context. This is very similar to
+miniaudio's context and they map to each other quite well. You have one context to many streams, which is basically the same as miniaudio's
+one `ma_context` to many `ma_device`s. Here's where it starts to get annoying, however. When you first create the PulseAudio context, which
+is done with `pa_context_new()`, it's not actually connected to anything. When you connect, you call `pa_context_connect()`. However, if
+you remember, PulseAudio is an asynchronous API. That means you cannot just assume the context is connected after `pa_context_connect()`
+has returned. You instead need to wait for it to connect. To do this, you need to either wait for a callback to get fired, which you can
+set with `pa_context_set_state_callback()`, or you can continuously poll the context's state. Either way, you need to run this in a loop.
+All objects from here out are created from the context, and, I believe, you can't be creating these objects until the context is connected.
+This waiting loop is therefore unavoidable. In order for the waiting to ever complete, however, the main loop needs to be running. Before
+attempting to connect the context, the main loop needs to be started with `pa_threaded_mainloop_start()`.
+
+The reason for this asynchronous design is to support cases where you're connecting to a remote server, say through a local network or an
+internet connection. However, the *VAST* majority of cases don't involve this at all - they just connect to a local "server" running on the
+host machine. The fact that this would be the default rather than making `pa_context_connect()` synchronous tends to boggle the mind.
+
+Once the context has been created and connected you can start creating a stream. A PulseAudio stream is analogous to miniaudio's device.
+The initialization of a stream is fairly standard - you configure some attributes (analogous to miniaudio's device config) and then call
+`pa_stream_new()` to actually create it. Here is where we start to get into "operations". When configuring the stream, you can get
+information about the source (such as sample format, sample rate, etc.), however it's not synchronous. Instead, a `pa_operation` object
+is returned from `pa_context_get_source_info_by_name()` (capture) or `pa_context_get_sink_info_by_name()` (playback). Then, you need to
+run a loop (again!) to wait for the operation to complete which you can determine via a callback or polling, just like we did with the
+context. Then, as an added bonus, you need to decrement the reference counter of the `pa_operation` object to ensure memory is cleaned up.
+All of that just to retrieve basic information about a device!
+
+Once the basic information about the device has been retrieved, miniaudio can now create the stream with `pa_stream_new()`. Like the
+context, this needs to be connected. But we need to be careful here, because we're now about to introduce one of the most horrific design
+choices in PulseAudio.
+
+PulseAudio allows you to specify a callback that is fired when data can be written to or read from a stream. The language is important here
+because PulseAudio takes it literally, specifically the "can be". You would think these callbacks would be appropriate as the place for
+writing and reading data to and from the stream, and that would be right, except when it's not. When you initialize the stream, you can
+set a flag that tells PulseAudio to not start the stream automatically. This is required because miniaudio does not auto-start devices
+straight after initialization - you need to call `ma_device_start()` manually. The problem is that even when this flag is specified,
+PulseAudio will immediately fire its write or read callback. This is *technically* correct (based on the wording in the documentation)
+because indeed, data *can* be written at this point. The problem is that it's not *practical*. It makes sense that the write/read callback
+would be where a program will want to write or read data to or from the stream, but when it's called before the application has even
+requested that the stream be started, it's just not practical because the program probably isn't ready for any kind of data delivery at
+that point (it may still need to load files or whatnot). Instead, this callback should only be fired when the application requests the
+stream be started which is how it works with literally *every* other callback-based audio API. Since miniaudio forbids firing of the data
+callback until the device has been started (as it should be with *all* callback based APIs), logic needs to be added to ensure miniaudio
+doesn't just blindly fire the application-defined data callback from within the PulseAudio callback before the stream has actually been
+started. The device state is used for this - if the state is anything other than `ma_device_state_starting` or `ma_device_state_started`, the main data
+callback is not fired.
+
+This, unfortunately, is not the end of the problems with the PulseAudio write callback. Any normal callback based audio API will
+continuously fire the callback at regular intervals based on the size of the internal buffer. This will only ever be fired when the device
+is running, and will be fired regardless of whether or not the user actually wrote anything to the device/stream. This is not the case in
+PulseAudio. In PulseAudio, the data callback will *only* be called if you wrote something to it previously. That means, if you don't call
+`pa_stream_write()`, the callback will not get fired. On the surface you wouldn't think this would matter because you should be always
+writing data, and if you don't have anything to write, just write silence. That's fine until you want to drain the stream. You see, if
+you're continuously writing data to the stream, the stream will never get drained! That means in order to drain the stream, you need to
+*not* write data to it! But remember, when you don't write data to the stream, the callback won't get fired again! Why is draining
+important? Because that's how we've defined stopping to work in miniaudio. In miniaudio, stopping the device requires it to be drained
+before returning from ma_device_stop(). So we've stopped the device, which requires us to drain, but draining requires us to *not* write
+data to the stream (or else it won't ever complete draining), but not writing to the stream means the callback won't get fired again!
+
+This becomes a problem when stopping and then restarting the device. When the device is stopped, it's drained, which requires us to *not*
+write anything to the stream. But then, since we didn't write anything to it, the write callback will *never* get called again if we just
+resume the stream naively. This means that starting the stream requires us to write data to the stream from outside the callback. This
+disconnect is something PulseAudio has got seriously wrong - there should only ever be a single source of data delivery, that being the
+callback. (I have tried using `pa_stream_flush()` to trigger the write callback to fire, but this just doesn't work for some reason.)
+
+Once you've created the stream, you need to connect it which involves the whole waiting procedure. This is the same process as the context,
+only this time you'll poll for the state with `pa_stream_get_state()`. The starting and stopping of a stream is referred to as
+"corking" in PulseAudio. The analogy is corking a barrel. To start the stream, you uncork it, to stop it you cork it. Personally I think
+it's silly - why would you not just call it "starting" and "stopping" like any other normal audio API? Anyway, the act of corking is, you
+guessed it, asynchronous. This means you'll need our waiting loop as usual. Again, why this asynchronous design is the default is
+absolutely beyond me. Would it really be that hard to just make it run synchronously?
+
+Teardown is pretty simple (what?!). It's just a matter of calling the relevant `_unref()` function on each object in reverse order that
+they were initialized in.
+
+That's about it from the PulseAudio side. A bit ranty, I know, but they really need to fix that main loop and callback system. They're
+embarrassingly impractical. The main loop thing is an easy fix - have synchronous versions of all APIs. If an application wants these to
+run asynchronously, they can execute them in a separate thread themselves. The desire to run these asynchronously is such a niche
+requirement - it makes no sense to make it the default. The stream write callback needs to be changed, or an alternative provided, that is
+constantly fired, regardless of whether or not `pa_stream_write()` has been called, and it needs to take a pointer to a buffer as a
+parameter which the program just writes to directly rather than having to call `pa_stream_writable_size()` and `pa_stream_write()`. These
+changes alone will change PulseAudio from one of the worst audio APIs to one of the best.
+*/
+
+
+/*
+It is assumed pulseaudio.h is available when linking at compile time. When linking at compile time, we use the declarations in the header
+to check for type safety. We cannot do this when linking at run time because the header might not be available.
+*/
+#ifdef MA_NO_RUNTIME_LINKING
+
+/* pulseaudio.h marks some functions with "inline" which isn't always supported. Need to emulate it. */
+#if !defined(__cplusplus)
+ #if defined(__STRICT_ANSI__)
+ #if !defined(inline)
+ #define inline __inline__ __attribute__((always_inline))
+ #define MA_INLINE_DEFINED
+ #endif
+ #endif
+#endif
+#include <pulse/pulseaudio.h>
+#if defined(MA_INLINE_DEFINED)
+ #undef inline
+ #undef MA_INLINE_DEFINED
+#endif
+
+#define MA_PA_OK PA_OK
+#define MA_PA_ERR_ACCESS PA_ERR_ACCESS
+#define MA_PA_ERR_INVALID PA_ERR_INVALID
+#define MA_PA_ERR_NOENTITY PA_ERR_NOENTITY
+#define MA_PA_ERR_NOTSUPPORTED PA_ERR_NOTSUPPORTED
+
+#define MA_PA_CHANNELS_MAX PA_CHANNELS_MAX
+#define MA_PA_RATE_MAX PA_RATE_MAX
+
+typedef pa_context_flags_t ma_pa_context_flags_t;
+#define MA_PA_CONTEXT_NOFLAGS PA_CONTEXT_NOFLAGS
+#define MA_PA_CONTEXT_NOAUTOSPAWN PA_CONTEXT_NOAUTOSPAWN
+#define MA_PA_CONTEXT_NOFAIL PA_CONTEXT_NOFAIL
+
+typedef pa_stream_flags_t ma_pa_stream_flags_t;
+#define MA_PA_STREAM_NOFLAGS PA_STREAM_NOFLAGS
+#define MA_PA_STREAM_START_CORKED PA_STREAM_START_CORKED
+#define MA_PA_STREAM_INTERPOLATE_TIMING PA_STREAM_INTERPOLATE_TIMING
+#define MA_PA_STREAM_NOT_MONOTONIC PA_STREAM_NOT_MONOTONIC
+#define MA_PA_STREAM_AUTO_TIMING_UPDATE PA_STREAM_AUTO_TIMING_UPDATE
+#define MA_PA_STREAM_NO_REMAP_CHANNELS PA_STREAM_NO_REMAP_CHANNELS
+#define MA_PA_STREAM_NO_REMIX_CHANNELS PA_STREAM_NO_REMIX_CHANNELS
+#define MA_PA_STREAM_FIX_FORMAT PA_STREAM_FIX_FORMAT
+#define MA_PA_STREAM_FIX_RATE PA_STREAM_FIX_RATE
+#define MA_PA_STREAM_FIX_CHANNELS PA_STREAM_FIX_CHANNELS
+#define MA_PA_STREAM_DONT_MOVE PA_STREAM_DONT_MOVE
+#define MA_PA_STREAM_VARIABLE_RATE PA_STREAM_VARIABLE_RATE
+#define MA_PA_STREAM_PEAK_DETECT PA_STREAM_PEAK_DETECT
+#define MA_PA_STREAM_START_MUTED PA_STREAM_START_MUTED
+#define MA_PA_STREAM_ADJUST_LATENCY PA_STREAM_ADJUST_LATENCY
+#define MA_PA_STREAM_EARLY_REQUESTS PA_STREAM_EARLY_REQUESTS
+#define MA_PA_STREAM_DONT_INHIBIT_AUTO_SUSPEND PA_STREAM_DONT_INHIBIT_AUTO_SUSPEND
+#define MA_PA_STREAM_START_UNMUTED PA_STREAM_START_UNMUTED
+#define MA_PA_STREAM_FAIL_ON_SUSPEND PA_STREAM_FAIL_ON_SUSPEND
+#define MA_PA_STREAM_RELATIVE_VOLUME PA_STREAM_RELATIVE_VOLUME
+#define MA_PA_STREAM_PASSTHROUGH PA_STREAM_PASSTHROUGH
+
+typedef pa_sink_flags_t ma_pa_sink_flags_t;
+#define MA_PA_SINK_NOFLAGS PA_SINK_NOFLAGS
+#define MA_PA_SINK_HW_VOLUME_CTRL PA_SINK_HW_VOLUME_CTRL
+#define MA_PA_SINK_LATENCY PA_SINK_LATENCY
+#define MA_PA_SINK_HARDWARE PA_SINK_HARDWARE
+#define MA_PA_SINK_NETWORK PA_SINK_NETWORK
+#define MA_PA_SINK_HW_MUTE_CTRL PA_SINK_HW_MUTE_CTRL
+#define MA_PA_SINK_DECIBEL_VOLUME PA_SINK_DECIBEL_VOLUME
+#define MA_PA_SINK_FLAT_VOLUME PA_SINK_FLAT_VOLUME
+#define MA_PA_SINK_DYNAMIC_LATENCY PA_SINK_DYNAMIC_LATENCY
+#define MA_PA_SINK_SET_FORMATS PA_SINK_SET_FORMATS
+
+typedef pa_source_flags_t ma_pa_source_flags_t;
+#define MA_PA_SOURCE_NOFLAGS PA_SOURCE_NOFLAGS
+#define MA_PA_SOURCE_HW_VOLUME_CTRL PA_SOURCE_HW_VOLUME_CTRL
+#define MA_PA_SOURCE_LATENCY PA_SOURCE_LATENCY
+#define MA_PA_SOURCE_HARDWARE PA_SOURCE_HARDWARE
+#define MA_PA_SOURCE_NETWORK PA_SOURCE_NETWORK
+#define MA_PA_SOURCE_HW_MUTE_CTRL PA_SOURCE_HW_MUTE_CTRL
+#define MA_PA_SOURCE_DECIBEL_VOLUME PA_SOURCE_DECIBEL_VOLUME
+#define MA_PA_SOURCE_DYNAMIC_LATENCY PA_SOURCE_DYNAMIC_LATENCY
+#define MA_PA_SOURCE_FLAT_VOLUME PA_SOURCE_FLAT_VOLUME
+
+typedef pa_context_state_t ma_pa_context_state_t;
+#define MA_PA_CONTEXT_UNCONNECTED PA_CONTEXT_UNCONNECTED
+#define MA_PA_CONTEXT_CONNECTING PA_CONTEXT_CONNECTING
+#define MA_PA_CONTEXT_AUTHORIZING PA_CONTEXT_AUTHORIZING
+#define MA_PA_CONTEXT_SETTING_NAME PA_CONTEXT_SETTING_NAME
+#define MA_PA_CONTEXT_READY PA_CONTEXT_READY
+#define MA_PA_CONTEXT_FAILED PA_CONTEXT_FAILED
+#define MA_PA_CONTEXT_TERMINATED PA_CONTEXT_TERMINATED
+
+typedef pa_stream_state_t ma_pa_stream_state_t;
+#define MA_PA_STREAM_UNCONNECTED PA_STREAM_UNCONNECTED
+#define MA_PA_STREAM_CREATING PA_STREAM_CREATING
+#define MA_PA_STREAM_READY PA_STREAM_READY
+#define MA_PA_STREAM_FAILED PA_STREAM_FAILED
+#define MA_PA_STREAM_TERMINATED PA_STREAM_TERMINATED
+
+typedef pa_operation_state_t ma_pa_operation_state_t;
+#define MA_PA_OPERATION_RUNNING PA_OPERATION_RUNNING
+#define MA_PA_OPERATION_DONE PA_OPERATION_DONE
+#define MA_PA_OPERATION_CANCELLED PA_OPERATION_CANCELLED
+
+typedef pa_sink_state_t ma_pa_sink_state_t;
+#define MA_PA_SINK_INVALID_STATE PA_SINK_INVALID_STATE
+#define MA_PA_SINK_RUNNING PA_SINK_RUNNING
+#define MA_PA_SINK_IDLE PA_SINK_IDLE
+#define MA_PA_SINK_SUSPENDED PA_SINK_SUSPENDED
+
+typedef pa_source_state_t ma_pa_source_state_t;
+#define MA_PA_SOURCE_INVALID_STATE PA_SOURCE_INVALID_STATE
+#define MA_PA_SOURCE_RUNNING PA_SOURCE_RUNNING
+#define MA_PA_SOURCE_IDLE PA_SOURCE_IDLE
+#define MA_PA_SOURCE_SUSPENDED PA_SOURCE_SUSPENDED
+
+typedef pa_seek_mode_t ma_pa_seek_mode_t;
+#define MA_PA_SEEK_RELATIVE PA_SEEK_RELATIVE
+#define MA_PA_SEEK_ABSOLUTE PA_SEEK_ABSOLUTE
+#define MA_PA_SEEK_RELATIVE_ON_READ PA_SEEK_RELATIVE_ON_READ
+#define MA_PA_SEEK_RELATIVE_END PA_SEEK_RELATIVE_END
+
+typedef pa_channel_position_t ma_pa_channel_position_t;
+#define MA_PA_CHANNEL_POSITION_INVALID PA_CHANNEL_POSITION_INVALID
+#define MA_PA_CHANNEL_POSITION_MONO PA_CHANNEL_POSITION_MONO
+#define MA_PA_CHANNEL_POSITION_FRONT_LEFT PA_CHANNEL_POSITION_FRONT_LEFT
+#define MA_PA_CHANNEL_POSITION_FRONT_RIGHT PA_CHANNEL_POSITION_FRONT_RIGHT
+#define MA_PA_CHANNEL_POSITION_FRONT_CENTER PA_CHANNEL_POSITION_FRONT_CENTER
+#define MA_PA_CHANNEL_POSITION_REAR_CENTER PA_CHANNEL_POSITION_REAR_CENTER
+#define MA_PA_CHANNEL_POSITION_REAR_LEFT PA_CHANNEL_POSITION_REAR_LEFT
+#define MA_PA_CHANNEL_POSITION_REAR_RIGHT PA_CHANNEL_POSITION_REAR_RIGHT
+#define MA_PA_CHANNEL_POSITION_LFE PA_CHANNEL_POSITION_LFE
+#define MA_PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER
+#define MA_PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER
+#define MA_PA_CHANNEL_POSITION_SIDE_LEFT PA_CHANNEL_POSITION_SIDE_LEFT
+#define MA_PA_CHANNEL_POSITION_SIDE_RIGHT PA_CHANNEL_POSITION_SIDE_RIGHT
+#define MA_PA_CHANNEL_POSITION_AUX0 PA_CHANNEL_POSITION_AUX0
+#define MA_PA_CHANNEL_POSITION_AUX1 PA_CHANNEL_POSITION_AUX1
+#define MA_PA_CHANNEL_POSITION_AUX2 PA_CHANNEL_POSITION_AUX2
+#define MA_PA_CHANNEL_POSITION_AUX3 PA_CHANNEL_POSITION_AUX3
+#define MA_PA_CHANNEL_POSITION_AUX4 PA_CHANNEL_POSITION_AUX4
+#define MA_PA_CHANNEL_POSITION_AUX5 PA_CHANNEL_POSITION_AUX5
+#define MA_PA_CHANNEL_POSITION_AUX6 PA_CHANNEL_POSITION_AUX6
+#define MA_PA_CHANNEL_POSITION_AUX7 PA_CHANNEL_POSITION_AUX7
+#define MA_PA_CHANNEL_POSITION_AUX8 PA_CHANNEL_POSITION_AUX8
+#define MA_PA_CHANNEL_POSITION_AUX9 PA_CHANNEL_POSITION_AUX9
+#define MA_PA_CHANNEL_POSITION_AUX10 PA_CHANNEL_POSITION_AUX10
+#define MA_PA_CHANNEL_POSITION_AUX11 PA_CHANNEL_POSITION_AUX11
+#define MA_PA_CHANNEL_POSITION_AUX12 PA_CHANNEL_POSITION_AUX12
+#define MA_PA_CHANNEL_POSITION_AUX13 PA_CHANNEL_POSITION_AUX13
+#define MA_PA_CHANNEL_POSITION_AUX14 PA_CHANNEL_POSITION_AUX14
+#define MA_PA_CHANNEL_POSITION_AUX15 PA_CHANNEL_POSITION_AUX15
+#define MA_PA_CHANNEL_POSITION_AUX16 PA_CHANNEL_POSITION_AUX16
+#define MA_PA_CHANNEL_POSITION_AUX17 PA_CHANNEL_POSITION_AUX17
+#define MA_PA_CHANNEL_POSITION_AUX18 PA_CHANNEL_POSITION_AUX18
+#define MA_PA_CHANNEL_POSITION_AUX19 PA_CHANNEL_POSITION_AUX19
+#define MA_PA_CHANNEL_POSITION_AUX20 PA_CHANNEL_POSITION_AUX20
+#define MA_PA_CHANNEL_POSITION_AUX21 PA_CHANNEL_POSITION_AUX21
+#define MA_PA_CHANNEL_POSITION_AUX22 PA_CHANNEL_POSITION_AUX22
+#define MA_PA_CHANNEL_POSITION_AUX23 PA_CHANNEL_POSITION_AUX23
+#define MA_PA_CHANNEL_POSITION_AUX24 PA_CHANNEL_POSITION_AUX24
+#define MA_PA_CHANNEL_POSITION_AUX25 PA_CHANNEL_POSITION_AUX25
+#define MA_PA_CHANNEL_POSITION_AUX26 PA_CHANNEL_POSITION_AUX26
+#define MA_PA_CHANNEL_POSITION_AUX27 PA_CHANNEL_POSITION_AUX27
+#define MA_PA_CHANNEL_POSITION_AUX28 PA_CHANNEL_POSITION_AUX28
+#define MA_PA_CHANNEL_POSITION_AUX29 PA_CHANNEL_POSITION_AUX29
+#define MA_PA_CHANNEL_POSITION_AUX30 PA_CHANNEL_POSITION_AUX30
+#define MA_PA_CHANNEL_POSITION_AUX31 PA_CHANNEL_POSITION_AUX31
+#define MA_PA_CHANNEL_POSITION_TOP_CENTER PA_CHANNEL_POSITION_TOP_CENTER
+#define MA_PA_CHANNEL_POSITION_TOP_FRONT_LEFT PA_CHANNEL_POSITION_TOP_FRONT_LEFT
+#define MA_PA_CHANNEL_POSITION_TOP_FRONT_RIGHT PA_CHANNEL_POSITION_TOP_FRONT_RIGHT
+#define MA_PA_CHANNEL_POSITION_TOP_FRONT_CENTER PA_CHANNEL_POSITION_TOP_FRONT_CENTER
+#define MA_PA_CHANNEL_POSITION_TOP_REAR_LEFT PA_CHANNEL_POSITION_TOP_REAR_LEFT
+#define MA_PA_CHANNEL_POSITION_TOP_REAR_RIGHT PA_CHANNEL_POSITION_TOP_REAR_RIGHT
+#define MA_PA_CHANNEL_POSITION_TOP_REAR_CENTER PA_CHANNEL_POSITION_TOP_REAR_CENTER
+#define MA_PA_CHANNEL_POSITION_LEFT PA_CHANNEL_POSITION_LEFT
+#define MA_PA_CHANNEL_POSITION_RIGHT PA_CHANNEL_POSITION_RIGHT
+#define MA_PA_CHANNEL_POSITION_CENTER PA_CHANNEL_POSITION_CENTER
+#define MA_PA_CHANNEL_POSITION_SUBWOOFER PA_CHANNEL_POSITION_SUBWOOFER
+
+typedef pa_channel_map_def_t ma_pa_channel_map_def_t;
+#define MA_PA_CHANNEL_MAP_AIFF PA_CHANNEL_MAP_AIFF
+#define MA_PA_CHANNEL_MAP_ALSA PA_CHANNEL_MAP_ALSA
+#define MA_PA_CHANNEL_MAP_AUX PA_CHANNEL_MAP_AUX
+#define MA_PA_CHANNEL_MAP_WAVEEX PA_CHANNEL_MAP_WAVEEX
+#define MA_PA_CHANNEL_MAP_OSS PA_CHANNEL_MAP_OSS
+#define MA_PA_CHANNEL_MAP_DEFAULT PA_CHANNEL_MAP_DEFAULT
+
+typedef pa_sample_format_t ma_pa_sample_format_t;
+#define MA_PA_SAMPLE_INVALID PA_SAMPLE_INVALID
+#define MA_PA_SAMPLE_U8 PA_SAMPLE_U8
+#define MA_PA_SAMPLE_ALAW PA_SAMPLE_ALAW
+#define MA_PA_SAMPLE_ULAW PA_SAMPLE_ULAW
+#define MA_PA_SAMPLE_S16LE PA_SAMPLE_S16LE
+#define MA_PA_SAMPLE_S16BE PA_SAMPLE_S16BE
+#define MA_PA_SAMPLE_FLOAT32LE PA_SAMPLE_FLOAT32LE
+#define MA_PA_SAMPLE_FLOAT32BE PA_SAMPLE_FLOAT32BE
+#define MA_PA_SAMPLE_S32LE PA_SAMPLE_S32LE
+#define MA_PA_SAMPLE_S32BE PA_SAMPLE_S32BE
+#define MA_PA_SAMPLE_S24LE PA_SAMPLE_S24LE
+#define MA_PA_SAMPLE_S24BE PA_SAMPLE_S24BE
+#define MA_PA_SAMPLE_S24_32LE PA_SAMPLE_S24_32LE
+#define MA_PA_SAMPLE_S24_32BE PA_SAMPLE_S24_32BE
+
+typedef pa_mainloop ma_pa_mainloop;
+typedef pa_threaded_mainloop ma_pa_threaded_mainloop;
+typedef pa_mainloop_api ma_pa_mainloop_api;
+typedef pa_context ma_pa_context;
+typedef pa_operation ma_pa_operation;
+typedef pa_stream ma_pa_stream;
+typedef pa_spawn_api ma_pa_spawn_api;
+typedef pa_buffer_attr ma_pa_buffer_attr;
+typedef pa_channel_map ma_pa_channel_map;
+typedef pa_cvolume ma_pa_cvolume;
+typedef pa_sample_spec ma_pa_sample_spec;
+typedef pa_sink_info ma_pa_sink_info;
+typedef pa_source_info ma_pa_source_info;
+
+typedef pa_context_notify_cb_t ma_pa_context_notify_cb_t;
+typedef pa_sink_info_cb_t ma_pa_sink_info_cb_t;
+typedef pa_source_info_cb_t ma_pa_source_info_cb_t;
+typedef pa_stream_success_cb_t ma_pa_stream_success_cb_t;
+typedef pa_stream_request_cb_t ma_pa_stream_request_cb_t;
+typedef pa_stream_notify_cb_t ma_pa_stream_notify_cb_t;
+typedef pa_free_cb_t ma_pa_free_cb_t;
+#else
+#define MA_PA_OK 0
+#define MA_PA_ERR_ACCESS 1
+#define MA_PA_ERR_INVALID 2
+#define MA_PA_ERR_NOENTITY 5
+#define MA_PA_ERR_NOTSUPPORTED 19
+
+#define MA_PA_CHANNELS_MAX 32
+#define MA_PA_RATE_MAX 384000
+
+typedef int ma_pa_context_flags_t;
+#define MA_PA_CONTEXT_NOFLAGS 0x00000000
+#define MA_PA_CONTEXT_NOAUTOSPAWN 0x00000001
+#define MA_PA_CONTEXT_NOFAIL 0x00000002
+
+typedef int ma_pa_stream_flags_t;
+#define MA_PA_STREAM_NOFLAGS 0x00000000
+#define MA_PA_STREAM_START_CORKED 0x00000001
+#define MA_PA_STREAM_INTERPOLATE_TIMING 0x00000002
+#define MA_PA_STREAM_NOT_MONOTONIC 0x00000004
+#define MA_PA_STREAM_AUTO_TIMING_UPDATE 0x00000008
+#define MA_PA_STREAM_NO_REMAP_CHANNELS 0x00000010
+#define MA_PA_STREAM_NO_REMIX_CHANNELS 0x00000020
+#define MA_PA_STREAM_FIX_FORMAT 0x00000040
+#define MA_PA_STREAM_FIX_RATE 0x00000080
+#define MA_PA_STREAM_FIX_CHANNELS 0x00000100
+#define MA_PA_STREAM_DONT_MOVE 0x00000200
+#define MA_PA_STREAM_VARIABLE_RATE 0x00000400
+#define MA_PA_STREAM_PEAK_DETECT 0x00000800
+#define MA_PA_STREAM_START_MUTED 0x00001000
+#define MA_PA_STREAM_ADJUST_LATENCY 0x00002000
+#define MA_PA_STREAM_EARLY_REQUESTS 0x00004000
+#define MA_PA_STREAM_DONT_INHIBIT_AUTO_SUSPEND 0x00008000
+#define MA_PA_STREAM_START_UNMUTED 0x00010000
+#define MA_PA_STREAM_FAIL_ON_SUSPEND 0x00020000
+#define MA_PA_STREAM_RELATIVE_VOLUME 0x00040000
+#define MA_PA_STREAM_PASSTHROUGH 0x00080000
+
+/* Sink capability flags. Mirrors pa_sink_flags_t; values presumably must match the PulseAudio headers since libpulse may be loaded at runtime — do not renumber. */
+typedef int ma_pa_sink_flags_t;
+#define MA_PA_SINK_NOFLAGS 0x00000000
+#define MA_PA_SINK_HW_VOLUME_CTRL 0x00000001
+#define MA_PA_SINK_LATENCY 0x00000002
+#define MA_PA_SINK_HARDWARE 0x00000004
+#define MA_PA_SINK_NETWORK 0x00000008
+#define MA_PA_SINK_HW_MUTE_CTRL 0x00000010
+#define MA_PA_SINK_DECIBEL_VOLUME 0x00000020
+#define MA_PA_SINK_FLAT_VOLUME 0x00000040
+#define MA_PA_SINK_DYNAMIC_LATENCY 0x00000080
+#define MA_PA_SINK_SET_FORMATS 0x00000100
+
+/* Source capability flags. Mirrors pa_source_flags_t. */
+typedef int ma_pa_source_flags_t;
+#define MA_PA_SOURCE_NOFLAGS 0x00000000
+#define MA_PA_SOURCE_HW_VOLUME_CTRL 0x00000001
+#define MA_PA_SOURCE_LATENCY 0x00000002
+#define MA_PA_SOURCE_HARDWARE 0x00000004
+#define MA_PA_SOURCE_NETWORK 0x00000008
+#define MA_PA_SOURCE_HW_MUTE_CTRL 0x00000010
+#define MA_PA_SOURCE_DECIBEL_VOLUME 0x00000020
+#define MA_PA_SOURCE_DYNAMIC_LATENCY 0x00000040
+#define MA_PA_SOURCE_FLAT_VOLUME 0x00000080
+
+/* Context connection states. Mirrors pa_context_state_t. */
+typedef int ma_pa_context_state_t;
+#define MA_PA_CONTEXT_UNCONNECTED 0
+#define MA_PA_CONTEXT_CONNECTING 1
+#define MA_PA_CONTEXT_AUTHORIZING 2
+#define MA_PA_CONTEXT_SETTING_NAME 3
+#define MA_PA_CONTEXT_READY 4
+#define MA_PA_CONTEXT_FAILED 5
+#define MA_PA_CONTEXT_TERMINATED 6
+
+/* Stream connection states. Mirrors pa_stream_state_t. */
+typedef int ma_pa_stream_state_t;
+#define MA_PA_STREAM_UNCONNECTED 0
+#define MA_PA_STREAM_CREATING 1
+#define MA_PA_STREAM_READY 2
+#define MA_PA_STREAM_FAILED 3
+#define MA_PA_STREAM_TERMINATED 4
+
+/* Async operation states. Mirrors pa_operation_state_t. */
+typedef int ma_pa_operation_state_t;
+#define MA_PA_OPERATION_RUNNING 0
+#define MA_PA_OPERATION_DONE 1
+#define MA_PA_OPERATION_CANCELLED 2
+
+/* Sink run states. Mirrors pa_sink_state_t. */
+typedef int ma_pa_sink_state_t;
+#define MA_PA_SINK_INVALID_STATE -1
+#define MA_PA_SINK_RUNNING 0
+#define MA_PA_SINK_IDLE 1
+#define MA_PA_SINK_SUSPENDED 2
+
+/* Source run states. Mirrors pa_source_state_t. */
+typedef int ma_pa_source_state_t;
+#define MA_PA_SOURCE_INVALID_STATE -1
+#define MA_PA_SOURCE_RUNNING 0
+#define MA_PA_SOURCE_IDLE 1
+#define MA_PA_SOURCE_SUSPENDED 2
+/* Seek modes for pa_stream_write(). Mirrors pa_seek_mode_t. */
+typedef int ma_pa_seek_mode_t;
+#define MA_PA_SEEK_RELATIVE 0
+#define MA_PA_SEEK_ABSOLUTE 1
+#define MA_PA_SEEK_RELATIVE_ON_READ 2
+#define MA_PA_SEEK_RELATIVE_END 3
+
+/* Speaker positions. Mirrors pa_channel_position_t; values are sequential and presumably must track the PulseAudio enum exactly. */
+typedef int ma_pa_channel_position_t;
+#define MA_PA_CHANNEL_POSITION_INVALID -1
+#define MA_PA_CHANNEL_POSITION_MONO 0
+#define MA_PA_CHANNEL_POSITION_FRONT_LEFT 1
+#define MA_PA_CHANNEL_POSITION_FRONT_RIGHT 2
+#define MA_PA_CHANNEL_POSITION_FRONT_CENTER 3
+#define MA_PA_CHANNEL_POSITION_REAR_CENTER 4
+#define MA_PA_CHANNEL_POSITION_REAR_LEFT 5
+#define MA_PA_CHANNEL_POSITION_REAR_RIGHT 6
+#define MA_PA_CHANNEL_POSITION_LFE 7
+#define MA_PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER 8
+#define MA_PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER 9
+#define MA_PA_CHANNEL_POSITION_SIDE_LEFT 10
+#define MA_PA_CHANNEL_POSITION_SIDE_RIGHT 11
+#define MA_PA_CHANNEL_POSITION_AUX0 12
+#define MA_PA_CHANNEL_POSITION_AUX1 13
+#define MA_PA_CHANNEL_POSITION_AUX2 14
+#define MA_PA_CHANNEL_POSITION_AUX3 15
+#define MA_PA_CHANNEL_POSITION_AUX4 16
+#define MA_PA_CHANNEL_POSITION_AUX5 17
+#define MA_PA_CHANNEL_POSITION_AUX6 18
+#define MA_PA_CHANNEL_POSITION_AUX7 19
+#define MA_PA_CHANNEL_POSITION_AUX8 20
+#define MA_PA_CHANNEL_POSITION_AUX9 21
+#define MA_PA_CHANNEL_POSITION_AUX10 22
+#define MA_PA_CHANNEL_POSITION_AUX11 23
+#define MA_PA_CHANNEL_POSITION_AUX12 24
+#define MA_PA_CHANNEL_POSITION_AUX13 25
+#define MA_PA_CHANNEL_POSITION_AUX14 26
+#define MA_PA_CHANNEL_POSITION_AUX15 27
+#define MA_PA_CHANNEL_POSITION_AUX16 28
+#define MA_PA_CHANNEL_POSITION_AUX17 29
+#define MA_PA_CHANNEL_POSITION_AUX18 30
+#define MA_PA_CHANNEL_POSITION_AUX19 31
+#define MA_PA_CHANNEL_POSITION_AUX20 32
+#define MA_PA_CHANNEL_POSITION_AUX21 33
+#define MA_PA_CHANNEL_POSITION_AUX22 34
+#define MA_PA_CHANNEL_POSITION_AUX23 35
+#define MA_PA_CHANNEL_POSITION_AUX24 36
+#define MA_PA_CHANNEL_POSITION_AUX25 37
+#define MA_PA_CHANNEL_POSITION_AUX26 38
+#define MA_PA_CHANNEL_POSITION_AUX27 39
+#define MA_PA_CHANNEL_POSITION_AUX28 40
+#define MA_PA_CHANNEL_POSITION_AUX29 41
+#define MA_PA_CHANNEL_POSITION_AUX30 42
+#define MA_PA_CHANNEL_POSITION_AUX31 43
+#define MA_PA_CHANNEL_POSITION_TOP_CENTER 44
+#define MA_PA_CHANNEL_POSITION_TOP_FRONT_LEFT 45
+#define MA_PA_CHANNEL_POSITION_TOP_FRONT_RIGHT 46
+#define MA_PA_CHANNEL_POSITION_TOP_FRONT_CENTER 47
+#define MA_PA_CHANNEL_POSITION_TOP_REAR_LEFT 48
+#define MA_PA_CHANNEL_POSITION_TOP_REAR_RIGHT 49
+#define MA_PA_CHANNEL_POSITION_TOP_REAR_CENTER 50
+/* Convenience aliases, matching PulseAudio's own aliases. */
+#define MA_PA_CHANNEL_POSITION_LEFT MA_PA_CHANNEL_POSITION_FRONT_LEFT
+#define MA_PA_CHANNEL_POSITION_RIGHT MA_PA_CHANNEL_POSITION_FRONT_RIGHT
+#define MA_PA_CHANNEL_POSITION_CENTER MA_PA_CHANNEL_POSITION_FRONT_CENTER
+#define MA_PA_CHANNEL_POSITION_SUBWOOFER MA_PA_CHANNEL_POSITION_LFE
+
+/* Standard channel map layouts. Mirrors pa_channel_map_def_t. */
+typedef int ma_pa_channel_map_def_t;
+#define MA_PA_CHANNEL_MAP_AIFF 0
+#define MA_PA_CHANNEL_MAP_ALSA 1
+#define MA_PA_CHANNEL_MAP_AUX 2
+#define MA_PA_CHANNEL_MAP_WAVEEX 3
+#define MA_PA_CHANNEL_MAP_OSS 4
+#define MA_PA_CHANNEL_MAP_DEFAULT MA_PA_CHANNEL_MAP_AIFF
+
+/* Sample formats. Mirrors pa_sample_format_t. LE/BE variants are selected at runtime based on host endianness. */
+typedef int ma_pa_sample_format_t;
+#define MA_PA_SAMPLE_INVALID -1
+#define MA_PA_SAMPLE_U8 0
+#define MA_PA_SAMPLE_ALAW 1
+#define MA_PA_SAMPLE_ULAW 2
+#define MA_PA_SAMPLE_S16LE 3
+#define MA_PA_SAMPLE_S16BE 4
+#define MA_PA_SAMPLE_FLOAT32LE 5
+#define MA_PA_SAMPLE_FLOAT32BE 6
+#define MA_PA_SAMPLE_S32LE 7
+#define MA_PA_SAMPLE_S32BE 8
+#define MA_PA_SAMPLE_S24LE 9
+#define MA_PA_SAMPLE_S24BE 10
+#define MA_PA_SAMPLE_S24_32LE 11
+#define MA_PA_SAMPLE_S24_32BE 12
+
+/* Opaque handles to PulseAudio objects. Only ever used through pointers, so the definitions are never needed. */
+typedef struct ma_pa_mainloop ma_pa_mainloop;
+typedef struct ma_pa_threaded_mainloop ma_pa_threaded_mainloop;
+typedef struct ma_pa_mainloop_api ma_pa_mainloop_api;
+typedef struct ma_pa_context ma_pa_context;
+typedef struct ma_pa_operation ma_pa_operation;
+typedef struct ma_pa_stream ma_pa_stream;
+typedef struct ma_pa_spawn_api ma_pa_spawn_api;
+
+/* Buffer metrics, in bytes. Mirrors pa_buffer_attr; (ma_uint32)-1 means "let the server choose". */
+typedef struct
+{
+    ma_uint32 maxlength;
+    ma_uint32 tlength;
+    ma_uint32 prebuf;
+    ma_uint32 minreq;
+    ma_uint32 fragsize;
+} ma_pa_buffer_attr;
+
+/* Channel layout. Mirrors pa_channel_map. */
+typedef struct
+{
+    ma_uint8 channels;
+    ma_pa_channel_position_t map[MA_PA_CHANNELS_MAX];
+} ma_pa_channel_map;
+
+/* Per-channel volume. Mirrors pa_cvolume. */
+typedef struct
+{
+    ma_uint8 channels;
+    ma_uint32 values[MA_PA_CHANNELS_MAX];
+} ma_pa_cvolume;
+
+/* Sample format/rate/channel triple. Mirrors pa_sample_spec. */
+typedef struct
+{
+    ma_pa_sample_format_t format;
+    ma_uint32 rate;
+    ma_uint8 channels;
+} ma_pa_sample_spec;
+
+/*
+Sink description received from introspection. Mirrors pa_sink_info — the member order and types
+presumably must match the libpulse ABI exactly since the library fills this struct; do not reorder.
+*/
+typedef struct
+{
+    const char* name;
+    ma_uint32 index;
+    const char* description;
+    ma_pa_sample_spec sample_spec;
+    ma_pa_channel_map channel_map;
+    ma_uint32 owner_module;
+    ma_pa_cvolume volume;
+    int mute;
+    ma_uint32 monitor_source;
+    const char* monitor_source_name;
+    ma_uint64 latency;
+    const char* driver;
+    ma_pa_sink_flags_t flags;
+    void* proplist;
+    ma_uint64 configured_latency;
+    ma_uint32 base_volume;
+    ma_pa_sink_state_t state;
+    ma_uint32 n_volume_steps;
+    ma_uint32 card;
+    ma_uint32 n_ports;
+    void** ports;
+    void* active_port;
+    ma_uint8 n_formats;
+    void** formats;
+} ma_pa_sink_info;
+
+/* Source description received from introspection. Mirrors pa_source_info; same ABI caveat as ma_pa_sink_info. */
+typedef struct
+{
+    const char *name;
+    ma_uint32 index;
+    const char *description;
+    ma_pa_sample_spec sample_spec;
+    ma_pa_channel_map channel_map;
+    ma_uint32 owner_module;
+    ma_pa_cvolume volume;
+    int mute;
+    ma_uint32 monitor_of_sink;
+    const char *monitor_of_sink_name;
+    ma_uint64 latency;
+    const char *driver;
+    ma_pa_source_flags_t flags;
+    void* proplist;
+    ma_uint64 configured_latency;
+    ma_uint32 base_volume;
+    ma_pa_source_state_t state;
+    ma_uint32 n_volume_steps;
+    ma_uint32 card;
+    ma_uint32 n_ports;
+    void** ports;
+    void* active_port;
+    ma_uint8 n_formats;
+    void** formats;
+} ma_pa_source_info;
+
+/* Callback signatures, mirroring the corresponding pa_*_cb_t types. For the info callbacks, eol > 0 signals the end of the list. */
+typedef void (* ma_pa_context_notify_cb_t)(ma_pa_context* c, void* userdata);
+typedef void (* ma_pa_sink_info_cb_t)     (ma_pa_context* c, const ma_pa_sink_info* i, int eol, void* userdata);
+typedef void (* ma_pa_source_info_cb_t)   (ma_pa_context* c, const ma_pa_source_info* i, int eol, void* userdata);
+typedef void (* ma_pa_stream_success_cb_t)(ma_pa_stream* s, int success, void* userdata);
+typedef void (* ma_pa_stream_request_cb_t)(ma_pa_stream* s, size_t nbytes, void* userdata);
+typedef void (* ma_pa_stream_notify_cb_t) (ma_pa_stream* s, void* userdata);
+typedef void (* ma_pa_free_cb_t)          (void* p);
+#endif
+
+
+/*
+Function pointer types for every libpulse entry point miniaudio uses. When runtime linking is
+enabled these are resolved with dlsym(); otherwise they're bound to the real functions at
+compile time. Signatures mirror the corresponding pa_*() prototypes.
+*/
+typedef ma_pa_mainloop*                  (* ma_pa_mainloop_new_proc)                   (void);
+typedef void                             (* ma_pa_mainloop_free_proc)                  (ma_pa_mainloop* m);
+typedef void                             (* ma_pa_mainloop_quit_proc)                  (ma_pa_mainloop* m, int retval);
+typedef ma_pa_mainloop_api*              (* ma_pa_mainloop_get_api_proc)               (ma_pa_mainloop* m);
+typedef int                              (* ma_pa_mainloop_iterate_proc)               (ma_pa_mainloop* m, int block, int* retval);
+typedef void                             (* ma_pa_mainloop_wakeup_proc)                (ma_pa_mainloop* m);
+typedef ma_pa_threaded_mainloop*         (* ma_pa_threaded_mainloop_new_proc)          (void);
+typedef void                             (* ma_pa_threaded_mainloop_free_proc)         (ma_pa_threaded_mainloop* m);
+typedef int                              (* ma_pa_threaded_mainloop_start_proc)        (ma_pa_threaded_mainloop* m);
+typedef void                             (* ma_pa_threaded_mainloop_stop_proc)         (ma_pa_threaded_mainloop* m);
+typedef void                             (* ma_pa_threaded_mainloop_lock_proc)         (ma_pa_threaded_mainloop* m);
+typedef void                             (* ma_pa_threaded_mainloop_unlock_proc)       (ma_pa_threaded_mainloop* m);
+typedef void                             (* ma_pa_threaded_mainloop_wait_proc)         (ma_pa_threaded_mainloop* m);
+typedef void                             (* ma_pa_threaded_mainloop_signal_proc)       (ma_pa_threaded_mainloop* m, int wait_for_accept);
+typedef void                             (* ma_pa_threaded_mainloop_accept_proc)       (ma_pa_threaded_mainloop* m);
+typedef int                              (* ma_pa_threaded_mainloop_get_retval_proc)   (ma_pa_threaded_mainloop* m);
+typedef ma_pa_mainloop_api*              (* ma_pa_threaded_mainloop_get_api_proc)      (ma_pa_threaded_mainloop* m);
+typedef int                              (* ma_pa_threaded_mainloop_in_thread_proc)    (ma_pa_threaded_mainloop* m);
+typedef void                             (* ma_pa_threaded_mainloop_set_name_proc)     (ma_pa_threaded_mainloop* m, const char* name);
+typedef ma_pa_context*                   (* ma_pa_context_new_proc)                    (ma_pa_mainloop_api* mainloop, const char* name);
+typedef void                             (* ma_pa_context_unref_proc)                  (ma_pa_context* c);
+typedef int                              (* ma_pa_context_connect_proc)                (ma_pa_context* c, const char* server, ma_pa_context_flags_t flags, const ma_pa_spawn_api* api);
+typedef void                             (* ma_pa_context_disconnect_proc)             (ma_pa_context* c);
+typedef void                             (* ma_pa_context_set_state_callback_proc)     (ma_pa_context* c, ma_pa_context_notify_cb_t cb, void* userdata);
+typedef ma_pa_context_state_t            (* ma_pa_context_get_state_proc)              (ma_pa_context* c);
+typedef ma_pa_operation*                 (* ma_pa_context_get_sink_info_list_proc)     (ma_pa_context* c, ma_pa_sink_info_cb_t cb, void* userdata);
+typedef ma_pa_operation*                 (* ma_pa_context_get_source_info_list_proc)   (ma_pa_context* c, ma_pa_source_info_cb_t cb, void* userdata);
+typedef ma_pa_operation*                 (* ma_pa_context_get_sink_info_by_name_proc)  (ma_pa_context* c, const char* name, ma_pa_sink_info_cb_t cb, void* userdata);
+typedef ma_pa_operation*                 (* ma_pa_context_get_source_info_by_name_proc)(ma_pa_context* c, const char* name, ma_pa_source_info_cb_t cb, void* userdata);
+typedef void                             (* ma_pa_operation_unref_proc)                (ma_pa_operation* o);
+typedef ma_pa_operation_state_t          (* ma_pa_operation_get_state_proc)            (ma_pa_operation* o);
+typedef ma_pa_channel_map*               (* ma_pa_channel_map_init_extend_proc)        (ma_pa_channel_map* m, unsigned channels, ma_pa_channel_map_def_t def);
+typedef int                              (* ma_pa_channel_map_valid_proc)              (const ma_pa_channel_map* m);
+typedef int                              (* ma_pa_channel_map_compatible_proc)         (const ma_pa_channel_map* m, const ma_pa_sample_spec* ss);
+typedef ma_pa_stream*                    (* ma_pa_stream_new_proc)                     (ma_pa_context* c, const char* name, const ma_pa_sample_spec* ss, const ma_pa_channel_map* map);
+typedef void                             (* ma_pa_stream_unref_proc)                   (ma_pa_stream* s);
+typedef int                              (* ma_pa_stream_connect_playback_proc)        (ma_pa_stream* s, const char* dev, const ma_pa_buffer_attr* attr, ma_pa_stream_flags_t flags, const ma_pa_cvolume* volume, ma_pa_stream* sync_stream);
+typedef int                              (* ma_pa_stream_connect_record_proc)          (ma_pa_stream* s, const char* dev, const ma_pa_buffer_attr* attr, ma_pa_stream_flags_t flags);
+typedef int                              (* ma_pa_stream_disconnect_proc)              (ma_pa_stream* s);
+typedef ma_pa_stream_state_t             (* ma_pa_stream_get_state_proc)               (ma_pa_stream* s);
+typedef const ma_pa_sample_spec*         (* ma_pa_stream_get_sample_spec_proc)         (ma_pa_stream* s);
+typedef const ma_pa_channel_map*         (* ma_pa_stream_get_channel_map_proc)         (ma_pa_stream* s);
+typedef const ma_pa_buffer_attr*         (* ma_pa_stream_get_buffer_attr_proc)         (ma_pa_stream* s);
+typedef ma_pa_operation*                 (* ma_pa_stream_set_buffer_attr_proc)         (ma_pa_stream* s, const ma_pa_buffer_attr* attr, ma_pa_stream_success_cb_t cb, void* userdata);
+typedef const char*                      (* ma_pa_stream_get_device_name_proc)         (ma_pa_stream* s);
+typedef void                             (* ma_pa_stream_set_write_callback_proc)      (ma_pa_stream* s, ma_pa_stream_request_cb_t cb, void* userdata);
+typedef void                             (* ma_pa_stream_set_read_callback_proc)       (ma_pa_stream* s, ma_pa_stream_request_cb_t cb, void* userdata);
+typedef void                             (* ma_pa_stream_set_suspended_callback_proc)  (ma_pa_stream* s, ma_pa_stream_notify_cb_t cb, void* userdata);
+typedef void                             (* ma_pa_stream_set_moved_callback_proc)      (ma_pa_stream* s, ma_pa_stream_notify_cb_t cb, void* userdata);
+typedef int                              (* ma_pa_stream_is_suspended_proc)            (const ma_pa_stream* s);
+typedef ma_pa_operation*                 (* ma_pa_stream_flush_proc)                   (ma_pa_stream* s, ma_pa_stream_success_cb_t cb, void* userdata);
+typedef ma_pa_operation*                 (* ma_pa_stream_drain_proc)                   (ma_pa_stream* s, ma_pa_stream_success_cb_t cb, void* userdata);
+typedef int                              (* ma_pa_stream_is_corked_proc)               (ma_pa_stream* s);
+typedef ma_pa_operation*                 (* ma_pa_stream_cork_proc)                    (ma_pa_stream* s, int b, ma_pa_stream_success_cb_t cb, void* userdata);
+typedef ma_pa_operation*                 (* ma_pa_stream_trigger_proc)                 (ma_pa_stream* s, ma_pa_stream_success_cb_t cb, void* userdata);
+typedef int                              (* ma_pa_stream_begin_write_proc)             (ma_pa_stream* s, void** data, size_t* nbytes);
+typedef int                              (* ma_pa_stream_write_proc)                   (ma_pa_stream* s, const void* data, size_t nbytes, ma_pa_free_cb_t free_cb, int64_t offset, ma_pa_seek_mode_t seek);
+typedef int                              (* ma_pa_stream_peek_proc)                    (ma_pa_stream* s, const void** data, size_t* nbytes);
+typedef int                              (* ma_pa_stream_drop_proc)                    (ma_pa_stream* s);
+typedef size_t                           (* ma_pa_stream_writable_size_proc)           (ma_pa_stream* s);
+typedef size_t                           (* ma_pa_stream_readable_size_proc)           (ma_pa_stream* s);
+
+/* Accumulator used while enumerating PulseAudio devices into a growable ma_device_info array. */
+typedef struct
+{
+    ma_uint32 count;      /* Number of entries filled so far. */
+    ma_uint32 capacity;   /* Number of entries pInfo can hold. */
+    ma_device_info* pInfo;
+} ma_pulse_device_enum_data;
+
+/*
+Translates a PulseAudio return code into a ma_result. Any negative code is an error; codes
+without a specific mapping collapse to MA_ERROR.
+*/
+static ma_result ma_result_from_pulse(int result)
+{
+    if (result < 0) {
+        return MA_ERROR;
+    }
+
+    if (result == MA_PA_OK) {
+        return MA_SUCCESS;
+    } else if (result == MA_PA_ERR_ACCESS) {
+        return MA_ACCESS_DENIED;
+    } else if (result == MA_PA_ERR_INVALID) {
+        return MA_INVALID_ARGS;
+    } else if (result == MA_PA_ERR_NOENTITY) {
+        return MA_NO_DEVICE;
+    } else {
+        return MA_ERROR;
+    }
+}
+
+#if 0   /* Currently unused; kept for symmetry with ma_format_from_pulse(). */
+/* Converts a miniaudio format to the equivalent PulseAudio sample format for the host's endianness. Unsupported formats map to MA_PA_SAMPLE_INVALID. */
+static ma_pa_sample_format_t ma_format_to_pulse(ma_format format)
+{
+    if (ma_is_little_endian()) {
+        switch (format) {
+            case ma_format_s16: return MA_PA_SAMPLE_S16LE;
+            case ma_format_s24: return MA_PA_SAMPLE_S24LE;
+            case ma_format_s32: return MA_PA_SAMPLE_S32LE;
+            case ma_format_f32: return MA_PA_SAMPLE_FLOAT32LE;
+            default: break;
+        }
+    } else {
+        switch (format) {
+            case ma_format_s16: return MA_PA_SAMPLE_S16BE;
+            case ma_format_s24: return MA_PA_SAMPLE_S24BE;
+            case ma_format_s32: return MA_PA_SAMPLE_S32BE;
+            case ma_format_f32: return MA_PA_SAMPLE_FLOAT32BE;
+            default: break;
+        }
+    }
+
+    /* Endian agnostic. */
+    switch (format) {
+        case ma_format_u8: return MA_PA_SAMPLE_U8;
+        default: return MA_PA_SAMPLE_INVALID;
+    }
+}
+#endif
+
+/*
+Converts a PulseAudio sample format to the equivalent miniaudio format, taking the host's
+endianness into account. Formats miniaudio doesn't support map to ma_format_unknown.
+*/
+static ma_format ma_format_from_pulse(ma_pa_sample_format_t format)
+{
+    /* 8-bit unsigned is the only supported endian-agnostic format. */
+    if (format == MA_PA_SAMPLE_U8) {
+        return ma_format_u8;
+    }
+
+    if (ma_is_little_endian()) {
+        switch (format) {
+            case MA_PA_SAMPLE_S16LE:     return ma_format_s16;
+            case MA_PA_SAMPLE_S24LE:     return ma_format_s24;
+            case MA_PA_SAMPLE_S32LE:     return ma_format_s32;
+            case MA_PA_SAMPLE_FLOAT32LE: return ma_format_f32;
+            default:                     return ma_format_unknown;
+        }
+    } else {
+        switch (format) {
+            case MA_PA_SAMPLE_S16BE:     return ma_format_s16;
+            case MA_PA_SAMPLE_S24BE:     return ma_format_s24;
+            case MA_PA_SAMPLE_S32BE:     return ma_format_s32;
+            case MA_PA_SAMPLE_FLOAT32BE: return ma_format_f32;
+            default:                     return ma_format_unknown;
+        }
+    }
+}
+
+/* Maps a PulseAudio channel position to the corresponding miniaudio channel. Unknown positions map to MA_CHANNEL_NONE. */
+static ma_channel ma_channel_position_from_pulse(ma_pa_channel_position_t position)
+{
+    switch (position)
+    {
+        case MA_PA_CHANNEL_POSITION_INVALID: return MA_CHANNEL_NONE;
+        case MA_PA_CHANNEL_POSITION_MONO: return MA_CHANNEL_MONO;
+        case MA_PA_CHANNEL_POSITION_FRONT_LEFT: return MA_CHANNEL_FRONT_LEFT;
+        case MA_PA_CHANNEL_POSITION_FRONT_RIGHT: return MA_CHANNEL_FRONT_RIGHT;
+        case MA_PA_CHANNEL_POSITION_FRONT_CENTER: return MA_CHANNEL_FRONT_CENTER;
+        case MA_PA_CHANNEL_POSITION_REAR_CENTER: return MA_CHANNEL_BACK_CENTER;
+        case MA_PA_CHANNEL_POSITION_REAR_LEFT: return MA_CHANNEL_BACK_LEFT;
+        case MA_PA_CHANNEL_POSITION_REAR_RIGHT: return MA_CHANNEL_BACK_RIGHT;
+        case MA_PA_CHANNEL_POSITION_LFE: return MA_CHANNEL_LFE;
+        case MA_PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER: return MA_CHANNEL_FRONT_LEFT_CENTER;
+        case MA_PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER: return MA_CHANNEL_FRONT_RIGHT_CENTER;
+        case MA_PA_CHANNEL_POSITION_SIDE_LEFT: return MA_CHANNEL_SIDE_LEFT;
+        case MA_PA_CHANNEL_POSITION_SIDE_RIGHT: return MA_CHANNEL_SIDE_RIGHT;
+        case MA_PA_CHANNEL_POSITION_AUX0: return MA_CHANNEL_AUX_0;
+        case MA_PA_CHANNEL_POSITION_AUX1: return MA_CHANNEL_AUX_1;
+        case MA_PA_CHANNEL_POSITION_AUX2: return MA_CHANNEL_AUX_2;
+        case MA_PA_CHANNEL_POSITION_AUX3: return MA_CHANNEL_AUX_3;
+        case MA_PA_CHANNEL_POSITION_AUX4: return MA_CHANNEL_AUX_4;
+        case MA_PA_CHANNEL_POSITION_AUX5: return MA_CHANNEL_AUX_5;
+        case MA_PA_CHANNEL_POSITION_AUX6: return MA_CHANNEL_AUX_6;
+        case MA_PA_CHANNEL_POSITION_AUX7: return MA_CHANNEL_AUX_7;
+        case MA_PA_CHANNEL_POSITION_AUX8: return MA_CHANNEL_AUX_8;
+        case MA_PA_CHANNEL_POSITION_AUX9: return MA_CHANNEL_AUX_9;
+        case MA_PA_CHANNEL_POSITION_AUX10: return MA_CHANNEL_AUX_10;
+        case MA_PA_CHANNEL_POSITION_AUX11: return MA_CHANNEL_AUX_11;
+        case MA_PA_CHANNEL_POSITION_AUX12: return MA_CHANNEL_AUX_12;
+        case MA_PA_CHANNEL_POSITION_AUX13: return MA_CHANNEL_AUX_13;
+        case MA_PA_CHANNEL_POSITION_AUX14: return MA_CHANNEL_AUX_14;
+        case MA_PA_CHANNEL_POSITION_AUX15: return MA_CHANNEL_AUX_15;
+        case MA_PA_CHANNEL_POSITION_AUX16: return MA_CHANNEL_AUX_16;
+        case MA_PA_CHANNEL_POSITION_AUX17: return MA_CHANNEL_AUX_17;
+        case MA_PA_CHANNEL_POSITION_AUX18: return MA_CHANNEL_AUX_18;
+        case MA_PA_CHANNEL_POSITION_AUX19: return MA_CHANNEL_AUX_19;
+        case MA_PA_CHANNEL_POSITION_AUX20: return MA_CHANNEL_AUX_20;
+        case MA_PA_CHANNEL_POSITION_AUX21: return MA_CHANNEL_AUX_21;
+        case MA_PA_CHANNEL_POSITION_AUX22: return MA_CHANNEL_AUX_22;
+        case MA_PA_CHANNEL_POSITION_AUX23: return MA_CHANNEL_AUX_23;
+        case MA_PA_CHANNEL_POSITION_AUX24: return MA_CHANNEL_AUX_24;
+        case MA_PA_CHANNEL_POSITION_AUX25: return MA_CHANNEL_AUX_25;
+        case MA_PA_CHANNEL_POSITION_AUX26: return MA_CHANNEL_AUX_26;
+        case MA_PA_CHANNEL_POSITION_AUX27: return MA_CHANNEL_AUX_27;
+        case MA_PA_CHANNEL_POSITION_AUX28: return MA_CHANNEL_AUX_28;
+        case MA_PA_CHANNEL_POSITION_AUX29: return MA_CHANNEL_AUX_29;
+        case MA_PA_CHANNEL_POSITION_AUX30: return MA_CHANNEL_AUX_30;
+        case MA_PA_CHANNEL_POSITION_AUX31: return MA_CHANNEL_AUX_31;
+        case MA_PA_CHANNEL_POSITION_TOP_CENTER: return MA_CHANNEL_TOP_CENTER;
+        case MA_PA_CHANNEL_POSITION_TOP_FRONT_LEFT: return MA_CHANNEL_TOP_FRONT_LEFT;
+        case MA_PA_CHANNEL_POSITION_TOP_FRONT_RIGHT: return MA_CHANNEL_TOP_FRONT_RIGHT;
+        case MA_PA_CHANNEL_POSITION_TOP_FRONT_CENTER: return MA_CHANNEL_TOP_FRONT_CENTER;
+        case MA_PA_CHANNEL_POSITION_TOP_REAR_LEFT: return MA_CHANNEL_TOP_BACK_LEFT;
+        case MA_PA_CHANNEL_POSITION_TOP_REAR_RIGHT: return MA_CHANNEL_TOP_BACK_RIGHT;
+        case MA_PA_CHANNEL_POSITION_TOP_REAR_CENTER: return MA_CHANNEL_TOP_BACK_CENTER;
+        default: return MA_CHANNEL_NONE;
+    }
+}
+
+#if 0   /* Currently unused; kept for symmetry with ma_channel_position_from_pulse(). */
+/*
+Maps a miniaudio channel to the corresponding PulseAudio channel position. Note the default
+case passes the value straight through as a cast — presumably relying on MA_CHANNEL_AUX_*
+values lining up with MA_PA_CHANNEL_POSITION_AUX*; verify before re-enabling.
+*/
+static ma_pa_channel_position_t ma_channel_position_to_pulse(ma_channel position)
+{
+    switch (position)
+    {
+        case MA_CHANNEL_NONE: return MA_PA_CHANNEL_POSITION_INVALID;
+        case MA_CHANNEL_FRONT_LEFT: return MA_PA_CHANNEL_POSITION_FRONT_LEFT;
+        case MA_CHANNEL_FRONT_RIGHT: return MA_PA_CHANNEL_POSITION_FRONT_RIGHT;
+        case MA_CHANNEL_FRONT_CENTER: return MA_PA_CHANNEL_POSITION_FRONT_CENTER;
+        case MA_CHANNEL_LFE: return MA_PA_CHANNEL_POSITION_LFE;
+        case MA_CHANNEL_BACK_LEFT: return MA_PA_CHANNEL_POSITION_REAR_LEFT;
+        case MA_CHANNEL_BACK_RIGHT: return MA_PA_CHANNEL_POSITION_REAR_RIGHT;
+        case MA_CHANNEL_FRONT_LEFT_CENTER: return MA_PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER;
+        case MA_CHANNEL_FRONT_RIGHT_CENTER: return MA_PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER;
+        case MA_CHANNEL_BACK_CENTER: return MA_PA_CHANNEL_POSITION_REAR_CENTER;
+        case MA_CHANNEL_SIDE_LEFT: return MA_PA_CHANNEL_POSITION_SIDE_LEFT;
+        case MA_CHANNEL_SIDE_RIGHT: return MA_PA_CHANNEL_POSITION_SIDE_RIGHT;
+        case MA_CHANNEL_TOP_CENTER: return MA_PA_CHANNEL_POSITION_TOP_CENTER;
+        case MA_CHANNEL_TOP_FRONT_LEFT: return MA_PA_CHANNEL_POSITION_TOP_FRONT_LEFT;
+        case MA_CHANNEL_TOP_FRONT_CENTER: return MA_PA_CHANNEL_POSITION_TOP_FRONT_CENTER;
+        case MA_CHANNEL_TOP_FRONT_RIGHT: return MA_PA_CHANNEL_POSITION_TOP_FRONT_RIGHT;
+        case MA_CHANNEL_TOP_BACK_LEFT: return MA_PA_CHANNEL_POSITION_TOP_REAR_LEFT;
+        case MA_CHANNEL_TOP_BACK_CENTER: return MA_PA_CHANNEL_POSITION_TOP_REAR_CENTER;
+        case MA_CHANNEL_TOP_BACK_RIGHT: return MA_PA_CHANNEL_POSITION_TOP_REAR_RIGHT;
+        case MA_CHANNEL_19: return MA_PA_CHANNEL_POSITION_AUX18;
+        case MA_CHANNEL_20: return MA_PA_CHANNEL_POSITION_AUX19;
+        case MA_CHANNEL_21: return MA_PA_CHANNEL_POSITION_AUX20;
+        case MA_CHANNEL_22: return MA_PA_CHANNEL_POSITION_AUX21;
+        case MA_CHANNEL_23: return MA_PA_CHANNEL_POSITION_AUX22;
+        case MA_CHANNEL_24: return MA_PA_CHANNEL_POSITION_AUX23;
+        case MA_CHANNEL_25: return MA_PA_CHANNEL_POSITION_AUX24;
+        case MA_CHANNEL_26: return MA_PA_CHANNEL_POSITION_AUX25;
+        case MA_CHANNEL_27: return MA_PA_CHANNEL_POSITION_AUX26;
+        case MA_CHANNEL_28: return MA_PA_CHANNEL_POSITION_AUX27;
+        case MA_CHANNEL_29: return MA_PA_CHANNEL_POSITION_AUX28;
+        case MA_CHANNEL_30: return MA_PA_CHANNEL_POSITION_AUX29;
+        case MA_CHANNEL_31: return MA_PA_CHANNEL_POSITION_AUX30;
+        case MA_CHANNEL_32: return MA_PA_CHANNEL_POSITION_AUX31;
+        default: return (ma_pa_channel_position_t)position;
+    }
+}
+#endif
+
+/*
+Blocks until pOP leaves the MA_PA_OPERATION_RUNNING state, pumping pMainLoop while waiting.
+Returns an error translated from PulseAudio if iterating the mainloop fails; does not unref pOP.
+*/
+static ma_result ma_wait_for_operation__pulse(ma_context* pContext, ma_ptr pMainLoop, ma_pa_operation* pOP)
+{
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pOP != NULL);
+
+    while (((ma_pa_operation_get_state_proc)pContext->pulse.pa_operation_get_state)(pOP) == MA_PA_OPERATION_RUNNING) {
+        int iterationResult = ((ma_pa_mainloop_iterate_proc)pContext->pulse.pa_mainloop_iterate)((ma_pa_mainloop*)pMainLoop, 1, NULL);
+        if (iterationResult < 0) {
+            return ma_result_from_pulse(iterationResult);
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+/*
+Convenience wrapper: waits for pOP to complete, then unrefs it regardless of the wait result.
+Returns MA_INVALID_ARGS when pOP is NULL (e.g. libpulse failed to create the operation).
+*/
+static ma_result ma_wait_for_operation_and_unref__pulse(ma_context* pContext, ma_ptr pMainLoop, ma_pa_operation* pOP)
+{
+    ma_result waitResult;
+
+    if (pOP == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    waitResult = ma_wait_for_operation__pulse(pContext, pMainLoop, pOP);
+
+    /* The operation must always be unreffed, even when waiting failed. */
+    ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP);
+
+    return waitResult;
+}
+
+/*
+Pumps the mainloop until the PulseAudio context reaches the READY state. Returns an error if the
+context fails or is terminated, or if iterating the mainloop fails.
+*/
+static ma_result ma_wait_for_pa_context_to_connect__pulse(ma_context* pContext, ma_ptr pMainLoop, ma_ptr pPulseContext)
+{
+    int resultPA;
+    ma_pa_context_state_t state;
+
+    for (;;) {
+        state = ((ma_pa_context_get_state_proc)pContext->pulse.pa_context_get_state)((ma_pa_context*)pPulseContext);
+        if (state == MA_PA_CONTEXT_READY) {
+            break;  /* Done. */
+        }
+
+        if (state == MA_PA_CONTEXT_FAILED || state == MA_PA_CONTEXT_TERMINATED) {
+            ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[PulseAudio] An error occurred while connecting the PulseAudio context.");
+            return MA_ERROR;
+        }
+
+        resultPA = ((ma_pa_mainloop_iterate_proc)pContext->pulse.pa_mainloop_iterate)((ma_pa_mainloop*)pMainLoop, 1, NULL);
+        if (resultPA < 0) {
+            return ma_result_from_pulse(resultPA);
+        }
+    }
+
+    /* Reached via the break above once the context is READY. */
+    return MA_SUCCESS;
+}
+
+/*
+Pumps the mainloop until the PulseAudio stream reaches the READY state. Returns an error if the
+stream fails or is terminated, or if iterating the mainloop fails.
+*/
+static ma_result ma_wait_for_pa_stream_to_connect__pulse(ma_context* pContext, ma_ptr pMainLoop, ma_ptr pStream)
+{
+    for (;;) {
+        ma_pa_stream_state_t streamState;
+        int iterationResult;
+
+        streamState = ((ma_pa_stream_get_state_proc)pContext->pulse.pa_stream_get_state)((ma_pa_stream*)pStream);
+        if (streamState == MA_PA_STREAM_READY) {
+            return MA_SUCCESS;
+        }
+
+        if (streamState == MA_PA_STREAM_FAILED || streamState == MA_PA_STREAM_TERMINATED) {
+            ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[PulseAudio] An error occurred while connecting the PulseAudio stream.");
+            return MA_ERROR;
+        }
+
+        iterationResult = ((ma_pa_mainloop_iterate_proc)pContext->pulse.pa_mainloop_iterate)((ma_pa_mainloop*)pMainLoop, 1, NULL);
+        if (iterationResult < 0) {
+            return ma_result_from_pulse(iterationResult);
+        }
+    }
+}
+
+
+/*
+Creates a PulseAudio mainloop and context, connects the context to the server (optionally
+allowing auto-spawn) and waits synchronously until the connection is established. On success,
+ownership of the mainloop and context transfers to the caller via ppMainLoop/ppPulseContext.
+On failure everything created here is cleaned up before returning.
+*/
+static ma_result ma_init_pa_mainloop_and_pa_context__pulse(ma_context* pContext, const char* pApplicationName, const char* pServerName, ma_bool32 tryAutoSpawn, ma_ptr* ppMainLoop, ma_ptr* ppPulseContext)
+{
+    ma_result result;
+    ma_ptr pMainLoop;
+    ma_ptr pPulseContext;
+
+    MA_ASSERT(ppMainLoop != NULL);
+    MA_ASSERT(ppPulseContext != NULL);
+
+    /* The PulseAudio context maps well to miniaudio's notion of a context. The pa_context object will be initialized as part of the ma_context. */
+    pMainLoop = ((ma_pa_mainloop_new_proc)pContext->pulse.pa_mainloop_new)();
+    if (pMainLoop == NULL) {
+        ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to create mainloop.");
+        return MA_FAILED_TO_INIT_BACKEND;
+    }
+
+    pPulseContext = ((ma_pa_context_new_proc)pContext->pulse.pa_context_new)(((ma_pa_mainloop_get_api_proc)pContext->pulse.pa_mainloop_get_api)((ma_pa_mainloop*)pMainLoop), pApplicationName);
+    if (pPulseContext == NULL) {
+        ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to create PulseAudio context.");
+        ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)((ma_pa_mainloop*)(pMainLoop));
+        return MA_FAILED_TO_INIT_BACKEND;
+    }
+
+    /* Now we need to connect to the context. Everything is asynchronous so we need to wait for it to connect before returning. */
+    result = ma_result_from_pulse(((ma_pa_context_connect_proc)pContext->pulse.pa_context_connect)((ma_pa_context*)pPulseContext, pServerName, (tryAutoSpawn) ? 0 : MA_PA_CONTEXT_NOAUTOSPAWN, NULL));
+    if (result != MA_SUCCESS) {
+        ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to connect PulseAudio context.");
+        ((ma_pa_context_unref_proc)pContext->pulse.pa_context_unref)((ma_pa_context*)pPulseContext);    /* Fix: the context was previously leaked on this path. */
+        ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)((ma_pa_mainloop*)(pMainLoop));
+        return result;
+    }
+
+    /* Since ma_context_init() runs synchronously we need to wait for the PulseAudio context to connect before we return. */
+    result = ma_wait_for_pa_context_to_connect__pulse(pContext, pMainLoop, pPulseContext);
+    if (result != MA_SUCCESS) {
+        ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[PulseAudio] Waiting for connection failed.");
+        ((ma_pa_context_disconnect_proc)pContext->pulse.pa_context_disconnect)((ma_pa_context*)pPulseContext);  /* pa_context_connect() succeeded, so disconnect before unreffing. */
+        ((ma_pa_context_unref_proc)pContext->pulse.pa_context_unref)((ma_pa_context*)pPulseContext);    /* Fix: the context was previously leaked on this path. */
+        ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)((ma_pa_mainloop*)(pMainLoop));
+        return result;
+    }
+
+    *ppMainLoop = pMainLoop;
+    *ppPulseContext = pPulseContext;
+
+    return MA_SUCCESS;
+}
+
+
+/*
+pa_sink_info_cb_t callback that copies the first reported sink info into the ma_pa_sink_info
+pointed to by pUserData. endOfList > 0 marks the terminating invocation and carries no data.
+NOTE(review): this is a shallow struct copy — the embedded pointers (name, description, ...)
+presumably reference PulseAudio-owned memory; confirm they are not used after it is released.
+*/
+static void ma_device_sink_info_callback(ma_pa_context* pPulseContext, const ma_pa_sink_info* pInfo, int endOfList, void* pUserData)
+{
+    ma_pa_sink_info* pInfoOut;
+
+    if (endOfList > 0) {
+        return;
+    }
+
+    pInfoOut = (ma_pa_sink_info*)pUserData;
+    MA_ASSERT(pInfoOut != NULL);
+
+    *pInfoOut = *pInfo;
+
+    (void)pPulseContext; /* Unused. */
+}
+
+/*
+pa_source_info_cb_t callback that copies the first reported source info into the
+ma_pa_source_info pointed to by pUserData. endOfList > 0 marks the terminating invocation.
+NOTE(review): shallow copy — embedded pointers presumably reference PulseAudio-owned memory.
+*/
+static void ma_device_source_info_callback(ma_pa_context* pPulseContext, const ma_pa_source_info* pInfo, int endOfList, void* pUserData)
+{
+    ma_pa_source_info* pInfoOut;
+
+    if (endOfList > 0) {
+        return;
+    }
+
+    pInfoOut = (ma_pa_source_info*)pUserData;
+    MA_ASSERT(pInfoOut != NULL);
+
+    *pInfoOut = *pInfo;
+
+    (void)pPulseContext; /* Unused. */
+}
+
+#if 0   /* Currently unused. */
+/* pa_sink_info_cb_t callback that copies the sink's description into the device's playback name. */
+static void ma_device_sink_name_callback(ma_pa_context* pPulseContext, const ma_pa_sink_info* pInfo, int endOfList, void* pUserData)
+{
+    ma_device* pDevice;
+
+    if (endOfList > 0) {
+        return;
+    }
+
+    pDevice = (ma_device*)pUserData;
+    MA_ASSERT(pDevice != NULL);
+
+    ma_strncpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), pInfo->description, (size_t)-1);
+
+    (void)pPulseContext; /* Unused. */
+}
+
+/* pa_source_info_cb_t callback that copies the source's description into the device's capture name. */
+static void ma_device_source_name_callback(ma_pa_context* pPulseContext, const ma_pa_source_info* pInfo, int endOfList, void* pUserData)
+{
+    ma_device* pDevice;
+
+    if (endOfList > 0) {
+        return;
+    }
+
+    pDevice = (ma_device*)pUserData;
+    MA_ASSERT(pDevice != NULL);
+
+    ma_strncpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), pInfo->description, (size_t)-1);
+
+    (void)pPulseContext; /* Unused. */
+}
+#endif
+
+/*
+Synchronously retrieves info for the named sink (NULL = default sink) into pSinkInfo.
+*/
+static ma_result ma_context_get_sink_info__pulse(ma_context* pContext, const char* pDeviceName, ma_pa_sink_info* pSinkInfo)
+{
+    ma_pa_operation* pOperation;
+
+    pOperation = ((ma_pa_context_get_sink_info_by_name_proc)pContext->pulse.pa_context_get_sink_info_by_name)((ma_pa_context*)pContext->pulse.pPulseContext, pDeviceName, ma_device_sink_info_callback, pSinkInfo);
+    if (pOperation == NULL) {
+        return MA_ERROR;    /* libpulse failed to create the operation. */
+    }
+
+    return ma_wait_for_operation_and_unref__pulse(pContext, pContext->pulse.pMainLoop, pOperation);
+}
+
+/*
+Synchronously retrieves info for the named source (NULL = default source) into pSourceInfo.
+*/
+static ma_result ma_context_get_source_info__pulse(ma_context* pContext, const char* pDeviceName, ma_pa_source_info* pSourceInfo)
+{
+    ma_pa_operation* pOperation;
+
+    pOperation = ((ma_pa_context_get_source_info_by_name_proc)pContext->pulse.pa_context_get_source_info_by_name)((ma_pa_context*)pContext->pulse.pPulseContext, pDeviceName, ma_device_source_info_callback, pSourceInfo);
+    if (pOperation == NULL) {
+        return MA_ERROR;    /* libpulse failed to create the operation. */
+    }
+
+    return ma_wait_for_operation_and_unref__pulse(pContext, pContext->pulse.pMainLoop, pOperation);
+}
+
+/*
+Retrieves the PulseAudio index of the default sink (playback) or source (capture). *pIndex is
+set to (ma_uint32)-1 up front so it holds a sentinel on failure or for unsupported device types.
+*/
+static ma_result ma_context_get_default_device_index__pulse(ma_context* pContext, ma_device_type deviceType, ma_uint32* pIndex)
+{
+    ma_result result;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pIndex != NULL);
+
+    if (pIndex != NULL) {
+        *pIndex = (ma_uint32)-1;
+    }
+
+    if (deviceType == ma_device_type_playback) {
+        ma_pa_sink_info defaultSinkInfo;
+
+        /* Passing NULL for the device name gives us the default sink. */
+        result = ma_context_get_sink_info__pulse(pContext, NULL, &defaultSinkInfo);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+
+        if (pIndex != NULL) {
+            *pIndex = defaultSinkInfo.index;
+        }
+    } else if (deviceType == ma_device_type_capture) {
+        ma_pa_source_info defaultSourceInfo;
+
+        /* Passing NULL for the device name gives us the default source. */
+        result = ma_context_get_source_info__pulse(pContext, NULL, &defaultSourceInfo);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+
+        if (pIndex != NULL) {
+            *pIndex = defaultSourceInfo.index;
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+
+/* State shared between ma_context_enumerate_devices__pulse() and its sink/source callbacks. */
+typedef struct
+{
+    ma_context* pContext;
+    ma_enum_devices_callback_proc callback;
+    void* pUserData;
+    ma_bool32 isTerminated;                 /* Set once the user's callback asks to stop enumerating. */
+    ma_uint32 defaultDeviceIndexPlayback;   /* PulseAudio index of the default sink, or (ma_uint32)-1. */
+    ma_uint32 defaultDeviceIndexCapture;    /* PulseAudio index of the default source, or (ma_uint32)-1. */
+} ma_context_enumerate_devices_callback_data__pulse;
+
+/*
+pa_sink_info_cb_t callback fired once per sink during enumeration. Translates the sink into a
+ma_device_info and forwards it to the user's callback; stops forwarding once the user returns false.
+*/
+static void ma_context_enumerate_devices_sink_callback__pulse(ma_pa_context* pPulseContext, const ma_pa_sink_info* pSinkInfo, int endOfList, void* pUserData)
+{
+    ma_context_enumerate_devices_callback_data__pulse* pData = (ma_context_enumerate_devices_callback_data__pulse*)pUserData;
+    ma_device_info info;
+    ma_bool32 cbResult;
+
+    MA_ASSERT(pData != NULL);
+
+    /* Nothing to do for the terminating invocation, or once the user has asked to stop. */
+    if (endOfList || pData->isTerminated) {
+        return;
+    }
+
+    MA_ZERO_OBJECT(&info);
+
+    /* The name from PulseAudio is the ID for miniaudio. */
+    if (pSinkInfo->name != NULL) {
+        ma_strncpy_s(info.id.pulse, sizeof(info.id.pulse), pSinkInfo->name, (size_t)-1);
+    }
+
+    /* The description from PulseAudio is the name for miniaudio. */
+    if (pSinkInfo->description != NULL) {
+        ma_strncpy_s(info.name, sizeof(info.name), pSinkInfo->description, (size_t)-1);
+    }
+
+    if (pSinkInfo->index == pData->defaultDeviceIndexPlayback) {
+        info.isDefault = MA_TRUE;
+    }
+
+    cbResult = pData->callback(pData->pContext, ma_device_type_playback, &info, pData->pUserData);
+    pData->isTerminated = !cbResult;
+
+    (void)pPulseContext; /* Unused. */
+}
+
+/*
+pa_source_info_cb_t callback fired once per source during enumeration. Translates the source into
+a ma_device_info and forwards it to the user's callback; stops forwarding once the user returns false.
+*/
+static void ma_context_enumerate_devices_source_callback__pulse(ma_pa_context* pPulseContext, const ma_pa_source_info* pSourceInfo, int endOfList, void* pUserData)
+{
+    ma_context_enumerate_devices_callback_data__pulse* pData = (ma_context_enumerate_devices_callback_data__pulse*)pUserData;
+    ma_device_info info;
+    ma_bool32 cbResult;
+
+    MA_ASSERT(pData != NULL);
+
+    /* Nothing to do for the terminating invocation, or once the user has asked to stop. */
+    if (endOfList || pData->isTerminated) {
+        return;
+    }
+
+    MA_ZERO_OBJECT(&info);
+
+    /* The name from PulseAudio is the ID for miniaudio. */
+    if (pSourceInfo->name != NULL) {
+        ma_strncpy_s(info.id.pulse, sizeof(info.id.pulse), pSourceInfo->name, (size_t)-1);
+    }
+
+    /* The description from PulseAudio is the name for miniaudio. */
+    if (pSourceInfo->description != NULL) {
+        ma_strncpy_s(info.name, sizeof(info.name), pSourceInfo->description, (size_t)-1);
+    }
+
+    if (pSourceInfo->index == pData->defaultDeviceIndexCapture) {
+        info.isDefault = MA_TRUE;
+    }
+
+    cbResult = pData->callback(pData->pContext, ma_device_type_capture, &info, pData->pUserData);
+    pData->isTerminated = !cbResult;
+
+    (void)pPulseContext; /* Unused. */
+}
+
+/*
+Enumerates playback (sink) then capture (source) devices, invoking `callback` once per device.
+Enumeration stops early if the callback returns false. Default-device indices are looked up
+first so each reported device can carry an accurate isDefault flag; failures there are ignored
+since the worst outcome is an unset flag.
+*/
+static ma_result ma_context_enumerate_devices__pulse(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+{
+    ma_result result = MA_SUCCESS;
+    ma_context_enumerate_devices_callback_data__pulse callbackData;
+    ma_pa_operation* pOP = NULL;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(callback != NULL);
+
+    callbackData.pContext = pContext;
+    callbackData.callback = callback;
+    callbackData.pUserData = pUserData;
+    callbackData.isTerminated = MA_FALSE;
+    callbackData.defaultDeviceIndexPlayback = (ma_uint32)-1;
+    callbackData.defaultDeviceIndexCapture  = (ma_uint32)-1;
+
+    /* We need to get the index of the default devices. Failures are ignored; the indices just stay at the (ma_uint32)-1 sentinel. */
+    ma_context_get_default_device_index__pulse(pContext, ma_device_type_playback, &callbackData.defaultDeviceIndexPlayback);
+    ma_context_get_default_device_index__pulse(pContext, ma_device_type_capture,  &callbackData.defaultDeviceIndexCapture);
+
+    /* Playback. */
+    if (!callbackData.isTerminated) {
+        pOP = ((ma_pa_context_get_sink_info_list_proc)pContext->pulse.pa_context_get_sink_info_list)((ma_pa_context*)(pContext->pulse.pPulseContext), ma_context_enumerate_devices_sink_callback__pulse, &callbackData);
+        if (pOP == NULL) {
+            return MA_ERROR;
+        }
+
+        /* Use the shared wait-and-unref helper for consistency with ma_context_get_sink_info__pulse(). */
+        result = ma_wait_for_operation_and_unref__pulse(pContext, pContext->pulse.pMainLoop, pOP);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+    }
+
+    /* Capture. */
+    if (!callbackData.isTerminated) {
+        pOP = ((ma_pa_context_get_source_info_list_proc)pContext->pulse.pa_context_get_source_info_list)((ma_pa_context*)(pContext->pulse.pPulseContext), ma_context_enumerate_devices_source_callback__pulse, &callbackData);
+        if (pOP == NULL) {
+            return MA_ERROR;
+        }
+
+        result = ma_wait_for_operation_and_unref__pulse(pContext, pContext->pulse.pMainLoop, pOP);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+    }
+
+    return result;
+}
+
+
+/* State passed through pUserData to the ma_context_get_device_info_*_callback__pulse callbacks. */
+typedef struct
+{
+    ma_device_info* pDeviceInfo;    /* Output. Filled out by the callback. */
+    ma_uint32 defaultDeviceIndex;   /* PulseAudio index of the default device; used by the callback to set isDefault. */
+    ma_bool32 foundDevice;          /* Set to MA_TRUE by the callback when a device was actually reported. */
+} ma_context_get_device_info_callback_data__pulse;
+
+static void ma_context_get_device_info_sink_callback__pulse(ma_pa_context* pPulseContext, const ma_pa_sink_info* pInfo, int endOfList, void* pUserData)
+{
+    ma_context_get_device_info_callback_data__pulse* pCallbackData = (ma_context_get_device_info_callback_data__pulse*)pUserData;
+    ma_device_info* pDeviceInfo;
+
+    (void)pPulseContext; /* Unused. */
+
+    /* The terminating entry of the list carries no device information. */
+    if (endOfList > 0) {
+        return;
+    }
+
+    MA_ASSERT(pCallbackData != NULL);
+    pCallbackData->foundDevice = MA_TRUE;
+
+    pDeviceInfo = pCallbackData->pDeviceInfo;
+
+    /* PulseAudio's name maps to miniaudio's device ID; the description maps to miniaudio's device name. */
+    if (pInfo->name != NULL) {
+        ma_strncpy_s(pDeviceInfo->id.pulse, sizeof(pDeviceInfo->id.pulse), pInfo->name, (size_t)-1);
+    }
+    if (pInfo->description != NULL) {
+        ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), pInfo->description, (size_t)-1);
+    }
+
+    /*
+    We're just reporting a single data format here. I think technically PulseAudio might support
+    all formats, but I don't trust that PulseAudio will do *anything* right, so I'm just going to
+    report the "native" device format.
+    */
+    pDeviceInfo->nativeDataFormats[0].format = ma_format_from_pulse(pInfo->sample_spec.format);
+    pDeviceInfo->nativeDataFormats[0].channels = pInfo->sample_spec.channels;
+    pDeviceInfo->nativeDataFormats[0].sampleRate = pInfo->sample_spec.rate;
+    pDeviceInfo->nativeDataFormats[0].flags = 0;
+    pDeviceInfo->nativeDataFormatCount = 1;
+
+    if (pInfo->index == pCallbackData->defaultDeviceIndex) {
+        pDeviceInfo->isDefault = MA_TRUE;
+    }
+}
+
+/* Source (capture) counterpart of ma_context_get_device_info_sink_callback__pulse. Fills out pData->pDeviceInfo from the reported source. */
+static void ma_context_get_device_info_source_callback__pulse(ma_pa_context* pPulseContext, const ma_pa_source_info* pInfo, int endOfList, void* pUserData)
+{
+    ma_context_get_device_info_callback_data__pulse* pData = (ma_context_get_device_info_callback_data__pulse*)pUserData;
+
+    /* The terminating entry of the list carries no device information. */
+    if (endOfList > 0) {
+        return;
+    }
+
+    MA_ASSERT(pData != NULL);
+    pData->foundDevice = MA_TRUE;
+
+    /* PulseAudio's name maps to miniaudio's device ID; the description maps to miniaudio's device name. */
+    if (pInfo->name != NULL) {
+        ma_strncpy_s(pData->pDeviceInfo->id.pulse, sizeof(pData->pDeviceInfo->id.pulse), pInfo->name, (size_t)-1);
+    }
+
+    if (pInfo->description != NULL) {
+        ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), pInfo->description, (size_t)-1);
+    }
+
+    /*
+    We're just reporting a single data format here. I think technically PulseAudio might support
+    all formats, but I don't trust that PulseAudio will do *anything* right, so I'm just going to
+    report the "native" device format.
+    */
+    pData->pDeviceInfo->nativeDataFormats[0].format = ma_format_from_pulse(pInfo->sample_spec.format);
+    pData->pDeviceInfo->nativeDataFormats[0].channels = pInfo->sample_spec.channels;
+    pData->pDeviceInfo->nativeDataFormats[0].sampleRate = pInfo->sample_spec.rate;
+    pData->pDeviceInfo->nativeDataFormats[0].flags = 0;
+    pData->pDeviceInfo->nativeDataFormatCount = 1;
+
+    if (pData->defaultDeviceIndex == pInfo->index) {
+        pData->pDeviceInfo->isDefault = MA_TRUE;
+    }
+
+    (void)pPulseContext; /* Unused. */
+}
+
+/*
+Retrieves information about a single device. A NULL pDeviceID selects the default device
+(PulseAudio treats a NULL name as the default sink/source). Returns MA_NO_DEVICE when the
+info callback never fired for an actual device.
+*/
+static ma_result ma_context_get_device_info__pulse(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo)
+{
+    ma_result result = MA_SUCCESS;
+    ma_context_get_device_info_callback_data__pulse callbackData;
+    ma_pa_operation* pOP = NULL;
+    const char* pDeviceName = NULL;
+
+    MA_ASSERT(pContext != NULL);
+
+    callbackData.pDeviceInfo = pDeviceInfo;
+    callbackData.foundDevice = MA_FALSE;
+
+    if (pDeviceID != NULL) {
+        pDeviceName = pDeviceID->pulse;
+    } else {
+        pDeviceName = NULL;
+    }
+
+    /* NOTE(review): result is overwritten below, so a failure here is effectively ignored; presumably defaultDeviceIndex is still written on failure — confirm against ma_context_get_default_device_index__pulse. */
+    result = ma_context_get_default_device_index__pulse(pContext, deviceType, &callbackData.defaultDeviceIndex);
+
+    if (deviceType == ma_device_type_playback) {
+        pOP = ((ma_pa_context_get_sink_info_by_name_proc)pContext->pulse.pa_context_get_sink_info_by_name)((ma_pa_context*)(pContext->pulse.pPulseContext), pDeviceName, ma_context_get_device_info_sink_callback__pulse, &callbackData);
+    } else {
+        pOP = ((ma_pa_context_get_source_info_by_name_proc)pContext->pulse.pa_context_get_source_info_by_name)((ma_pa_context*)(pContext->pulse.pPulseContext), pDeviceName, ma_context_get_device_info_source_callback__pulse, &callbackData);
+    }
+
+    if (pOP != NULL) {
+        ma_wait_for_operation_and_unref__pulse(pContext, pContext->pulse.pMainLoop, pOP);
+    } else {
+        result = MA_ERROR;
+        goto done;
+    }
+
+    /* The callback sets foundDevice only when it was invoked with real device data. */
+    if (!callbackData.foundDevice) {
+        result = MA_NO_DEVICE;
+        goto done;
+    }
+
+done:
+    return result;
+}
+
+/*
+Uninitializes a PulseAudio device. Tears down resources in reverse order of creation:
+streams first (disconnect then unref), then the duplex ring buffer, and finally the
+per-device PulseAudio context and mainloop.
+*/
+static ma_result ma_device_uninit__pulse(ma_device* pDevice)
+{
+    ma_context* pContext;
+
+    MA_ASSERT(pDevice != NULL);
+
+    pContext = pDevice->pContext;
+    MA_ASSERT(pContext != NULL);
+
+    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+        ((ma_pa_stream_disconnect_proc)pContext->pulse.pa_stream_disconnect)((ma_pa_stream*)pDevice->pulse.pStreamCapture);
+        ((ma_pa_stream_unref_proc)pContext->pulse.pa_stream_unref)((ma_pa_stream*)pDevice->pulse.pStreamCapture);
+    }
+
+    if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+        ((ma_pa_stream_disconnect_proc)pContext->pulse.pa_stream_disconnect)((ma_pa_stream*)pDevice->pulse.pStreamPlayback);
+        ((ma_pa_stream_unref_proc)pContext->pulse.pa_stream_unref)((ma_pa_stream*)pDevice->pulse.pStreamPlayback);
+    }
+
+    /* The duplex ring buffer is only initialized for duplex devices (see ma_device_init__pulse). */
+    if (pDevice->type == ma_device_type_duplex) {
+        ma_duplex_rb_uninit(&pDevice->duplexRB);
+    }
+
+    /* Each device owns its own PA context and mainloop; release them last. */
+    ((ma_pa_context_disconnect_proc)pContext->pulse.pa_context_disconnect)((ma_pa_context*)pDevice->pulse.pPulseContext);
+    ((ma_pa_context_unref_proc)pContext->pulse.pa_context_unref)((ma_pa_context*)pDevice->pulse.pPulseContext);
+    ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)((ma_pa_mainloop*)pDevice->pulse.pMainLoop);
+
+    return MA_SUCCESS;
+}
+
+/* Builds a pa_buffer_attr describing a buffer of `periods` periods of `periodSizeInFrames` frames each, in the format described by `ss`. */
+static ma_pa_buffer_attr ma_device__pa_buffer_attr_new(ma_uint32 periodSizeInFrames, ma_uint32 periods, const ma_pa_sample_spec* ss)
+{
+    ma_pa_buffer_attr attr;
+    ma_uint32 bytesPerFrame = ma_get_bytes_per_frame(ma_format_from_pulse(ss->format), ss->channels);
+    ma_uint32 bytesPerPeriod = periodSizeInFrames * bytesPerFrame;
+
+    attr.maxlength = bytesPerPeriod * periods;
+    attr.tlength = bytesPerPeriod;          /* One period's worth for playback. */
+    attr.prebuf = (ma_uint32)-1;            /* (ma_uint32)-1 = let PulseAudio pick. */
+    attr.minreq = (ma_uint32)-1;
+    attr.fragsize = bytesPerPeriod;         /* One period's worth for capture. */
+
+    return attr;
+}
+
+/*
+Creates a new PulseAudio stream on the device's PA context. If pStreamName is NULL a name of
+the form "miniaudio:<counter>" is generated.
+
+FIX: the counter was previously written at offset 8 (with a comment wrongly claiming
+strlen("miniaudio:") == 8), which overwrote the last two characters of the "miniaudio:"
+prefix. The prefix is 10 characters long, so the number must be appended at offset 10.
+*/
+static ma_pa_stream* ma_device__pa_stream_new__pulse(ma_device* pDevice, const char* pStreamName, const ma_pa_sample_spec* ss, const ma_pa_channel_map* cmap)
+{
+    static int g_StreamCounter = 0;
+    char actualStreamName[256];
+
+    if (pStreamName != NULL) {
+        ma_strncpy_s(actualStreamName, sizeof(actualStreamName), pStreamName, (size_t)-1);
+    } else {
+        ma_strcpy_s(actualStreamName, sizeof(actualStreamName), "miniaudio:");
+        ma_itoa_s(g_StreamCounter, actualStreamName + 10, sizeof(actualStreamName)-10, 10); /* 10 = strlen("miniaudio:") */
+    }
+    g_StreamCounter += 1;
+
+    return ((ma_pa_stream_new_proc)pDevice->pContext->pulse.pa_stream_new)((ma_pa_context*)pDevice->pulse.pPulseContext, actualStreamName, ss, cmap);
+}
+
+
+/*
+PulseAudio read callback for capture streams. Drains the data PulseAudio has made available
+by repeatedly mapping (pa_stream_peek), handing the frames to miniaudio, then unmapping
+(pa_stream_drop).
+*/
+static void ma_device_on_read__pulse(ma_pa_stream* pStream, size_t byteCount, void* pUserData)
+{
+    ma_device* pDevice = (ma_device*)pUserData;
+    ma_uint32 bpf;              /* Bytes per frame in the internal capture format. */
+    ma_uint32 deviceState;
+    ma_uint64 frameCount;       /* Total frames PulseAudio reported as available. */
+    ma_uint64 framesProcessed;
+
+    MA_ASSERT(pDevice != NULL);
+
+    /*
+    Don't do anything if the device isn't initialized yet. Yes, this can happen because PulseAudio
+    can fire this callback before the stream has even started. Ridiculous.
+    */
+    deviceState = ma_device_get_state(pDevice);
+    if (deviceState != ma_device_state_starting && deviceState != ma_device_state_started) {
+        return;
+    }
+
+    bpf = ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+    MA_ASSERT(bpf > 0);
+
+    frameCount = byteCount / bpf;
+    framesProcessed = 0;
+
+    /* Note: unlike the guard above, the loop only continues while fully started (not starting). */
+    while (ma_device_get_state(pDevice) == ma_device_state_started && framesProcessed < frameCount) {
+        const void* pMappedPCMFrames;
+        size_t bytesMapped;
+        ma_uint64 framesMapped;
+
+        int pulseResult = ((ma_pa_stream_peek_proc)pDevice->pContext->pulse.pa_stream_peek)(pStream, &pMappedPCMFrames, &bytesMapped);
+        if (pulseResult < 0) {
+            break; /* Failed to map. Abort. */
+        }
+
+        framesMapped = bytesMapped / bpf;
+        if (framesMapped > 0) {
+            if (pMappedPCMFrames != NULL) {
+                ma_device_handle_backend_data_callback(pDevice, NULL, pMappedPCMFrames, framesMapped);
+            } else {
+                /* It's a hole (NULL buffer with a non-zero length). Nothing to deliver, but it still needs to be dropped below. */
+                ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[PulseAudio] ma_device_on_read__pulse: Hole.\n");
+            }
+
+            /* Whether real data or a hole, the mapped region must be released before the next peek. */
+            pulseResult = ((ma_pa_stream_drop_proc)pDevice->pContext->pulse.pa_stream_drop)(pStream);
+            if (pulseResult < 0) {
+                break; /* Failed to drop the buffer. */
+            }
+
+            framesProcessed += framesMapped;
+
+        } else {
+            /* Nothing was mapped. Just abort. */
+            break;
+        }
+    }
+}
+
+/*
+Writes one chunk of playback data to the given PulseAudio stream: queries the writable size,
+maps that much of the buffer with pa_stream_begin_write, fills it (from the data callback, or
+with silence when the device isn't running), and commits it with pa_stream_write.
+
+pFramesProcessed is always written on exit (0 on failure or when nothing was writable).
+
+FIX: silence was previously generated using the external format/channels
+(pDevice->playback.format/channels). The mapped buffer is in the device's *internal* format —
+bpf below is computed from internalFormat/internalChannels — so silencing with the external
+format writes the wrong number of bytes whenever the two differ.
+*/
+static ma_result ma_device_write_to_stream__pulse(ma_device* pDevice, ma_pa_stream* pStream, ma_uint64* pFramesProcessed)
+{
+    ma_result result = MA_SUCCESS;
+    ma_uint64 framesProcessed = 0;
+    size_t bytesMapped;
+    ma_uint32 bpf;          /* Bytes per frame in the internal playback format. */
+    ma_uint32 deviceState;
+
+    MA_ASSERT(pDevice != NULL);
+    MA_ASSERT(pStream != NULL);
+
+    bpf = ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+    MA_ASSERT(bpf > 0);
+
+    deviceState = ma_device_get_state(pDevice);
+
+    bytesMapped = ((ma_pa_stream_writable_size_proc)pDevice->pContext->pulse.pa_stream_writable_size)(pStream);
+    if (bytesMapped != (size_t)-1) {    /* (size_t)-1 is pa_stream_writable_size()'s error sentinel. */
+        if (bytesMapped > 0) {
+            ma_uint64 framesMapped;
+            void* pMappedPCMFrames;
+            int pulseResult = ((ma_pa_stream_begin_write_proc)pDevice->pContext->pulse.pa_stream_begin_write)(pStream, &pMappedPCMFrames, &bytesMapped);
+            if (pulseResult < 0) {
+                result = ma_result_from_pulse(pulseResult);
+                goto done;
+            }
+
+            framesMapped = bytesMapped / bpf;
+
+            if (deviceState == ma_device_state_started || deviceState == ma_device_state_starting) { /* Check for starting state just in case this is being used to do the initial fill. */
+                ma_device_handle_backend_data_callback(pDevice, pMappedPCMFrames, NULL, framesMapped);
+            } else {
+                /* Device is not started. Write silence, in the internal format the stream was opened with. */
+                ma_silence_pcm_frames(pMappedPCMFrames, framesMapped, pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+            }
+
+            pulseResult = ((ma_pa_stream_write_proc)pDevice->pContext->pulse.pa_stream_write)(pStream, pMappedPCMFrames, bytesMapped, NULL, 0, MA_PA_SEEK_RELATIVE);
+            if (pulseResult < 0) {
+                result = ma_result_from_pulse(pulseResult);
+                goto done; /* Failed to write data to stream. */
+            }
+
+            framesProcessed += framesMapped;
+        } else {
+            result = MA_SUCCESS; /* No data available for writing. */
+            goto done;
+        }
+    } else {
+        result = MA_ERROR; /* Failed to retrieve the writable size. Abort. */
+        goto done;
+    }
+
+done:
+    if (pFramesProcessed != NULL) {
+        *pFramesProcessed = framesProcessed;
+    }
+
+    return result;
+}
+
+/*
+PulseAudio write callback for playback streams. PulseAudio tells us how many bytes it wants
+(byteCount); we feed it in chunks via ma_device_write_to_stream__pulse until that many frames
+have been written or the device stops.
+*/
+static void ma_device_on_write__pulse(ma_pa_stream* pStream, size_t byteCount, void* pUserData)
+{
+    ma_device* pDevice = (ma_device*)pUserData;
+    ma_uint32 bpf;              /* Bytes per frame in the internal playback format. */
+    ma_uint64 frameCount;       /* Total frames requested by PulseAudio. */
+    ma_uint64 framesProcessed;
+    ma_uint32 deviceState;
+    ma_result result;
+
+    MA_ASSERT(pDevice != NULL);
+
+    /*
+    Don't do anything if the device isn't initialized yet. Yes, this can happen because PulseAudio
+    can fire this callback before the stream has even started. Ridiculous.
+    */
+    deviceState = ma_device_get_state(pDevice);
+    if (deviceState != ma_device_state_starting && deviceState != ma_device_state_started) {
+        return;
+    }
+
+    bpf = ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+    MA_ASSERT(bpf > 0);
+
+    frameCount = byteCount / bpf;
+    framesProcessed = 0;
+
+    while (framesProcessed < frameCount) {
+        ma_uint64 framesProcessedThisIteration;
+
+        /* Don't keep trying to process frames if the device isn't started. */
+        deviceState = ma_device_get_state(pDevice);
+        if (deviceState != ma_device_state_starting && deviceState != ma_device_state_started) {
+            break;
+        }
+
+        result = ma_device_write_to_stream__pulse(pDevice, pStream, &framesProcessedThisIteration);
+        if (result != MA_SUCCESS) {
+            break;
+        }
+
+        framesProcessed += framesProcessedThisIteration;
+    }
+}
+
+/* Suspended-state callback. Translates PulseAudio cork/suspend notifications into miniaudio stopped/started notifications. */
+static void ma_device_on_suspended__pulse(ma_pa_stream* pStream, void* pUserData)
+{
+    ma_device* pDevice = (ma_device*)pUserData;
+    int suspended = ((ma_pa_stream_is_suspended_proc)pDevice->pContext->pulse.pa_stream_is_suspended)(pStream);
+
+    ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[Pulse] Device suspended state changed. pa_stream_is_suspended() returned %d.\n", suspended);
+
+    /* A negative result means the suspended state couldn't be retrieved. */
+    if (suspended < 0) {
+        return;
+    }
+
+    if (suspended == 1) {
+        ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[Pulse] Device suspended state changed. Suspended.\n");
+        ma_device__on_notification_stopped(pDevice);
+    } else {
+        ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[Pulse] Device suspended state changed. Resumed.\n");
+        ma_device__on_notification_started(pDevice);
+    }
+}
+
+/* Moved-stream callback. Fired by PulseAudio when the stream is moved to another sink/source; forwards as a reroute notification. */
+static void ma_device_on_rerouted__pulse(ma_pa_stream* pStream, void* pUserData)
+{
+    (void)pStream; /* Unused. */
+
+    ma_device__on_notification_rerouted((ma_device*)pUserData);
+}
+
+static ma_result ma_device_init__pulse(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture)
+{
+    /*
+    Notes for PulseAudio:
+
+    - We're always using native format/channels/rate regardless of whether or not PulseAudio
+      supports the format directly through their own data conversion system. I'm doing this to
+      reduce as much variability from the PulseAudio side as possible because it seems to be
+      extremely unreliable at everything it does.
+
+    - When both the period size in frames and milliseconds are 0, we default to miniaudio's
+      default buffer sizes rather than leaving it up to PulseAudio because I don't trust
+      PulseAudio to give us any kind of reasonable latency by default.
+
+    - Do not ever, *ever* forget to use MA_PA_STREAM_ADJUST_LATENCY. If you don't specify this
+      flag, capture mode will just not work properly until you open another PulseAudio app.
+
+    FIX: in the capture branch below, the period count derived from the negotiated buffer
+    attributes was being stored into pDescriptorPlayback instead of pDescriptorCapture. That
+    left the capture descriptor with a stale period count (used immediately afterwards to
+    compute periodSizeInFrames) and clobbered the playback descriptor for capture-only devices.
+    */
+
+    ma_result result = MA_SUCCESS;
+    int error = 0;
+    const char* devPlayback = NULL;     /* NULL = default device, as far as PulseAudio is concerned. */
+    const char* devCapture = NULL;
+    ma_format format = ma_format_unknown;
+    ma_uint32 channels = 0;
+    ma_uint32 sampleRate = 0;
+    ma_pa_sink_info sinkInfo;
+    ma_pa_source_info sourceInfo;
+    ma_pa_sample_spec ss;
+    ma_pa_channel_map cmap;
+    ma_pa_buffer_attr attr;
+    const ma_pa_sample_spec* pActualSS = NULL;
+    const ma_pa_channel_map* pActualCMap = NULL;
+    const ma_pa_buffer_attr* pActualAttr = NULL;
+    ma_uint32 iChannel;
+    ma_pa_stream_flags_t streamFlags;
+
+    MA_ASSERT(pDevice != NULL);
+    MA_ZERO_OBJECT(&pDevice->pulse);
+
+    if (pConfig->deviceType == ma_device_type_loopback) {
+        return MA_DEVICE_TYPE_NOT_SUPPORTED;
+    }
+
+    /* No exclusive mode with the PulseAudio backend. */
+    if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.shareMode == ma_share_mode_exclusive) ||
+        ((pConfig->deviceType == ma_device_type_capture  || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.shareMode  == ma_share_mode_exclusive)) {
+        return MA_SHARE_MODE_NOT_SUPPORTED;
+    }
+
+    if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+        if (pDescriptorPlayback->pDeviceID != NULL) {
+            devPlayback = pDescriptorPlayback->pDeviceID->pulse;
+        }
+
+        format = pDescriptorPlayback->format;
+        channels = pDescriptorPlayback->channels;
+        sampleRate = pDescriptorPlayback->sampleRate;
+    }
+
+    if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
+        if (pDescriptorCapture->pDeviceID != NULL) {
+            devCapture = pDescriptorCapture->pDeviceID->pulse;
+        }
+
+        format = pDescriptorCapture->format;
+        channels = pDescriptorCapture->channels;
+        sampleRate = pDescriptorCapture->sampleRate;
+    }
+
+
+    /* Each device gets its own mainloop and context so devices can be driven independently. */
+    result = ma_init_pa_mainloop_and_pa_context__pulse(pDevice->pContext, pDevice->pContext->pulse.pApplicationName, pDevice->pContext->pulse.pServerName, MA_FALSE, &pDevice->pulse.pMainLoop, &pDevice->pulse.pPulseContext);
+    if (result != MA_SUCCESS) {
+        ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to initialize PA mainloop and context for device.\n");
+        return result;
+    }
+
+    if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
+        result = ma_context_get_source_info__pulse(pDevice->pContext, devCapture, &sourceInfo);
+        if (result != MA_SUCCESS) {
+            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to retrieve source info for capture device.");
+            goto on_error0;
+        }
+
+        ss = sourceInfo.sample_spec;
+        cmap = sourceInfo.channel_map;
+
+        /* Sanitize the device-reported sample spec before using it. */
+        if (ma_format_from_pulse(ss.format) == ma_format_unknown) {
+            if (ma_is_little_endian()) {
+                ss.format = MA_PA_SAMPLE_FLOAT32LE;
+            } else {
+                ss.format = MA_PA_SAMPLE_FLOAT32BE;
+            }
+            ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] sample_spec.format not supported by miniaudio. Defaulting to PA_SAMPLE_FLOAT32.\n");
+        }
+        if (ss.rate == 0) {
+            ss.rate = MA_DEFAULT_SAMPLE_RATE;
+            ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] sample_spec.rate = 0. Defaulting to %d.\n", ss.rate);
+        }
+        if (ss.channels == 0) {
+            ss.channels = MA_DEFAULT_CHANNELS;
+            ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] sample_spec.channels = 0. Defaulting to %d.\n", ss.channels);
+        }
+
+        /* We now have enough information to calculate our actual period size in frames. */
+        pDescriptorCapture->periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_descriptor(pDescriptorCapture, ss.rate, pConfig->performanceProfile);
+
+        attr = ma_device__pa_buffer_attr_new(pDescriptorCapture->periodSizeInFrames, pDescriptorCapture->periodCount, &ss);
+        ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] Capture attr: maxlength=%d, tlength=%d, prebuf=%d, minreq=%d, fragsize=%d; periodSizeInFrames=%d\n", attr.maxlength, attr.tlength, attr.prebuf, attr.minreq, attr.fragsize, pDescriptorCapture->periodSizeInFrames);
+
+        pDevice->pulse.pStreamCapture = ma_device__pa_stream_new__pulse(pDevice, pConfig->pulse.pStreamNameCapture, &ss, &cmap);
+        if (pDevice->pulse.pStreamCapture == NULL) {
+            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to create PulseAudio capture stream.\n");
+            result = MA_ERROR;
+            goto on_error0;
+        }
+
+
+        /* The callback needs to be set before connecting the stream. */
+        ((ma_pa_stream_set_read_callback_proc)pDevice->pContext->pulse.pa_stream_set_read_callback)((ma_pa_stream*)pDevice->pulse.pStreamCapture, ma_device_on_read__pulse, pDevice);
+
+        /* State callback for checking when the device has been corked. */
+        ((ma_pa_stream_set_suspended_callback_proc)pDevice->pContext->pulse.pa_stream_set_suspended_callback)((ma_pa_stream*)pDevice->pulse.pStreamCapture, ma_device_on_suspended__pulse, pDevice);
+
+        /* Rerouting notification. */
+        ((ma_pa_stream_set_moved_callback_proc)pDevice->pContext->pulse.pa_stream_set_moved_callback)((ma_pa_stream*)pDevice->pulse.pStreamCapture, ma_device_on_rerouted__pulse, pDevice);
+
+
+        /* Connect after we've got all of our internal state set up. */
+        streamFlags = MA_PA_STREAM_START_CORKED | MA_PA_STREAM_ADJUST_LATENCY | MA_PA_STREAM_FIX_FORMAT | MA_PA_STREAM_FIX_RATE | MA_PA_STREAM_FIX_CHANNELS;
+        if (devCapture != NULL) {
+            streamFlags |= MA_PA_STREAM_DONT_MOVE;  /* An explicit device was requested; pin the stream to it. */
+        }
+
+        error = ((ma_pa_stream_connect_record_proc)pDevice->pContext->pulse.pa_stream_connect_record)((ma_pa_stream*)pDevice->pulse.pStreamCapture, devCapture, &attr, streamFlags);
+        if (error != MA_PA_OK) {
+            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to connect PulseAudio capture stream.");
+            result = ma_result_from_pulse(error);
+            goto on_error1;
+        }
+
+        result = ma_wait_for_pa_stream_to_connect__pulse(pDevice->pContext, pDevice->pulse.pMainLoop, (ma_pa_stream*)pDevice->pulse.pStreamCapture);
+        if (result != MA_SUCCESS) {
+            goto on_error2;
+        }
+
+
+        /* Internal format. Query the spec the stream was actually opened with. */
+        pActualSS = ((ma_pa_stream_get_sample_spec_proc)pDevice->pContext->pulse.pa_stream_get_sample_spec)((ma_pa_stream*)pDevice->pulse.pStreamCapture);
+        if (pActualSS != NULL) {
+            ss = *pActualSS;
+            ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] Capture sample spec: format=%s, channels=%d, rate=%d\n", ma_get_format_name(ma_format_from_pulse(ss.format)), ss.channels, ss.rate);
+        } else {
+            ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] Failed to retrieve capture sample spec.\n");
+        }
+
+        pDescriptorCapture->format = ma_format_from_pulse(ss.format);
+        pDescriptorCapture->channels = ss.channels;
+        pDescriptorCapture->sampleRate = ss.rate;
+
+        if (pDescriptorCapture->format == ma_format_unknown || pDescriptorCapture->channels == 0 || pDescriptorCapture->sampleRate == 0) {
+            ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Capture sample spec is invalid. Device unusable by miniaudio. format=%s, channels=%d, sampleRate=%d.\n", ma_get_format_name(pDescriptorCapture->format), pDescriptorCapture->channels, pDescriptorCapture->sampleRate);
+            result = MA_ERROR;
+            goto on_error4;
+        }
+
+        /* Internal channel map. */
+        pActualCMap = ((ma_pa_stream_get_channel_map_proc)pDevice->pContext->pulse.pa_stream_get_channel_map)((ma_pa_stream*)pDevice->pulse.pStreamCapture);
+        if (pActualCMap != NULL) {
+            cmap = *pActualCMap;
+        }
+
+        for (iChannel = 0; iChannel < pDescriptorCapture->channels; ++iChannel) {
+            pDescriptorCapture->channelMap[iChannel] = ma_channel_position_from_pulse(cmap.map[iChannel]);
+        }
+
+
+        /* Buffer. Derive period count/size from the attributes PulseAudio actually gave us. */
+        pActualAttr = ((ma_pa_stream_get_buffer_attr_proc)pDevice->pContext->pulse.pa_stream_get_buffer_attr)((ma_pa_stream*)pDevice->pulse.pStreamCapture);
+        if (pActualAttr != NULL) {
+            attr = *pActualAttr;
+        }
+
+        /* FIX: these two assignments previously targeted pDescriptorPlayback. */
+        if (attr.fragsize > 0) {
+            pDescriptorCapture->periodCount = ma_max(attr.maxlength / attr.fragsize, 1);
+        } else {
+            pDescriptorCapture->periodCount = 1;
+        }
+
+        pDescriptorCapture->periodSizeInFrames = attr.maxlength / ma_get_bytes_per_frame(pDescriptorCapture->format, pDescriptorCapture->channels) / pDescriptorCapture->periodCount;
+        ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] Capture actual attr: maxlength=%d, tlength=%d, prebuf=%d, minreq=%d, fragsize=%d; periodSizeInFrames=%d\n", attr.maxlength, attr.tlength, attr.prebuf, attr.minreq, attr.fragsize, pDescriptorCapture->periodSizeInFrames);
+    }
+
+    if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+        result = ma_context_get_sink_info__pulse(pDevice->pContext, devPlayback, &sinkInfo);
+        if (result != MA_SUCCESS) {
+            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to retrieve sink info for playback device.\n");
+            goto on_error2;
+        }
+
+        ss = sinkInfo.sample_spec;
+        cmap = sinkInfo.channel_map;
+
+        /* Sanitize the device-reported sample spec before using it. */
+        if (ma_format_from_pulse(ss.format) == ma_format_unknown) {
+            if (ma_is_little_endian()) {
+                ss.format = MA_PA_SAMPLE_FLOAT32LE;
+            } else {
+                ss.format = MA_PA_SAMPLE_FLOAT32BE;
+            }
+            ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] sample_spec.format not supported by miniaudio. Defaulting to PA_SAMPLE_FLOAT32.\n");
+        }
+        if (ss.rate == 0) {
+            ss.rate = MA_DEFAULT_SAMPLE_RATE;
+            ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] sample_spec.rate = 0. Defaulting to %d.\n", ss.rate);
+        }
+        if (ss.channels == 0) {
+            ss.channels = MA_DEFAULT_CHANNELS;
+            ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] sample_spec.channels = 0. Defaulting to %d.\n", ss.channels);
+        }
+
+        /* We now have enough information to calculate the actual buffer size in frames. */
+        pDescriptorPlayback->periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_descriptor(pDescriptorPlayback, ss.rate, pConfig->performanceProfile);
+
+        attr = ma_device__pa_buffer_attr_new(pDescriptorPlayback->periodSizeInFrames, pDescriptorPlayback->periodCount, &ss);
+
+        ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] Playback attr: maxlength=%d, tlength=%d, prebuf=%d, minreq=%d, fragsize=%d; periodSizeInFrames=%d\n", attr.maxlength, attr.tlength, attr.prebuf, attr.minreq, attr.fragsize, pDescriptorPlayback->periodSizeInFrames);
+
+        pDevice->pulse.pStreamPlayback = ma_device__pa_stream_new__pulse(pDevice, pConfig->pulse.pStreamNamePlayback, &ss, &cmap);
+        if (pDevice->pulse.pStreamPlayback == NULL) {
+            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to create PulseAudio playback stream.\n");
+            result = MA_ERROR;
+            goto on_error2;
+        }
+
+
+        /*
+        Note that this callback will be fired as soon as the stream is connected, even though it's started as corked. The callback needs to handle a
+        device state of ma_device_state_uninitialized.
+        */
+        ((ma_pa_stream_set_write_callback_proc)pDevice->pContext->pulse.pa_stream_set_write_callback)((ma_pa_stream*)pDevice->pulse.pStreamPlayback, ma_device_on_write__pulse, pDevice);
+
+        /* State callback for checking when the device has been corked. */
+        ((ma_pa_stream_set_suspended_callback_proc)pDevice->pContext->pulse.pa_stream_set_suspended_callback)((ma_pa_stream*)pDevice->pulse.pStreamPlayback, ma_device_on_suspended__pulse, pDevice);
+
+        /* Rerouting notification. */
+        ((ma_pa_stream_set_moved_callback_proc)pDevice->pContext->pulse.pa_stream_set_moved_callback)((ma_pa_stream*)pDevice->pulse.pStreamPlayback, ma_device_on_rerouted__pulse, pDevice);
+
+
+        /* Connect after we've got all of our internal state set up. */
+        streamFlags = MA_PA_STREAM_START_CORKED | MA_PA_STREAM_ADJUST_LATENCY | MA_PA_STREAM_FIX_FORMAT | MA_PA_STREAM_FIX_RATE | MA_PA_STREAM_FIX_CHANNELS;
+        if (devPlayback != NULL) {
+            streamFlags |= MA_PA_STREAM_DONT_MOVE;  /* An explicit device was requested; pin the stream to it. */
+        }
+
+        error = ((ma_pa_stream_connect_playback_proc)pDevice->pContext->pulse.pa_stream_connect_playback)((ma_pa_stream*)pDevice->pulse.pStreamPlayback, devPlayback, &attr, streamFlags, NULL, NULL);
+        if (error != MA_PA_OK) {
+            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to connect PulseAudio playback stream.");
+            result = ma_result_from_pulse(error);
+            goto on_error3;
+        }
+
+        result = ma_wait_for_pa_stream_to_connect__pulse(pDevice->pContext, pDevice->pulse.pMainLoop, (ma_pa_stream*)pDevice->pulse.pStreamPlayback);
+        if (result != MA_SUCCESS) {
+            goto on_error3;
+        }
+
+
+        /* Internal format. Query the spec the stream was actually opened with. */
+        pActualSS = ((ma_pa_stream_get_sample_spec_proc)pDevice->pContext->pulse.pa_stream_get_sample_spec)((ma_pa_stream*)pDevice->pulse.pStreamPlayback);
+        if (pActualSS != NULL) {
+            ss = *pActualSS;
+            ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] Playback sample spec: format=%s, channels=%d, rate=%d\n", ma_get_format_name(ma_format_from_pulse(ss.format)), ss.channels, ss.rate);
+        } else {
+            ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] Failed to retrieve playback sample spec.\n");
+        }
+
+        pDescriptorPlayback->format = ma_format_from_pulse(ss.format);
+        pDescriptorPlayback->channels = ss.channels;
+        pDescriptorPlayback->sampleRate = ss.rate;
+
+        if (pDescriptorPlayback->format == ma_format_unknown || pDescriptorPlayback->channels == 0 || pDescriptorPlayback->sampleRate == 0) {
+            ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Playback sample spec is invalid. Device unusable by miniaudio. format=%s, channels=%d, sampleRate=%d.\n", ma_get_format_name(pDescriptorPlayback->format), pDescriptorPlayback->channels, pDescriptorPlayback->sampleRate);
+            result = MA_ERROR;
+            goto on_error4;
+        }
+
+        /* Internal channel map. */
+        pActualCMap = ((ma_pa_stream_get_channel_map_proc)pDevice->pContext->pulse.pa_stream_get_channel_map)((ma_pa_stream*)pDevice->pulse.pStreamPlayback);
+        if (pActualCMap != NULL) {
+            cmap = *pActualCMap;
+        }
+
+        for (iChannel = 0; iChannel < pDescriptorPlayback->channels; ++iChannel) {
+            pDescriptorPlayback->channelMap[iChannel] = ma_channel_position_from_pulse(cmap.map[iChannel]);
+        }
+
+
+        /* Buffer. Derive period count/size from the attributes PulseAudio actually gave us. */
+        pActualAttr = ((ma_pa_stream_get_buffer_attr_proc)pDevice->pContext->pulse.pa_stream_get_buffer_attr)((ma_pa_stream*)pDevice->pulse.pStreamPlayback);
+        if (pActualAttr != NULL) {
+            attr = *pActualAttr;
+        }
+
+        if (attr.tlength > 0) {
+            pDescriptorPlayback->periodCount = ma_max(attr.maxlength / attr.tlength, 1);
+        } else {
+            pDescriptorPlayback->periodCount = 1;
+        }
+
+        pDescriptorPlayback->periodSizeInFrames = attr.maxlength / ma_get_bytes_per_frame(pDescriptorPlayback->format, pDescriptorPlayback->channels) / pDescriptorPlayback->periodCount;
+        ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] Playback actual attr: maxlength=%d, tlength=%d, prebuf=%d, minreq=%d, fragsize=%d; internalPeriodSizeInFrames=%d\n", attr.maxlength, attr.tlength, attr.prebuf, attr.minreq, attr.fragsize, pDescriptorPlayback->periodSizeInFrames);
+    }
+
+
+    /*
+    We need a ring buffer for handling duplex mode. We can use the main duplex ring buffer in the main
+    part of the ma_device struct. We cannot, however, depend on ma_device_init() initializing this for
+    us later on because that will only do it if it's a fully asynchronous backend - i.e. the
+    onDeviceDataLoop callback is NULL, which is not the case for PulseAudio.
+    */
+    if (pConfig->deviceType == ma_device_type_duplex) {
+        ma_format rbFormat = (format != ma_format_unknown) ? format : pDescriptorCapture->format;
+        ma_uint32 rbChannels = (channels > 0) ? channels : pDescriptorCapture->channels;
+        ma_uint32 rbSampleRate = (sampleRate > 0) ? sampleRate : pDescriptorCapture->sampleRate;
+
+        result = ma_duplex_rb_init(rbFormat, rbChannels, rbSampleRate, pDescriptorCapture->sampleRate, pDescriptorCapture->periodSizeInFrames, &pDevice->pContext->allocationCallbacks, &pDevice->duplexRB);
+        if (result != MA_SUCCESS) {
+            ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to initialize ring buffer. %s.\n", ma_result_description(result));
+            goto on_error4;
+        }
+    }
+
+    return MA_SUCCESS;
+
+
+/* Error labels tear down in reverse order of acquisition; later labels fall through to earlier ones. */
+on_error4:
+    if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+        ((ma_pa_stream_disconnect_proc)pDevice->pContext->pulse.pa_stream_disconnect)((ma_pa_stream*)pDevice->pulse.pStreamPlayback);
+    }
+on_error3:
+    if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+        ((ma_pa_stream_unref_proc)pDevice->pContext->pulse.pa_stream_unref)((ma_pa_stream*)pDevice->pulse.pStreamPlayback);
+    }
+on_error2:
+    if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
+        ((ma_pa_stream_disconnect_proc)pDevice->pContext->pulse.pa_stream_disconnect)((ma_pa_stream*)pDevice->pulse.pStreamCapture);
+    }
+on_error1:
+    if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
+        ((ma_pa_stream_unref_proc)pDevice->pContext->pulse.pa_stream_unref)((ma_pa_stream*)pDevice->pulse.pStreamCapture);
+    }
+on_error0:
+    return result;
+}
+
+
+static void ma_pulse_operation_complete_callback(ma_pa_stream* pStream, int success, void* pUserData)
+{
+    /* The user data is a pointer to an ma_bool32 that receives the operation's success flag. */
+    ma_bool32* pResult = (ma_bool32*)pUserData;
+
+    (void)pStream; /* The stream is not needed here. */
+
+    MA_ASSERT(pResult != NULL);
+    *pResult = (ma_bool32)success;
+}
+
+/*
+Corks (pauses, cork=1) or uncorks (resumes, cork=0) a single PulseAudio stream. This is
+synchronous: it blocks on the device's main loop until PulseAudio reports the result of
+the operation. Returns MA_INVALID_ARGS for duplex, MA_ERROR if the cork request fails.
+*/
+static ma_result ma_device__cork_stream__pulse(ma_device* pDevice, ma_device_type deviceType, int cork)
+{
+    ma_context* pContext = pDevice->pContext;
+    ma_bool32 wasSuccessful;
+    ma_pa_stream* pStream;
+    ma_pa_operation* pOP;
+    ma_result result;
+
+    /* This should not be called with a duplex device type. Each direction must be corked individually. */
+    if (deviceType == ma_device_type_duplex) {
+        return MA_INVALID_ARGS;
+    }
+
+    wasSuccessful = MA_FALSE;
+
+    pStream = (ma_pa_stream*)((deviceType == ma_device_type_capture) ? pDevice->pulse.pStreamCapture : pDevice->pulse.pStreamPlayback);
+    MA_ASSERT(pStream != NULL);
+
+    /* wasSuccessful is written by ma_pulse_operation_complete_callback() when the operation completes. */
+    pOP = ((ma_pa_stream_cork_proc)pContext->pulse.pa_stream_cork)(pStream, cork, ma_pulse_operation_complete_callback, &wasSuccessful);
+    if (pOP == NULL) {
+        ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to cork PulseAudio stream.");
+        return MA_ERROR;
+    }
+
+    /* Pump the main loop until the operation completes. This also unrefs pOP. */
+    result = ma_wait_for_operation_and_unref__pulse(pDevice->pContext, pDevice->pulse.pMainLoop, pOP);
+    if (result != MA_SUCCESS) {
+        ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] An error occurred while waiting for the PulseAudio stream to cork.");
+        return result;
+    }
+
+    if (!wasSuccessful) {
+        ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to %s PulseAudio stream.", (cork) ? "stop" : "start");
+        return MA_ERROR;
+    }
+
+    return MA_SUCCESS;
+}
+
+/* Starts the device by uncorking the capture and/or playback streams. */
+static ma_result ma_device_start__pulse(ma_device* pDevice)
+{
+    ma_result result;
+    ma_bool32 hasCapture;
+    ma_bool32 hasPlayback;
+
+    MA_ASSERT(pDevice != NULL);
+
+    hasCapture  = (pDevice->type == ma_device_type_capture  || pDevice->type == ma_device_type_duplex);
+    hasPlayback = (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex);
+
+    if (hasCapture) {
+        result = ma_device__cork_stream__pulse(pDevice, ma_device_type_capture, 0);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+    }
+
+    if (hasPlayback) {
+        /*
+        Some data must be written to the stream before uncorking, otherwise the write callback
+        never gets fired. A failure here is deliberately ignored so that the uncork below always
+        runs.
+        */
+        ma_device_write_to_stream__pulse(pDevice, (ma_pa_stream*)(pDevice->pulse.pStreamPlayback), NULL);
+
+        result = ma_device__cork_stream__pulse(pDevice, ma_device_type_playback, 0);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+/* Stops the device by corking the capture and/or playback streams. */
+static ma_result ma_device_stop__pulse(ma_device* pDevice)
+{
+    ma_result result;
+    ma_bool32 hasCapture;
+    ma_bool32 hasPlayback;
+
+    MA_ASSERT(pDevice != NULL);
+
+    hasCapture  = (pDevice->type == ma_device_type_capture  || pDevice->type == ma_device_type_duplex);
+    hasPlayback = (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex);
+
+    if (hasCapture) {
+        result = ma_device__cork_stream__pulse(pDevice, ma_device_type_capture, 1);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+    }
+
+    if (hasPlayback) {
+        /*
+        Draining the playback stream before corking would be ideal, but on some systems
+        PulseAudio appears broken to the point where no audio processing happens at all,
+        in which case the drain never completes and we would hang here. Draining is
+        therefore disabled for now so the application doesn't freeze.
+        */
+        #if 0
+        ma_pa_operation* pOP = ((ma_pa_stream_drain_proc)pDevice->pContext->pulse.pa_stream_drain)((ma_pa_stream*)pDevice->pulse.pStreamPlayback, ma_pulse_operation_complete_callback, &wasSuccessful);
+        ma_wait_for_operation_and_unref__pulse(pDevice->pContext, pDevice->pulse.pMainLoop, pOP);
+        #endif
+
+        result = ma_device__cork_stream__pulse(pDevice, ma_device_type_playback, 1);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_device_data_loop__pulse(ma_device* pDevice)
+{
+    MA_ASSERT(pDevice != NULL);
+
+    /* Starting and stopping the device is handled at a higher level; this function only pumps the loop. */
+
+    /*
+    Everything is driven by PulseAudio callbacks, so the only job here is to keep iterating the
+    main loop until the device leaves the started state or an iteration fails.
+    */
+    for (;;) {
+        int iterationResult;
+
+        if (ma_device_get_state(pDevice) != ma_device_state_started) {
+            break;
+        }
+
+        iterationResult = ((ma_pa_mainloop_iterate_proc)pDevice->pContext->pulse.pa_mainloop_iterate)((ma_pa_mainloop*)pDevice->pulse.pMainLoop, 1, NULL);
+        if (iterationResult < 0) {
+            break;
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_device_data_loop_wakeup__pulse(ma_device* pDevice)
+{
+    ma_pa_mainloop_wakeup_proc wakeupProc;
+
+    MA_ASSERT(pDevice != NULL);
+
+    /* Poke the main loop so a blocking pa_mainloop_iterate() in the data loop returns promptly. */
+    wakeupProc = (ma_pa_mainloop_wakeup_proc)pDevice->pContext->pulse.pa_mainloop_wakeup;
+    wakeupProc((ma_pa_mainloop*)pDevice->pulse.pMainLoop);
+
+    return MA_SUCCESS;
+}
+
+/* Tears down the PulseAudio context. Releases everything acquired by ma_context_init__pulse(). */
+static ma_result ma_context_uninit__pulse(ma_context* pContext)
+{
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pContext->backend == ma_backend_pulseaudio);
+
+    /* Teardown order matters: disconnect and release the PulseAudio context before freeing the main loop it runs on. */
+    ((ma_pa_context_disconnect_proc)pContext->pulse.pa_context_disconnect)((ma_pa_context*)pContext->pulse.pPulseContext);
+    ((ma_pa_context_unref_proc)pContext->pulse.pa_context_unref)((ma_pa_context*)pContext->pulse.pPulseContext);
+    ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)((ma_pa_mainloop*)pContext->pulse.pMainLoop);
+
+    /* These strings were duplicated in ma_context_init__pulse() with the context's allocation callbacks. */
+    ma_free(pContext->pulse.pServerName, &pContext->allocationCallbacks);
+    ma_free(pContext->pulse.pApplicationName, &pContext->allocationCallbacks);
+
+#ifndef MA_NO_RUNTIME_LINKING
+    /* Unload libpulse last; no PulseAudio symbols may be called after this point. */
+    ma_dlclose(pContext, pContext->pulse.pulseSO);
+#endif
+
+    return MA_SUCCESS;
+}
+
+/*
+Initializes the PulseAudio backend for a context: binds the libpulse API (at runtime via
+dlopen/dlsym, or at compile time), duplicates the configured application/server names, and
+establishes the context-level main loop and pa_context. On success, fills out pCallbacks
+with the PulseAudio backend entry points.
+*/
+static ma_result ma_context_init__pulse(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks)
+{
+    ma_result result;
+#ifndef MA_NO_RUNTIME_LINKING
+    /* Try the unversioned name first, then the versioned soname as installed on most distros. */
+    const char* libpulseNames[] = {
+        "libpulse.so",
+        "libpulse.so.0"
+    };
+    size_t i;
+
+    for (i = 0; i < ma_countof(libpulseNames); ++i) {
+        pContext->pulse.pulseSO = ma_dlopen(pContext, libpulseNames[i]);
+        if (pContext->pulse.pulseSO != NULL) {
+            break;
+        }
+    }
+
+    if (pContext->pulse.pulseSO == NULL) {
+        return MA_NO_BACKEND;
+    }
+
+    /* Bind every libpulse symbol the backend uses. */
+    pContext->pulse.pa_mainloop_new                    = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_mainloop_new");
+    pContext->pulse.pa_mainloop_free                   = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_mainloop_free");
+    pContext->pulse.pa_mainloop_quit                   = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_mainloop_quit");
+    pContext->pulse.pa_mainloop_get_api                = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_mainloop_get_api");
+    pContext->pulse.pa_mainloop_iterate                = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_mainloop_iterate");
+    pContext->pulse.pa_mainloop_wakeup                 = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_mainloop_wakeup");
+    pContext->pulse.pa_threaded_mainloop_new           = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_threaded_mainloop_new");
+    pContext->pulse.pa_threaded_mainloop_free          = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_threaded_mainloop_free");
+    pContext->pulse.pa_threaded_mainloop_start         = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_threaded_mainloop_start");
+    pContext->pulse.pa_threaded_mainloop_stop          = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_threaded_mainloop_stop");
+    pContext->pulse.pa_threaded_mainloop_lock          = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_threaded_mainloop_lock");
+    pContext->pulse.pa_threaded_mainloop_unlock        = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_threaded_mainloop_unlock");
+    pContext->pulse.pa_threaded_mainloop_wait          = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_threaded_mainloop_wait");
+    pContext->pulse.pa_threaded_mainloop_signal        = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_threaded_mainloop_signal");
+    pContext->pulse.pa_threaded_mainloop_accept        = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_threaded_mainloop_accept");
+    pContext->pulse.pa_threaded_mainloop_get_retval    = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_threaded_mainloop_get_retval");
+    pContext->pulse.pa_threaded_mainloop_get_api       = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_threaded_mainloop_get_api");
+    pContext->pulse.pa_threaded_mainloop_in_thread     = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_threaded_mainloop_in_thread");
+    pContext->pulse.pa_threaded_mainloop_set_name      = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_threaded_mainloop_set_name");
+    pContext->pulse.pa_context_new                     = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_new");
+    pContext->pulse.pa_context_unref                   = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_unref");
+    pContext->pulse.pa_context_connect                 = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_connect");
+    pContext->pulse.pa_context_disconnect              = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_disconnect");
+    pContext->pulse.pa_context_set_state_callback      = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_set_state_callback");
+    pContext->pulse.pa_context_get_state               = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_get_state");
+    pContext->pulse.pa_context_get_sink_info_list      = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_get_sink_info_list");
+    pContext->pulse.pa_context_get_source_info_list    = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_get_source_info_list");
+    pContext->pulse.pa_context_get_sink_info_by_name   = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_get_sink_info_by_name");
+    pContext->pulse.pa_context_get_source_info_by_name = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_get_source_info_by_name");
+    pContext->pulse.pa_operation_unref                 = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_operation_unref");
+    pContext->pulse.pa_operation_get_state             = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_operation_get_state");
+    pContext->pulse.pa_channel_map_init_extend         = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_channel_map_init_extend");
+    pContext->pulse.pa_channel_map_valid               = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_channel_map_valid");
+    pContext->pulse.pa_channel_map_compatible          = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_channel_map_compatible");
+    pContext->pulse.pa_stream_new                      = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_new");
+    pContext->pulse.pa_stream_unref                    = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_unref");
+    pContext->pulse.pa_stream_connect_playback         = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_connect_playback");
+    pContext->pulse.pa_stream_connect_record           = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_connect_record");
+    pContext->pulse.pa_stream_disconnect               = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_disconnect");
+    pContext->pulse.pa_stream_get_state                = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_get_state");
+    pContext->pulse.pa_stream_get_sample_spec          = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_get_sample_spec");
+    pContext->pulse.pa_stream_get_channel_map          = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_get_channel_map");
+    pContext->pulse.pa_stream_get_buffer_attr          = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_get_buffer_attr");
+    pContext->pulse.pa_stream_set_buffer_attr          = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_set_buffer_attr");
+    pContext->pulse.pa_stream_get_device_name          = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_get_device_name");
+    pContext->pulse.pa_stream_set_write_callback       = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_set_write_callback");
+    pContext->pulse.pa_stream_set_read_callback        = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_set_read_callback");
+    pContext->pulse.pa_stream_set_suspended_callback   = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_set_suspended_callback");
+    pContext->pulse.pa_stream_set_moved_callback       = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_set_moved_callback");
+    pContext->pulse.pa_stream_is_suspended             = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_is_suspended");
+    pContext->pulse.pa_stream_flush                    = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_flush");
+    pContext->pulse.pa_stream_drain                    = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_drain");
+    pContext->pulse.pa_stream_is_corked                = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_is_corked");
+    pContext->pulse.pa_stream_cork                     = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_cork");
+    pContext->pulse.pa_stream_trigger                  = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_trigger");
+    pContext->pulse.pa_stream_begin_write              = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_begin_write");
+    pContext->pulse.pa_stream_write                    = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_write");
+    pContext->pulse.pa_stream_peek                     = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_peek");
+    pContext->pulse.pa_stream_drop                     = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_drop");
+    pContext->pulse.pa_stream_writable_size            = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_writable_size");
+    pContext->pulse.pa_stream_readable_size            = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_readable_size");
+#else
+    /* This strange assignment system is just for type safety. */
+    ma_pa_mainloop_new_proc                    _pa_mainloop_new                   = pa_mainloop_new;
+    ma_pa_mainloop_free_proc                   _pa_mainloop_free                  = pa_mainloop_free;
+    ma_pa_mainloop_quit_proc                   _pa_mainloop_quit                  = pa_mainloop_quit;
+    ma_pa_mainloop_get_api_proc                _pa_mainloop_get_api               = pa_mainloop_get_api;
+    ma_pa_mainloop_iterate_proc                _pa_mainloop_iterate               = pa_mainloop_iterate;
+    ma_pa_mainloop_wakeup_proc                 _pa_mainloop_wakeup                = pa_mainloop_wakeup;
+    ma_pa_threaded_mainloop_new_proc           _pa_threaded_mainloop_new          = pa_threaded_mainloop_new;
+    ma_pa_threaded_mainloop_free_proc          _pa_threaded_mainloop_free         = pa_threaded_mainloop_free;
+    ma_pa_threaded_mainloop_start_proc         _pa_threaded_mainloop_start        = pa_threaded_mainloop_start;
+    ma_pa_threaded_mainloop_stop_proc          _pa_threaded_mainloop_stop         = pa_threaded_mainloop_stop;
+    ma_pa_threaded_mainloop_lock_proc          _pa_threaded_mainloop_lock         = pa_threaded_mainloop_lock;
+    ma_pa_threaded_mainloop_unlock_proc        _pa_threaded_mainloop_unlock       = pa_threaded_mainloop_unlock;
+    ma_pa_threaded_mainloop_wait_proc          _pa_threaded_mainloop_wait         = pa_threaded_mainloop_wait;
+    ma_pa_threaded_mainloop_signal_proc        _pa_threaded_mainloop_signal       = pa_threaded_mainloop_signal;
+    ma_pa_threaded_mainloop_accept_proc        _pa_threaded_mainloop_accept       = pa_threaded_mainloop_accept;
+    ma_pa_threaded_mainloop_get_retval_proc    _pa_threaded_mainloop_get_retval   = pa_threaded_mainloop_get_retval;
+    ma_pa_threaded_mainloop_get_api_proc       _pa_threaded_mainloop_get_api      = pa_threaded_mainloop_get_api;
+    ma_pa_threaded_mainloop_in_thread_proc     _pa_threaded_mainloop_in_thread    = pa_threaded_mainloop_in_thread;
+    ma_pa_threaded_mainloop_set_name_proc      _pa_threaded_mainloop_set_name     = pa_threaded_mainloop_set_name;
+    ma_pa_context_new_proc                     _pa_context_new                    = pa_context_new;
+    ma_pa_context_unref_proc                   _pa_context_unref                  = pa_context_unref;
+    ma_pa_context_connect_proc                 _pa_context_connect                = pa_context_connect;
+    ma_pa_context_disconnect_proc              _pa_context_disconnect             = pa_context_disconnect;
+    ma_pa_context_set_state_callback_proc      _pa_context_set_state_callback     = pa_context_set_state_callback;
+    ma_pa_context_get_state_proc               _pa_context_get_state              = pa_context_get_state;
+    ma_pa_context_get_sink_info_list_proc      _pa_context_get_sink_info_list     = pa_context_get_sink_info_list;
+    ma_pa_context_get_source_info_list_proc    _pa_context_get_source_info_list   = pa_context_get_source_info_list;
+    ma_pa_context_get_sink_info_by_name_proc   _pa_context_get_sink_info_by_name  = pa_context_get_sink_info_by_name;
+    ma_pa_context_get_source_info_by_name_proc _pa_context_get_source_info_by_name= pa_context_get_source_info_by_name;
+    ma_pa_operation_unref_proc                 _pa_operation_unref                = pa_operation_unref;
+    ma_pa_operation_get_state_proc             _pa_operation_get_state            = pa_operation_get_state;
+    ma_pa_channel_map_init_extend_proc         _pa_channel_map_init_extend        = pa_channel_map_init_extend;
+    ma_pa_channel_map_valid_proc               _pa_channel_map_valid              = pa_channel_map_valid;
+    ma_pa_channel_map_compatible_proc          _pa_channel_map_compatible         = pa_channel_map_compatible;
+    ma_pa_stream_new_proc                      _pa_stream_new                     = pa_stream_new;
+    ma_pa_stream_unref_proc                    _pa_stream_unref                   = pa_stream_unref;
+    ma_pa_stream_connect_playback_proc         _pa_stream_connect_playback        = pa_stream_connect_playback;
+    ma_pa_stream_connect_record_proc           _pa_stream_connect_record          = pa_stream_connect_record;
+    ma_pa_stream_disconnect_proc               _pa_stream_disconnect              = pa_stream_disconnect;
+    ma_pa_stream_get_state_proc                _pa_stream_get_state               = pa_stream_get_state;
+    ma_pa_stream_get_sample_spec_proc          _pa_stream_get_sample_spec         = pa_stream_get_sample_spec;
+    ma_pa_stream_get_channel_map_proc          _pa_stream_get_channel_map         = pa_stream_get_channel_map;
+    ma_pa_stream_get_buffer_attr_proc          _pa_stream_get_buffer_attr         = pa_stream_get_buffer_attr;
+    ma_pa_stream_set_buffer_attr_proc          _pa_stream_set_buffer_attr         = pa_stream_set_buffer_attr;
+    ma_pa_stream_get_device_name_proc          _pa_stream_get_device_name         = pa_stream_get_device_name;
+    ma_pa_stream_set_write_callback_proc       _pa_stream_set_write_callback      = pa_stream_set_write_callback;
+    ma_pa_stream_set_read_callback_proc        _pa_stream_set_read_callback       = pa_stream_set_read_callback;
+    ma_pa_stream_set_suspended_callback_proc   _pa_stream_set_suspended_callback  = pa_stream_set_suspended_callback;
+    ma_pa_stream_set_moved_callback_proc       _pa_stream_set_moved_callback      = pa_stream_set_moved_callback;
+    ma_pa_stream_is_suspended_proc             _pa_stream_is_suspended            = pa_stream_is_suspended;
+    ma_pa_stream_flush_proc                    _pa_stream_flush                   = pa_stream_flush;
+    ma_pa_stream_drain_proc                    _pa_stream_drain                   = pa_stream_drain;
+    ma_pa_stream_is_corked_proc                _pa_stream_is_corked               = pa_stream_is_corked;
+    ma_pa_stream_cork_proc                     _pa_stream_cork                    = pa_stream_cork;
+    ma_pa_stream_trigger_proc                  _pa_stream_trigger                 = pa_stream_trigger;
+    ma_pa_stream_begin_write_proc              _pa_stream_begin_write             = pa_stream_begin_write;
+    ma_pa_stream_write_proc                    _pa_stream_write                   = pa_stream_write;
+    ma_pa_stream_peek_proc                     _pa_stream_peek                    = pa_stream_peek;
+    ma_pa_stream_drop_proc                     _pa_stream_drop                    = pa_stream_drop;
+    ma_pa_stream_writable_size_proc            _pa_stream_writable_size           = pa_stream_writable_size;
+    ma_pa_stream_readable_size_proc            _pa_stream_readable_size           = pa_stream_readable_size;
+
+    pContext->pulse.pa_mainloop_new                    = (ma_proc)_pa_mainloop_new;
+    pContext->pulse.pa_mainloop_free                   = (ma_proc)_pa_mainloop_free;
+    pContext->pulse.pa_mainloop_quit                   = (ma_proc)_pa_mainloop_quit;
+    pContext->pulse.pa_mainloop_get_api                = (ma_proc)_pa_mainloop_get_api;
+    pContext->pulse.pa_mainloop_iterate                = (ma_proc)_pa_mainloop_iterate;
+    pContext->pulse.pa_mainloop_wakeup                 = (ma_proc)_pa_mainloop_wakeup;
+    pContext->pulse.pa_threaded_mainloop_new           = (ma_proc)_pa_threaded_mainloop_new;
+    pContext->pulse.pa_threaded_mainloop_free          = (ma_proc)_pa_threaded_mainloop_free;
+    pContext->pulse.pa_threaded_mainloop_start         = (ma_proc)_pa_threaded_mainloop_start;
+    pContext->pulse.pa_threaded_mainloop_stop          = (ma_proc)_pa_threaded_mainloop_stop;
+    pContext->pulse.pa_threaded_mainloop_lock          = (ma_proc)_pa_threaded_mainloop_lock;
+    pContext->pulse.pa_threaded_mainloop_unlock        = (ma_proc)_pa_threaded_mainloop_unlock;
+    pContext->pulse.pa_threaded_mainloop_wait          = (ma_proc)_pa_threaded_mainloop_wait;
+    pContext->pulse.pa_threaded_mainloop_signal        = (ma_proc)_pa_threaded_mainloop_signal;
+    pContext->pulse.pa_threaded_mainloop_accept        = (ma_proc)_pa_threaded_mainloop_accept;
+    pContext->pulse.pa_threaded_mainloop_get_retval    = (ma_proc)_pa_threaded_mainloop_get_retval;
+    pContext->pulse.pa_threaded_mainloop_get_api       = (ma_proc)_pa_threaded_mainloop_get_api;
+    pContext->pulse.pa_threaded_mainloop_in_thread     = (ma_proc)_pa_threaded_mainloop_in_thread;
+    pContext->pulse.pa_threaded_mainloop_set_name      = (ma_proc)_pa_threaded_mainloop_set_name;
+    pContext->pulse.pa_context_new                     = (ma_proc)_pa_context_new;
+    pContext->pulse.pa_context_unref                   = (ma_proc)_pa_context_unref;
+    pContext->pulse.pa_context_connect                 = (ma_proc)_pa_context_connect;
+    pContext->pulse.pa_context_disconnect              = (ma_proc)_pa_context_disconnect;
+    pContext->pulse.pa_context_set_state_callback      = (ma_proc)_pa_context_set_state_callback;
+    pContext->pulse.pa_context_get_state               = (ma_proc)_pa_context_get_state;
+    pContext->pulse.pa_context_get_sink_info_list      = (ma_proc)_pa_context_get_sink_info_list;
+    pContext->pulse.pa_context_get_source_info_list    = (ma_proc)_pa_context_get_source_info_list;
+    pContext->pulse.pa_context_get_sink_info_by_name   = (ma_proc)_pa_context_get_sink_info_by_name;
+    pContext->pulse.pa_context_get_source_info_by_name = (ma_proc)_pa_context_get_source_info_by_name;
+    pContext->pulse.pa_operation_unref                 = (ma_proc)_pa_operation_unref;
+    pContext->pulse.pa_operation_get_state             = (ma_proc)_pa_operation_get_state;
+    pContext->pulse.pa_channel_map_init_extend         = (ma_proc)_pa_channel_map_init_extend;
+    pContext->pulse.pa_channel_map_valid               = (ma_proc)_pa_channel_map_valid;
+    pContext->pulse.pa_channel_map_compatible          = (ma_proc)_pa_channel_map_compatible;
+    pContext->pulse.pa_stream_new                      = (ma_proc)_pa_stream_new;
+    pContext->pulse.pa_stream_unref                    = (ma_proc)_pa_stream_unref;
+    pContext->pulse.pa_stream_connect_playback         = (ma_proc)_pa_stream_connect_playback;
+    pContext->pulse.pa_stream_connect_record           = (ma_proc)_pa_stream_connect_record;
+    pContext->pulse.pa_stream_disconnect               = (ma_proc)_pa_stream_disconnect;
+    pContext->pulse.pa_stream_get_state                = (ma_proc)_pa_stream_get_state;
+    pContext->pulse.pa_stream_get_sample_spec          = (ma_proc)_pa_stream_get_sample_spec;
+    pContext->pulse.pa_stream_get_channel_map          = (ma_proc)_pa_stream_get_channel_map;
+    pContext->pulse.pa_stream_get_buffer_attr          = (ma_proc)_pa_stream_get_buffer_attr;
+    pContext->pulse.pa_stream_set_buffer_attr          = (ma_proc)_pa_stream_set_buffer_attr;
+    pContext->pulse.pa_stream_get_device_name          = (ma_proc)_pa_stream_get_device_name;
+    pContext->pulse.pa_stream_set_write_callback       = (ma_proc)_pa_stream_set_write_callback;
+    pContext->pulse.pa_stream_set_read_callback        = (ma_proc)_pa_stream_set_read_callback;
+    pContext->pulse.pa_stream_set_suspended_callback   = (ma_proc)_pa_stream_set_suspended_callback;
+    pContext->pulse.pa_stream_set_moved_callback       = (ma_proc)_pa_stream_set_moved_callback;
+    pContext->pulse.pa_stream_is_suspended             = (ma_proc)_pa_stream_is_suspended;
+    pContext->pulse.pa_stream_flush                    = (ma_proc)_pa_stream_flush;
+    pContext->pulse.pa_stream_drain                    = (ma_proc)_pa_stream_drain;
+    pContext->pulse.pa_stream_is_corked                = (ma_proc)_pa_stream_is_corked;
+    pContext->pulse.pa_stream_cork                     = (ma_proc)_pa_stream_cork;
+    pContext->pulse.pa_stream_trigger                  = (ma_proc)_pa_stream_trigger;
+    pContext->pulse.pa_stream_begin_write              = (ma_proc)_pa_stream_begin_write;
+    pContext->pulse.pa_stream_write                    = (ma_proc)_pa_stream_write;
+    pContext->pulse.pa_stream_peek                     = (ma_proc)_pa_stream_peek;
+    pContext->pulse.pa_stream_drop                     = (ma_proc)_pa_stream_drop;
+    pContext->pulse.pa_stream_writable_size            = (ma_proc)_pa_stream_writable_size;
+    pContext->pulse.pa_stream_readable_size            = (ma_proc)_pa_stream_readable_size;
+#endif
+
+    /* We need to make a copy of the application and server names so we can pass them to the pa_context of each device. */
+    pContext->pulse.pApplicationName = ma_copy_string(pConfig->pulse.pApplicationName, &pContext->allocationCallbacks);
+    if (pContext->pulse.pApplicationName == NULL && pConfig->pulse.pApplicationName != NULL) {
+        return MA_OUT_OF_MEMORY;
+    }
+
+    pContext->pulse.pServerName = ma_copy_string(pConfig->pulse.pServerName, &pContext->allocationCallbacks);
+    if (pContext->pulse.pServerName == NULL && pConfig->pulse.pServerName != NULL) {
+        ma_free(pContext->pulse.pApplicationName, &pContext->allocationCallbacks);
+        return MA_OUT_OF_MEMORY;
+    }
+
+    result = ma_init_pa_mainloop_and_pa_context__pulse(pContext, pConfig->pulse.pApplicationName, pConfig->pulse.pServerName, pConfig->pulse.tryAutoSpawn, &pContext->pulse.pMainLoop, &pContext->pulse.pPulseContext);
+    if (result != MA_SUCCESS) {
+        /* Unwind everything acquired above before bailing. */
+        ma_free(pContext->pulse.pServerName, &pContext->allocationCallbacks);
+        ma_free(pContext->pulse.pApplicationName, &pContext->allocationCallbacks);
+        #ifndef MA_NO_RUNTIME_LINKING
+            ma_dlclose(pContext, pContext->pulse.pulseSO);
+        #endif
+        return result;
+    }
+
+    /* With pa_mainloop we run a synchronous backend, but we implement our own main loop. */
+    pCallbacks->onContextInit             = ma_context_init__pulse;
+    pCallbacks->onContextUninit           = ma_context_uninit__pulse;
+    pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__pulse;
+    pCallbacks->onContextGetDeviceInfo    = ma_context_get_device_info__pulse;
+    pCallbacks->onDeviceInit              = ma_device_init__pulse;
+    pCallbacks->onDeviceUninit            = ma_device_uninit__pulse;
+    pCallbacks->onDeviceStart             = ma_device_start__pulse;
+    pCallbacks->onDeviceStop              = ma_device_stop__pulse;
+    pCallbacks->onDeviceRead              = NULL;   /* Not used because we're implementing onDeviceDataLoop. */
+    pCallbacks->onDeviceWrite             = NULL;   /* Not used because we're implementing onDeviceDataLoop. */
+    pCallbacks->onDeviceDataLoop          = ma_device_data_loop__pulse;
+    pCallbacks->onDeviceDataLoopWakeup    = ma_device_data_loop_wakeup__pulse;
+
+    return MA_SUCCESS;
+}
+#endif
+
+
+/******************************************************************************
+
+JACK Backend
+
+******************************************************************************/
+#ifdef MA_HAS_JACK
+
+/* It is assumed jack.h is available when compile-time linking is being used. */
+#ifdef MA_NO_RUNTIME_LINKING
+#include <jack/jack.h>
+
+/* Compile-time linking: alias the real JACK types and constants. */
+typedef jack_nframes_t              ma_jack_nframes_t;
+typedef jack_options_t              ma_jack_options_t;
+typedef jack_status_t               ma_jack_status_t;
+typedef jack_client_t               ma_jack_client_t;
+typedef jack_port_t                 ma_jack_port_t;
+typedef JackProcessCallback         ma_JackProcessCallback;
+typedef JackBufferSizeCallback      ma_JackBufferSizeCallback;
+typedef JackShutdownCallback        ma_JackShutdownCallback;
+#define MA_JACK_DEFAULT_AUDIO_TYPE  JACK_DEFAULT_AUDIO_TYPE
+#define ma_JackNoStartServer        JackNoStartServer
+#define ma_JackPortIsInput          JackPortIsInput
+#define ma_JackPortIsOutput         JackPortIsOutput
+#define ma_JackPortIsPhysical       JackPortIsPhysical
+#else
+/* Runtime linking: declare stand-in types and constants so jack.h is not needed. The values must match jack.h. */
+typedef ma_uint32               ma_jack_nframes_t;
+typedef int                     ma_jack_options_t;
+typedef int                     ma_jack_status_t;
+typedef struct ma_jack_client_t ma_jack_client_t;
+typedef struct ma_jack_port_t   ma_jack_port_t;
+typedef int  (* ma_JackProcessCallback)   (ma_jack_nframes_t nframes, void* arg);
+typedef int  (* ma_JackBufferSizeCallback)(ma_jack_nframes_t nframes, void* arg);
+typedef void (* ma_JackShutdownCallback)  (void* arg);
+#define MA_JACK_DEFAULT_AUDIO_TYPE "32 bit float mono audio"
+#define ma_JackNoStartServer       1
+#define ma_JackPortIsInput         1
+#define ma_JackPortIsOutput        2
+#define ma_JackPortIsPhysical      4
+#endif
+
+/* Function pointer types for the JACK symbols used by this backend. */
+typedef ma_jack_client_t* (* ma_jack_client_open_proc)              (const char* client_name, ma_jack_options_t options, ma_jack_status_t* status, ...);
+typedef int               (* ma_jack_client_close_proc)             (ma_jack_client_t* client);
+typedef int               (* ma_jack_client_name_size_proc)         (void);
+typedef int               (* ma_jack_set_process_callback_proc)     (ma_jack_client_t* client, ma_JackProcessCallback process_callback, void* arg);
+typedef int               (* ma_jack_set_buffer_size_callback_proc) (ma_jack_client_t* client, ma_JackBufferSizeCallback bufsize_callback, void* arg);
+typedef void              (* ma_jack_on_shutdown_proc)              (ma_jack_client_t* client, ma_JackShutdownCallback function, void* arg);
+typedef ma_jack_nframes_t (* ma_jack_get_sample_rate_proc)          (ma_jack_client_t* client);
+typedef ma_jack_nframes_t (* ma_jack_get_buffer_size_proc)          (ma_jack_client_t* client);
+typedef const char**      (* ma_jack_get_ports_proc)                (ma_jack_client_t* client, const char* port_name_pattern, const char* type_name_pattern, unsigned long flags);
+typedef int               (* ma_jack_activate_proc)                 (ma_jack_client_t* client);
+typedef int               (* ma_jack_deactivate_proc)               (ma_jack_client_t* client);
+typedef int               (* ma_jack_connect_proc)                  (ma_jack_client_t* client, const char* source_port, const char* destination_port);
+typedef ma_jack_port_t*   (* ma_jack_port_register_proc)            (ma_jack_client_t* client, const char* port_name, const char* port_type, unsigned long flags, unsigned long buffer_size);
+typedef const char*       (* ma_jack_port_name_proc)                (const ma_jack_port_t* port);
+typedef void*             (* ma_jack_port_get_buffer_proc)          (ma_jack_port_t* port, ma_jack_nframes_t nframes);
+typedef void              (* ma_jack_free_proc)                     (void* ptr);
+
+/* Opens a JACK client using the configured client name, or "miniaudio" if none was set. */
+static ma_result ma_context_open_client__jack(ma_context* pContext, ma_jack_client_t** ppClient)
+{
+    size_t maxClientNameSize;
+    char clientName[256];
+    ma_jack_status_t status;
+    ma_jack_client_t* pNewClient;
+    const char* pClientNameSrc;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(ppClient != NULL);
+
+    if (ppClient != NULL) {
+        *ppClient = NULL;
+    }
+
+    pClientNameSrc = pContext->jack.pClientName;
+    if (pClientNameSrc == NULL) {
+        pClientNameSrc = "miniaudio";
+    }
+
+    /* jack_client_name_size() includes room for the null terminator. */
+    maxClientNameSize = ((ma_jack_client_name_size_proc)pContext->jack.jack_client_name_size)();
+    ma_strncpy_s(clientName, ma_min(sizeof(clientName), maxClientNameSize), pClientNameSrc, (size_t)-1);
+
+    pNewClient = ((ma_jack_client_open_proc)pContext->jack.jack_client_open)(clientName, (pContext->jack.tryStartServer) ? 0 : ma_JackNoStartServer, &status, NULL);
+    if (pNewClient == NULL) {
+        return MA_FAILED_TO_OPEN_BACKEND_DEVICE;
+    }
+
+    if (ppClient != NULL) {
+        *ppClient = pNewClient;
+    }
+
+    return MA_SUCCESS;
+}
+
+
+static ma_result ma_context_enumerate_devices__jack(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+{
+    ma_bool32 keepGoing = MA_TRUE;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(callback != NULL);
+
+    /* JACK exposes a single default device per direction, so report exactly one playback and one capture device. */
+    if (keepGoing) {
+        ma_device_info info;
+        MA_ZERO_OBJECT(&info);
+        ma_strncpy_s(info.name, sizeof(info.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
+        info.isDefault = MA_TRUE;
+        keepGoing = callback(pContext, ma_device_type_playback, &info, pUserData);
+    }
+
+    if (keepGoing) {
+        ma_device_info info;
+        MA_ZERO_OBJECT(&info);
+        ma_strncpy_s(info.name, sizeof(info.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
+        info.isDefault = MA_TRUE;
+        keepGoing = callback(pContext, ma_device_type_capture, &info, pUserData);
+    }
+
+    (void)keepGoing; /* Silences a static analysis warning about the final assignment being unused. */
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_context_get_device_info__jack(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo)
+{
+ ma_jack_client_t* pClient;
+ ma_result result;
+ const char** ppPorts;
+
+ MA_ASSERT(pContext != NULL);
+
+ if (pDeviceID != NULL && pDeviceID->jack != 0) {
+ return MA_NO_DEVICE; /* Don't know the device. */
+ }
+
+ /* Name / Description */
+ if (deviceType == ma_device_type_playback) {
+ ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
+ } else {
+ ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
+ }
+
+ /* Jack only uses default devices. */
+ pDeviceInfo->isDefault = MA_TRUE;
+
+ /* Jack only supports f32 and has a specific channel count and sample rate. */
+ pDeviceInfo->nativeDataFormats[0].format = ma_format_f32;
+
+ /* The channel count and sample rate can only be determined by opening the device. */
+ result = ma_context_open_client__jack(pContext, &pClient);
+ if (result != MA_SUCCESS) {
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[JACK] Failed to open client.");
+ return result;
+ }
+
+ pDeviceInfo->nativeDataFormats[0].sampleRate = ((ma_jack_get_sample_rate_proc)pContext->jack.jack_get_sample_rate)((ma_jack_client_t*)pClient);
+ pDeviceInfo->nativeDataFormats[0].channels = 0;
+
+ ppPorts = ((ma_jack_get_ports_proc)pContext->jack.jack_get_ports)((ma_jack_client_t*)pClient, NULL, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsPhysical | ((deviceType == ma_device_type_playback) ? ma_JackPortIsInput : ma_JackPortIsOutput));
+ if (ppPorts == NULL) {
+ ((ma_jack_client_close_proc)pContext->jack.jack_client_close)((ma_jack_client_t*)pClient);
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[JACK] Failed to query physical ports.");
+ return MA_FAILED_TO_OPEN_BACKEND_DEVICE;
+ }
+
+ while (ppPorts[pDeviceInfo->nativeDataFormats[0].channels] != NULL) {
+ pDeviceInfo->nativeDataFormats[0].channels += 1;
+ }
+
+ pDeviceInfo->nativeDataFormats[0].flags = 0;
+ pDeviceInfo->nativeDataFormatCount = 1;
+
+ ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppPorts);
+ ((ma_jack_client_close_proc)pContext->jack.jack_client_close)((ma_jack_client_t*)pClient);
+
+ (void)pContext;
+ return MA_SUCCESS;
+}
+
+
+static ma_result ma_device_uninit__jack(ma_device* pDevice)
+{
+ ma_context* pContext;
+
+ MA_ASSERT(pDevice != NULL);
+
+ pContext = pDevice->pContext;
+ MA_ASSERT(pContext != NULL);
+
+ if (pDevice->jack.pClient != NULL) {
+ ((ma_jack_client_close_proc)pContext->jack.jack_client_close)((ma_jack_client_t*)pDevice->jack.pClient);
+ }
+
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ ma_free(pDevice->jack.pIntermediaryBufferCapture, &pDevice->pContext->allocationCallbacks);
+ ma_free(pDevice->jack.ppPortsCapture, &pDevice->pContext->allocationCallbacks);
+ }
+
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ ma_free(pDevice->jack.pIntermediaryBufferPlayback, &pDevice->pContext->allocationCallbacks);
+ ma_free(pDevice->jack.ppPortsPlayback, &pDevice->pContext->allocationCallbacks);
+ }
+
+ return MA_SUCCESS;
+}
+
+static void ma_device__jack_shutdown_callback(void* pUserData)
+{
+ /* JACK died. Stop the device. */
+ ma_device* pDevice = (ma_device*)pUserData;
+ MA_ASSERT(pDevice != NULL);
+
+ ma_device_stop(pDevice);
+}
+
+static int ma_device__jack_buffer_size_callback(ma_jack_nframes_t frameCount, void* pUserData)
+{
+ ma_device* pDevice = (ma_device*)pUserData;
+ MA_ASSERT(pDevice != NULL);
+
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ size_t newBufferSize = frameCount * (pDevice->capture.internalChannels * ma_get_bytes_per_sample(pDevice->capture.internalFormat));
+ float* pNewBuffer = (float*)ma_calloc(newBufferSize, &pDevice->pContext->allocationCallbacks);
+ if (pNewBuffer == NULL) {
+ return MA_OUT_OF_MEMORY;
+ }
+
+ ma_free(pDevice->jack.pIntermediaryBufferCapture, &pDevice->pContext->allocationCallbacks);
+
+ pDevice->jack.pIntermediaryBufferCapture = pNewBuffer;
+ pDevice->playback.internalPeriodSizeInFrames = frameCount;
+ }
+
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ size_t newBufferSize = frameCount * (pDevice->playback.internalChannels * ma_get_bytes_per_sample(pDevice->playback.internalFormat));
+ float* pNewBuffer = (float*)ma_calloc(newBufferSize, &pDevice->pContext->allocationCallbacks);
+ if (pNewBuffer == NULL) {
+ return MA_OUT_OF_MEMORY;
+ }
+
+ ma_free(pDevice->jack.pIntermediaryBufferPlayback, &pDevice->pContext->allocationCallbacks);
+
+ pDevice->jack.pIntermediaryBufferPlayback = pNewBuffer;
+ pDevice->playback.internalPeriodSizeInFrames = frameCount;
+ }
+
+ return 0;
+}
+
+static int ma_device__jack_process_callback(ma_jack_nframes_t frameCount, void* pUserData)
+{
+ ma_device* pDevice;
+ ma_context* pContext;
+ ma_uint32 iChannel;
+
+ pDevice = (ma_device*)pUserData;
+ MA_ASSERT(pDevice != NULL);
+
+ pContext = pDevice->pContext;
+ MA_ASSERT(pContext != NULL);
+
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ /* Channels need to be interleaved. */
+ for (iChannel = 0; iChannel < pDevice->capture.internalChannels; ++iChannel) {
+ const float* pSrc = (const float*)((ma_jack_port_get_buffer_proc)pContext->jack.jack_port_get_buffer)((ma_jack_port_t*)pDevice->jack.ppPortsCapture[iChannel], frameCount);
+ if (pSrc != NULL) {
+ float* pDst = pDevice->jack.pIntermediaryBufferCapture + iChannel;
+ ma_jack_nframes_t iFrame;
+ for (iFrame = 0; iFrame < frameCount; ++iFrame) {
+ *pDst = *pSrc;
+
+ pDst += pDevice->capture.internalChannels;
+ pSrc += 1;
+ }
+ }
+ }
+
+ ma_device_handle_backend_data_callback(pDevice, NULL, pDevice->jack.pIntermediaryBufferCapture, frameCount);
+ }
+
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ ma_device_handle_backend_data_callback(pDevice, pDevice->jack.pIntermediaryBufferPlayback, NULL, frameCount);
+
+ /* Channels need to be deinterleaved. */
+ for (iChannel = 0; iChannel < pDevice->playback.internalChannels; ++iChannel) {
+ float* pDst = (float*)((ma_jack_port_get_buffer_proc)pContext->jack.jack_port_get_buffer)((ma_jack_port_t*)pDevice->jack.ppPortsPlayback[iChannel], frameCount);
+ if (pDst != NULL) {
+ const float* pSrc = pDevice->jack.pIntermediaryBufferPlayback + iChannel;
+ ma_jack_nframes_t iFrame;
+ for (iFrame = 0; iFrame < frameCount; ++iFrame) {
+ *pDst = *pSrc;
+
+ pDst += 1;
+ pSrc += pDevice->playback.internalChannels;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static ma_result ma_device_init__jack(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture)
+{
+ ma_result result;
+ ma_uint32 periodSizeInFrames;
+
+ MA_ASSERT(pConfig != NULL);
+ MA_ASSERT(pDevice != NULL);
+
+ if (pConfig->deviceType == ma_device_type_loopback) {
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Loopback mode not supported.");
+ return MA_DEVICE_TYPE_NOT_SUPPORTED;
+ }
+
+ /* Only supporting default devices with JACK. */
+ if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pDescriptorPlayback->pDeviceID != NULL && pDescriptorPlayback->pDeviceID->jack != 0) ||
+ ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pDescriptorCapture->pDeviceID != NULL && pDescriptorCapture->pDeviceID->jack != 0)) {
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Only default devices are supported.");
+ return MA_NO_DEVICE;
+ }
+
+ /* No exclusive mode with the JACK backend. */
+ if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pDescriptorPlayback->shareMode == ma_share_mode_exclusive) ||
+ ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pDescriptorCapture->shareMode == ma_share_mode_exclusive)) {
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Exclusive mode not supported.");
+ return MA_SHARE_MODE_NOT_SUPPORTED;
+ }
+
+ /* Open the client. */
+ result = ma_context_open_client__jack(pDevice->pContext, (ma_jack_client_t**)&pDevice->jack.pClient);
+ if (result != MA_SUCCESS) {
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to open client.");
+ return result;
+ }
+
+ /* Callbacks. */
+ if (((ma_jack_set_process_callback_proc)pDevice->pContext->jack.jack_set_process_callback)((ma_jack_client_t*)pDevice->jack.pClient, ma_device__jack_process_callback, pDevice) != 0) {
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to set process callback.");
+ return MA_FAILED_TO_OPEN_BACKEND_DEVICE;
+ }
+ if (((ma_jack_set_buffer_size_callback_proc)pDevice->pContext->jack.jack_set_buffer_size_callback)((ma_jack_client_t*)pDevice->jack.pClient, ma_device__jack_buffer_size_callback, pDevice) != 0) {
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to set buffer size callback.");
+ return MA_FAILED_TO_OPEN_BACKEND_DEVICE;
+ }
+
+ ((ma_jack_on_shutdown_proc)pDevice->pContext->jack.jack_on_shutdown)((ma_jack_client_t*)pDevice->jack.pClient, ma_device__jack_shutdown_callback, pDevice);
+
+
+ /* The buffer size in frames can change. */
+ periodSizeInFrames = ((ma_jack_get_buffer_size_proc)pDevice->pContext->jack.jack_get_buffer_size)((ma_jack_client_t*)pDevice->jack.pClient);
+
+ if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
+ ma_uint32 iPort;
+ const char** ppPorts;
+
+ pDescriptorCapture->format = ma_format_f32;
+ pDescriptorCapture->channels = 0;
+ pDescriptorCapture->sampleRate = ((ma_jack_get_sample_rate_proc)pDevice->pContext->jack.jack_get_sample_rate)((ma_jack_client_t*)pDevice->jack.pClient);
+ ma_channel_map_init_standard(ma_standard_channel_map_alsa, pDescriptorCapture->channelMap, ma_countof(pDescriptorCapture->channelMap), pDescriptorCapture->channels);
+
+ ppPorts = ((ma_jack_get_ports_proc)pDevice->pContext->jack.jack_get_ports)((ma_jack_client_t*)pDevice->jack.pClient, NULL, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsPhysical | ma_JackPortIsOutput);
+ if (ppPorts == NULL) {
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to query physical ports.");
+ return MA_FAILED_TO_OPEN_BACKEND_DEVICE;
+ }
+
+ /* Need to count the number of ports first so we can allocate some memory. */
+ while (ppPorts[pDescriptorCapture->channels] != NULL) {
+ pDescriptorCapture->channels += 1;
+ }
+
+ pDevice->jack.ppPortsCapture = (ma_ptr*)ma_malloc(sizeof(*pDevice->jack.ppPortsCapture) * pDescriptorCapture->channels, &pDevice->pContext->allocationCallbacks);
+ if (pDevice->jack.ppPortsCapture == NULL) {
+ return MA_OUT_OF_MEMORY;
+ }
+
+ for (iPort = 0; iPort < pDescriptorCapture->channels; iPort += 1) {
+ char name[64];
+ ma_strcpy_s(name, sizeof(name), "capture");
+ ma_itoa_s((int)iPort, name+7, sizeof(name)-7, 10); /* 7 = length of "capture" */
+
+ pDevice->jack.ppPortsCapture[iPort] = ((ma_jack_port_register_proc)pDevice->pContext->jack.jack_port_register)((ma_jack_client_t*)pDevice->jack.pClient, name, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsInput, 0);
+ if (pDevice->jack.ppPortsCapture[iPort] == NULL) {
+ ((ma_jack_free_proc)pDevice->pContext->jack.jack_free)((void*)ppPorts);
+ ma_device_uninit__jack(pDevice);
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to register ports.");
+ return MA_FAILED_TO_OPEN_BACKEND_DEVICE;
+ }
+ }
+
+ ((ma_jack_free_proc)pDevice->pContext->jack.jack_free)((void*)ppPorts);
+
+ pDescriptorCapture->periodSizeInFrames = periodSizeInFrames;
+ pDescriptorCapture->periodCount = 1; /* There's no notion of a period in JACK. Just set to 1. */
+
+ pDevice->jack.pIntermediaryBufferCapture = (float*)ma_calloc(pDescriptorCapture->periodSizeInFrames * ma_get_bytes_per_frame(pDescriptorCapture->format, pDescriptorCapture->channels), &pDevice->pContext->allocationCallbacks);
+ if (pDevice->jack.pIntermediaryBufferCapture == NULL) {
+ ma_device_uninit__jack(pDevice);
+ return MA_OUT_OF_MEMORY;
+ }
+ }
+
+ if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+ ma_uint32 iPort;
+ const char** ppPorts;
+
+ pDescriptorPlayback->format = ma_format_f32;
+ pDescriptorPlayback->channels = 0;
+ pDescriptorPlayback->sampleRate = ((ma_jack_get_sample_rate_proc)pDevice->pContext->jack.jack_get_sample_rate)((ma_jack_client_t*)pDevice->jack.pClient);
+ ma_channel_map_init_standard(ma_standard_channel_map_alsa, pDescriptorPlayback->channelMap, ma_countof(pDescriptorPlayback->channelMap), pDescriptorPlayback->channels);
+
+ ppPorts = ((ma_jack_get_ports_proc)pDevice->pContext->jack.jack_get_ports)((ma_jack_client_t*)pDevice->jack.pClient, NULL, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsPhysical | ma_JackPortIsInput);
+ if (ppPorts == NULL) {
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to query physical ports.");
+ return MA_FAILED_TO_OPEN_BACKEND_DEVICE;
+ }
+
+ /* Need to count the number of ports first so we can allocate some memory. */
+ while (ppPorts[pDescriptorPlayback->channels] != NULL) {
+ pDescriptorPlayback->channels += 1;
+ }
+
+ pDevice->jack.ppPortsPlayback = (ma_ptr*)ma_malloc(sizeof(*pDevice->jack.ppPortsPlayback) * pDescriptorPlayback->channels, &pDevice->pContext->allocationCallbacks);
+ if (pDevice->jack.ppPortsPlayback == NULL) {
+ ma_free(pDevice->jack.ppPortsCapture, &pDevice->pContext->allocationCallbacks);
+ return MA_OUT_OF_MEMORY;
+ }
+
+ for (iPort = 0; iPort < pDescriptorPlayback->channels; iPort += 1) {
+ char name[64];
+ ma_strcpy_s(name, sizeof(name), "playback");
+ ma_itoa_s((int)iPort, name+8, sizeof(name)-8, 10); /* 8 = length of "playback" */
+
+ pDevice->jack.ppPortsPlayback[iPort] = ((ma_jack_port_register_proc)pDevice->pContext->jack.jack_port_register)((ma_jack_client_t*)pDevice->jack.pClient, name, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsOutput, 0);
+ if (pDevice->jack.ppPortsPlayback[iPort] == NULL) {
+ ((ma_jack_free_proc)pDevice->pContext->jack.jack_free)((void*)ppPorts);
+ ma_device_uninit__jack(pDevice);
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to register ports.");
+ return MA_FAILED_TO_OPEN_BACKEND_DEVICE;
+ }
+ }
+
+ ((ma_jack_free_proc)pDevice->pContext->jack.jack_free)((void*)ppPorts);
+
+ pDescriptorPlayback->periodSizeInFrames = periodSizeInFrames;
+ pDescriptorPlayback->periodCount = 1; /* There's no notion of a period in JACK. Just set to 1. */
+
+ pDevice->jack.pIntermediaryBufferPlayback = (float*)ma_calloc(pDescriptorPlayback->periodSizeInFrames * ma_get_bytes_per_frame(pDescriptorPlayback->format, pDescriptorPlayback->channels), &pDevice->pContext->allocationCallbacks);
+ if (pDevice->jack.pIntermediaryBufferPlayback == NULL) {
+ ma_device_uninit__jack(pDevice);
+ return MA_OUT_OF_MEMORY;
+ }
+ }
+
+ return MA_SUCCESS;
+}
+
+
+static ma_result ma_device_start__jack(ma_device* pDevice)
+{
+ ma_context* pContext = pDevice->pContext;
+ int resultJACK;
+ size_t i;
+
+ resultJACK = ((ma_jack_activate_proc)pContext->jack.jack_activate)((ma_jack_client_t*)pDevice->jack.pClient);
+ if (resultJACK != 0) {
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to activate the JACK client.");
+ return MA_FAILED_TO_START_BACKEND_DEVICE;
+ }
+
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ const char** ppServerPorts = ((ma_jack_get_ports_proc)pContext->jack.jack_get_ports)((ma_jack_client_t*)pDevice->jack.pClient, NULL, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsPhysical | ma_JackPortIsOutput);
+ if (ppServerPorts == NULL) {
+ ((ma_jack_deactivate_proc)pContext->jack.jack_deactivate)((ma_jack_client_t*)pDevice->jack.pClient);
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to retrieve physical ports.");
+ return MA_ERROR;
+ }
+
+ for (i = 0; ppServerPorts[i] != NULL; ++i) {
+ const char* pServerPort = ppServerPorts[i];
+ const char* pClientPort = ((ma_jack_port_name_proc)pContext->jack.jack_port_name)((ma_jack_port_t*)pDevice->jack.ppPortsCapture[i]);
+
+ resultJACK = ((ma_jack_connect_proc)pContext->jack.jack_connect)((ma_jack_client_t*)pDevice->jack.pClient, pServerPort, pClientPort);
+ if (resultJACK != 0) {
+ ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppServerPorts);
+ ((ma_jack_deactivate_proc)pContext->jack.jack_deactivate)((ma_jack_client_t*)pDevice->jack.pClient);
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to connect ports.");
+ return MA_ERROR;
+ }
+ }
+
+ ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppServerPorts);
+ }
+
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ const char** ppServerPorts = ((ma_jack_get_ports_proc)pContext->jack.jack_get_ports)((ma_jack_client_t*)pDevice->jack.pClient, NULL, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsPhysical | ma_JackPortIsInput);
+ if (ppServerPorts == NULL) {
+ ((ma_jack_deactivate_proc)pContext->jack.jack_deactivate)((ma_jack_client_t*)pDevice->jack.pClient);
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to retrieve physical ports.");
+ return MA_ERROR;
+ }
+
+ for (i = 0; ppServerPorts[i] != NULL; ++i) {
+ const char* pServerPort = ppServerPorts[i];
+ const char* pClientPort = ((ma_jack_port_name_proc)pContext->jack.jack_port_name)((ma_jack_port_t*)pDevice->jack.ppPortsPlayback[i]);
+
+ resultJACK = ((ma_jack_connect_proc)pContext->jack.jack_connect)((ma_jack_client_t*)pDevice->jack.pClient, pClientPort, pServerPort);
+ if (resultJACK != 0) {
+ ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppServerPorts);
+ ((ma_jack_deactivate_proc)pContext->jack.jack_deactivate)((ma_jack_client_t*)pDevice->jack.pClient);
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to connect ports.");
+ return MA_ERROR;
+ }
+ }
+
+ ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppServerPorts);
+ }
+
+ return MA_SUCCESS;
+}
+
+static ma_result ma_device_stop__jack(ma_device* pDevice)
+{
+ ma_context* pContext = pDevice->pContext;
+
+ if (((ma_jack_deactivate_proc)pContext->jack.jack_deactivate)((ma_jack_client_t*)pDevice->jack.pClient) != 0) {
+ ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] An error occurred when deactivating the JACK client.");
+ return MA_ERROR;
+ }
+
+ ma_device__on_notification_stopped(pDevice);
+
+ return MA_SUCCESS;
+}
+
+
+static ma_result ma_context_uninit__jack(ma_context* pContext)
+{
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pContext->backend == ma_backend_jack);
+
+ ma_free(pContext->jack.pClientName, &pContext->allocationCallbacks);
+ pContext->jack.pClientName = NULL;
+
+#ifndef MA_NO_RUNTIME_LINKING
+ ma_dlclose(pContext, pContext->jack.jackSO);
+#endif
+
+ return MA_SUCCESS;
+}
+
+static ma_result ma_context_init__jack(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks)
+{
+#ifndef MA_NO_RUNTIME_LINKING
+ const char* libjackNames[] = {
+#ifdef MA_WIN32
+ "libjack.dll",
+ "libjack64.dll"
+#else
+ "libjack.so",
+ "libjack.so.0"
+#endif
+ };
+ size_t i;
+
+ for (i = 0; i < ma_countof(libjackNames); ++i) {
+ pContext->jack.jackSO = ma_dlopen(pContext, libjackNames[i]);
+ if (pContext->jack.jackSO != NULL) {
+ break;
+ }
+ }
+
+ if (pContext->jack.jackSO == NULL) {
+ return MA_NO_BACKEND;
+ }
+
+ pContext->jack.jack_client_open = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_client_open");
+ pContext->jack.jack_client_close = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_client_close");
+ pContext->jack.jack_client_name_size = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_client_name_size");
+ pContext->jack.jack_set_process_callback = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_set_process_callback");
+ pContext->jack.jack_set_buffer_size_callback = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_set_buffer_size_callback");
+ pContext->jack.jack_on_shutdown = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_on_shutdown");
+ pContext->jack.jack_get_sample_rate = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_get_sample_rate");
+ pContext->jack.jack_get_buffer_size = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_get_buffer_size");
+ pContext->jack.jack_get_ports = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_get_ports");
+ pContext->jack.jack_activate = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_activate");
+ pContext->jack.jack_deactivate = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_deactivate");
+ pContext->jack.jack_connect = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_connect");
+ pContext->jack.jack_port_register = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_port_register");
+ pContext->jack.jack_port_name = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_port_name");
+ pContext->jack.jack_port_get_buffer = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_port_get_buffer");
+ pContext->jack.jack_free = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_free");
+#else
+ /*
+ This strange assignment system is here just to ensure type safety of miniaudio's function pointer
+ types. If anything differs slightly the compiler should throw a warning.
+ */
+ ma_jack_client_open_proc _jack_client_open = jack_client_open;
+ ma_jack_client_close_proc _jack_client_close = jack_client_close;
+ ma_jack_client_name_size_proc _jack_client_name_size = jack_client_name_size;
+ ma_jack_set_process_callback_proc _jack_set_process_callback = jack_set_process_callback;
+ ma_jack_set_buffer_size_callback_proc _jack_set_buffer_size_callback = jack_set_buffer_size_callback;
+ ma_jack_on_shutdown_proc _jack_on_shutdown = jack_on_shutdown;
+ ma_jack_get_sample_rate_proc _jack_get_sample_rate = jack_get_sample_rate;
+ ma_jack_get_buffer_size_proc _jack_get_buffer_size = jack_get_buffer_size;
+ ma_jack_get_ports_proc _jack_get_ports = jack_get_ports;
+ ma_jack_activate_proc _jack_activate = jack_activate;
+ ma_jack_deactivate_proc _jack_deactivate = jack_deactivate;
+ ma_jack_connect_proc _jack_connect = jack_connect;
+ ma_jack_port_register_proc _jack_port_register = jack_port_register;
+ ma_jack_port_name_proc _jack_port_name = jack_port_name;
+ ma_jack_port_get_buffer_proc _jack_port_get_buffer = jack_port_get_buffer;
+ ma_jack_free_proc _jack_free = jack_free;
+
+ pContext->jack.jack_client_open = (ma_proc)_jack_client_open;
+ pContext->jack.jack_client_close = (ma_proc)_jack_client_close;
+ pContext->jack.jack_client_name_size = (ma_proc)_jack_client_name_size;
+ pContext->jack.jack_set_process_callback = (ma_proc)_jack_set_process_callback;
+ pContext->jack.jack_set_buffer_size_callback = (ma_proc)_jack_set_buffer_size_callback;
+ pContext->jack.jack_on_shutdown = (ma_proc)_jack_on_shutdown;
+ pContext->jack.jack_get_sample_rate = (ma_proc)_jack_get_sample_rate;
+ pContext->jack.jack_get_buffer_size = (ma_proc)_jack_get_buffer_size;
+ pContext->jack.jack_get_ports = (ma_proc)_jack_get_ports;
+ pContext->jack.jack_activate = (ma_proc)_jack_activate;
+ pContext->jack.jack_deactivate = (ma_proc)_jack_deactivate;
+ pContext->jack.jack_connect = (ma_proc)_jack_connect;
+ pContext->jack.jack_port_register = (ma_proc)_jack_port_register;
+ pContext->jack.jack_port_name = (ma_proc)_jack_port_name;
+ pContext->jack.jack_port_get_buffer = (ma_proc)_jack_port_get_buffer;
+ pContext->jack.jack_free = (ma_proc)_jack_free;
+#endif
+
+ if (pConfig->jack.pClientName != NULL) {
+ pContext->jack.pClientName = ma_copy_string(pConfig->jack.pClientName, &pContext->allocationCallbacks);
+ }
+ pContext->jack.tryStartServer = pConfig->jack.tryStartServer;
+
+ /*
+ Getting here means the JACK library is installed, but it doesn't necessarily mean it's usable. We need to quickly test this by connecting
+ a temporary client.
+ */
+ {
+ ma_jack_client_t* pDummyClient;
+ ma_result result = ma_context_open_client__jack(pContext, &pDummyClient);
+ if (result != MA_SUCCESS) {
+ ma_free(pContext->jack.pClientName, &pContext->allocationCallbacks);
+ #ifndef MA_NO_RUNTIME_LINKING
+ ma_dlclose(pContext, pContext->jack.jackSO);
+ #endif
+ return MA_NO_BACKEND;
+ }
+
+ ((ma_jack_client_close_proc)pContext->jack.jack_client_close)((ma_jack_client_t*)pDummyClient);
+ }
+
+
+ pCallbacks->onContextInit = ma_context_init__jack;
+ pCallbacks->onContextUninit = ma_context_uninit__jack;
+ pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__jack;
+ pCallbacks->onContextGetDeviceInfo = ma_context_get_device_info__jack;
+ pCallbacks->onDeviceInit = ma_device_init__jack;
+ pCallbacks->onDeviceUninit = ma_device_uninit__jack;
+ pCallbacks->onDeviceStart = ma_device_start__jack;
+ pCallbacks->onDeviceStop = ma_device_stop__jack;
+ pCallbacks->onDeviceRead = NULL; /* Not used because JACK is asynchronous. */
+ pCallbacks->onDeviceWrite = NULL; /* Not used because JACK is asynchronous. */
+ pCallbacks->onDeviceDataLoop = NULL; /* Not used because JACK is asynchronous. */
+
+ return MA_SUCCESS;
+}
+#endif /* JACK */
+
+
+
+/******************************************************************************
+
+Core Audio Backend
+
+References
+==========
+- Technical Note TN2091: Device input using the HAL Output Audio Unit
+ https://developer.apple.com/library/archive/technotes/tn2091/_index.html
+
+******************************************************************************/
+#ifdef MA_HAS_COREAUDIO
+#include <TargetConditionals.h>
+
+#if defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE == 1
+ #define MA_APPLE_MOBILE
+ #if defined(TARGET_OS_TV) && TARGET_OS_TV == 1
+ #define MA_APPLE_TV
+ #endif
+ #if defined(TARGET_OS_WATCH) && TARGET_OS_WATCH == 1
+ #define MA_APPLE_WATCH
+ #endif
+ #if __has_feature(objc_arc)
+ #define MA_BRIDGE_TRANSFER __bridge_transfer
+ #define MA_BRIDGE_RETAINED __bridge_retained
+ #else
+ #define MA_BRIDGE_TRANSFER
+ #define MA_BRIDGE_RETAINED
+ #endif
+#else
+ #define MA_APPLE_DESKTOP
+#endif
+
+#if defined(MA_APPLE_DESKTOP)
+#include <CoreAudio/CoreAudio.h>
+#else
+#include <AVFoundation/AVFoundation.h>
+#endif
+
+#include <AudioToolbox/AudioToolbox.h>
+
+/* CoreFoundation */
+typedef Boolean (* ma_CFStringGetCString_proc)(CFStringRef theString, char* buffer, CFIndex bufferSize, CFStringEncoding encoding);
+typedef void (* ma_CFRelease_proc)(CFTypeRef cf);
+
+/* CoreAudio */
+#if defined(MA_APPLE_DESKTOP)
+typedef OSStatus (* ma_AudioObjectGetPropertyData_proc)(AudioObjectID inObjectID, const AudioObjectPropertyAddress* inAddress, UInt32 inQualifierDataSize, const void* inQualifierData, UInt32* ioDataSize, void* outData);
+typedef OSStatus (* ma_AudioObjectGetPropertyDataSize_proc)(AudioObjectID inObjectID, const AudioObjectPropertyAddress* inAddress, UInt32 inQualifierDataSize, const void* inQualifierData, UInt32* outDataSize);
+typedef OSStatus (* ma_AudioObjectSetPropertyData_proc)(AudioObjectID inObjectID, const AudioObjectPropertyAddress* inAddress, UInt32 inQualifierDataSize, const void* inQualifierData, UInt32 inDataSize, const void* inData);
+typedef OSStatus (* ma_AudioObjectAddPropertyListener_proc)(AudioObjectID inObjectID, const AudioObjectPropertyAddress* inAddress, AudioObjectPropertyListenerProc inListener, void* inClientData);
+typedef OSStatus (* ma_AudioObjectRemovePropertyListener_proc)(AudioObjectID inObjectID, const AudioObjectPropertyAddress* inAddress, AudioObjectPropertyListenerProc inListener, void* inClientData);
+#endif
+
+/* AudioToolbox */
+typedef AudioComponent (* ma_AudioComponentFindNext_proc)(AudioComponent inComponent, const AudioComponentDescription* inDesc);
+typedef OSStatus (* ma_AudioComponentInstanceDispose_proc)(AudioComponentInstance inInstance);
+typedef OSStatus (* ma_AudioComponentInstanceNew_proc)(AudioComponent inComponent, AudioComponentInstance* outInstance);
+typedef OSStatus (* ma_AudioOutputUnitStart_proc)(AudioUnit inUnit);
+typedef OSStatus (* ma_AudioOutputUnitStop_proc)(AudioUnit inUnit);
+typedef OSStatus (* ma_AudioUnitAddPropertyListener_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitPropertyListenerProc inProc, void* inProcUserData);
+typedef OSStatus (* ma_AudioUnitGetPropertyInfo_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitScope inScope, AudioUnitElement inElement, UInt32* outDataSize, Boolean* outWriteable);
+typedef OSStatus (* ma_AudioUnitGetProperty_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitScope inScope, AudioUnitElement inElement, void* outData, UInt32* ioDataSize);
+typedef OSStatus (* ma_AudioUnitSetProperty_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitScope inScope, AudioUnitElement inElement, const void* inData, UInt32 inDataSize);
+typedef OSStatus (* ma_AudioUnitInitialize_proc)(AudioUnit inUnit);
+typedef OSStatus (* ma_AudioUnitRender_proc)(AudioUnit inUnit, AudioUnitRenderActionFlags* ioActionFlags, const AudioTimeStamp* inTimeStamp, UInt32 inOutputBusNumber, UInt32 inNumberFrames, AudioBufferList* ioData);
+
+
+#define MA_COREAUDIO_OUTPUT_BUS 0
+#define MA_COREAUDIO_INPUT_BUS 1
+
+#if defined(MA_APPLE_DESKTOP)
+static ma_result ma_device_reinit_internal__coreaudio(ma_device* pDevice, ma_device_type deviceType, ma_bool32 disposePreviousAudioUnit);
+#endif
+
+/*
+Core Audio
+
+So far, Core Audio has been the worst backend to work with due to being both unintuitive and having almost no documentation
+apart from comments in the headers (which admittedly are quite good). For my own purposes, and for anybody out there whose
+needing to figure out how this darn thing works, I'm going to outline a few things here.
+
+Since miniaudio is a fairly low-level API, one of the things it needs is control over specific devices, and it needs to be
+able to identify whether or not it can be used as playback and/or capture. The AudioObject API is the only one I've seen
+that supports this level of detail. There was some public domain sample code I stumbled across that used the AudioComponent
+and AudioUnit APIs, but I couldn't see anything that gave low-level control over device selection and capabilities (the
+distinction between playback and capture in particular). Therefore, miniaudio is using the AudioObject API.
+
+Most (all?) functions in the AudioObject API take a AudioObjectID as it's input. This is the device identifier. When
+retrieving global information, such as the device list, you use kAudioObjectSystemObject. When retrieving device-specific
+data, you pass in the ID for that device. In order to retrieve device-specific IDs you need to enumerate over each of the
+devices. This is done using the AudioObjectGetPropertyDataSize() and AudioObjectGetPropertyData() APIs which seem to be
+the central APIs for retrieving information about the system and specific devices.
+
+To use the AudioObjectGetPropertyData() API you need to use the notion of a property address. A property address is a
+structure with three variables and is used to identify which property you are getting or setting. The first is the "selector"
+which is basically the specific property that you're wanting to retrieve or set. The second is the "scope", which is
+typically set to kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyScopeInput for input-specific properties and
+kAudioObjectPropertyScopeOutput for output-specific properties. The last is the "element" which is always set to
+kAudioObjectPropertyElementMaster in miniaudio's case. I don't know of any cases where this would be set to anything different.
+
+Back to the earlier issue of device retrieval, you first use the AudioObjectGetPropertyDataSize() API to retrieve the size
+of the raw data which is just a list of AudioDeviceID's. You use the kAudioObjectSystemObject AudioObjectID, and a property
+address with the kAudioHardwarePropertyDevices selector and the kAudioObjectPropertyScopeGlobal scope. Once you have the
+size, allocate a block of memory of that size and then call AudioObjectGetPropertyData(). The data is just a list of
+AudioDeviceID's so just do "dataSize/sizeof(AudioDeviceID)" to know the device count.
+*/
+
+static ma_result ma_result_from_OSStatus(OSStatus status)
+{
+ switch (status)
+ {
+ case noErr: return MA_SUCCESS;
+ #if defined(MA_APPLE_DESKTOP)
+ case kAudioHardwareNotRunningError: return MA_DEVICE_NOT_STARTED;
+ case kAudioHardwareUnspecifiedError: return MA_ERROR;
+ case kAudioHardwareUnknownPropertyError: return MA_INVALID_ARGS;
+ case kAudioHardwareBadPropertySizeError: return MA_INVALID_OPERATION;
+ case kAudioHardwareIllegalOperationError: return MA_INVALID_OPERATION;
+ case kAudioHardwareBadObjectError: return MA_INVALID_ARGS;
+ case kAudioHardwareBadDeviceError: return MA_INVALID_ARGS;
+ case kAudioHardwareBadStreamError: return MA_INVALID_ARGS;
+ case kAudioHardwareUnsupportedOperationError: return MA_INVALID_OPERATION;
+ case kAudioDeviceUnsupportedFormatError: return MA_FORMAT_NOT_SUPPORTED;
+ case kAudioDevicePermissionsError: return MA_ACCESS_DENIED;
+ #endif
+ default: return MA_ERROR;
+ }
+}
+
+#if 0
+/*
+Converts a single AudioChannelBitmap bit to a miniaudio channel position. Currently compiled out:
+its only caller is the also-disabled AudioChannelBitmap path in ma_get_channel_map_from_AudioChannelLayout().
+NOTE(review): that disabled caller invokes ma_channel_from_AudioChannelBit(), which does not match
+this function's name (ma_channel_from_AudioChannelBitmap) — reconcile before enabling either block.
+*/
+static ma_channel ma_channel_from_AudioChannelBitmap(AudioChannelBitmap bit)
+{
+    switch (bit)
+    {
+        case kAudioChannelBit_Left:                 return MA_CHANNEL_LEFT;
+        case kAudioChannelBit_Right:                return MA_CHANNEL_RIGHT;
+        case kAudioChannelBit_Center:               return MA_CHANNEL_FRONT_CENTER;
+        case kAudioChannelBit_LFEScreen:            return MA_CHANNEL_LFE;
+        case kAudioChannelBit_LeftSurround:         return MA_CHANNEL_BACK_LEFT;
+        case kAudioChannelBit_RightSurround:        return MA_CHANNEL_BACK_RIGHT;
+        case kAudioChannelBit_LeftCenter:           return MA_CHANNEL_FRONT_LEFT_CENTER;
+        case kAudioChannelBit_RightCenter:          return MA_CHANNEL_FRONT_RIGHT_CENTER;
+        case kAudioChannelBit_CenterSurround:       return MA_CHANNEL_BACK_CENTER;
+        case kAudioChannelBit_LeftSurroundDirect:   return MA_CHANNEL_SIDE_LEFT;
+        case kAudioChannelBit_RightSurroundDirect:  return MA_CHANNEL_SIDE_RIGHT;
+        case kAudioChannelBit_TopCenterSurround:    return MA_CHANNEL_TOP_CENTER;
+        case kAudioChannelBit_VerticalHeightLeft:   return MA_CHANNEL_TOP_FRONT_LEFT;
+        case kAudioChannelBit_VerticalHeightCenter: return MA_CHANNEL_TOP_FRONT_CENTER;
+        case kAudioChannelBit_VerticalHeightRight:  return MA_CHANNEL_TOP_FRONT_RIGHT;
+        case kAudioChannelBit_TopBackLeft:          return MA_CHANNEL_TOP_BACK_LEFT;
+        case kAudioChannelBit_TopBackCenter:        return MA_CHANNEL_TOP_BACK_CENTER;
+        case kAudioChannelBit_TopBackRight:         return MA_CHANNEL_TOP_BACK_RIGHT;
+        default: return MA_CHANNEL_NONE;
+    }
+}
+#endif
+
+/*
+Converts an AudioStreamBasicDescription into one of miniaudio's native sample formats. Only packed,
+native-endian linear PCM can be represented; anything else yields MA_FORMAT_NOT_SUPPORTED and
+*pFormatOut is left as ma_format_unknown.
+*/
+static ma_result ma_format_from_AudioStreamBasicDescription(const AudioStreamBasicDescription* pDescription, ma_format* pFormatOut)
+{
+    MA_ASSERT(pDescription != NULL);
+    MA_ASSERT(pFormatOut != NULL);
+
+    *pFormatOut = ma_format_unknown; /* Safety. */
+
+    /* Only linear PCM is supported. */
+    if (pDescription->mFormatID != kAudioFormatLinearPCM) {
+        return MA_FORMAT_NOT_SUPPORTED;
+    }
+
+    /* Non-packed formats that are aligned high are not supported. */
+    if ((pDescription->mFormatFlags & kLinearPCMFormatFlagIsAlignedHigh) != 0) {
+        return MA_FORMAT_NOT_SUPPORTED;
+    }
+
+    /* Only native-endian data is supported. */
+    if (ma_is_little_endian()) {
+        if ((pDescription->mFormatFlags & kAudioFormatFlagIsBigEndian) != 0) {
+            return MA_FORMAT_NOT_SUPPORTED;
+        }
+    } else if (ma_is_big_endian()) {
+        if ((pDescription->mFormatFlags & kAudioFormatFlagIsBigEndian) == 0) {
+            return MA_FORMAT_NOT_SUPPORTED;
+        }
+    }
+
+    /*
+    Non-interleaved (kAudioFormatFlagIsNonInterleaved) data is deliberately not rejected here. Support
+    for non-interleaved formats will be added in a future version of miniaudio.
+    */
+
+    if ((pDescription->mFormatFlags & kLinearPCMFormatFlagIsFloat) != 0) {
+        if (pDescription->mBitsPerChannel == 32) {
+            *pFormatOut = ma_format_f32;
+            return MA_SUCCESS;
+        }
+    } else if ((pDescription->mFormatFlags & kLinearPCMFormatFlagIsSignedInteger) != 0) {
+        switch (pDescription->mBitsPerChannel) {
+            case 16:
+            {
+                *pFormatOut = ma_format_s16;
+                return MA_SUCCESS;
+            }
+            case 24:
+            {
+                /* Tightly packed 24-bit is supported; 24-in-32 containers (s24_32) are not yet implemented. */
+                if (pDescription->mBytesPerFrame == (pDescription->mBitsPerChannel/8 * pDescription->mChannelsPerFrame)) {
+                    *pFormatOut = ma_format_s24;
+                    return MA_SUCCESS;
+                }
+            } break;
+            case 32:
+            {
+                *pFormatOut = ma_format_s32;
+                return MA_SUCCESS;
+            }
+            default: break;
+        }
+    } else {
+        /* Unsigned integer PCM. Only 8-bit is supported. */
+        if (pDescription->mBitsPerChannel == 8) {
+            *pFormatOut = ma_format_u8;
+            return MA_SUCCESS;
+        }
+    }
+
+    /* Getting here means the format is not supported. */
+    return MA_FORMAT_NOT_SUPPORTED;
+}
+
+#if defined(MA_APPLE_DESKTOP)
+/*
+Converts a Core Audio AudioChannelLabel to a miniaudio channel position. Labels with no meaningful
+miniaudio equivalent (ambisonics, haptics, click tracks, etc.) map to MA_CHANNEL_NONE.
+*/
+static ma_channel ma_channel_from_AudioChannelLabel(AudioChannelLabel label)
+{
+    switch (label)
+    {
+        case kAudioChannelLabel_Unknown:              return MA_CHANNEL_NONE;
+        case kAudioChannelLabel_Unused:               return MA_CHANNEL_NONE;
+        case kAudioChannelLabel_UseCoordinates:       return MA_CHANNEL_NONE;
+        case kAudioChannelLabel_Left:                 return MA_CHANNEL_LEFT;
+        case kAudioChannelLabel_Right:                return MA_CHANNEL_RIGHT;
+        case kAudioChannelLabel_Center:               return MA_CHANNEL_FRONT_CENTER;
+        case kAudioChannelLabel_LFEScreen:            return MA_CHANNEL_LFE;
+        case kAudioChannelLabel_LeftSurround:         return MA_CHANNEL_BACK_LEFT;
+        case kAudioChannelLabel_RightSurround:        return MA_CHANNEL_BACK_RIGHT;
+        case kAudioChannelLabel_LeftCenter:           return MA_CHANNEL_FRONT_LEFT_CENTER;
+        case kAudioChannelLabel_RightCenter:          return MA_CHANNEL_FRONT_RIGHT_CENTER;
+        case kAudioChannelLabel_CenterSurround:       return MA_CHANNEL_BACK_CENTER;
+        case kAudioChannelLabel_LeftSurroundDirect:   return MA_CHANNEL_SIDE_LEFT;
+        case kAudioChannelLabel_RightSurroundDirect:  return MA_CHANNEL_SIDE_RIGHT;
+        case kAudioChannelLabel_TopCenterSurround:    return MA_CHANNEL_TOP_CENTER;
+        case kAudioChannelLabel_VerticalHeightLeft:   return MA_CHANNEL_TOP_FRONT_LEFT;
+        case kAudioChannelLabel_VerticalHeightCenter: return MA_CHANNEL_TOP_FRONT_CENTER;
+        case kAudioChannelLabel_VerticalHeightRight:  return MA_CHANNEL_TOP_FRONT_RIGHT;
+        case kAudioChannelLabel_TopBackLeft:          return MA_CHANNEL_TOP_BACK_LEFT;
+        case kAudioChannelLabel_TopBackCenter:        return MA_CHANNEL_TOP_BACK_CENTER;
+        case kAudioChannelLabel_TopBackRight:         return MA_CHANNEL_TOP_BACK_RIGHT;
+        case kAudioChannelLabel_RearSurroundLeft:     return MA_CHANNEL_BACK_LEFT;
+        case kAudioChannelLabel_RearSurroundRight:    return MA_CHANNEL_BACK_RIGHT;
+        case kAudioChannelLabel_LeftWide:             return MA_CHANNEL_SIDE_LEFT;
+        case kAudioChannelLabel_RightWide:            return MA_CHANNEL_SIDE_RIGHT;
+        case kAudioChannelLabel_LFE2:                 return MA_CHANNEL_LFE;
+        case kAudioChannelLabel_LeftTotal:            return MA_CHANNEL_LEFT;
+        case kAudioChannelLabel_RightTotal:           return MA_CHANNEL_RIGHT;
+        case kAudioChannelLabel_HearingImpaired:      return MA_CHANNEL_NONE;
+        case kAudioChannelLabel_Narration:            return MA_CHANNEL_MONO;
+        case kAudioChannelLabel_Mono:                 return MA_CHANNEL_MONO;
+        case kAudioChannelLabel_DialogCentricMix:     return MA_CHANNEL_MONO;
+        case kAudioChannelLabel_CenterSurroundDirect: return MA_CHANNEL_BACK_CENTER;
+        case kAudioChannelLabel_Haptic:               return MA_CHANNEL_NONE;
+        case kAudioChannelLabel_Ambisonic_W:          return MA_CHANNEL_NONE;
+        case kAudioChannelLabel_Ambisonic_X:          return MA_CHANNEL_NONE;
+        case kAudioChannelLabel_Ambisonic_Y:          return MA_CHANNEL_NONE;
+        case kAudioChannelLabel_Ambisonic_Z:          return MA_CHANNEL_NONE;
+        /* NOTE(review): mid/side and X/Y microphone pairs are approximated as plain left/right — confirm this is intended. */
+        case kAudioChannelLabel_MS_Mid:               return MA_CHANNEL_LEFT;
+        case kAudioChannelLabel_MS_Side:              return MA_CHANNEL_RIGHT;
+        case kAudioChannelLabel_XY_X:                 return MA_CHANNEL_LEFT;
+        case kAudioChannelLabel_XY_Y:                 return MA_CHANNEL_RIGHT;
+        case kAudioChannelLabel_HeadphonesLeft:       return MA_CHANNEL_LEFT;
+        case kAudioChannelLabel_HeadphonesRight:      return MA_CHANNEL_RIGHT;
+        case kAudioChannelLabel_ClickTrack:           return MA_CHANNEL_NONE;
+        case kAudioChannelLabel_ForeignLanguage:      return MA_CHANNEL_NONE;
+        case kAudioChannelLabel_Discrete:             return MA_CHANNEL_NONE;
+        case kAudioChannelLabel_Discrete_0:           return MA_CHANNEL_AUX_0;
+        case kAudioChannelLabel_Discrete_1:           return MA_CHANNEL_AUX_1;
+        case kAudioChannelLabel_Discrete_2:           return MA_CHANNEL_AUX_2;
+        case kAudioChannelLabel_Discrete_3:           return MA_CHANNEL_AUX_3;
+        case kAudioChannelLabel_Discrete_4:           return MA_CHANNEL_AUX_4;
+        case kAudioChannelLabel_Discrete_5:           return MA_CHANNEL_AUX_5;
+        case kAudioChannelLabel_Discrete_6:           return MA_CHANNEL_AUX_6;
+        case kAudioChannelLabel_Discrete_7:           return MA_CHANNEL_AUX_7;
+        case kAudioChannelLabel_Discrete_8:           return MA_CHANNEL_AUX_8;
+        case kAudioChannelLabel_Discrete_9:           return MA_CHANNEL_AUX_9;
+        case kAudioChannelLabel_Discrete_10:          return MA_CHANNEL_AUX_10;
+        case kAudioChannelLabel_Discrete_11:          return MA_CHANNEL_AUX_11;
+        case kAudioChannelLabel_Discrete_12:          return MA_CHANNEL_AUX_12;
+        case kAudioChannelLabel_Discrete_13:          return MA_CHANNEL_AUX_13;
+        case kAudioChannelLabel_Discrete_14:          return MA_CHANNEL_AUX_14;
+        case kAudioChannelLabel_Discrete_15:          return MA_CHANNEL_AUX_15;
+        case kAudioChannelLabel_Discrete_65535:       return MA_CHANNEL_NONE;
+
+    #if 0   /* Introduced in a later version of macOS. */
+        case kAudioChannelLabel_HOA_ACN:              return MA_CHANNEL_NONE;
+        case kAudioChannelLabel_HOA_ACN_0:            return MA_CHANNEL_AUX_0;
+        case kAudioChannelLabel_HOA_ACN_1:            return MA_CHANNEL_AUX_1;
+        case kAudioChannelLabel_HOA_ACN_2:            return MA_CHANNEL_AUX_2;
+        case kAudioChannelLabel_HOA_ACN_3:            return MA_CHANNEL_AUX_3;
+        case kAudioChannelLabel_HOA_ACN_4:            return MA_CHANNEL_AUX_4;
+        case kAudioChannelLabel_HOA_ACN_5:            return MA_CHANNEL_AUX_5;
+        case kAudioChannelLabel_HOA_ACN_6:            return MA_CHANNEL_AUX_6;
+        case kAudioChannelLabel_HOA_ACN_7:            return MA_CHANNEL_AUX_7;
+        case kAudioChannelLabel_HOA_ACN_8:            return MA_CHANNEL_AUX_8;
+        case kAudioChannelLabel_HOA_ACN_9:            return MA_CHANNEL_AUX_9;
+        case kAudioChannelLabel_HOA_ACN_10:           return MA_CHANNEL_AUX_10;
+        case kAudioChannelLabel_HOA_ACN_11:           return MA_CHANNEL_AUX_11;
+        case kAudioChannelLabel_HOA_ACN_12:           return MA_CHANNEL_AUX_12;
+        case kAudioChannelLabel_HOA_ACN_13:           return MA_CHANNEL_AUX_13;
+        case kAudioChannelLabel_HOA_ACN_14:           return MA_CHANNEL_AUX_14;
+        case kAudioChannelLabel_HOA_ACN_15:           return MA_CHANNEL_AUX_15;
+        case kAudioChannelLabel_HOA_ACN_65024:        return MA_CHANNEL_NONE;
+    #endif
+
+        default: return MA_CHANNEL_NONE;
+    }
+}
+
+/*
+Builds a miniaudio channel map from a Core Audio AudioChannelLayout. Layouts that use explicit
+channel descriptions are translated label-by-label; otherwise the layout tag is used to infer a
+map. At most channelMapCap entries are written to pChannelMap.
+*/
+static ma_result ma_get_channel_map_from_AudioChannelLayout(AudioChannelLayout* pChannelLayout, ma_channel* pChannelMap, size_t channelMapCap)
+{
+    MA_ASSERT(pChannelLayout != NULL);
+
+    if (pChannelLayout->mChannelLayoutTag == kAudioChannelLayoutTag_UseChannelDescriptions) {
+        UInt32 iChannel;
+        for (iChannel = 0; iChannel < pChannelLayout->mNumberChannelDescriptions && iChannel < channelMapCap; ++iChannel) {
+            pChannelMap[iChannel] = ma_channel_from_AudioChannelLabel(pChannelLayout->mChannelDescriptions[iChannel].mChannelLabel);
+        }
+    } else
+#if 0
+    if (pChannelLayout->mChannelLayoutTag == kAudioChannelLayoutTag_UseChannelBitmap) {
+        /* This is the same kind of system that's used by Windows audio APIs. */
+        UInt32 iChannel = 0;
+        UInt32 iBit;
+        AudioChannelBitmap bitmap = pChannelLayout->mChannelBitmap;
+        for (iBit = 0; iBit < 32 && iChannel < channelMapCap; ++iBit) {
+            AudioChannelBitmap bit = bitmap & (1 << iBit);
+            if (bit != 0) {
+                pChannelMap[iChannel++] = ma_channel_from_AudioChannelBit(bit);
+            }
+        }
+    } else
+#endif
+    {
+        /*
+        Need to use the tag to determine the channel map. For now I'm just assuming a default channel map, but later on this should
+        be updated to determine the mapping based on the tag.
+        */
+        UInt32 channelCount;
+
+        /* Our channel map retrieval APIs below take 32-bit integers, so we'll want to clamp the channel map capacity. */
+        if (channelMapCap > 0xFFFFFFFF) {
+            channelMapCap = 0xFFFFFFFF;
+        }
+
+        channelCount = ma_min(AudioChannelLayoutTag_GetNumberOfChannels(pChannelLayout->mChannelLayoutTag), (UInt32)channelMapCap);
+
+        switch (pChannelLayout->mChannelLayoutTag)
+        {
+            case kAudioChannelLayoutTag_Mono:
+            case kAudioChannelLayoutTag_Stereo:
+            case kAudioChannelLayoutTag_StereoHeadphones:
+            case kAudioChannelLayoutTag_MatrixStereo:
+            case kAudioChannelLayoutTag_MidSide:
+            case kAudioChannelLayoutTag_XY:
+            case kAudioChannelLayoutTag_Binaural:
+            case kAudioChannelLayoutTag_Ambisonic_B_Format:
+            {
+                ma_channel_map_init_standard(ma_standard_channel_map_default, pChannelMap, channelMapCap, channelCount);
+            } break;
+
+            /*
+            Fix: each write below is guarded against channelMapCap so a small output buffer cannot be
+            overflowed. The previous code wrote up to pChannelMap[7] unconditionally.
+            */
+            case kAudioChannelLayoutTag_Octagonal:
+            {
+                if (channelMapCap > 7) { pChannelMap[7] = MA_CHANNEL_SIDE_RIGHT; }
+                if (channelMapCap > 6) { pChannelMap[6] = MA_CHANNEL_SIDE_LEFT;  }
+            } /* Intentional fallthrough. */
+            case kAudioChannelLayoutTag_Hexagonal:
+            {
+                if (channelMapCap > 5) { pChannelMap[5] = MA_CHANNEL_BACK_CENTER; }
+            } /* Intentional fallthrough. */
+            case kAudioChannelLayoutTag_Pentagonal:
+            {
+                if (channelMapCap > 4) { pChannelMap[4] = MA_CHANNEL_FRONT_CENTER; }
+            } /* Intentional fallthrough. */
+            case kAudioChannelLayoutTag_Quadraphonic:
+            {
+                if (channelMapCap > 3) { pChannelMap[3] = MA_CHANNEL_BACK_RIGHT; }
+                if (channelMapCap > 2) { pChannelMap[2] = MA_CHANNEL_BACK_LEFT;  }
+                if (channelMapCap > 1) { pChannelMap[1] = MA_CHANNEL_RIGHT;      }
+                if (channelMapCap > 0) { pChannelMap[0] = MA_CHANNEL_LEFT;       }
+            } break;
+
+            /* TODO: Add support for more tags here. */
+
+            default:
+            {
+                ma_channel_map_init_standard(ma_standard_channel_map_default, pChannelMap, channelMapCap, channelCount);
+            } break;
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+/*
+Retrieves the AudioObjectIDs of every audio device on the system. On success *ppDeviceObjectIDs
+points to an array of *pDeviceCount IDs which the caller must free with ma_free().
+*/
+static ma_result ma_get_device_object_ids__coreaudio(ma_context* pContext, UInt32* pDeviceCount, AudioObjectID** ppDeviceObjectIDs)
+{
+    AudioObjectPropertyAddress propAddress;
+    UInt32 dataSize;
+    OSStatus status;
+    AudioObjectID* pObjectIDs;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pDeviceCount != NULL);
+    MA_ASSERT(ppDeviceObjectIDs != NULL);
+
+    /* Safety. */
+    *pDeviceCount      = 0;
+    *ppDeviceObjectIDs = NULL;
+
+    propAddress.mSelector = kAudioHardwarePropertyDevices;
+    propAddress.mScope    = kAudioObjectPropertyScopeGlobal;
+    propAddress.mElement  = kAudioObjectPropertyElementMaster;
+
+    /* Two-step retrieval: first query the size of the list, then allocate and fetch it. */
+    status = ((ma_AudioObjectGetPropertyDataSize_proc)pContext->coreaudio.AudioObjectGetPropertyDataSize)(kAudioObjectSystemObject, &propAddress, 0, NULL, &dataSize);
+    if (status != noErr) {
+        return ma_result_from_OSStatus(status);
+    }
+
+    pObjectIDs = (AudioObjectID*)ma_malloc(dataSize, &pContext->allocationCallbacks);
+    if (pObjectIDs == NULL) {
+        return MA_OUT_OF_MEMORY;
+    }
+
+    status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(kAudioObjectSystemObject, &propAddress, 0, NULL, &dataSize, pObjectIDs);
+    if (status != noErr) {
+        ma_free(pObjectIDs, &pContext->allocationCallbacks);
+        return ma_result_from_OSStatus(status);
+    }
+
+    /* The returned blob is a tightly packed array of AudioObjectIDs. */
+    *pDeviceCount      = dataSize / sizeof(AudioObjectID);
+    *ppDeviceObjectIDs = pObjectIDs;
+
+    return MA_SUCCESS;
+}
+
+/*
+Fetches a device's persistent UID as a CFStringRef. On success the caller owns the returned string
+and is responsible for releasing it with CFRelease().
+*/
+static ma_result ma_get_AudioObject_uid_as_CFStringRef(ma_context* pContext, AudioObjectID objectID, CFStringRef* pUID)
+{
+    AudioObjectPropertyAddress propAddress;
+    UInt32 dataSize = sizeof(*pUID);
+    OSStatus status;
+
+    MA_ASSERT(pContext != NULL);
+
+    propAddress.mSelector = kAudioDevicePropertyDeviceUID;
+    propAddress.mScope    = kAudioObjectPropertyScopeGlobal;
+    propAddress.mElement  = kAudioObjectPropertyElementMaster;
+
+    status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(objectID, &propAddress, 0, NULL, &dataSize, pUID);
+    if (status == noErr) {
+        return MA_SUCCESS;
+    }
+
+    return ma_result_from_OSStatus(status);
+}
+
+/*
+Retrieves a device's UID as a UTF-8 C string in bufferOut. The intermediate CFStringRef is always
+released before returning.
+*/
+static ma_result ma_get_AudioObject_uid(ma_context* pContext, AudioObjectID objectID, size_t bufferSize, char* bufferOut)
+{
+    CFStringRef uid;
+    ma_result result;
+
+    MA_ASSERT(pContext != NULL);
+
+    result = ma_get_AudioObject_uid_as_CFStringRef(pContext, objectID, &uid);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    if (!((ma_CFStringGetCString_proc)pContext->coreaudio.CFStringGetCString)(uid, bufferOut, bufferSize, kCFStringEncodingUTF8)) {
+        ((ma_CFRelease_proc)pContext->coreaudio.CFRelease)(uid); /* Fix: the CFString was previously leaked on this failure path. */
+        return MA_ERROR;
+    }
+
+    ((ma_CFRelease_proc)pContext->coreaudio.CFRelease)(uid);
+    return MA_SUCCESS;
+}
+
+/*
+Retrieves a device's human-readable name as a UTF-8 C string in bufferOut. The CFStringRef returned
+by Core Audio is always released before returning.
+*/
+static ma_result ma_get_AudioObject_name(ma_context* pContext, AudioObjectID objectID, size_t bufferSize, char* bufferOut)
+{
+    AudioObjectPropertyAddress propAddress;
+    CFStringRef deviceName = NULL;
+    UInt32 dataSize;
+    OSStatus status;
+
+    MA_ASSERT(pContext != NULL);
+
+    propAddress.mSelector = kAudioDevicePropertyDeviceNameCFString;
+    propAddress.mScope    = kAudioObjectPropertyScopeGlobal;
+    propAddress.mElement  = kAudioObjectPropertyElementMaster;
+
+    dataSize = sizeof(deviceName);
+    status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(objectID, &propAddress, 0, NULL, &dataSize, &deviceName);
+    if (status != noErr) {
+        return ma_result_from_OSStatus(status);
+    }
+
+    if (!((ma_CFStringGetCString_proc)pContext->coreaudio.CFStringGetCString)(deviceName, bufferOut, bufferSize, kCFStringEncodingUTF8)) {
+        ((ma_CFRelease_proc)pContext->coreaudio.CFRelease)(deviceName); /* Fix: the CFString was previously leaked on this failure path. */
+        return MA_ERROR;
+    }
+
+    ((ma_CFRelease_proc)pContext->coreaudio.CFRelease)(deviceName);
+    return MA_SUCCESS;
+}
+
+/*
+Determines whether or not a device supports the given scope (input or output). A device supports a
+scope when its stream configuration for that scope contains at least one buffer. Returns MA_FALSE
+on any failure, including out-of-memory.
+*/
+static ma_bool32 ma_does_AudioObject_support_scope(ma_context* pContext, AudioObjectID deviceObjectID, AudioObjectPropertyScope scope)
+{
+    AudioObjectPropertyAddress propAddress;
+    UInt32 dataSize;
+    OSStatus status;
+    AudioBufferList* pStreamConfig;
+    ma_bool32 isSupported;
+
+    MA_ASSERT(pContext != NULL);
+
+    /* To know whether or not a device supports a scope we need to look at its stream configuration. */
+    propAddress.mSelector = kAudioDevicePropertyStreamConfiguration;
+    propAddress.mScope    = scope;
+    propAddress.mElement  = kAudioObjectPropertyElementMaster;
+
+    status = ((ma_AudioObjectGetPropertyDataSize_proc)pContext->coreaudio.AudioObjectGetPropertyDataSize)(deviceObjectID, &propAddress, 0, NULL, &dataSize);
+    if (status != noErr) {
+        return MA_FALSE;
+    }
+
+    pStreamConfig = (AudioBufferList*)ma_malloc(dataSize, &pContext->allocationCallbacks);
+    if (pStreamConfig == NULL) {
+        return MA_FALSE; /* Out of memory. */
+    }
+
+    status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, pStreamConfig);
+    if (status != noErr) {
+        ma_free(pStreamConfig, &pContext->allocationCallbacks);
+        return MA_FALSE;
+    }
+
+    isSupported = (pStreamConfig->mNumberBuffers > 0) ? MA_TRUE : MA_FALSE;
+
+    ma_free(pStreamConfig, &pContext->allocationCallbacks);
+    return isSupported;
+}
+
+/* A device is a playback device when its output scope contains at least one stream. */
+static ma_bool32 ma_does_AudioObject_support_playback(ma_context* pContext, AudioObjectID deviceObjectID)
+{
+    return ma_does_AudioObject_support_scope(pContext, deviceObjectID, kAudioObjectPropertyScopeOutput);
+}
+
+/* A device is a capture device when its input scope contains at least one stream. */
+static ma_bool32 ma_does_AudioObject_support_capture(ma_context* pContext, AudioObjectID deviceObjectID)
+{
+    return ma_does_AudioObject_support_scope(pContext, deviceObjectID, kAudioObjectPropertyScopeInput);
+}
+
+
+/*
+Retrieves the list of virtual stream formats supported by a device for the given device type. On
+success *ppDescriptions points to an array of *pDescriptionCount entries; ownership transfers to
+the caller, who must free it with ma_free().
+*/
+static ma_result ma_get_AudioObject_stream_descriptions(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, UInt32* pDescriptionCount, AudioStreamRangedDescription** ppDescriptions) /* NOTE: Free the returned pointer with ma_free(). */
+{
+    AudioObjectPropertyAddress propAddress;
+    UInt32 dataSize;
+    OSStatus status;
+    AudioStreamRangedDescription* pDescriptions;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pDescriptionCount != NULL);
+    MA_ASSERT(ppDescriptions != NULL);
+
+    /*
+    TODO: Experiment with kAudioStreamPropertyAvailablePhysicalFormats instead of (or in addition to) kAudioStreamPropertyAvailableVirtualFormats. My
+    MacBook Pro uses s24/32 format, however, which miniaudio does not currently support.
+    */
+    propAddress.mSelector = kAudioStreamPropertyAvailableVirtualFormats; /*kAudioStreamPropertyAvailablePhysicalFormats;*/
+    propAddress.mScope    = (deviceType == ma_device_type_playback) ? kAudioObjectPropertyScopeOutput : kAudioObjectPropertyScopeInput;
+    propAddress.mElement  = kAudioObjectPropertyElementMaster;
+
+    /* Two-step retrieval: query the size first, then allocate and fetch the data. */
+    status = ((ma_AudioObjectGetPropertyDataSize_proc)pContext->coreaudio.AudioObjectGetPropertyDataSize)(deviceObjectID, &propAddress, 0, NULL, &dataSize);
+    if (status != noErr) {
+        return ma_result_from_OSStatus(status);
+    }
+
+    pDescriptions = (AudioStreamRangedDescription*)ma_malloc(dataSize, &pContext->allocationCallbacks);
+    if (pDescriptions == NULL) {
+        return MA_OUT_OF_MEMORY;
+    }
+
+    status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, pDescriptions);
+    if (status != noErr) {
+        ma_free(pDescriptions, &pContext->allocationCallbacks);
+        return ma_result_from_OSStatus(status);
+    }
+
+    *pDescriptionCount = dataSize / sizeof(*pDescriptions);
+    *ppDescriptions = pDescriptions;
+    return MA_SUCCESS;
+}
+
+
+/*
+Retrieves a device's preferred channel layout for the given device type. On success ownership of
+*ppChannelLayout transfers to the caller, who must free it with ma_free(). The layout is a
+variable-sized structure, hence the size query followed by an allocation.
+*/
+static ma_result ma_get_AudioObject_channel_layout(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, AudioChannelLayout** ppChannelLayout) /* NOTE: Free the returned pointer with ma_free(). */
+{
+    AudioObjectPropertyAddress propAddress;
+    UInt32 dataSize;
+    OSStatus status;
+    AudioChannelLayout* pChannelLayout;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(ppChannelLayout != NULL);
+
+    *ppChannelLayout = NULL;    /* Safety. */
+
+    propAddress.mSelector = kAudioDevicePropertyPreferredChannelLayout;
+    propAddress.mScope    = (deviceType == ma_device_type_playback) ? kAudioObjectPropertyScopeOutput : kAudioObjectPropertyScopeInput;
+    propAddress.mElement  = kAudioObjectPropertyElementMaster;
+
+    status = ((ma_AudioObjectGetPropertyDataSize_proc)pContext->coreaudio.AudioObjectGetPropertyDataSize)(deviceObjectID, &propAddress, 0, NULL, &dataSize);
+    if (status != noErr) {
+        return ma_result_from_OSStatus(status);
+    }
+
+    pChannelLayout = (AudioChannelLayout*)ma_malloc(dataSize, &pContext->allocationCallbacks);
+    if (pChannelLayout == NULL) {
+        return MA_OUT_OF_MEMORY;
+    }
+
+    status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, pChannelLayout);
+    if (status != noErr) {
+        ma_free(pChannelLayout, &pContext->allocationCallbacks);
+        return ma_result_from_OSStatus(status);
+    }
+
+    *ppChannelLayout = pChannelLayout;
+    return MA_SUCCESS;
+}
+
+/*
+Retrieves a device's channel count from its preferred channel layout. How the count is encoded
+depends on the layout tag: an explicit description count, a popcount of the channel bitmap, or a
+count derived from the tag itself.
+*/
+static ma_result ma_get_AudioObject_channel_count(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_uint32* pChannelCount)
+{
+    AudioChannelLayout* pLayout;
+    ma_result result;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pChannelCount != NULL);
+
+    *pChannelCount = 0; /* Safety. */
+
+    result = ma_get_AudioObject_channel_layout(pContext, deviceObjectID, deviceType, &pLayout);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    switch (pLayout->mChannelLayoutTag)
+    {
+        case kAudioChannelLayoutTag_UseChannelDescriptions:
+        {
+            *pChannelCount = pLayout->mNumberChannelDescriptions;
+        } break;
+
+        case kAudioChannelLayoutTag_UseChannelBitmap:
+        {
+            *pChannelCount = ma_count_set_bits(pLayout->mChannelBitmap);
+        } break;
+
+        default:
+        {
+            *pChannelCount = AudioChannelLayoutTag_GetNumberOfChannels(pLayout->mChannelLayoutTag);
+        } break;
+    }
+
+    ma_free(pLayout, &pContext->allocationCallbacks);
+    return MA_SUCCESS;
+}
+
+#if 0   /* Currently unused; kept for a future channel-map retrieval path. */
+/*
+Retrieves a device's channel map from its preferred channel layout. The layout is allocated by
+ma_get_AudioObject_channel_layout() and freed here on all paths.
+*/
+static ma_result ma_get_AudioObject_channel_map(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_channel* pChannelMap, size_t channelMapCap)
+{
+    AudioChannelLayout* pChannelLayout;
+    ma_result result;
+
+    MA_ASSERT(pContext != NULL);
+
+    result = ma_get_AudioObject_channel_layout(pContext, deviceObjectID, deviceType, &pChannelLayout);
+    if (result != MA_SUCCESS) {
+        return result;  /* Rather than always failing here, would it be more robust to simply assume a default? */
+    }
+
+    result = ma_get_channel_map_from_AudioChannelLayout(pChannelLayout, pChannelMap, channelMapCap);
+    if (result != MA_SUCCESS) {
+        ma_free(pChannelLayout, &pContext->allocationCallbacks);
+        return result;
+    }
+
+    ma_free(pChannelLayout, &pContext->allocationCallbacks);
+    return result;
+}
+#endif
+
+/*
+Retrieves the nominal sample rate ranges supported by a device for the given device type. On
+success *ppSampleRateRanges points to an array of *pSampleRateRangesCount AudioValueRange entries;
+ownership transfers to the caller, who must free it with ma_free().
+*/
+static ma_result ma_get_AudioObject_sample_rates(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, UInt32* pSampleRateRangesCount, AudioValueRange** ppSampleRateRanges) /* NOTE: Free the returned pointer with ma_free(). */
+{
+    AudioObjectPropertyAddress propAddress;
+    UInt32 dataSize;
+    OSStatus status;
+    AudioValueRange* pSampleRateRanges;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pSampleRateRangesCount != NULL);
+    MA_ASSERT(ppSampleRateRanges != NULL);
+
+    /* Safety. */
+    *pSampleRateRangesCount = 0;
+    *ppSampleRateRanges = NULL;
+
+    propAddress.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
+    propAddress.mScope    = (deviceType == ma_device_type_playback) ? kAudioObjectPropertyScopeOutput : kAudioObjectPropertyScopeInput;
+    propAddress.mElement  = kAudioObjectPropertyElementMaster;
+
+    /* Two-step retrieval: query the size first, then allocate and fetch the data. */
+    status = ((ma_AudioObjectGetPropertyDataSize_proc)pContext->coreaudio.AudioObjectGetPropertyDataSize)(deviceObjectID, &propAddress, 0, NULL, &dataSize);
+    if (status != noErr) {
+        return ma_result_from_OSStatus(status);
+    }
+
+    pSampleRateRanges = (AudioValueRange*)ma_malloc(dataSize, &pContext->allocationCallbacks);
+    if (pSampleRateRanges == NULL) {
+        return MA_OUT_OF_MEMORY;
+    }
+
+    status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, pSampleRateRanges);
+    if (status != noErr) {
+        ma_free(pSampleRateRanges, &pContext->allocationCallbacks);
+        return ma_result_from_OSStatus(status);
+    }
+
+    *pSampleRateRangesCount = dataSize / sizeof(*pSampleRateRanges);
+    *ppSampleRateRanges = pSampleRateRanges;
+    return MA_SUCCESS;
+}
+
+#if 0   /* Currently unused. */
+/*
+Finds the supported sample rate closest to sampleRateIn. When sampleRateIn is 0 the search walks
+miniaudio's standard rate priorities instead and falls back to the first range reported by Core
+Audio if none match.
+*/
+static ma_result ma_get_AudioObject_get_closest_sample_rate(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_uint32 sampleRateIn, ma_uint32* pSampleRateOut)
+{
+    UInt32 sampleRateRangeCount;
+    AudioValueRange* pSampleRateRanges;
+    ma_result result;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pSampleRateOut != NULL);
+
+    *pSampleRateOut = 0;    /* Safety. */
+
+    result = ma_get_AudioObject_sample_rates(pContext, deviceObjectID, deviceType, &sampleRateRangeCount, &pSampleRateRanges);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    if (sampleRateRangeCount == 0) {
+        ma_free(pSampleRateRanges, &pContext->allocationCallbacks);
+        return MA_ERROR;    /* Should never hit this case should we? */
+    }
+
+    if (sampleRateIn == 0) {
+        /* Search in order of miniaudio's preferred priority. */
+        UInt32 iMALSampleRate;
+        for (iMALSampleRate = 0; iMALSampleRate < ma_countof(g_maStandardSampleRatePriorities); ++iMALSampleRate) {
+            ma_uint32 malSampleRate = g_maStandardSampleRatePriorities[iMALSampleRate];
+            UInt32 iCASampleRate;
+            for (iCASampleRate = 0; iCASampleRate < sampleRateRangeCount; ++iCASampleRate) {
+                AudioValueRange caSampleRate = pSampleRateRanges[iCASampleRate];
+                if (caSampleRate.mMinimum <= malSampleRate && caSampleRate.mMaximum >= malSampleRate) {
+                    *pSampleRateOut = malSampleRate;
+                    ma_free(pSampleRateRanges, &pContext->allocationCallbacks);
+                    return MA_SUCCESS;
+                }
+            }
+        }
+
+        /*
+        If we get here it means none of miniaudio's standard sample rates matched any of the supported sample rates from the device. In this
+        case we just fall back to the first one reported by Core Audio.
+        */
+        MA_ASSERT(sampleRateRangeCount > 0);
+
+        /* NOTE(review): mMinimum is a Float64; the assignment truncates to ma_uint32 — presumably intended, but confirm. */
+        *pSampleRateOut = pSampleRateRanges[0].mMinimum;
+        ma_free(pSampleRateRanges, &pContext->allocationCallbacks);
+        return MA_SUCCESS;
+    } else {
+        /* Find the closest match to this sample rate. */
+        /* NOTE(review): differences are computed in UInt32 from Float64 range members, and the initial "max" is INT32_MAX rather than UINT32_MAX — verify these conversions before enabling this code. */
+        UInt32 currentAbsoluteDifference = INT32_MAX;
+        UInt32 iCurrentClosestRange = (UInt32)-1;
+        UInt32 iRange;
+        for (iRange = 0; iRange < sampleRateRangeCount; ++iRange) {
+            if (pSampleRateRanges[iRange].mMinimum <= sampleRateIn && pSampleRateRanges[iRange].mMaximum >= sampleRateIn) {
+                *pSampleRateOut = sampleRateIn;
+                ma_free(pSampleRateRanges, &pContext->allocationCallbacks);
+                return MA_SUCCESS;
+            } else {
+                UInt32 absoluteDifference;
+                if (pSampleRateRanges[iRange].mMinimum > sampleRateIn) {
+                    absoluteDifference = pSampleRateRanges[iRange].mMinimum - sampleRateIn;
+                } else {
+                    absoluteDifference = sampleRateIn - pSampleRateRanges[iRange].mMaximum;
+                }
+
+                if (currentAbsoluteDifference > absoluteDifference) {
+                    currentAbsoluteDifference = absoluteDifference;
+                    iCurrentClosestRange = iRange;
+                }
+            }
+        }
+
+        MA_ASSERT(iCurrentClosestRange != (UInt32)-1);
+
+        *pSampleRateOut = pSampleRateRanges[iCurrentClosestRange].mMinimum;
+        ma_free(pSampleRateRanges, &pContext->allocationCallbacks);
+        return MA_SUCCESS;
+    }
+
+    /* Should never get here, but it would mean we weren't able to find any suitable sample rates. */
+    /*ma_free(pSampleRateRanges, &pContext->allocationCallbacks);*/
+    /*return MA_ERROR;*/
+}
+#endif
+
+/*
+Clamps bufferSizeInFramesIn against the device's supported buffer frame size range and writes the
+clamped value to *pBufferSizeInFramesOut.
+*/
+static ma_result ma_get_AudioObject_closest_buffer_size_in_frames(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_uint32 bufferSizeInFramesIn, ma_uint32* pBufferSizeInFramesOut)
+{
+    AudioObjectPropertyAddress propAddress;
+    AudioValueRange range;
+    UInt32 dataSize = sizeof(range);
+    OSStatus status;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pBufferSizeInFramesOut != NULL);
+
+    *pBufferSizeInFramesOut = 0;    /* Safety. */
+
+    propAddress.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
+    propAddress.mScope    = (deviceType == ma_device_type_playback) ? kAudioObjectPropertyScopeOutput : kAudioObjectPropertyScopeInput;
+    propAddress.mElement  = kAudioObjectPropertyElementMaster;
+
+    status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, &range);
+    if (status != noErr) {
+        return ma_result_from_OSStatus(status);
+    }
+
+    /* Clamp into [mMinimum, mMaximum]. The range members are Float64 so the comparisons happen in floating point. */
+    *pBufferSizeInFramesOut = (bufferSizeInFramesIn < range.mMinimum) ? (ma_uint32)range.mMinimum :
+                              (bufferSizeInFramesIn > range.mMaximum) ? (ma_uint32)range.mMaximum : bufferSizeInFramesIn;
+
+    return MA_SUCCESS;
+}
+
+/*
+Requests a buffer size for the device, clamped to the supported range. The set may be refused by
+the device, so on return *pPeriodSizeInOut is updated with the buffer size the device actually has.
+*/
+static ma_result ma_set_AudioObject_buffer_size_in_frames(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_uint32* pPeriodSizeInOut)
+{
+    ma_result result;
+    ma_uint32 chosenBufferSizeInFrames;
+    AudioObjectPropertyAddress propAddress;
+    UInt32 dataSize;
+    OSStatus status;
+
+    MA_ASSERT(pContext != NULL);
+
+    result = ma_get_AudioObject_closest_buffer_size_in_frames(pContext, deviceObjectID, deviceType, *pPeriodSizeInOut, &chosenBufferSizeInFrames);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    /* Try setting the size of the buffer... If this fails we just use whatever is currently set. */
+    propAddress.mSelector = kAudioDevicePropertyBufferFrameSize;
+    propAddress.mScope    = (deviceType == ma_device_type_playback) ? kAudioObjectPropertyScopeOutput : kAudioObjectPropertyScopeInput;
+    propAddress.mElement  = kAudioObjectPropertyElementMaster;
+
+    /* The result of the set is intentionally ignored; see the comment above. */
+    ((ma_AudioObjectSetPropertyData_proc)pContext->coreaudio.AudioObjectSetPropertyData)(deviceObjectID, &propAddress, 0, NULL, sizeof(chosenBufferSizeInFrames), &chosenBufferSizeInFrames);
+
+    /* Get the actual size of the buffer. Both *pPeriodSizeInOut and chosenBufferSizeInFrames are ma_uint32, so the sizes match. */
+    dataSize = sizeof(*pPeriodSizeInOut);
+    status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, &chosenBufferSizeInFrames);
+    if (status != noErr) {
+        return ma_result_from_OSStatus(status);
+    }
+
+    *pPeriodSizeInOut = chosenBufferSizeInFrames;
+    return MA_SUCCESS;
+}
+
+/*
+Retrieves the AudioObjectID of the system default device for the given device type. *pDeviceObjectID
+is always written: 0 on failure (MA_NO_DEVICE), the default device's ID on success.
+*/
+static ma_result ma_find_default_AudioObjectID(ma_context* pContext, ma_device_type deviceType, AudioObjectID* pDeviceObjectID)
+{
+ AudioObjectPropertyAddress propAddressDefaultDevice;
+ UInt32 defaultDeviceObjectIDSize = sizeof(AudioObjectID);
+ AudioObjectID defaultDeviceObjectID;
+ OSStatus status;
+
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pDeviceObjectID != NULL);
+
+ /* Safety. */
+ *pDeviceObjectID = 0;
+
+ propAddressDefaultDevice.mScope = kAudioObjectPropertyScopeGlobal;
+ propAddressDefaultDevice.mElement = kAudioObjectPropertyElementMaster;
+ if (deviceType == ma_device_type_playback) {
+ propAddressDefaultDevice.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
+ } else {
+ propAddressDefaultDevice.mSelector = kAudioHardwarePropertyDefaultInputDevice;
+ }
+
+ /* The default device properties are queried on the system object, not an individual device. */
+ defaultDeviceObjectIDSize = sizeof(AudioObjectID);
+ status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(kAudioObjectSystemObject, &propAddressDefaultDevice, 0, NULL, &defaultDeviceObjectIDSize, &defaultDeviceObjectID);
+ if (status == noErr) {
+ *pDeviceObjectID = defaultDeviceObjectID;
+ return MA_SUCCESS;
+ }
+
+ /* If we get here it means we couldn't find the device. */
+ return MA_NO_DEVICE;
+}
+
+/*
+Resolves a miniaudio device ID to a Core Audio AudioObjectID. A NULL pDeviceID selects the system
+default device for the given type. Otherwise all devices are enumerated and matched by comparing the
+device UID string against pDeviceID->coreaudio. *pDeviceObjectID is always written (0 on failure).
+Returns MA_NO_DEVICE if no matching device was found.
+*/
+static ma_result ma_find_AudioObjectID(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, AudioObjectID* pDeviceObjectID)
+{
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pDeviceObjectID != NULL);
+
+ /* Safety. */
+ *pDeviceObjectID = 0;
+
+ if (pDeviceID == NULL) {
+ /* Default device. */
+ return ma_find_default_AudioObjectID(pContext, deviceType, pDeviceObjectID);
+ } else {
+ /* Explicit device. */
+ UInt32 deviceCount;
+ AudioObjectID* pDeviceObjectIDs;
+ ma_result result;
+ UInt32 iDevice;
+
+ result = ma_get_device_object_ids__coreaudio(pContext, &deviceCount, &pDeviceObjectIDs);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ for (iDevice = 0; iDevice < deviceCount; ++iDevice) {
+ AudioObjectID deviceObjectID = pDeviceObjectIDs[iDevice];
+
+ /* Devices whose UID cannot be retrieved are skipped rather than aborting the search. */
+ char uid[256];
+ if (ma_get_AudioObject_uid(pContext, deviceObjectID, sizeof(uid), uid) != MA_SUCCESS) {
+ continue;
+ }
+
+ /* Only consider devices that actually support the requested direction. */
+ if (deviceType == ma_device_type_playback) {
+ if (ma_does_AudioObject_support_playback(pContext, deviceObjectID)) {
+ if (strcmp(uid, pDeviceID->coreaudio) == 0) {
+ *pDeviceObjectID = deviceObjectID;
+ ma_free(pDeviceObjectIDs, &pContext->allocationCallbacks);
+ return MA_SUCCESS;
+ }
+ }
+ } else {
+ if (ma_does_AudioObject_support_capture(pContext, deviceObjectID)) {
+ if (strcmp(uid, pDeviceID->coreaudio) == 0) {
+ *pDeviceObjectID = deviceObjectID;
+ ma_free(pDeviceObjectIDs, &pContext->allocationCallbacks);
+ return MA_SUCCESS;
+ }
+ }
+ }
+ }
+
+ ma_free(pDeviceObjectIDs, &pContext->allocationCallbacks);
+ }
+
+ /* If we get here it means we couldn't find the device. */
+ return MA_NO_DEVICE;
+}
+
+
+/*
+Finds the device stream format that most closely matches the requested format/channels/sampleRate.
+A zero (or ma_format_unknown) request for any parameter means "use the corresponding value from
+pOrigFormat". Matching priority, highest first: sample rate, then channel count, then sample format
+(via miniaudio's format priority table). The chosen format is written to *pFormat. Returns
+MA_FORMAT_NOT_SUPPORTED if the device reports no format miniaudio recognizes.
+*/
+static ma_result ma_find_best_format__coreaudio(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_format format, ma_uint32 channels, ma_uint32 sampleRate, const AudioStreamBasicDescription* pOrigFormat, AudioStreamBasicDescription* pFormat)
+{
+ UInt32 deviceFormatDescriptionCount;
+ AudioStreamRangedDescription* pDeviceFormatDescriptions;
+ ma_result result;
+ ma_uint32 desiredSampleRate;
+ ma_uint32 desiredChannelCount;
+ ma_format desiredFormat;
+ AudioStreamBasicDescription bestDeviceFormatSoFar;
+ ma_bool32 hasSupportedFormat;
+ UInt32 iFormat;
+
+ result = ma_get_AudioObject_stream_descriptions(pContext, deviceObjectID, deviceType, &deviceFormatDescriptionCount, &pDeviceFormatDescriptions);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ /* Resolve the "don't care" (zero/unknown) requests to the device's current settings. */
+ desiredSampleRate = sampleRate;
+ if (desiredSampleRate == 0) {
+ desiredSampleRate = pOrigFormat->mSampleRate;
+ }
+
+ desiredChannelCount = channels;
+ if (desiredChannelCount == 0) {
+ desiredChannelCount = pOrigFormat->mChannelsPerFrame;
+ }
+
+ desiredFormat = format;
+ if (desiredFormat == ma_format_unknown) {
+ result = ma_format_from_AudioStreamBasicDescription(pOrigFormat, &desiredFormat);
+ if (result != MA_SUCCESS || desiredFormat == ma_format_unknown) {
+ desiredFormat = g_maFormatPriorities[0]; /* Fall back to miniaudio's highest-priority format. */
+ }
+ }
+
+ /*
+ If we get here it means we don't have an exact match to what the client is asking for. We'll need to find the closest one. The next
+ loop will check for formats that have the same sample rate to what we're asking for. If there is, we prefer that one in all cases.
+ */
+ MA_ZERO_OBJECT(&bestDeviceFormatSoFar);
+
+ /* Seed the search with the first format miniaudio can represent at all. */
+ hasSupportedFormat = MA_FALSE;
+ for (iFormat = 0; iFormat < deviceFormatDescriptionCount; ++iFormat) {
+ ma_format format;
+ ma_result formatResult = ma_format_from_AudioStreamBasicDescription(&pDeviceFormatDescriptions[iFormat].mFormat, &format);
+ if (formatResult == MA_SUCCESS && format != ma_format_unknown) {
+ hasSupportedFormat = MA_TRUE;
+ bestDeviceFormatSoFar = pDeviceFormatDescriptions[iFormat].mFormat;
+ break;
+ }
+ }
+
+ if (!hasSupportedFormat) {
+ ma_free(pDeviceFormatDescriptions, &pContext->allocationCallbacks);
+ return MA_FORMAT_NOT_SUPPORTED;
+ }
+
+
+ for (iFormat = 0; iFormat < deviceFormatDescriptionCount; ++iFormat) {
+ AudioStreamBasicDescription thisDeviceFormat = pDeviceFormatDescriptions[iFormat].mFormat;
+ ma_format thisSampleFormat;
+ ma_result formatResult;
+ ma_format bestSampleFormatSoFar;
+
+ /* If the format is not supported by miniaudio we need to skip this one entirely. */
+ formatResult = ma_format_from_AudioStreamBasicDescription(&pDeviceFormatDescriptions[iFormat].mFormat, &thisSampleFormat);
+ if (formatResult != MA_SUCCESS || thisSampleFormat == ma_format_unknown) {
+ continue; /* The format is not supported by miniaudio. Skip. */
+ }
+
+ ma_format_from_AudioStreamBasicDescription(&bestDeviceFormatSoFar, &bestSampleFormatSoFar);
+
+ /* Getting here means the format is supported by miniaudio which makes this format a candidate. */
+ if (thisDeviceFormat.mSampleRate != desiredSampleRate) {
+ /*
+ The sample rate does not match, but this format could still be usable, although it's a very low priority. If the best format
+ so far has an equal sample rate we can just ignore this one.
+ */
+ if (bestDeviceFormatSoFar.mSampleRate == desiredSampleRate) {
+ continue; /* The best sample rate so far has the same sample rate as what we requested which means it's still the best so far. Skip this format. */
+ } else {
+ /* In this case, neither the best format so far nor this one have the same sample rate. Check the channel count next. */
+ if (thisDeviceFormat.mChannelsPerFrame != desiredChannelCount) {
+ /* This format has a different sample rate _and_ a different channel count. */
+ if (bestDeviceFormatSoFar.mChannelsPerFrame == desiredChannelCount) {
+ continue; /* No change to the best format. */
+ } else {
+ /*
+ Both this format and the best so far have different sample rates and different channel counts. Whichever has the
+ best format is the new best.
+ */
+ if (ma_get_format_priority_index(thisSampleFormat) < ma_get_format_priority_index(bestSampleFormatSoFar)) {
+ bestDeviceFormatSoFar = thisDeviceFormat;
+ continue;
+ } else {
+ continue; /* No change to the best format. */
+ }
+ }
+ } else {
+ /* This format has a different sample rate but the desired channel count. */
+ if (bestDeviceFormatSoFar.mChannelsPerFrame == desiredChannelCount) {
+ /* Both this format and the best so far have the desired channel count. Whichever has the best format is the new best. */
+ if (ma_get_format_priority_index(thisSampleFormat) < ma_get_format_priority_index(bestSampleFormatSoFar)) {
+ bestDeviceFormatSoFar = thisDeviceFormat;
+ continue;
+ } else {
+ continue; /* No change to the best format for now. */
+ }
+ } else {
+ /* This format has the desired channel count, but the best so far does not. We have a new best. */
+ bestDeviceFormatSoFar = thisDeviceFormat;
+ continue;
+ }
+ }
+ }
+ } else {
+ /*
+ The sample rates match which makes this format a very high priority contender. If the best format so far has a different
+ sample rate it needs to be replaced with this one.
+ */
+ if (bestDeviceFormatSoFar.mSampleRate != desiredSampleRate) {
+ bestDeviceFormatSoFar = thisDeviceFormat;
+ continue;
+ } else {
+ /* In this case both this format and the best format so far have the same sample rate. Check the channel count next. */
+ if (thisDeviceFormat.mChannelsPerFrame == desiredChannelCount) {
+ /*
+ In this case this format has the same channel count as what the client is requesting. If the best format so far has
+ a different count, this one becomes the new best.
+ */
+ if (bestDeviceFormatSoFar.mChannelsPerFrame != desiredChannelCount) {
+ bestDeviceFormatSoFar = thisDeviceFormat;
+ continue;
+ } else {
+ /* In this case both this format and the best so far have the ideal sample rate and channel count. Check the format. */
+ if (thisSampleFormat == desiredFormat) {
+ bestDeviceFormatSoFar = thisDeviceFormat;
+ break; /* Found the exact match. */
+ } else {
+ /* The formats are different. The new best format is the one with the highest priority format according to miniaudio. */
+ if (ma_get_format_priority_index(thisSampleFormat) < ma_get_format_priority_index(bestSampleFormatSoFar)) {
+ bestDeviceFormatSoFar = thisDeviceFormat;
+ continue;
+ } else {
+ continue; /* No change to the best format for now. */
+ }
+ }
+ }
+ } else {
+ /*
+ In this case the channel count is different to what the client has requested. If the best so far has the same channel
+ count as the requested count then it remains the best.
+ */
+ if (bestDeviceFormatSoFar.mChannelsPerFrame == desiredChannelCount) {
+ continue;
+ } else {
+ /*
+ This is the case where both have the same sample rate (good) but different channel counts. Right now both have about
+ the same priority, but we need to compare the format now.
+ */
+ /*
+ NOTE(review): this guard compares the formats for equality and then compares their priority
+ indices, which are necessarily equal when the formats are equal — so the best format never
+ changes in this branch. Presumably intentional tie-keeping; confirm against upstream intent.
+ */
+ if (thisSampleFormat == bestSampleFormatSoFar) {
+ if (ma_get_format_priority_index(thisSampleFormat) < ma_get_format_priority_index(bestSampleFormatSoFar)) {
+ bestDeviceFormatSoFar = thisDeviceFormat;
+ continue;
+ } else {
+ continue; /* No change to the best format for now. */
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ *pFormat = bestDeviceFormatSoFar;
+
+ ma_free(pDeviceFormatDescriptions, &pContext->allocationCallbacks);
+ return MA_SUCCESS;
+}
+
+/*
+Retrieves the channel map of an audio unit's device-facing bus and converts it to miniaudio's
+ma_channel representation. Uses the standard two-step Core Audio pattern: query the property size,
+allocate, then fetch. The temporary AudioChannelLayout is freed on every path.
+*/
+static ma_result ma_get_AudioUnit_channel_map(ma_context* pContext, AudioUnit audioUnit, ma_device_type deviceType, ma_channel* pChannelMap, size_t channelMapCap)
+{
+ AudioUnitScope deviceScope;
+ AudioUnitElement deviceBus;
+ UInt32 channelLayoutSize;
+ OSStatus status;
+ AudioChannelLayout* pChannelLayout;
+ ma_result result;
+
+ MA_ASSERT(pContext != NULL);
+
+ /* Scope is inverted relative to device type: playback data flows into the unit's input scope. */
+ if (deviceType == ma_device_type_playback) {
+ deviceScope = kAudioUnitScope_Input;
+ deviceBus = MA_COREAUDIO_OUTPUT_BUS;
+ } else {
+ deviceScope = kAudioUnitScope_Output;
+ deviceBus = MA_COREAUDIO_INPUT_BUS;
+ }
+
+ status = ((ma_AudioUnitGetPropertyInfo_proc)pContext->coreaudio.AudioUnitGetPropertyInfo)(audioUnit, kAudioUnitProperty_AudioChannelLayout, deviceScope, deviceBus, &channelLayoutSize, NULL);
+ if (status != noErr) {
+ return ma_result_from_OSStatus(status);
+ }
+
+ pChannelLayout = (AudioChannelLayout*)ma_malloc(channelLayoutSize, &pContext->allocationCallbacks);
+ if (pChannelLayout == NULL) {
+ return MA_OUT_OF_MEMORY;
+ }
+
+ status = ((ma_AudioUnitGetProperty_proc)pContext->coreaudio.AudioUnitGetProperty)(audioUnit, kAudioUnitProperty_AudioChannelLayout, deviceScope, deviceBus, pChannelLayout, &channelLayoutSize);
+ if (status != noErr) {
+ ma_free(pChannelLayout, &pContext->allocationCallbacks);
+ return ma_result_from_OSStatus(status);
+ }
+
+ result = ma_get_channel_map_from_AudioChannelLayout(pChannelLayout, pChannelMap, channelMapCap);
+ if (result != MA_SUCCESS) {
+ ma_free(pChannelLayout, &pContext->allocationCallbacks);
+ return result;
+ }
+
+ ma_free(pChannelLayout, &pContext->allocationCallbacks);
+ return MA_SUCCESS;
+}
+#endif /* MA_APPLE_DESKTOP */
+
+
+#if !defined(MA_APPLE_DESKTOP)
+/* Mobile only: fills a ma_device_info (name and UID-based id) from an AVAudioSession port description. */
+static void ma_AVAudioSessionPortDescription_to_device_info(AVAudioSessionPortDescription* pPortDesc, ma_device_info* pInfo)
+{
+ MA_ZERO_OBJECT(pInfo);
+ ma_strncpy_s(pInfo->name, sizeof(pInfo->name), [pPortDesc.portName UTF8String], (size_t)-1);
+ ma_strncpy_s(pInfo->id.coreaudio, sizeof(pInfo->id.coreaudio), [pPortDesc.UID UTF8String], (size_t)-1);
+}
+#endif
+
+/*
+Enumerates devices and invokes the callback once per (device, type) pair. On desktop every Core Audio
+device is inspected for playback and capture support; on mobile the current AVAudioSession route's
+ports are reported. Enumeration stops early if the callback returns false.
+
+Fix: isDefault is now recomputed for each device type. Previously it was set to MA_TRUE in the
+playback branch and never cleared, so the system default *playback* device was also reported as the
+default *capture* device even when it wasn't.
+*/
+static ma_result ma_context_enumerate_devices__coreaudio(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+{
+#if defined(MA_APPLE_DESKTOP)
+ UInt32 deviceCount;
+ AudioObjectID* pDeviceObjectIDs;
+ AudioObjectID defaultDeviceObjectIDPlayback;
+ AudioObjectID defaultDeviceObjectIDCapture;
+ ma_result result;
+ UInt32 iDevice;
+
+ ma_find_default_AudioObjectID(pContext, ma_device_type_playback, &defaultDeviceObjectIDPlayback); /* OK if this fails; sets the ID to 0. */
+ ma_find_default_AudioObjectID(pContext, ma_device_type_capture, &defaultDeviceObjectIDCapture); /* OK if this fails; sets the ID to 0. */
+
+ result = ma_get_device_object_ids__coreaudio(pContext, &deviceCount, &pDeviceObjectIDs);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ for (iDevice = 0; iDevice < deviceCount; ++iDevice) {
+ AudioObjectID deviceObjectID = pDeviceObjectIDs[iDevice];
+ ma_device_info info;
+
+ MA_ZERO_OBJECT(&info);
+ if (ma_get_AudioObject_uid(pContext, deviceObjectID, sizeof(info.id.coreaudio), info.id.coreaudio) != MA_SUCCESS) {
+ continue; /* Devices without a retrievable UID are skipped. */
+ }
+ if (ma_get_AudioObject_name(pContext, deviceObjectID, sizeof(info.name), info.name) != MA_SUCCESS) {
+ continue;
+ }
+
+ if (ma_does_AudioObject_support_playback(pContext, deviceObjectID)) {
+ info.isDefault = (deviceObjectID == defaultDeviceObjectIDPlayback) ? MA_TRUE : MA_FALSE;
+
+ if (!callback(pContext, ma_device_type_playback, &info, pUserData)) {
+ break;
+ }
+ }
+ if (ma_does_AudioObject_support_capture(pContext, deviceObjectID)) {
+ /* Recompute rather than accumulate so the playback default doesn't leak into the capture report. */
+ info.isDefault = (deviceObjectID == defaultDeviceObjectIDCapture) ? MA_TRUE : MA_FALSE;
+
+ if (!callback(pContext, ma_device_type_capture, &info, pUserData)) {
+ break;
+ }
+ }
+ }
+
+ ma_free(pDeviceObjectIDs, &pContext->allocationCallbacks);
+#else
+ ma_device_info info;
+ NSArray *pInputs = [[[AVAudioSession sharedInstance] currentRoute] inputs];
+ NSArray *pOutputs = [[[AVAudioSession sharedInstance] currentRoute] outputs];
+
+ for (AVAudioSessionPortDescription* pPortDesc in pOutputs) {
+ ma_AVAudioSessionPortDescription_to_device_info(pPortDesc, &info);
+ if (!callback(pContext, ma_device_type_playback, &info, pUserData)) {
+ return MA_SUCCESS;
+ }
+ }
+
+ for (AVAudioSessionPortDescription* pPortDesc in pInputs) {
+ ma_AVAudioSessionPortDescription_to_device_info(pPortDesc, &info);
+ if (!callback(pContext, ma_device_type_capture, &info, pUserData)) {
+ return MA_SUCCESS;
+ }
+ }
+#endif
+
+ return MA_SUCCESS;
+}
+
+static ma_result ma_context_get_device_info__coreaudio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo)
+{
+ ma_result result;
+
+ MA_ASSERT(pContext != NULL);
+
+#if defined(MA_APPLE_DESKTOP)
+ /* Desktop */
+ {
+ AudioObjectID deviceObjectID;
+ AudioObjectID defaultDeviceObjectID;
+ UInt32 streamDescriptionCount;
+ AudioStreamRangedDescription* pStreamDescriptions;
+ UInt32 iStreamDescription;
+ UInt32 sampleRateRangeCount;
+ AudioValueRange* pSampleRateRanges;
+
+ ma_find_default_AudioObjectID(pContext, deviceType, &defaultDeviceObjectID); /* OK if this fails. */
+
+ result = ma_find_AudioObjectID(pContext, deviceType, pDeviceID, &deviceObjectID);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ result = ma_get_AudioObject_uid(pContext, deviceObjectID, sizeof(pDeviceInfo->id.coreaudio), pDeviceInfo->id.coreaudio);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ result = ma_get_AudioObject_name(pContext, deviceObjectID, sizeof(pDeviceInfo->name), pDeviceInfo->name);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ if (deviceObjectID == defaultDeviceObjectID) {
+ pDeviceInfo->isDefault = MA_TRUE;
+ }
+
+ /*
+ There could be a large number of permutations here. Fortunately there is only a single channel count
+ being reported which reduces this quite a bit. For sample rates we're only reporting those that are
+ one of miniaudio's recognized "standard" rates. If there are still more formats than can fit into
+ our fixed sized array we'll just need to truncate them. This is unlikely and will probably only happen
+ if some driver performs software data conversion and therefore reports every possible format and
+ sample rate.
+ */
+ pDeviceInfo->nativeDataFormatCount = 0;
+
+ /* Formats. */
+ {
+ ma_format uniqueFormats[ma_format_count];
+ ma_uint32 uniqueFormatCount = 0;
+ ma_uint32 channels;
+
+ /* Channels. */
+ result = ma_get_AudioObject_channel_count(pContext, deviceObjectID, deviceType, &channels);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ /* Formats. */
+ result = ma_get_AudioObject_stream_descriptions(pContext, deviceObjectID, deviceType, &streamDescriptionCount, &pStreamDescriptions);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ for (iStreamDescription = 0; iStreamDescription < streamDescriptionCount; ++iStreamDescription) {
+ ma_format format;
+ ma_bool32 hasFormatBeenHandled = MA_FALSE;
+ ma_uint32 iOutputFormat;
+ ma_uint32 iSampleRate;
+
+ result = ma_format_from_AudioStreamBasicDescription(&pStreamDescriptions[iStreamDescription].mFormat, &format);
+ if (result != MA_SUCCESS) {
+ continue;
+ }
+
+ MA_ASSERT(format != ma_format_unknown);
+
+ /* Make sure the format isn't already in the output list. */
+ for (iOutputFormat = 0; iOutputFormat < uniqueFormatCount; ++iOutputFormat) {
+ if (uniqueFormats[iOutputFormat] == format) {
+ hasFormatBeenHandled = MA_TRUE;
+ break;
+ }
+ }
+
+ /* If we've already handled this format just skip it. */
+ if (hasFormatBeenHandled) {
+ continue;
+ }
+
+ uniqueFormats[uniqueFormatCount] = format;
+ uniqueFormatCount += 1;
+
+ /* Sample Rates */
+ result = ma_get_AudioObject_sample_rates(pContext, deviceObjectID, deviceType, &sampleRateRangeCount, &pSampleRateRanges);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ /*
+ Annoyingly Core Audio reports a sample rate range. We just get all the standard rates that are
+ between this range.
+ */
+ for (iSampleRate = 0; iSampleRate < sampleRateRangeCount; ++iSampleRate) {
+ ma_uint32 iStandardSampleRate;
+ for (iStandardSampleRate = 0; iStandardSampleRate < ma_countof(g_maStandardSampleRatePriorities); iStandardSampleRate += 1) {
+ ma_uint32 standardSampleRate = g_maStandardSampleRatePriorities[iStandardSampleRate];
+ if (standardSampleRate >= pSampleRateRanges[iSampleRate].mMinimum && standardSampleRate <= pSampleRateRanges[iSampleRate].mMaximum) {
+ /* We have a new data format. Add it to the list. */
+ pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].format = format;
+ pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].channels = channels;
+ pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].sampleRate = standardSampleRate;
+ pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].flags = 0;
+ pDeviceInfo->nativeDataFormatCount += 1;
+
+ if (pDeviceInfo->nativeDataFormatCount >= ma_countof(pDeviceInfo->nativeDataFormats)) {
+ break; /* No more room for any more formats. */
+ }
+ }
+ }
+ }
+
+ ma_free(pSampleRateRanges, &pContext->allocationCallbacks);
+
+ if (pDeviceInfo->nativeDataFormatCount >= ma_countof(pDeviceInfo->nativeDataFormats)) {
+ break; /* No more room for any more formats. */
+ }
+ }
+
+ ma_free(pStreamDescriptions, &pContext->allocationCallbacks);
+ }
+ }
+#else
+ /* Mobile */
+ {
+ AudioComponentDescription desc;
+ AudioComponent component;
+ AudioUnit audioUnit;
+ OSStatus status;
+ AudioUnitScope formatScope;
+ AudioUnitElement formatElement;
+ AudioStreamBasicDescription bestFormat;
+ UInt32 propSize;
+
+ /* We want to ensure we use a consistent device name to device enumeration. */
+ if (pDeviceID != NULL && pDeviceID->coreaudio[0] != '\0') {
+ ma_bool32 found = MA_FALSE;
+ if (deviceType == ma_device_type_playback) {
+ NSArray *pOutputs = [[[AVAudioSession sharedInstance] currentRoute] outputs];
+ for (AVAudioSessionPortDescription* pPortDesc in pOutputs) {
+ if (strcmp(pDeviceID->coreaudio, [pPortDesc.UID UTF8String]) == 0) {
+ ma_AVAudioSessionPortDescription_to_device_info(pPortDesc, pDeviceInfo);
+ found = MA_TRUE;
+ break;
+ }
+ }
+ } else {
+ NSArray *pInputs = [[[AVAudioSession sharedInstance] currentRoute] inputs];
+ for (AVAudioSessionPortDescription* pPortDesc in pInputs) {
+ if (strcmp(pDeviceID->coreaudio, [pPortDesc.UID UTF8String]) == 0) {
+ ma_AVAudioSessionPortDescription_to_device_info(pPortDesc, pDeviceInfo);
+ found = MA_TRUE;
+ break;
+ }
+ }
+ }
+
+ if (!found) {
+ return MA_DOES_NOT_EXIST;
+ }
+ } else {
+ if (deviceType == ma_device_type_playback) {
+ ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
+ } else {
+ ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
+ }
+ }
+
+
+ /*
+ Retrieving device information is more annoying on mobile than desktop. For simplicity I'm locking this down to whatever format is
+ reported on a temporary I/O unit. The problem, however, is that this doesn't return a value for the sample rate which we need to
+ retrieve from the AVAudioSession shared instance.
+ */
+ desc.componentType = kAudioUnitType_Output;
+ desc.componentSubType = kAudioUnitSubType_RemoteIO;
+ desc.componentManufacturer = kAudioUnitManufacturer_Apple;
+ desc.componentFlags = 0;
+ desc.componentFlagsMask = 0;
+
+ component = ((ma_AudioComponentFindNext_proc)pContext->coreaudio.AudioComponentFindNext)(NULL, &desc);
+ if (component == NULL) {
+ return MA_FAILED_TO_INIT_BACKEND;
+ }
+
+ status = ((ma_AudioComponentInstanceNew_proc)pContext->coreaudio.AudioComponentInstanceNew)(component, &audioUnit);
+ if (status != noErr) {
+ return ma_result_from_OSStatus(status);
+ }
+
+ formatScope = (deviceType == ma_device_type_playback) ? kAudioUnitScope_Input : kAudioUnitScope_Output;
+ formatElement = (deviceType == ma_device_type_playback) ? MA_COREAUDIO_OUTPUT_BUS : MA_COREAUDIO_INPUT_BUS;
+
+ propSize = sizeof(bestFormat);
+ status = ((ma_AudioUnitGetProperty_proc)pContext->coreaudio.AudioUnitGetProperty)(audioUnit, kAudioUnitProperty_StreamFormat, formatScope, formatElement, &bestFormat, &propSize);
+ if (status != noErr) {
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(audioUnit);
+ return ma_result_from_OSStatus(status);
+ }
+
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(audioUnit);
+ audioUnit = NULL;
+
+ /* Only a single format is being reported for iOS. */
+ pDeviceInfo->nativeDataFormatCount = 1;
+
+ result = ma_format_from_AudioStreamBasicDescription(&bestFormat, &pDeviceInfo->nativeDataFormats[0].format);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ pDeviceInfo->nativeDataFormats[0].channels = bestFormat.mChannelsPerFrame;
+
+ /*
+ It looks like Apple are wanting to push the whole AVAudioSession thing. Thus, we need to use that to determine device settings. To do
+ this we just get the shared instance and inspect.
+ */
+ @autoreleasepool {
+ AVAudioSession* pAudioSession = [AVAudioSession sharedInstance];
+ MA_ASSERT(pAudioSession != NULL);
+
+ pDeviceInfo->nativeDataFormats[0].sampleRate = (ma_uint32)pAudioSession.sampleRate;
+ }
+ }
+#endif
+
+ (void)pDeviceInfo; /* Unused. */
+ return MA_SUCCESS;
+}
+
<br>
+/*
+Allocates an AudioBufferList plus its sample storage in a single heap block. For interleaved layouts
+there is one AudioBuffer spanning all channels; for deinterleaved layouts there is one AudioBuffer
+per channel, each holding one channel's worth of frames. The mData pointers point into the same
+allocation, immediately after the (variably sized) header. Returns NULL on allocation failure; free
+with ma_free() using the same allocation callbacks.
+*/
+static AudioBufferList* ma_allocate_AudioBufferList__coreaudio(ma_uint32 sizeInFrames, ma_format format, ma_uint32 channels, ma_stream_layout layout, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+ AudioBufferList* pBufferList;
+ UInt32 audioBufferSizeInBytes;
+ size_t allocationSize;
+
+ MA_ASSERT(sizeInFrames > 0);
+ MA_ASSERT(format != ma_format_unknown);
+ MA_ASSERT(channels > 0);
+
+ allocationSize = sizeof(AudioBufferList) - sizeof(AudioBuffer); /* Subtract sizeof(AudioBuffer) because that part is dynamically sized. */
+ if (layout == ma_stream_layout_interleaved) {
+ /* Interleaved case. This is the simple case because we just have one buffer. */
+ allocationSize += sizeof(AudioBuffer) * 1;
+ } else {
+ /* Non-interleaved case. This is the more complex case because there's more than one buffer. */
+ allocationSize += sizeof(AudioBuffer) * channels;
+ }
+
+ allocationSize += sizeInFrames * ma_get_bytes_per_frame(format, channels);
+
+ pBufferList = (AudioBufferList*)ma_malloc(allocationSize, pAllocationCallbacks);
+ if (pBufferList == NULL) {
+ return NULL;
+ }
+
+ /* Per-channel byte count; multiplied back up by 'channels' for the single interleaved buffer. */
+ audioBufferSizeInBytes = (UInt32)(sizeInFrames * ma_get_bytes_per_sample(format));
+
+ if (layout == ma_stream_layout_interleaved) {
+ pBufferList->mNumberBuffers = 1;
+ pBufferList->mBuffers[0].mNumberChannels = channels;
+ pBufferList->mBuffers[0].mDataByteSize = audioBufferSizeInBytes * channels;
+ pBufferList->mBuffers[0].mData = (ma_uint8*)pBufferList + sizeof(AudioBufferList);
+ } else {
+ ma_uint32 iBuffer;
+ pBufferList->mNumberBuffers = channels;
+ for (iBuffer = 0; iBuffer < pBufferList->mNumberBuffers; ++iBuffer) {
+ pBufferList->mBuffers[iBuffer].mNumberChannels = 1;
+ pBufferList->mBuffers[iBuffer].mDataByteSize = audioBufferSizeInBytes;
+ /* Offset past the variably sized header, then past the preceding channels' sample blocks. */
+ pBufferList->mBuffers[iBuffer].mData = (ma_uint8*)pBufferList + ((sizeof(AudioBufferList) - sizeof(AudioBuffer)) + (sizeof(AudioBuffer) * channels)) + (audioBufferSizeInBytes * iBuffer);
+ }
+ }
+
+ return pBufferList;
+}
+
+/*
+Grow-only reallocation of the device's capture AudioBufferList. If the current capacity already
+covers sizeInFrames this is a no-op; otherwise a new list is allocated, the old one freed, and the
+capacity updated. Note the existing contents are not copied — callers rely on refilling the buffer
+via AudioUnitRender() after this call.
+*/
+static ma_result ma_device_realloc_AudioBufferList__coreaudio(ma_device* pDevice, ma_uint32 sizeInFrames, ma_format format, ma_uint32 channels, ma_stream_layout layout)
+{
+ MA_ASSERT(pDevice != NULL);
+ MA_ASSERT(format != ma_format_unknown);
+ MA_ASSERT(channels > 0);
+
+ /* Only resize the buffer if necessary. */
+ if (pDevice->coreaudio.audioBufferCapInFrames < sizeInFrames) {
+ AudioBufferList* pNewAudioBufferList;
+
+ pNewAudioBufferList = ma_allocate_AudioBufferList__coreaudio(sizeInFrames, format, channels, layout, &pDevice->pContext->allocationCallbacks);
+ if (pNewAudioBufferList == NULL) {
+ return MA_OUT_OF_MEMORY;
+ }
+
+ /* At this point we'll have a new AudioBufferList and we can free the old one. */
+ ma_free(pDevice->coreaudio.pAudioBufferList, &pDevice->pContext->allocationCallbacks);
+ pDevice->coreaudio.pAudioBufferList = pNewAudioBufferList;
+ pDevice->coreaudio.audioBufferCapInFrames = sizeInFrames;
+ }
+
+ /* Getting here means the capacity of the audio is fine. */
+ return MA_SUCCESS;
+}
+
+
+/*
+AudioUnit render callback for playback. Pulls frames from miniaudio's data callback into the buffers
+Core Audio hands us. Interleaved vs deinterleaved is detected per callback by comparing the first
+buffer's channel count against the device's internal channel count. Always returns noErr — errors
+are handled by outputting silence rather than failing the render cycle.
+*/
+static OSStatus ma_on_output__coreaudio(void* pUserData, AudioUnitRenderActionFlags* pActionFlags, const AudioTimeStamp* pTimeStamp, UInt32 busNumber, UInt32 frameCount, AudioBufferList* pBufferList)
+{
+ ma_device* pDevice = (ma_device*)pUserData;
+ ma_stream_layout layout;
+
+ MA_ASSERT(pDevice != NULL);
+
+ /*ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "INFO: Output Callback: busNumber=%d, frameCount=%d, mNumberBuffers=%d\n", (int)busNumber, (int)frameCount, (int)pBufferList->mNumberBuffers);*/
+
+ /* We need to check whether or not we are outputting interleaved or non-interleaved samples. The way we do this is slightly different for each type. */
+ layout = ma_stream_layout_interleaved;
+ if (pBufferList->mBuffers[0].mNumberChannels != pDevice->playback.internalChannels) {
+ layout = ma_stream_layout_deinterleaved;
+ }
+
+ if (layout == ma_stream_layout_interleaved) {
+ /* For now we can assume everything is interleaved. */
+ UInt32 iBuffer;
+ for (iBuffer = 0; iBuffer < pBufferList->mNumberBuffers; ++iBuffer) {
+ if (pBufferList->mBuffers[iBuffer].mNumberChannels == pDevice->playback.internalChannels) {
+ /* Frame count is derived from the buffer's byte size, not the frameCount argument. */
+ ma_uint32 frameCountForThisBuffer = pBufferList->mBuffers[iBuffer].mDataByteSize / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+ if (frameCountForThisBuffer > 0) {
+ ma_device_handle_backend_data_callback(pDevice, pBufferList->mBuffers[iBuffer].mData, NULL, frameCountForThisBuffer);
+ }
+
+ /*a_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "  frameCount=%d, mNumberChannels=%d, mDataByteSize=%d\n", (int)frameCount, (int)pBufferList->mBuffers[iBuffer].mNumberChannels, (int)pBufferList->mBuffers[iBuffer].mDataByteSize);*/
+ } else {
+ /*
+ This case is where the number of channels in the output buffer do not match our internal channels. It could mean that it's
+ not interleaved, in which case we can't handle right now since miniaudio does not yet support non-interleaved streams. We just
+ output silence here.
+ */
+ MA_ZERO_MEMORY(pBufferList->mBuffers[iBuffer].mData, pBufferList->mBuffers[iBuffer].mDataByteSize);
+ /*ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "  WARNING: Outputting silence. frameCount=%d, mNumberChannels=%d, mDataByteSize=%d\n", (int)frameCount, (int)pBufferList->mBuffers[iBuffer].mNumberChannels, (int)pBufferList->mBuffers[iBuffer].mDataByteSize);*/
+ }
+ }
+ } else {
+ /* This is the deinterleaved case. We need to update each buffer in groups of internalChannels. This assumes each buffer is the same size. */
+ MA_ASSERT(pDevice->playback.internalChannels <= MA_MAX_CHANNELS); /* This should heve been validated at initialization time. */
+
+ /*
+ For safety we'll check that the internal channels is a multiple of the buffer count. If it's not it means something
+ very strange has happened and we're not going to support it.
+ */
+ if ((pBufferList->mNumberBuffers % pDevice->playback.internalChannels) == 0) {
+ ma_uint8 tempBuffer[4096];
+ UInt32 iBuffer;
+
+ for (iBuffer = 0; iBuffer < pBufferList->mNumberBuffers; iBuffer += pDevice->playback.internalChannels) {
+ ma_uint32 frameCountPerBuffer = pBufferList->mBuffers[iBuffer].mDataByteSize / ma_get_bytes_per_sample(pDevice->playback.internalFormat);
+ ma_uint32 framesRemaining = frameCountPerBuffer;
+
+ /* Read interleaved frames into the fixed stack buffer, then fan them out one channel per AudioBuffer. */
+ while (framesRemaining > 0) {
+ void* ppDeinterleavedBuffers[MA_MAX_CHANNELS];
+ ma_uint32 iChannel;
+ ma_uint32 framesToRead = sizeof(tempBuffer) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+ if (framesToRead > framesRemaining) {
+ framesToRead = framesRemaining;
+ }
+
+ ma_device_handle_backend_data_callback(pDevice, tempBuffer, NULL, framesToRead);
+
+ for (iChannel = 0; iChannel < pDevice->playback.internalChannels; ++iChannel) {
+ ppDeinterleavedBuffers[iChannel] = (void*)ma_offset_ptr(pBufferList->mBuffers[iBuffer+iChannel].mData, (frameCountPerBuffer - framesRemaining) * ma_get_bytes_per_sample(pDevice->playback.internalFormat));
+ }
+
+ ma_deinterleave_pcm_frames(pDevice->playback.internalFormat, pDevice->playback.internalChannels, framesToRead, tempBuffer, ppDeinterleavedBuffers);
+
+ framesRemaining -= framesToRead;
+ }
+ }
+ }
+ /* NOTE(review): when mNumberBuffers is not a multiple of internalChannels nothing is written at all — the buffers are left untouched rather than zeroed; confirm this is the intended fallback. */
+ }
+
+ (void)pActionFlags;
+ (void)pTimeStamp;
+ (void)busNumber;
+ (void)frameCount;
+
+ return noErr;
+}
+
+/*
+AudioUnit render-notify callback for capture. Core Audio invokes this when captured audio is
+available. We pull the frames with AudioUnitRender() into our own pre-allocated AudioBufferList
+and then hand them to miniaudio's backend data callback. Always returns noErr except when
+AudioUnitRender() itself fails.
+*/
+static OSStatus ma_on_input__coreaudio(void* pUserData, AudioUnitRenderActionFlags* pActionFlags, const AudioTimeStamp* pTimeStamp, UInt32 busNumber, UInt32 frameCount, AudioBufferList* pUnusedBufferList)
+{
+    ma_device* pDevice = (ma_device*)pUserData;
+    AudioBufferList* pRenderedBufferList;
+    ma_result result;
+    ma_stream_layout layout;
+    ma_uint32 iBuffer;
+    OSStatus status;
+
+    MA_ASSERT(pDevice != NULL);
+
+    pRenderedBufferList = (AudioBufferList*)pDevice->coreaudio.pAudioBufferList;
+    MA_ASSERT(pRenderedBufferList);
+
+    /* We need to check whether or not we are outputting interleaved or non-interleaved samples. The way we do this is slightly different for each type. */
+    layout = ma_stream_layout_interleaved;
+    if (pRenderedBufferList->mBuffers[0].mNumberChannels != pDevice->capture.internalChannels) {
+        layout = ma_stream_layout_deinterleaved;
+    }
+
+    /*ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "INFO: Input Callback: busNumber=%d, frameCount=%d, mNumberBuffers=%d\n", (int)busNumber, (int)frameCount, (int)pRenderedBufferList->mNumberBuffers);*/
+
+    /*
+    There has been a situation reported where frame count passed into this function is greater than the capacity of
+    our capture buffer. There doesn't seem to be a reliable way to determine what the maximum frame count will be,
+    so we need to instead resort to dynamically reallocating our buffer to ensure it's large enough to capture the
+    number of frames requested by this callback.
+    */
+    result = ma_device_realloc_AudioBufferList__coreaudio(pDevice, frameCount, pDevice->capture.internalFormat, pDevice->capture.internalChannels, layout);
+    if (result != MA_SUCCESS) {
+        /* Allocation failure is non-fatal to Core Audio; just drop this period of audio. */
+        ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "Failed to allocate AudioBufferList for capture.\n");
+        return noErr;
+    }
+
+    /* Re-fetch the pointer since the realloc above may have moved the buffer list. */
+    pRenderedBufferList = (AudioBufferList*)pDevice->coreaudio.pAudioBufferList;
+    MA_ASSERT(pRenderedBufferList);
+
+    /*
+    When you call AudioUnitRender(), Core Audio tries to be helpful by setting the mDataByteSize to the number of bytes
+    that were actually rendered. The problem with this is that the next call can fail with -50 due to the size no longer
+    being set to the capacity of the buffer, but instead the size in bytes of the previous render. This will cause a
+    problem when a future call to this callback specifies a larger number of frames.
+
+    To work around this we need to explicitly set the size of each buffer to their respective size in bytes.
+    */
+    for (iBuffer = 0; iBuffer < pRenderedBufferList->mNumberBuffers; ++iBuffer) {
+        pRenderedBufferList->mBuffers[iBuffer].mDataByteSize = pDevice->coreaudio.audioBufferCapInFrames * ma_get_bytes_per_sample(pDevice->capture.internalFormat) * pRenderedBufferList->mBuffers[iBuffer].mNumberChannels;
+    }
+
+    /* Pull the captured frames from Core Audio into our buffer list. */
+    status = ((ma_AudioUnitRender_proc)pDevice->pContext->coreaudio.AudioUnitRender)((AudioUnit)pDevice->coreaudio.audioUnitCapture, pActionFlags, pTimeStamp, busNumber, frameCount, pRenderedBufferList);
+    if (status != noErr) {
+        ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "  ERROR: AudioUnitRender() failed with %d.\n", (int)status);
+        return status;
+    }
+
+    if (layout == ma_stream_layout_interleaved) {
+        for (iBuffer = 0; iBuffer < pRenderedBufferList->mNumberBuffers; ++iBuffer) {
+            if (pRenderedBufferList->mBuffers[iBuffer].mNumberChannels == pDevice->capture.internalChannels) {
+                /* Channel counts match so the buffer can be delivered to the client as-is. */
+                ma_device_handle_backend_data_callback(pDevice, NULL, pRenderedBufferList->mBuffers[iBuffer].mData, frameCount);
+                /*ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "  mDataByteSize=%d.\n", (int)pRenderedBufferList->mBuffers[iBuffer].mDataByteSize);*/
+            } else {
+                /*
+                This case is where the number of channels in the output buffer does not match our internal channels. It could mean that it's
+                not interleaved, in which case we can't handle it right now since miniaudio does not yet support non-interleaved streams.
+                */
+                ma_uint8 silentBuffer[4096];
+                ma_uint32 framesRemaining;
+
+                MA_ZERO_MEMORY(silentBuffer, sizeof(silentBuffer));
+
+                /* Deliver silence in fixed-size chunks so the client still advances by frameCount frames. */
+                framesRemaining = frameCount;
+                while (framesRemaining > 0) {
+                    ma_uint32 framesToSend = sizeof(silentBuffer) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+                    if (framesToSend > framesRemaining) {
+                        framesToSend = framesRemaining;
+                    }
+
+                    ma_device_handle_backend_data_callback(pDevice, NULL, silentBuffer, framesToSend);
+
+                    framesRemaining -= framesToSend;
+                }
+
+                /*ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "  WARNING: Outputting silence. frameCount=%d, mNumberChannels=%d, mDataByteSize=%d\n", (int)frameCount, (int)pRenderedBufferList->mBuffers[iBuffer].mNumberChannels, (int)pRenderedBufferList->mBuffers[iBuffer].mDataByteSize);*/
+            }
+        }
+    } else {
+        /* This is the deinterleaved case. We need to interleave the audio data before sending it to the client. This assumes each buffer is the same size. */
+        MA_ASSERT(pDevice->capture.internalChannels <= MA_MAX_CHANNELS);    /* This should have been validated at initialization time. */
+
+        /*
+        For safety we'll check that the internal channels is a multiple of the buffer count. If it's not it means something
+        very strange has happened and we're not going to support it.
+        */
+        if ((pRenderedBufferList->mNumberBuffers % pDevice->capture.internalChannels) == 0) {
+            ma_uint8 tempBuffer[4096];
+            /* Process one group of per-channel buffers at a time (internalChannels buffers per group). */
+            for (iBuffer = 0; iBuffer < pRenderedBufferList->mNumberBuffers; iBuffer += pDevice->capture.internalChannels) {
+                ma_uint32 framesRemaining = frameCount;
+                while (framesRemaining > 0) {
+                    void* ppDeinterleavedBuffers[MA_MAX_CHANNELS];
+                    ma_uint32 iChannel;
+                    /* Interleave in chunks bounded by the temp buffer's capacity. */
+                    ma_uint32 framesToSend = sizeof(tempBuffer) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+                    if (framesToSend > framesRemaining) {
+                        framesToSend = framesRemaining;
+                    }
+
+                    /* Offset each channel pointer by the number of frames already consumed. */
+                    for (iChannel = 0; iChannel < pDevice->capture.internalChannels; ++iChannel) {
+                        ppDeinterleavedBuffers[iChannel] = (void*)ma_offset_ptr(pRenderedBufferList->mBuffers[iBuffer+iChannel].mData, (frameCount - framesRemaining) * ma_get_bytes_per_sample(pDevice->capture.internalFormat));
+                    }
+
+                    ma_interleave_pcm_frames(pDevice->capture.internalFormat, pDevice->capture.internalChannels, framesToSend, (const void**)ppDeinterleavedBuffers, tempBuffer);
+                    ma_device_handle_backend_data_callback(pDevice, NULL, tempBuffer, framesToSend);
+
+                    framesRemaining -= framesToSend;
+                }
+            }
+        }
+    }
+
+    (void)pActionFlags;
+    (void)pTimeStamp;
+    (void)busNumber;
+    (void)frameCount;
+    (void)pUnusedBufferList;
+
+    return noErr;
+}
+
+/*
+Property listener for kAudioOutputUnitProperty_IsRunning. Used to detect when the audio unit
+starts and stops so miniaudio can post stopped notifications and signal the stop event. Care is
+taken to ignore transitions that are caused by an internal device switch rather than a genuine
+stop.
+*/
+static void on_start_stop__coreaudio(void* pUserData, AudioUnit audioUnit, AudioUnitPropertyID propertyID, AudioUnitScope scope, AudioUnitElement element)
+{
+    ma_device* pDevice = (ma_device*)pUserData;
+    MA_ASSERT(pDevice != NULL);
+
+    /* Don't do anything if it looks like we're just reinitializing due to a device switch. */
+    if (((audioUnit == pDevice->coreaudio.audioUnitPlayback) && pDevice->coreaudio.isSwitchingPlaybackDevice) ||
+        ((audioUnit == pDevice->coreaudio.audioUnitCapture)  && pDevice->coreaudio.isSwitchingCaptureDevice)) {
+        return;
+    }
+
+    /*
+    There's been a report of a deadlock here when triggered by ma_device_uninit(). It looks like
+    AudioUnitGetProprty (called below) and AudioComponentInstanceDispose (called in ma_device_uninit)
+    can try waiting on the same lock. I'm going to try working around this by not calling any Core
+    Audio APIs in the callback when the device has been stopped or uninitialized.
+    */
+    if (ma_device_get_state(pDevice) == ma_device_state_uninitialized || ma_device_get_state(pDevice) == ma_device_state_stopping || ma_device_get_state(pDevice) == ma_device_state_stopped) {
+        /* Device is already on its way down; just notify without touching Core Audio. */
+        ma_device__on_notification_stopped(pDevice);
+    } else {
+        UInt32 isRunning;
+        UInt32 isRunningSize = sizeof(isRunning);
+        OSStatus status = ((ma_AudioUnitGetProperty_proc)pDevice->pContext->coreaudio.AudioUnitGetProperty)(audioUnit, kAudioOutputUnitProperty_IsRunning, scope, element, &isRunning, &isRunningSize);
+        if (status != noErr) {
+            goto done;  /* Don't really know what to do in this case... just ignore it, I suppose... */
+        }
+
+        if (!isRunning) {
+            /*
+            The stop event is a bit annoying in Core Audio because it will be called when we automatically switch the default device. Some scenarios to consider:
+
+            1) When the device is unplugged, this will be called _before_ the default device change notification.
+            2) When the device is changed via the default device change notification, this will be called _after_ the switch.
+
+            For case #1, we just check if there's a new default device available. If so, we just ignore the stop event. For case #2 we check a flag.
+            */
+            if (((audioUnit == pDevice->coreaudio.audioUnitPlayback) && pDevice->coreaudio.isDefaultPlaybackDevice) ||
+                ((audioUnit == pDevice->coreaudio.audioUnitCapture)  && pDevice->coreaudio.isDefaultCaptureDevice)) {
+                /*
+                It looks like the device is switching through an external event, such as the user unplugging the device or changing the default device
+                via the operating system's sound settings. If we're re-initializing the device, we just terminate because we want the stopping of the
+                device to be seamless to the client (we don't want them receiving the stopped event and thinking that the device has stopped when it
+                hasn't!).
+                */
+                if (((audioUnit == pDevice->coreaudio.audioUnitPlayback) && pDevice->coreaudio.isSwitchingPlaybackDevice) ||
+                    ((audioUnit == pDevice->coreaudio.audioUnitCapture)  && pDevice->coreaudio.isSwitchingCaptureDevice)) {
+                    goto done;
+                }
+
+                /*
+                Getting here means the device is not reinitializing which means it may have been unplugged. From what I can see, it looks like Core Audio
+                will try switching to the new default device seamlessly. We need to somehow find a way to determine whether or not Core Audio will most
+                likely be successful in switching to the new device.
+
+                TODO: Try to predict if Core Audio will switch devices. If not, the stopped callback needs to be posted.
+                */
+                goto done;
+            }
+
+            /* Getting here means we need to stop the device. */
+            ma_device__on_notification_stopped(pDevice);
+        }
+    }
+
+    (void)propertyID; /* Unused. */
+
+done:
+    /* Always signal the stop event. It's possible for the "else" case to get hit which can happen during an interruption. */
+    ma_event_signal(&pDevice->coreaudio.stopEvent);
+}
+
+#if defined(MA_APPLE_DESKTOP)
+/* Global state for tracking devices that follow the system default device (desktop only). */
+static ma_spinlock g_DeviceTrackingInitLock_CoreAudio = 0; /* A spinlock for mutual exclusion of the init/uninit of the global tracking data. Initialization to 0 is what we need. */
+static ma_uint32 g_DeviceTrackingInitCounter_CoreAudio = 0; /* Reference count. Listeners and the mutex are created at 0 -> 1 and destroyed at 1 -> 0. */
+static ma_mutex g_DeviceTrackingMutex_CoreAudio; /* Guards the tracked device list below. */
+static ma_device** g_ppTrackedDevices_CoreAudio = NULL; /* Dynamic array of tracked devices. */
+static ma_uint32 g_TrackedDeviceCap_CoreAudio = 0; /* Capacity of the array, in elements. */
+static ma_uint32 g_TrackedDeviceCount_CoreAudio = 0; /* Number of devices currently in the array. */
+
+/*
+System listener for default-device changes (kAudioHardwarePropertyDefaultOutputDevice /
+kAudioHardwarePropertyDefaultInputDevice). Reinitializes every tracked device of the matching
+type so that it seamlessly follows the new default device, restarting it if it was running.
+*/
+static OSStatus ma_default_device_changed__coreaudio(AudioObjectID objectID, UInt32 addressCount, const AudioObjectPropertyAddress* pAddresses, void* pUserData)
+{
+    ma_device_type deviceType;
+
+    /* Not sure if I really need to check this, but it makes me feel better. */
+    if (addressCount == 0) {
+        return noErr;
+    }
+
+    /* Map the changed property to the device type it affects. */
+    if (pAddresses[0].mSelector == kAudioHardwarePropertyDefaultOutputDevice) {
+        deviceType = ma_device_type_playback;
+    } else if (pAddresses[0].mSelector == kAudioHardwarePropertyDefaultInputDevice) {
+        deviceType = ma_device_type_capture;
+    } else {
+        return noErr;   /* Should never hit this. */
+    }
+
+    ma_mutex_lock(&g_DeviceTrackingMutex_CoreAudio);
+    {
+        ma_uint32 iDevice;
+        for (iDevice = 0; iDevice < g_TrackedDeviceCount_CoreAudio; iDevice += 1) {
+            ma_result reinitResult;
+            ma_device* pDevice;
+
+            pDevice = g_ppTrackedDevices_CoreAudio[iDevice];
+            if (pDevice->type == deviceType || pDevice->type == ma_device_type_duplex) {
+                /*
+                Set the isSwitching* flag around the reinit so that the start/stop callback knows
+                to suppress the stopped notification during the switch.
+                */
+                if (deviceType == ma_device_type_playback) {
+                    pDevice->coreaudio.isSwitchingPlaybackDevice = MA_TRUE;
+                    reinitResult = ma_device_reinit_internal__coreaudio(pDevice, deviceType, MA_TRUE);
+                    pDevice->coreaudio.isSwitchingPlaybackDevice = MA_FALSE;
+                } else {
+                    pDevice->coreaudio.isSwitchingCaptureDevice = MA_TRUE;
+                    reinitResult = ma_device_reinit_internal__coreaudio(pDevice, deviceType, MA_TRUE);
+                    pDevice->coreaudio.isSwitchingCaptureDevice = MA_FALSE;
+                }
+
+                if (reinitResult == MA_SUCCESS) {
+                    ma_device__post_init_setup(pDevice, deviceType);
+
+                    /* Restart the device if required. If this fails we need to stop the device entirely. */
+                    if (ma_device_get_state(pDevice) == ma_device_state_started) {
+                        OSStatus status;
+                        if (deviceType == ma_device_type_playback) {
+                            status = ((ma_AudioOutputUnitStart_proc)pDevice->pContext->coreaudio.AudioOutputUnitStart)((AudioUnit)pDevice->coreaudio.audioUnitPlayback);
+                            if (status != noErr) {
+                                /* For a duplex device the other side must be stopped too so the device is not left half-running. */
+                                if (pDevice->type == ma_device_type_duplex) {
+                                    ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitCapture);
+                                }
+                                ma_device__set_state(pDevice, ma_device_state_stopped);
+                            }
+                        } else if (deviceType == ma_device_type_capture) {
+                            status = ((ma_AudioOutputUnitStart_proc)pDevice->pContext->coreaudio.AudioOutputUnitStart)((AudioUnit)pDevice->coreaudio.audioUnitCapture);
+                            if (status != noErr) {
+                                if (pDevice->type == ma_device_type_duplex) {
+                                    ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitPlayback);
+                                }
+                                ma_device__set_state(pDevice, ma_device_state_stopped);
+                            }
+                        }
+                    }
+
+                    /* Let the client know the device has been rerouted. */
+                    ma_device__on_notification_rerouted(pDevice);
+                }
+            }
+        }
+    }
+    ma_mutex_unlock(&g_DeviceTrackingMutex_CoreAudio);
+
+    /* Unused parameters. */
+    (void)objectID;
+    (void)pUserData;
+
+    return noErr;
+}
+
+/*
+Reference-counted initialization of the global default-device tracking state. The first context
+to call this creates the tracking mutex and registers the default-device listeners; subsequent
+calls only bump the reference count.
+*/
+static ma_result ma_context__init_device_tracking__coreaudio(ma_context* pContext)
+{
+    MA_ASSERT(pContext != NULL);
+
+    ma_spinlock_lock(&g_DeviceTrackingInitLock_CoreAudio);
+    {
+        /* Only the very first initialization performs the global setup. */
+        if (g_DeviceTrackingInitCounter_CoreAudio == 0) {
+            AudioObjectPropertySelector selectors[2];
+            AudioObjectPropertyAddress propAddress;
+            int iSelector;
+
+            ma_mutex_init(&g_DeviceTrackingMutex_CoreAudio);
+
+            /* Listen for changes to both the default input and default output device. */
+            selectors[0] = kAudioHardwarePropertyDefaultInputDevice;
+            selectors[1] = kAudioHardwarePropertyDefaultOutputDevice;
+
+            propAddress.mScope   = kAudioObjectPropertyScopeGlobal;
+            propAddress.mElement = kAudioObjectPropertyElementMaster;
+
+            for (iSelector = 0; iSelector < 2; iSelector += 1) {
+                propAddress.mSelector = selectors[iSelector];
+                ((ma_AudioObjectAddPropertyListener_proc)pContext->coreaudio.AudioObjectAddPropertyListener)(kAudioObjectSystemObject, &propAddress, &ma_default_device_changed__coreaudio, NULL);
+            }
+        }
+
+        g_DeviceTrackingInitCounter_CoreAudio += 1;
+    }
+    ma_spinlock_unlock(&g_DeviceTrackingInitLock_CoreAudio);
+
+    return MA_SUCCESS;
+}
+
+/*
+Reference-counted teardown of the global default-device tracking state. When the last context
+uninitializes, the listeners are removed and the tracking mutex is destroyed. Returns
+MA_INVALID_OPERATION if devices are still being tracked at final teardown.
+*/
+static ma_result ma_context__uninit_device_tracking__coreaudio(ma_context* pContext)
+{
+    ma_result result = MA_SUCCESS;
+
+    MA_ASSERT(pContext != NULL);
+
+    ma_spinlock_lock(&g_DeviceTrackingInitLock_CoreAudio);
+    {
+        if (g_DeviceTrackingInitCounter_CoreAudio > 0) {
+            g_DeviceTrackingInitCounter_CoreAudio -= 1;
+        }
+
+        /* Only the final uninitialization performs the global teardown. */
+        if (g_DeviceTrackingInitCounter_CoreAudio == 0) {
+            AudioObjectPropertySelector selectors[2];
+            AudioObjectPropertyAddress propAddress;
+            int iSelector;
+
+            selectors[0] = kAudioHardwarePropertyDefaultInputDevice;
+            selectors[1] = kAudioHardwarePropertyDefaultOutputDevice;
+
+            propAddress.mScope   = kAudioObjectPropertyScopeGlobal;
+            propAddress.mElement = kAudioObjectPropertyElementMaster;
+
+            for (iSelector = 0; iSelector < 2; iSelector += 1) {
+                propAddress.mSelector = selectors[iSelector];
+                ((ma_AudioObjectRemovePropertyListener_proc)pContext->coreaudio.AudioObjectRemovePropertyListener)(kAudioObjectSystemObject, &propAddress, &ma_default_device_changed__coreaudio, NULL);
+            }
+
+            /* At this point there should be no tracked devices. If not there's an error somewhere. */
+            if (g_ppTrackedDevices_CoreAudio != NULL) {
+                ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_WARNING, "You have uninitialized all contexts while an associated device is still active.");
+                result = MA_INVALID_OPERATION;  /* Keep the mutex alive since devices still reference the tracking state. */
+            } else {
+                ma_mutex_uninit(&g_DeviceTrackingMutex_CoreAudio);
+            }
+        }
+    }
+    ma_spinlock_unlock(&g_DeviceTrackingInitLock_CoreAudio);
+
+    return result;
+}
+
+/*
+Adds a device to the global tracked-device list so it can follow default device changes. The
+list grows geometrically (doubling, starting at 1) as required.
+*/
+static ma_result ma_device__track__coreaudio(ma_device* pDevice)
+{
+    ma_result result = MA_SUCCESS;
+
+    MA_ASSERT(pDevice != NULL);
+
+    ma_mutex_lock(&g_DeviceTrackingMutex_CoreAudio);
+    {
+        /* Grow the list if it's full. */
+        if (g_TrackedDeviceCount_CoreAudio >= g_TrackedDeviceCap_CoreAudio) {
+            ma_uint32 newCap = (g_TrackedDeviceCap_CoreAudio == 0) ? 1 : g_TrackedDeviceCap_CoreAudio * 2;
+            ma_device** ppNewDevices = (ma_device**)ma_realloc(g_ppTrackedDevices_CoreAudio, sizeof(*g_ppTrackedDevices_CoreAudio)*newCap, &pDevice->pContext->allocationCallbacks);
+            if (ppNewDevices == NULL) {
+                result = MA_OUT_OF_MEMORY;  /* The old list is still valid; nothing to undo. */
+            } else {
+                g_ppTrackedDevices_CoreAudio = ppNewDevices;
+                g_TrackedDeviceCap_CoreAudio = newCap;
+            }
+        }
+
+        if (result == MA_SUCCESS) {
+            g_ppTrackedDevices_CoreAudio[g_TrackedDeviceCount_CoreAudio] = pDevice;
+            g_TrackedDeviceCount_CoreAudio += 1;
+        }
+    }
+    ma_mutex_unlock(&g_DeviceTrackingMutex_CoreAudio);
+
+    return result;
+}
+
+/*
+Removes a device from the global tracked-device list, keeping the list contiguous. When the
+last device is removed the list's memory is released. Devices that were never tracked are
+gracefully ignored.
+*/
+static ma_result ma_device__untrack__coreaudio(ma_device* pDevice)
+{
+    MA_ASSERT(pDevice != NULL);
+
+    ma_mutex_lock(&g_DeviceTrackingMutex_CoreAudio);
+    {
+        ma_uint32 iTrackedDevice;
+        for (iTrackedDevice = 0; iTrackedDevice < g_TrackedDeviceCount_CoreAudio; iTrackedDevice += 1) {
+            if (g_ppTrackedDevices_CoreAudio[iTrackedDevice] != pDevice) {
+                continue;
+            }
+
+            /* Found it. Shift everything after it down one slot. */
+            for (; iTrackedDevice+1 < g_TrackedDeviceCount_CoreAudio; iTrackedDevice += 1) {
+                g_ppTrackedDevices_CoreAudio[iTrackedDevice] = g_ppTrackedDevices_CoreAudio[iTrackedDevice+1];
+            }
+
+            g_TrackedDeviceCount_CoreAudio -= 1;
+
+            /* If there's nothing else in the list we need to free memory. */
+            if (g_TrackedDeviceCount_CoreAudio == 0) {
+                ma_free(g_ppTrackedDevices_CoreAudio, &pDevice->pContext->allocationCallbacks);
+                g_ppTrackedDevices_CoreAudio = NULL;
+                g_TrackedDeviceCap_CoreAudio = 0;
+            }
+
+            break;
+        }
+    }
+    ma_mutex_unlock(&g_DeviceTrackingMutex_CoreAudio);
+
+    return MA_SUCCESS;
+}
+#endif
+
+#if defined(MA_APPLE_MOBILE)
+/* Objective-C helper that forwards AVAudioSession route-change and interruption notifications to a single ma_device (mobile only). */
+@interface ma_ios_notification_handler:NSObject {
+    ma_device* m_pDevice;
+}
+@end
+
+@implementation ma_ios_notification_handler
+/* Registers this object as an observer of the shared AVAudioSession for route changes and interruptions. */
+-(id)init:(ma_device*)pDevice
+{
+    self = [super init];
+    m_pDevice = pDevice;
+
+    /* For route changes. */
+    [[NSNotificationCenter defaultCenter] addObserver:self selector:@selector(handle_route_change:) name:AVAudioSessionRouteChangeNotification object:[AVAudioSession sharedInstance]];
+
+    /* For interruptions. */
+    [[NSNotificationCenter defaultCenter] addObserver:self selector:@selector(handle_interruption:) name:AVAudioSessionInterruptionNotification object:[AVAudioSession sharedInstance]];
+
+    return self;
+}
+
+-(void)dealloc
+{
+    /* Make sure observers are removed before the object goes away. */
+    [self remove_handler];
+}
+
+/* Unregisters the notification observers. Safe to call more than once. */
+-(void)remove_handler
+{
+    [[NSNotificationCenter defaultCenter] removeObserver:self name:AVAudioSessionRouteChangeNotification object:nil];
+    [[NSNotificationCenter defaultCenter] removeObserver:self name:AVAudioSessionInterruptionNotification object:nil];
+}
+
+/* Handles AVAudioSessionInterruptionNotification (e.g. phone call, alarm). */
+-(void)handle_interruption:(NSNotification*)pNotification
+{
+    NSInteger type = [[[pNotification userInfo] objectForKey:AVAudioSessionInterruptionTypeKey] integerValue];
+    switch (type)
+    {
+        case AVAudioSessionInterruptionTypeBegan:
+        {
+            ma_log_postf(ma_device_get_log(m_pDevice), MA_LOG_LEVEL_INFO, "[Core Audio] Interruption: AVAudioSessionInterruptionTypeBegan\n");
+
+            /*
+            Core Audio will have stopped the internal device automatically, but we need to explicitly
+            stop it at a higher level to ensure miniaudio-specific state is updated for consistency.
+            */
+            ma_device_stop(m_pDevice);
+
+            /*
+            Fire the notification after the device has been stopped to ensure it's in the correct
+            state when the notification handler is invoked.
+            */
+            ma_device__on_notification_interruption_began(m_pDevice);
+        } break;
+
+        case AVAudioSessionInterruptionTypeEnded:
+        {
+            ma_log_postf(ma_device_get_log(m_pDevice), MA_LOG_LEVEL_INFO, "[Core Audio] Interruption: AVAudioSessionInterruptionTypeEnded\n");
+            ma_device__on_notification_interruption_ended(m_pDevice);
+        } break;
+    }
+}
+
+/* Handles AVAudioSessionRouteChangeNotification. Each reason is logged; the client is always notified of the reroute. */
+-(void)handle_route_change:(NSNotification*)pNotification
+{
+    AVAudioSession* pSession = [AVAudioSession sharedInstance];
+
+    NSInteger reason = [[[pNotification userInfo] objectForKey:AVAudioSessionRouteChangeReasonKey] integerValue];
+    switch (reason)
+    {
+        case AVAudioSessionRouteChangeReasonOldDeviceUnavailable:
+        {
+            ma_log_postf(ma_device_get_log(m_pDevice), MA_LOG_LEVEL_INFO, "[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonOldDeviceUnavailable\n");
+        } break;
+
+        case AVAudioSessionRouteChangeReasonNewDeviceAvailable:
+        {
+            ma_log_postf(ma_device_get_log(m_pDevice), MA_LOG_LEVEL_INFO, "[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonNewDeviceAvailable\n");
+        } break;
+
+        case AVAudioSessionRouteChangeReasonNoSuitableRouteForCategory:
+        {
+            ma_log_postf(ma_device_get_log(m_pDevice), MA_LOG_LEVEL_INFO, "[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonNoSuitableRouteForCategory\n");
+        } break;
+
+        case AVAudioSessionRouteChangeReasonWakeFromSleep:
+        {
+            ma_log_postf(ma_device_get_log(m_pDevice), MA_LOG_LEVEL_INFO, "[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonWakeFromSleep\n");
+        } break;
+
+        case AVAudioSessionRouteChangeReasonOverride:
+        {
+            ma_log_postf(ma_device_get_log(m_pDevice), MA_LOG_LEVEL_INFO, "[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonOverride\n");
+        } break;
+
+        case AVAudioSessionRouteChangeReasonCategoryChange:
+        {
+            ma_log_postf(ma_device_get_log(m_pDevice), MA_LOG_LEVEL_INFO, "[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonCategoryChange\n");
+        } break;
+
+        case AVAudioSessionRouteChangeReasonUnknown:
+        default:
+        {
+            ma_log_postf(ma_device_get_log(m_pDevice), MA_LOG_LEVEL_INFO, "[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonUnknown\n");
+        } break;
+    }
+
+    ma_log_postf(ma_device_get_log(m_pDevice), MA_LOG_LEVEL_DEBUG, "[Core Audio] Changing Route. inputNumberChannels=%d; outputNumberOfChannels=%d\n", (int)pSession.inputNumberOfChannels, (int)pSession.outputNumberOfChannels);
+
+    /* Let the application know about the route change. */
+    ma_device__on_notification_rerouted(m_pDevice);
+}
+@end
+#endif
+
+/*
+Backend uninitialization for Core Audio. Unregisters the device from default-device tracking
+(desktop) or from the AVAudioSession notification handler (mobile), disposes the audio units
+and frees the capture AudioBufferList. The device must already be in the uninitialized state.
+*/
+static ma_result ma_device_uninit__coreaudio(ma_device* pDevice)
+{
+    MA_ASSERT(pDevice != NULL);
+    MA_ASSERT(ma_device_get_state(pDevice) == ma_device_state_uninitialized);
+
+#if defined(MA_APPLE_DESKTOP)
+    /*
+    Make sure we're no longer tracking the device. It doesn't matter if we call this for a non-default device because it'll
+    just gracefully ignore it.
+    */
+    ma_device__untrack__coreaudio(pDevice);
+#endif
+#if defined(MA_APPLE_MOBILE)
+    if (pDevice->coreaudio.pNotificationHandler != NULL) {
+        /* NOTE(review): MA_BRIDGE_TRANSFER presumably hands ownership to ARC so the handler is released when this local goes out of scope — confirm against the MRC build. */
+        ma_ios_notification_handler* pNotificationHandler = (MA_BRIDGE_TRANSFER ma_ios_notification_handler*)pDevice->coreaudio.pNotificationHandler;
+        [pNotificationHandler remove_handler];
+    }
+#endif
+
+    /* Dispose the audio units for whichever directions were initialized. */
+    if (pDevice->coreaudio.audioUnitCapture != NULL) {
+        ((ma_AudioComponentInstanceDispose_proc)pDevice->pContext->coreaudio.AudioComponentInstanceDispose)((AudioUnit)pDevice->coreaudio.audioUnitCapture);
+    }
+    if (pDevice->coreaudio.audioUnitPlayback != NULL) {
+        ((ma_AudioComponentInstanceDispose_proc)pDevice->pContext->coreaudio.AudioComponentInstanceDispose)((AudioUnit)pDevice->coreaudio.audioUnitPlayback);
+    }
+
+    /* The intermediary buffer list is only allocated for capture devices. */
+    if (pDevice->coreaudio.pAudioBufferList) {
+        ma_free(pDevice->coreaudio.pAudioBufferList, &pDevice->pContext->allocationCallbacks);
+    }
+
+    return MA_SUCCESS;
+}
+
+/*
+Input/output bundle for ma_device_init_internal__coreaudio(). The "In" members describe what
+the caller requested; the "Out" members are filled with what was actually negotiated with the
+audio unit.
+*/
+typedef struct
+{
+    ma_bool32 allowNominalSampleRateChange; /* When true, the device's OS-level nominal sample rate may be changed to match the request (desktop only). */
+
+    /* Input. */
+    ma_format formatIn;
+    ma_uint32 channelsIn;
+    ma_uint32 sampleRateIn;
+    ma_channel channelMapIn[MA_MAX_CHANNELS];
+    ma_uint32 periodSizeInFramesIn;
+    ma_uint32 periodSizeInMillisecondsIn;
+    ma_uint32 periodsIn;
+    ma_share_mode shareMode;
+    ma_performance_profile performanceProfile;
+    ma_bool32 registerStopEvent; /* Whether on_start_stop__coreaudio should be registered as a property listener. */
+
+    /* Output. */
+#if defined(MA_APPLE_DESKTOP)
+    AudioObjectID deviceObjectID; /* The resolved HAL device object. */
+#endif
+    AudioComponent component;
+    AudioUnit audioUnit; /* The initialized audio unit. Owned by the caller on success. */
+    AudioBufferList* pAudioBufferList;  /* Only used for input devices. */
+    ma_format formatOut;
+    ma_uint32 channelsOut;
+    ma_uint32 sampleRateOut;
+    ma_channel channelMapOut[MA_MAX_CHANNELS];
+    ma_uint32 periodSizeInFramesOut;
+    ma_uint32 periodsOut;
+    char deviceName[256];
+} ma_device_init_internal_data__coreaudio;
+
+static ma_result ma_device_init_internal__coreaudio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_init_internal_data__coreaudio* pData, void* pDevice_DoNotReference) /* <-- pDevice is typed as void* intentionally so as to avoid accidentally referencing it. */
+{
+ ma_result result;
+ OSStatus status;
+ UInt32 enableIOFlag;
+ AudioStreamBasicDescription bestFormat;
+ UInt32 actualPeriodSizeInFrames;
+ AURenderCallbackStruct callbackInfo;
+#if defined(MA_APPLE_DESKTOP)
+ AudioObjectID deviceObjectID;
+#endif
+
+ /* This API should only be used for a single device type: playback or capture. No full-duplex mode. */
+ if (deviceType == ma_device_type_duplex) {
+ return MA_INVALID_ARGS;
+ }
+
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(deviceType == ma_device_type_playback || deviceType == ma_device_type_capture);
+
+#if defined(MA_APPLE_DESKTOP)
+ pData->deviceObjectID = 0;
+#endif
+ pData->component = NULL;
+ pData->audioUnit = NULL;
+ pData->pAudioBufferList = NULL;
+
+#if defined(MA_APPLE_DESKTOP)
+ result = ma_find_AudioObjectID(pContext, deviceType, pDeviceID, &deviceObjectID);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ pData->deviceObjectID = deviceObjectID;
+#endif
+
+ /* Core audio doesn't really use the notion of a period so we can leave this unmodified, but not too over the top. */
+ pData->periodsOut = pData->periodsIn;
+ if (pData->periodsOut == 0) {
+ pData->periodsOut = MA_DEFAULT_PERIODS;
+ }
+ if (pData->periodsOut > 16) {
+ pData->periodsOut = 16;
+ }
+
+
+ /* Audio unit. */
+ status = ((ma_AudioComponentInstanceNew_proc)pContext->coreaudio.AudioComponentInstanceNew)((AudioComponent)pContext->coreaudio.component, (AudioUnit*)&pData->audioUnit);
+ if (status != noErr) {
+ return ma_result_from_OSStatus(status);
+ }
+
+
+ /* The input/output buses need to be explicitly enabled and disabled. We set the flag based on the output unit first, then we just swap it for input. */
+ enableIOFlag = 1;
+ if (deviceType == ma_device_type_capture) {
+ enableIOFlag = 0;
+ }
+
+ status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, MA_COREAUDIO_OUTPUT_BUS, &enableIOFlag, sizeof(enableIOFlag));
+ if (status != noErr) {
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+ return ma_result_from_OSStatus(status);
+ }
+
+ enableIOFlag = (enableIOFlag == 0) ? 1 : 0;
+ status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, MA_COREAUDIO_INPUT_BUS, &enableIOFlag, sizeof(enableIOFlag));
+ if (status != noErr) {
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+ return ma_result_from_OSStatus(status);
+ }
+
+
+ /* Set the device to use with this audio unit. This is only used on desktop since we are using defaults on mobile. */
+#if defined(MA_APPLE_DESKTOP)
+ status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, 0, &deviceObjectID, sizeof(deviceObjectID));
+ if (status != noErr) {
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+ return ma_result_from_OSStatus(result);
+ }
+#else
+ /*
+ For some reason it looks like Apple is only allowing selection of the input device. There does not appear to be any way to change
+ the default output route. I have no idea why this is like this, but for now we'll only be able to configure capture devices.
+ */
+ if (pDeviceID != NULL) {
+ if (deviceType == ma_device_type_capture) {
+ ma_bool32 found = MA_FALSE;
+ NSArray *pInputs = [[[AVAudioSession sharedInstance] currentRoute] inputs];
+ for (AVAudioSessionPortDescription* pPortDesc in pInputs) {
+ if (strcmp(pDeviceID->coreaudio, [pPortDesc.UID UTF8String]) == 0) {
+ [[AVAudioSession sharedInstance] setPreferredInput:pPortDesc error:nil];
+ found = MA_TRUE;
+ break;
+ }
+ }
+
+ if (found == MA_FALSE) {
+ return MA_DOES_NOT_EXIST;
+ }
+ }
+ }
+#endif
+
+ /*
+ Format. This is the hardest part of initialization because there's a few variables to take into account.
+ 1) The format must be supported by the device.
+    2) The format must be supported by miniaudio.
+ 3) There's a priority that miniaudio prefers.
+
+ Ideally we would like to use a format that's as close to the hardware as possible so we can get as close to a passthrough as possible. The
+ most important property is the sample rate. miniaudio can do format conversion for any sample rate and channel count, but cannot do the same
+ for the sample data format. If the sample data format is not supported by miniaudio it must be ignored completely.
+
+ On mobile platforms this is a bit different. We just force the use of whatever the audio unit's current format is set to.
+ */
+ {
+ AudioStreamBasicDescription origFormat;
+ UInt32 origFormatSize = sizeof(origFormat);
+ AudioUnitScope formatScope = (deviceType == ma_device_type_playback) ? kAudioUnitScope_Input : kAudioUnitScope_Output;
+ AudioUnitElement formatElement = (deviceType == ma_device_type_playback) ? MA_COREAUDIO_OUTPUT_BUS : MA_COREAUDIO_INPUT_BUS;
+
+ if (deviceType == ma_device_type_playback) {
+ status = ((ma_AudioUnitGetProperty_proc)pContext->coreaudio.AudioUnitGetProperty)(pData->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, MA_COREAUDIO_OUTPUT_BUS, &origFormat, &origFormatSize);
+ } else {
+ status = ((ma_AudioUnitGetProperty_proc)pContext->coreaudio.AudioUnitGetProperty)(pData->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, MA_COREAUDIO_INPUT_BUS, &origFormat, &origFormatSize);
+ }
+ if (status != noErr) {
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+ return ma_result_from_OSStatus(status);
+ }
+
+ #if defined(MA_APPLE_DESKTOP)
+ result = ma_find_best_format__coreaudio(pContext, deviceObjectID, deviceType, pData->formatIn, pData->channelsIn, pData->sampleRateIn, &origFormat, &bestFormat);
+ if (result != MA_SUCCESS) {
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+ return result;
+ }
+
+ /*
+ Technical Note TN2091: Device input using the HAL Output Audio Unit
+ https://developer.apple.com/library/archive/technotes/tn2091/_index.html
+
+ This documentation says the following:
+
+ The internal AudioConverter can handle any *simple* conversion. Typically, this means that a client can specify ANY
+ variant of the PCM formats. Consequently, the device's sample rate should match the desired sample rate. If sample rate
+ conversion is needed, it can be accomplished by buffering the input and converting the data on a separate thread with
+ another AudioConverter.
+
+ The important part here is the mention that it can handle *simple* conversions, which does *not* include sample rate. We
+ therefore want to ensure the sample rate stays consistent. This document is specifically for input, but I'm going to play it
+ safe and apply the same rule to output as well.
+
+ I have tried going against the documentation by setting the sample rate anyway, but this just results in AudioUnitRender()
+ returning a result code of -10863. I have also tried changing the format directly on the input scope on the input bus, but
+ this just results in `ca_require: IsStreamFormatWritable(inScope, inElement) NotWritable` when trying to set the format.
+
+    Something that does seem to work, however, has been setting the nominal sample rate on the device object. The problem with
+ this, however, is that it actually changes the sample rate at the operating system level and not just the application. This
+ could be intrusive to the user, however, so I don't think it's wise to make this the default. Instead I'm making this a
+ configuration option. When the `coreaudio.allowNominalSampleRateChange` config option is set to true, changing the sample
+ rate will be allowed. Otherwise it'll be fixed to the current sample rate. To check the system-defined sample rate, run
+ the Audio MIDI Setup program that comes installed on macOS and observe how the sample rate changes as the sample rate is
+ changed by miniaudio.
+ */
+ if (pData->allowNominalSampleRateChange) {
+ AudioValueRange sampleRateRange;
+ AudioObjectPropertyAddress propAddress;
+
+ sampleRateRange.mMinimum = bestFormat.mSampleRate;
+ sampleRateRange.mMaximum = bestFormat.mSampleRate;
+
+ propAddress.mSelector = kAudioDevicePropertyNominalSampleRate;
+ propAddress.mScope = (deviceType == ma_device_type_playback) ? kAudioObjectPropertyScopeOutput : kAudioObjectPropertyScopeInput;
+ propAddress.mElement = kAudioObjectPropertyElementMaster;
+
+ status = ((ma_AudioObjectSetPropertyData_proc)pContext->coreaudio.AudioObjectSetPropertyData)(deviceObjectID, &propAddress, 0, NULL, sizeof(sampleRateRange), &sampleRateRange);
+ if (status != noErr) {
+ bestFormat.mSampleRate = origFormat.mSampleRate;
+ }
+ } else {
+ bestFormat.mSampleRate = origFormat.mSampleRate;
+ }
+
+ status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioUnitProperty_StreamFormat, formatScope, formatElement, &bestFormat, sizeof(bestFormat));
+ if (status != noErr) {
+ /* We failed to set the format, so fall back to the current format of the audio unit. */
+ bestFormat = origFormat;
+ }
+ #else
+ bestFormat = origFormat;
+
+ /*
+ Sample rate is a little different here because for some reason kAudioUnitProperty_StreamFormat returns 0... Oh well. We need to instead try
+ setting the sample rate to what the user has requested and then just see the results of it. Need to use some Objective-C here for this since
+ it depends on Apple's AVAudioSession API. To do this we just get the shared AVAudioSession instance and then set it. Note that from what I
+ can tell, it looks like the sample rate is shared between playback and capture for everything.
+ */
+ @autoreleasepool {
+ AVAudioSession* pAudioSession = [AVAudioSession sharedInstance];
+ MA_ASSERT(pAudioSession != NULL);
+
+ [pAudioSession setPreferredSampleRate:(double)pData->sampleRateIn error:nil];
+ bestFormat.mSampleRate = pAudioSession.sampleRate;
+
+ /*
+ I've had a report that the channel count returned by AudioUnitGetProperty above is inconsistent with
+ AVAudioSession outputNumberOfChannels. I'm going to try using the AVAudioSession values instead.
+ */
+ if (deviceType == ma_device_type_playback) {
+ bestFormat.mChannelsPerFrame = (UInt32)pAudioSession.outputNumberOfChannels;
+ }
+ if (deviceType == ma_device_type_capture) {
+ bestFormat.mChannelsPerFrame = (UInt32)pAudioSession.inputNumberOfChannels;
+ }
+ }
+
+ status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioUnitProperty_StreamFormat, formatScope, formatElement, &bestFormat, sizeof(bestFormat));
+ if (status != noErr) {
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+ return ma_result_from_OSStatus(status);
+ }
+ #endif
+
+ result = ma_format_from_AudioStreamBasicDescription(&bestFormat, &pData->formatOut);
+ if (result != MA_SUCCESS) {
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+ return result;
+ }
+
+ if (pData->formatOut == ma_format_unknown) {
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+ return MA_FORMAT_NOT_SUPPORTED;
+ }
+
+ pData->channelsOut = bestFormat.mChannelsPerFrame;
+ pData->sampleRateOut = bestFormat.mSampleRate;
+ }
+
+ /* Clamp the channel count for safety. */
+ if (pData->channelsOut > MA_MAX_CHANNELS) {
+ pData->channelsOut = MA_MAX_CHANNELS;
+ }
+
+ /*
+ Internal channel map. This is weird in my testing. If I use the AudioObject to get the
+ channel map, the channel descriptions are set to "Unknown" for some reason. To work around
+ this it looks like retrieving it from the AudioUnit will work. However, and this is where
+ it gets weird, it doesn't seem to work with capture devices, nor at all on iOS... Therefore
+ I'm going to fall back to a default assumption in these cases.
+ */
+#if defined(MA_APPLE_DESKTOP)
+ result = ma_get_AudioUnit_channel_map(pContext, pData->audioUnit, deviceType, pData->channelMapOut, pData->channelsOut);
+ if (result != MA_SUCCESS) {
+ #if 0
+ /* Try falling back to the channel map from the AudioObject. */
+ result = ma_get_AudioObject_channel_map(pContext, deviceObjectID, deviceType, pData->channelMapOut, pData->channelsOut);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+ #else
+ /* Fall back to default assumptions. */
+ ma_channel_map_init_standard(ma_standard_channel_map_default, pData->channelMapOut, ma_countof(pData->channelMapOut), pData->channelsOut);
+ #endif
+ }
+#else
+ /* TODO: Figure out how to get the channel map using AVAudioSession. */
+ ma_channel_map_init_standard(ma_standard_channel_map_default, pData->channelMapOut, ma_countof(pData->channelMapOut), pData->channelsOut);
+#endif
+
+
+ /* Buffer size. Not allowing this to be configurable on iOS. */
+ if (pData->periodSizeInFramesIn == 0) {
+ if (pData->periodSizeInMillisecondsIn == 0) {
+ if (pData->performanceProfile == ma_performance_profile_low_latency) {
+ actualPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_LOW_LATENCY, pData->sampleRateOut);
+ } else {
+ actualPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_CONSERVATIVE, pData->sampleRateOut);
+ }
+ } else {
+ actualPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pData->periodSizeInMillisecondsIn, pData->sampleRateOut);
+ }
+ } else {
+ actualPeriodSizeInFrames = pData->periodSizeInFramesIn;
+ }
+
+#if defined(MA_APPLE_DESKTOP)
+ result = ma_set_AudioObject_buffer_size_in_frames(pContext, deviceObjectID, deviceType, &actualPeriodSizeInFrames);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+#else
+ /*
+ On iOS, the size of the IO buffer needs to be specified in seconds and is a floating point
+ number. I don't trust any potential truncation errors due to converting from float to integer
+ so I'm going to explicitly set the actual period size to the next power of 2.
+ */
+ @autoreleasepool {
+ AVAudioSession* pAudioSession = [AVAudioSession sharedInstance];
+ MA_ASSERT(pAudioSession != NULL);
+
+ [pAudioSession setPreferredIOBufferDuration:((float)actualPeriodSizeInFrames / pAudioSession.sampleRate) error:nil];
+ actualPeriodSizeInFrames = ma_next_power_of_2((ma_uint32)(pAudioSession.IOBufferDuration * pAudioSession.sampleRate));
+ }
+#endif
+
+
+ /*
+ During testing I discovered that the buffer size can be too big. You'll get an error like this:
+
+ kAudioUnitErr_TooManyFramesToProcess : inFramesToProcess=4096, mMaxFramesPerSlice=512
+
+ Note how inFramesToProcess is smaller than mMaxFramesPerSlice. To fix, we need to set kAudioUnitProperty_MaximumFramesPerSlice to that
+ of the size of our buffer, or do it the other way around and set our buffer size to the kAudioUnitProperty_MaximumFramesPerSlice.
+ */
+ status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &actualPeriodSizeInFrames, sizeof(actualPeriodSizeInFrames));
+ if (status != noErr) {
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+ return ma_result_from_OSStatus(status);
+ }
+
+ pData->periodSizeInFramesOut = (ma_uint32)actualPeriodSizeInFrames;
+
+ /* We need a buffer list if this is an input device. We render into this in the input callback. */
+ if (deviceType == ma_device_type_capture) {
+ ma_bool32 isInterleaved = (bestFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved) == 0;
+ AudioBufferList* pBufferList;
+
+ pBufferList = ma_allocate_AudioBufferList__coreaudio(pData->periodSizeInFramesOut, pData->formatOut, pData->channelsOut, (isInterleaved) ? ma_stream_layout_interleaved : ma_stream_layout_deinterleaved, &pContext->allocationCallbacks);
+ if (pBufferList == NULL) {
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+ return MA_OUT_OF_MEMORY;
+ }
+
+ pData->pAudioBufferList = pBufferList;
+ }
+
+ /* Callbacks. */
+ callbackInfo.inputProcRefCon = pDevice_DoNotReference;
+ if (deviceType == ma_device_type_playback) {
+ callbackInfo.inputProc = ma_on_output__coreaudio;
+ status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Global, 0, &callbackInfo, sizeof(callbackInfo));
+ if (status != noErr) {
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+ return ma_result_from_OSStatus(status);
+ }
+ } else {
+ callbackInfo.inputProc = ma_on_input__coreaudio;
+ status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &callbackInfo, sizeof(callbackInfo));
+ if (status != noErr) {
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+ return ma_result_from_OSStatus(status);
+ }
+ }
+
+ /* We need to listen for stop events. */
+ if (pData->registerStopEvent) {
+ status = ((ma_AudioUnitAddPropertyListener_proc)pContext->coreaudio.AudioUnitAddPropertyListener)(pData->audioUnit, kAudioOutputUnitProperty_IsRunning, on_start_stop__coreaudio, pDevice_DoNotReference);
+ if (status != noErr) {
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+ return ma_result_from_OSStatus(status);
+ }
+ }
+
+ /* Initialize the audio unit. */
+ status = ((ma_AudioUnitInitialize_proc)pContext->coreaudio.AudioUnitInitialize)(pData->audioUnit);
+ if (status != noErr) {
+ ma_free(pData->pAudioBufferList, &pContext->allocationCallbacks);
+ pData->pAudioBufferList = NULL;
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+ return ma_result_from_OSStatus(status);
+ }
+
+ /* Grab the name. */
+#if defined(MA_APPLE_DESKTOP)
+ ma_get_AudioObject_name(pContext, deviceObjectID, sizeof(pData->deviceName), pData->deviceName);
+#else
+ if (deviceType == ma_device_type_playback) {
+ ma_strcpy_s(pData->deviceName, sizeof(pData->deviceName), MA_DEFAULT_PLAYBACK_DEVICE_NAME);
+ } else {
+ ma_strcpy_s(pData->deviceName, sizeof(pData->deviceName), MA_DEFAULT_CAPTURE_DEVICE_NAME);
+ }
+#endif
+
+ return result;
+}
+
+#if defined(MA_APPLE_DESKTOP)
+/*
+Recreates the internal audio unit for ONE direction (playback or capture) of an existing
+device. Used on desktop when the underlying device needs to be swapped out (e.g. the
+system default device changed). Duplex is explicitly rejected; each side is reinitialized
+with a separate call.
+
+pDevice                  - [in] Device whose audio unit is being recreated.
+deviceType               - [in] ma_device_type_playback or ma_device_type_capture only.
+disposePreviousAudioUnit - [in] When true, stops and disposes the existing audio unit first.
+
+Returns MA_SUCCESS on success; MA_INVALID_ARGS for duplex; otherwise the result of
+ma_device_init_internal__coreaudio().
+*/
+static ma_result ma_device_reinit_internal__coreaudio(ma_device* pDevice, ma_device_type deviceType, ma_bool32 disposePreviousAudioUnit)
+{
+    ma_device_init_internal_data__coreaudio data;
+    ma_result result;
+
+    /* This should only be called for playback or capture, not duplex. */
+    if (deviceType == ma_device_type_duplex) {
+        return MA_INVALID_ARGS;
+    }
+
+    data.allowNominalSampleRateChange = MA_FALSE; /* Don't change the nominal sample rate when switching devices. */
+
+    /* Rebuild the init data from the device's original (pre-conversion) settings for the requested direction. */
+    if (deviceType == ma_device_type_capture) {
+        data.formatIn = pDevice->capture.format;
+        data.channelsIn = pDevice->capture.channels;
+        data.sampleRateIn = pDevice->sampleRate;
+        MA_COPY_MEMORY(data.channelMapIn, pDevice->capture.channelMap, sizeof(pDevice->capture.channelMap));
+        data.shareMode = pDevice->capture.shareMode;
+        data.performanceProfile = pDevice->coreaudio.originalPerformanceProfile;
+        data.registerStopEvent = MA_TRUE;
+
+        /* Stop must happen before dispose. */
+        if (disposePreviousAudioUnit) {
+            ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitCapture);
+            ((ma_AudioComponentInstanceDispose_proc)pDevice->pContext->coreaudio.AudioComponentInstanceDispose)((AudioUnit)pDevice->coreaudio.audioUnitCapture);
+        }
+        if (pDevice->coreaudio.pAudioBufferList) {
+            /*
+            NOTE(review): the pointer is freed here but only overwritten further below once the
+            reinit succeeds. If ma_device_init_internal__coreaudio() fails, the stale pointer
+            remains in pDevice — confirm callers treat the device as unusable on failure.
+            */
+            ma_free(pDevice->coreaudio.pAudioBufferList, &pDevice->pContext->allocationCallbacks);
+        }
+    } else if (deviceType == ma_device_type_playback) {
+        data.formatIn = pDevice->playback.format;
+        data.channelsIn = pDevice->playback.channels;
+        data.sampleRateIn = pDevice->sampleRate;
+        MA_COPY_MEMORY(data.channelMapIn, pDevice->playback.channelMap, sizeof(pDevice->playback.channelMap));
+        data.shareMode = pDevice->playback.shareMode;
+        data.performanceProfile = pDevice->coreaudio.originalPerformanceProfile;
+        data.registerStopEvent = (pDevice->type != ma_device_type_duplex);    /* In duplex mode the capture side owns the stop event. */
+
+        if (disposePreviousAudioUnit) {
+            ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitPlayback);
+            ((ma_AudioComponentInstanceDispose_proc)pDevice->pContext->coreaudio.AudioComponentInstanceDispose)((AudioUnit)pDevice->coreaudio.audioUnitPlayback);
+        }
+    }
+    data.periodSizeInFramesIn = pDevice->coreaudio.originalPeriodSizeInFrames;
+    data.periodSizeInMillisecondsIn = pDevice->coreaudio.originalPeriodSizeInMilliseconds;
+    data.periodsIn = pDevice->coreaudio.originalPeriods;
+
+    /* Need at least 3 periods for duplex. */
+    if (data.periodsIn < 3 && pDevice->type == ma_device_type_duplex) {
+        data.periodsIn = 3;
+    }
+
+    /* NULL device ID = use the (possibly new) default device for this direction. */
+    result = ma_device_init_internal__coreaudio(pDevice->pContext, deviceType, NULL, &data, (void*)pDevice);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    /* Copy the negotiated ("internal") properties back onto the device. */
+    if (deviceType == ma_device_type_capture) {
+    #if defined(MA_APPLE_DESKTOP)
+        pDevice->coreaudio.deviceObjectIDCapture = (ma_uint32)data.deviceObjectID;
+        ma_get_AudioObject_uid(pDevice->pContext, pDevice->coreaudio.deviceObjectIDCapture, sizeof(pDevice->capture.id.coreaudio), pDevice->capture.id.coreaudio);
+    #endif
+        pDevice->coreaudio.audioUnitCapture = (ma_ptr)data.audioUnit;
+        pDevice->coreaudio.pAudioBufferList = (ma_ptr)data.pAudioBufferList;
+        pDevice->coreaudio.audioBufferCapInFrames = data.periodSizeInFramesOut;
+
+        pDevice->capture.internalFormat = data.formatOut;
+        pDevice->capture.internalChannels = data.channelsOut;
+        pDevice->capture.internalSampleRate = data.sampleRateOut;
+        MA_COPY_MEMORY(pDevice->capture.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut));
+        pDevice->capture.internalPeriodSizeInFrames = data.periodSizeInFramesOut;
+        pDevice->capture.internalPeriods = data.periodsOut;
+    } else if (deviceType == ma_device_type_playback) {
+    #if defined(MA_APPLE_DESKTOP)
+        pDevice->coreaudio.deviceObjectIDPlayback = (ma_uint32)data.deviceObjectID;
+        ma_get_AudioObject_uid(pDevice->pContext, pDevice->coreaudio.deviceObjectIDPlayback, sizeof(pDevice->playback.id.coreaudio), pDevice->playback.id.coreaudio);
+    #endif
+        pDevice->coreaudio.audioUnitPlayback = (ma_ptr)data.audioUnit;
+
+        pDevice->playback.internalFormat = data.formatOut;
+        pDevice->playback.internalChannels = data.channelsOut;
+        pDevice->playback.internalSampleRate = data.sampleRateOut;
+        MA_COPY_MEMORY(pDevice->playback.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut));
+        pDevice->playback.internalPeriodSizeInFrames = data.periodSizeInFramesOut;
+        pDevice->playback.internalPeriods = data.periodsOut;
+    }
+
+    return MA_SUCCESS;
+}
+#endif /* MA_APPLE_DESKTOP */
+
+/*
+Backend device init for Core Audio. Initializes the capture side first (in duplex mode the
+playback buffer sizing is copied from the resolved capture side), then playback, writing the
+negotiated native properties back into the descriptors. Loopback and exclusive mode are not
+supported by this backend.
+*/
+static ma_result ma_device_init__coreaudio(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture)
+{
+    ma_result result;
+
+    MA_ASSERT(pDevice != NULL);
+    MA_ASSERT(pConfig != NULL);
+
+    if (pConfig->deviceType == ma_device_type_loopback) {
+        return MA_DEVICE_TYPE_NOT_SUPPORTED;
+    }
+
+    /* No exclusive mode with the Core Audio backend for now. */
+    if (((pConfig->deviceType == ma_device_type_capture  || pConfig->deviceType == ma_device_type_duplex) && pDescriptorCapture->shareMode  == ma_share_mode_exclusive) ||
+        ((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pDescriptorPlayback->shareMode == ma_share_mode_exclusive)) {
+        return MA_SHARE_MODE_NOT_SUPPORTED;
+    }
+
+    /* Capture needs to be initialized first. */
+    if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
+        ma_device_init_internal_data__coreaudio data;
+        data.allowNominalSampleRateChange = pConfig->coreaudio.allowNominalSampleRateChange;
+        data.formatIn = pDescriptorCapture->format;
+        data.channelsIn = pDescriptorCapture->channels;
+        data.sampleRateIn = pDescriptorCapture->sampleRate;
+        MA_COPY_MEMORY(data.channelMapIn, pDescriptorCapture->channelMap, sizeof(pDescriptorCapture->channelMap));
+        data.periodSizeInFramesIn = pDescriptorCapture->periodSizeInFrames;
+        data.periodSizeInMillisecondsIn = pDescriptorCapture->periodSizeInMilliseconds;
+        data.periodsIn = pDescriptorCapture->periodCount;
+        data.shareMode = pDescriptorCapture->shareMode;
+        data.performanceProfile = pConfig->performanceProfile;
+        data.registerStopEvent = MA_TRUE;
+
+        /* Need at least 3 periods for duplex. */
+        if (data.periodsIn < 3 && pConfig->deviceType == ma_device_type_duplex) {
+            data.periodsIn = 3;
+        }
+
+        result = ma_device_init_internal__coreaudio(pDevice->pContext, ma_device_type_capture, pDescriptorCapture->pDeviceID, &data, (void*)pDevice);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+
+        pDevice->coreaudio.isDefaultCaptureDevice   = (pConfig->capture.pDeviceID == NULL);
+    #if defined(MA_APPLE_DESKTOP)
+        pDevice->coreaudio.deviceObjectIDCapture    = (ma_uint32)data.deviceObjectID;
+    #endif
+        pDevice->coreaudio.audioUnitCapture         = (ma_ptr)data.audioUnit;
+        pDevice->coreaudio.pAudioBufferList         = (ma_ptr)data.pAudioBufferList;
+        pDevice->coreaudio.audioBufferCapInFrames   = data.periodSizeInFramesOut;
+        /* Keep the originally-requested sizing so the device can be reinitialized later with the same settings. */
+        pDevice->coreaudio.originalPeriodSizeInFrames       = pDescriptorCapture->periodSizeInFrames;
+        pDevice->coreaudio.originalPeriodSizeInMilliseconds = pDescriptorCapture->periodSizeInMilliseconds;
+        pDevice->coreaudio.originalPeriods                  = pDescriptorCapture->periodCount;
+        pDevice->coreaudio.originalPerformanceProfile       = pConfig->performanceProfile;
+
+        /* Report the negotiated native properties back to the caller. */
+        pDescriptorCapture->format = data.formatOut;
+        pDescriptorCapture->channels = data.channelsOut;
+        pDescriptorCapture->sampleRate = data.sampleRateOut;
+        MA_COPY_MEMORY(pDescriptorCapture->channelMap, data.channelMapOut, sizeof(data.channelMapOut));
+        pDescriptorCapture->periodSizeInFrames = data.periodSizeInFramesOut;
+        pDescriptorCapture->periodCount = data.periodsOut;
+
+    #if defined(MA_APPLE_DESKTOP)
+        ma_get_AudioObject_uid(pDevice->pContext, pDevice->coreaudio.deviceObjectIDCapture, sizeof(pDevice->capture.id.coreaudio), pDevice->capture.id.coreaudio);
+
+        /*
+        If we are using the default device we'll need to listen for changes to the system's default device so we can seamlessly
+        switch the device in the background.
+        */
+        if (pConfig->capture.pDeviceID == NULL) {
+            ma_device__track__coreaudio(pDevice);
+        }
+    #endif
+    }
+
+    /* Playback. */
+    if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+        ma_device_init_internal_data__coreaudio data;
+        data.allowNominalSampleRateChange = pConfig->coreaudio.allowNominalSampleRateChange;
+        data.formatIn = pDescriptorPlayback->format;
+        data.channelsIn = pDescriptorPlayback->channels;
+        data.sampleRateIn = pDescriptorPlayback->sampleRate;
+        MA_COPY_MEMORY(data.channelMapIn, pDescriptorPlayback->channelMap, sizeof(pDescriptorPlayback->channelMap));
+        data.shareMode = pDescriptorPlayback->shareMode;
+        data.performanceProfile = pConfig->performanceProfile;
+
+        /* In full-duplex mode we want the playback buffer to be the same size as the capture buffer. */
+        if (pConfig->deviceType == ma_device_type_duplex) {
+            /*
+            periodSizeInMillisecondsIn is deliberately left unset here: it is only consulted when
+            periodSizeInFramesIn is 0, and the capture side has already resolved a non-zero frame count.
+            */
+            data.periodSizeInFramesIn       = pDescriptorCapture->periodSizeInFrames;
+            data.periodsIn                  = pDescriptorCapture->periodCount;
+            data.registerStopEvent          = MA_FALSE;     /* The capture side already registered the stop event. */
+        } else {
+            data.periodSizeInFramesIn       = pDescriptorPlayback->periodSizeInFrames;
+            data.periodSizeInMillisecondsIn = pDescriptorPlayback->periodSizeInMilliseconds;
+            data.periodsIn                  = pDescriptorPlayback->periodCount;
+            data.registerStopEvent          = MA_TRUE;
+        }
+
+        result = ma_device_init_internal__coreaudio(pDevice->pContext, ma_device_type_playback, pDescriptorPlayback->pDeviceID, &data, (void*)pDevice);
+        if (result != MA_SUCCESS) {
+            /* Duplex rollback: undo the capture-side initialization performed above. */
+            if (pConfig->deviceType == ma_device_type_duplex) {
+                ((ma_AudioComponentInstanceDispose_proc)pDevice->pContext->coreaudio.AudioComponentInstanceDispose)((AudioUnit)pDevice->coreaudio.audioUnitCapture);
+                if (pDevice->coreaudio.pAudioBufferList) {
+                    ma_free(pDevice->coreaudio.pAudioBufferList, &pDevice->pContext->allocationCallbacks);
+                }
+            }
+            return result;
+        }
+
+        pDevice->coreaudio.isDefaultPlaybackDevice   = (pConfig->playback.pDeviceID == NULL);
+    #if defined(MA_APPLE_DESKTOP)
+        pDevice->coreaudio.deviceObjectIDPlayback    = (ma_uint32)data.deviceObjectID;
+    #endif
+        pDevice->coreaudio.audioUnitPlayback         = (ma_ptr)data.audioUnit;
+        /* NOTE: in duplex mode these overwrite the capture side's "original" values stored above. */
+        pDevice->coreaudio.originalPeriodSizeInFrames       = pDescriptorPlayback->periodSizeInFrames;
+        pDevice->coreaudio.originalPeriodSizeInMilliseconds = pDescriptorPlayback->periodSizeInMilliseconds;
+        pDevice->coreaudio.originalPeriods                  = pDescriptorPlayback->periodCount;
+        pDevice->coreaudio.originalPerformanceProfile       = pConfig->performanceProfile;
+
+        /* Report the negotiated native properties back to the caller. */
+        pDescriptorPlayback->format = data.formatOut;
+        pDescriptorPlayback->channels = data.channelsOut;
+        pDescriptorPlayback->sampleRate = data.sampleRateOut;
+        MA_COPY_MEMORY(pDescriptorPlayback->channelMap, data.channelMapOut, sizeof(data.channelMapOut));
+        pDescriptorPlayback->periodSizeInFrames = data.periodSizeInFramesOut;
+        pDescriptorPlayback->periodCount = data.periodsOut;
+
+    #if defined(MA_APPLE_DESKTOP)
+        ma_get_AudioObject_uid(pDevice->pContext, pDevice->coreaudio.deviceObjectIDPlayback, sizeof(pDevice->playback.id.coreaudio), pDevice->playback.id.coreaudio);
+
+        /*
+        If we are using the default device we'll need to listen for changes to the system's default device so we can seamlessly
+        switch the device in the background. (In duplex mode with both sides on the default device the capture branch above
+        already registered tracking, hence the extra condition.)
+        */
+        if (pDescriptorPlayback->pDeviceID == NULL && (pConfig->deviceType != ma_device_type_duplex || pDescriptorCapture->pDeviceID != NULL)) {
+            ma_device__track__coreaudio(pDevice);
+        }
+    #endif
+    }
+
+
+
+    /*
+    When stopping the device, a callback is called on another thread. We need to wait for this callback
+    before returning from ma_device_stop(). This event is used for this.
+    */
+    ma_event_init(&pDevice->coreaudio.stopEvent);
+
+    /*
+    We need to detect when a route has changed so we can update the data conversion pipeline accordingly. This is done
+    differently on non-Desktop Apple platforms.
+    */
+#if defined(MA_APPLE_MOBILE)
+    pDevice->coreaudio.pNotificationHandler = (MA_BRIDGE_RETAINED void*)[[ma_ios_notification_handler alloc] init:pDevice];
+#endif
+
+    return MA_SUCCESS;
+}
+
+
+/*
+Starts the device's audio unit(s). The capture unit is started first; in duplex mode a
+failure to start the playback unit rolls back by stopping the capture unit so nothing is
+left running on error.
+*/
+static ma_result ma_device_start__coreaudio(ma_device* pDevice)
+{
+    ma_AudioOutputUnitStart_proc pfnStart;
+    ma_bool32 wantCapture;
+    ma_bool32 wantPlayback;
+    OSStatus st;
+
+    MA_ASSERT(pDevice != NULL);
+
+    pfnStart     = (ma_AudioOutputUnitStart_proc)pDevice->pContext->coreaudio.AudioOutputUnitStart;
+    wantCapture  = (pDevice->type == ma_device_type_capture  || pDevice->type == ma_device_type_duplex);
+    wantPlayback = (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex);
+
+    if (wantCapture) {
+        st = pfnStart((AudioUnit)pDevice->coreaudio.audioUnitCapture);
+        if (st != noErr) {
+            return ma_result_from_OSStatus(st);
+        }
+    }
+
+    if (wantPlayback) {
+        st = pfnStart((AudioUnit)pDevice->coreaudio.audioUnitPlayback);
+        if (st != noErr) {
+            /* Duplex: don't leave the capture unit running if playback failed to start. */
+            if (pDevice->type == ma_device_type_duplex) {
+                ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitCapture);
+            }
+            return ma_result_from_OSStatus(st);
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+/*
+Stops the device's audio unit(s), then blocks on the stop event until the stop callback
+(fired on another thread) has completed.
+*/
+static ma_result ma_device_stop__coreaudio(ma_device* pDevice)
+{
+    ma_AudioOutputUnitStop_proc pfnStop;
+    ma_bool32 wantCapture;
+    ma_bool32 wantPlayback;
+    OSStatus st;
+
+    MA_ASSERT(pDevice != NULL);
+
+    /* It's not clear from the documentation whether or not AudioOutputUnitStop() actually drains the device or not. */
+
+    pfnStop      = (ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop;
+    wantCapture  = (pDevice->type == ma_device_type_capture  || pDevice->type == ma_device_type_duplex);
+    wantPlayback = (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex);
+
+    if (wantCapture) {
+        st = pfnStop((AudioUnit)pDevice->coreaudio.audioUnitCapture);
+        if (st != noErr) {
+            return ma_result_from_OSStatus(st);
+        }
+    }
+
+    if (wantPlayback) {
+        st = pfnStop((AudioUnit)pDevice->coreaudio.audioUnitPlayback);
+        if (st != noErr) {
+            return ma_result_from_OSStatus(st);
+        }
+    }
+
+    /* We need to wait for the callback to finish before returning. */
+    ma_event_wait(&pDevice->coreaudio.stopEvent);
+    return MA_SUCCESS;
+}
+
+
+/*
+Tears down the Core Audio backend context: deactivates the shared AVAudioSession on mobile
+(unless disabled via the noAudioSessionDeactivate config), closes the runtime-linked
+framework handles on desktop, and uninitializes default-device tracking.
+*/
+static ma_result ma_context_uninit__coreaudio(ma_context* pContext)
+{
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pContext->backend == ma_backend_coreaudio);
+
+#if defined(MA_APPLE_MOBILE)
+    if (!pContext->coreaudio.noAudioSessionDeactivate) {
+        if (![[AVAudioSession sharedInstance] setActive:false error:nil]) {
+            ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "Failed to deactivate audio session.");
+            /*
+            NOTE(review): returning here skips the dlclose/tracking cleanup below, and the
+            error code reads oddly for an uninit path — confirm this is intentional.
+            */
+            return MA_FAILED_TO_INIT_BACKEND;
+        }
+    }
+#endif
+
+#if !defined(MA_NO_RUNTIME_LINKING) && !defined(MA_APPLE_MOBILE)
+    /* Close in reverse order of the dlopen calls in ma_context_init__coreaudio(). */
+    ma_dlclose(pContext, pContext->coreaudio.hAudioUnit);
+    ma_dlclose(pContext, pContext->coreaudio.hCoreAudio);
+    ma_dlclose(pContext, pContext->coreaudio.hCoreFoundation);
+#endif
+
+#if !defined(MA_APPLE_MOBILE)
+    ma_context__uninit_device_tracking__coreaudio(pContext);
+#endif
+
+    (void)pContext;    /* Unreferenced when all of the blocks above are compiled out. */
+    return MA_SUCCESS;
+}
+
+#if defined(MA_APPLE_MOBILE) && defined(__IPHONE_12_0)
+/* Maps miniaudio's iOS session category enum onto the corresponding AVAudioSession category. */
+static AVAudioSessionCategory ma_to_AVAudioSessionCategory(ma_ios_session_category category)
+{
+    /* The "default" and "none" categories are treated different and should not be used as an input into this function. */
+    MA_ASSERT(category != ma_ios_session_category_default);
+    MA_ASSERT(category != ma_ios_session_category_none);
+
+    if (category == ma_ios_session_category_solo_ambient)    { return AVAudioSessionCategorySoloAmbient;   }
+    if (category == ma_ios_session_category_playback)        { return AVAudioSessionCategoryPlayback;      }
+    if (category == ma_ios_session_category_record)          { return AVAudioSessionCategoryRecord;        }
+    if (category == ma_ios_session_category_play_and_record) { return AVAudioSessionCategoryPlayAndRecord; }
+    if (category == ma_ios_session_category_multi_route)     { return AVAudioSessionCategoryMultiRoute;    }
+
+    /* ambient, plus the (asserted-against) none/default and any unknown value, all map to Ambient. */
+    return AVAudioSessionCategoryAmbient;
+}
+#endif
+
+static ma_result ma_context_init__coreaudio(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks)
+{
+#if !defined(MA_APPLE_MOBILE)
+ ma_result result;
+#endif
+
+ MA_ASSERT(pConfig != NULL);
+ MA_ASSERT(pContext != NULL);
+
+#if defined(MA_APPLE_MOBILE)
+ @autoreleasepool {
+ AVAudioSession* pAudioSession = [AVAudioSession sharedInstance];
+ AVAudioSessionCategoryOptions options = pConfig->coreaudio.sessionCategoryOptions;
+
+ MA_ASSERT(pAudioSession != NULL);
+
+ if (pConfig->coreaudio.sessionCategory == ma_ios_session_category_default) {
+ /*
+ I'm going to use trial and error to determine our default session category. First we'll try PlayAndRecord. If that fails
+ we'll try Playback and if that fails we'll try record. If all of these fail we'll just not set the category.
+ */
+ #if !defined(MA_APPLE_TV) && !defined(MA_APPLE_WATCH)
+ options |= AVAudioSessionCategoryOptionDefaultToSpeaker;
+ #endif
+
+ if ([pAudioSession setCategory: AVAudioSessionCategoryPlayAndRecord withOptions:options error:nil]) {
+ /* Using PlayAndRecord */
+ } else if ([pAudioSession setCategory: AVAudioSessionCategoryPlayback withOptions:options error:nil]) {
+ /* Using Playback */
+ } else if ([pAudioSession setCategory: AVAudioSessionCategoryRecord withOptions:options error:nil]) {
+ /* Using Record */
+ } else {
+ /* Leave as default? */
+ }
+ } else {
+ if (pConfig->coreaudio.sessionCategory != ma_ios_session_category_none) {
+ #if defined(__IPHONE_12_0)
+ if (![pAudioSession setCategory: ma_to_AVAudioSessionCategory(pConfig->coreaudio.sessionCategory) withOptions:options error:nil]) {
+ return MA_INVALID_OPERATION; /* Failed to set session category. */
+ }
+ #else
+ /* Ignore the session category on version 11 and older, but post a warning. */
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_WARNING, "Session category only supported in iOS 12 and newer.");
+ #endif
+ }
+ }
+
+ if (!pConfig->coreaudio.noAudioSessionActivate) {
+ if (![pAudioSession setActive:true error:nil]) {
+ ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "Failed to activate audio session.");
+ return MA_FAILED_TO_INIT_BACKEND;
+ }
+ }
+ }
+#endif
+
+#if !defined(MA_NO_RUNTIME_LINKING) && !defined(MA_APPLE_MOBILE)
+ pContext->coreaudio.hCoreFoundation = ma_dlopen(pContext, "CoreFoundation.framework/CoreFoundation");
+ if (pContext->coreaudio.hCoreFoundation == NULL) {
+ return MA_API_NOT_FOUND;
+ }
+
+ pContext->coreaudio.CFStringGetCString = ma_dlsym(pContext, pContext->coreaudio.hCoreFoundation, "CFStringGetCString");
+ pContext->coreaudio.CFRelease = ma_dlsym(pContext, pContext->coreaudio.hCoreFoundation, "CFRelease");
+
+
+ pContext->coreaudio.hCoreAudio = ma_dlopen(pContext, "CoreAudio.framework/CoreAudio");
+ if (pContext->coreaudio.hCoreAudio == NULL) {
+ ma_dlclose(pContext, pContext->coreaudio.hCoreFoundation);
+ return MA_API_NOT_FOUND;
+ }
+
+ pContext->coreaudio.AudioObjectGetPropertyData = ma_dlsym(pContext, pContext->coreaudio.hCoreAudio, "AudioObjectGetPropertyData");
+ pContext->coreaudio.AudioObjectGetPropertyDataSize = ma_dlsym(pContext, pContext->coreaudio.hCoreAudio, "AudioObjectGetPropertyDataSize");
+ pContext->coreaudio.AudioObjectSetPropertyData = ma_dlsym(pContext, pContext->coreaudio.hCoreAudio, "AudioObjectSetPropertyData");
+ pContext->coreaudio.AudioObjectAddPropertyListener = ma_dlsym(pContext, pContext->coreaudio.hCoreAudio, "AudioObjectAddPropertyListener");
+ pContext->coreaudio.AudioObjectRemovePropertyListener = ma_dlsym(pContext, pContext->coreaudio.hCoreAudio, "AudioObjectRemovePropertyListener");
+
+ /*
+ It looks like Apple has moved some APIs from AudioUnit into AudioToolbox on more recent versions of macOS. They are still
+ defined in AudioUnit, but just in case they decide to remove them from there entirely I'm going to implement a fallback.
+ The way it'll work is that it'll first try AudioUnit, and if the required symbols are not present there we'll fall back to
+ AudioToolbox.
+ */
+ pContext->coreaudio.hAudioUnit = ma_dlopen(pContext, "AudioUnit.framework/AudioUnit");
+ if (pContext->coreaudio.hAudioUnit == NULL) {
+ ma_dlclose(pContext, pContext->coreaudio.hCoreAudio);
+ ma_dlclose(pContext, pContext->coreaudio.hCoreFoundation);
+ return MA_API_NOT_FOUND;
+ }
+
+ if (ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioComponentFindNext") == NULL) {
+ /* Couldn't find the required symbols in AudioUnit, so fall back to AudioToolbox. */
+ ma_dlclose(pContext, pContext->coreaudio.hAudioUnit);
+ pContext->coreaudio.hAudioUnit = ma_dlopen(pContext, "AudioToolbox.framework/AudioToolbox");
+ if (pContext->coreaudio.hAudioUnit == NULL) {
+ ma_dlclose(pContext, pContext->coreaudio.hCoreAudio);
+ ma_dlclose(pContext, pContext->coreaudio.hCoreFoundation);
+ return MA_API_NOT_FOUND;
+ }
+ }
+
+ pContext->coreaudio.AudioComponentFindNext = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioComponentFindNext");
+ pContext->coreaudio.AudioComponentInstanceDispose = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioComponentInstanceDispose");
+ pContext->coreaudio.AudioComponentInstanceNew = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioComponentInstanceNew");
+ pContext->coreaudio.AudioOutputUnitStart = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioOutputUnitStart");
+ pContext->coreaudio.AudioOutputUnitStop = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioOutputUnitStop");
+ pContext->coreaudio.AudioUnitAddPropertyListener = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioUnitAddPropertyListener");
+ pContext->coreaudio.AudioUnitGetPropertyInfo = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioUnitGetPropertyInfo");
+ pContext->coreaudio.AudioUnitGetProperty = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioUnitGetProperty");
+ pContext->coreaudio.AudioUnitSetProperty = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioUnitSetProperty");
+ pContext->coreaudio.AudioUnitInitialize = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioUnitInitialize");
+ pContext->coreaudio.AudioUnitRender = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioUnitRender");
+#else
+ pContext->coreaudio.CFStringGetCString = (ma_proc)CFStringGetCString;
+ pContext->coreaudio.CFRelease = (ma_proc)CFRelease;
+
+ #if defined(MA_APPLE_DESKTOP)
+ pContext->coreaudio.AudioObjectGetPropertyData = (ma_proc)AudioObjectGetPropertyData;
+ pContext->coreaudio.AudioObjectGetPropertyDataSize = (ma_proc)AudioObjectGetPropertyDataSize;
+ pContext->coreaudio.AudioObjectSetPropertyData = (ma_proc)AudioObjectSetPropertyData;
+ pContext->coreaudio.AudioObjectAddPropertyListener = (ma_proc)AudioObjectAddPropertyListener;
+ pContext->coreaudio.AudioObjectRemovePropertyListener = (ma_proc)AudioObjectRemovePropertyListener;
+ #endif
+
+ pContext->coreaudio.AudioComponentFindNext = (ma_proc)AudioComponentFindNext;
+ pContext->coreaudio.AudioComponentInstanceDispose = (ma_proc)AudioComponentInstanceDispose;
+ pContext->coreaudio.AudioComponentInstanceNew = (ma_proc)AudioComponentInstanceNew;
+ pContext->coreaudio.AudioOutputUnitStart = (ma_proc)AudioOutputUnitStart;
+ pContext->coreaudio.AudioOutputUnitStop = (ma_proc)AudioOutputUnitStop;
+ pContext->coreaudio.AudioUnitAddPropertyListener = (ma_proc)AudioUnitAddPropertyListener;
+ pContext->coreaudio.AudioUnitGetPropertyInfo = (ma_proc)AudioUnitGetPropertyInfo;
+ pContext->coreaudio.AudioUnitGetProperty = (ma_proc)AudioUnitGetProperty;
+ pContext->coreaudio.AudioUnitSetProperty = (ma_proc)AudioUnitSetProperty;
+ pContext->coreaudio.AudioUnitInitialize = (ma_proc)AudioUnitInitialize;
+ pContext->coreaudio.AudioUnitRender = (ma_proc)AudioUnitRender;
+#endif
+
+ /* Audio component. */
+ {
+ AudioComponentDescription desc;
+ desc.componentType = kAudioUnitType_Output;
+ #if defined(MA_APPLE_DESKTOP)
+ desc.componentSubType = kAudioUnitSubType_HALOutput;
+ #else
+ desc.componentSubType = kAudioUnitSubType_RemoteIO;
+ #endif
+ desc.componentManufacturer = kAudioUnitManufacturer_Apple;
+ desc.componentFlags = 0;
+ desc.componentFlagsMask = 0;
+
+ pContext->coreaudio.component = ((ma_AudioComponentFindNext_proc)pContext->coreaudio.AudioComponentFindNext)(NULL, &desc);
+ if (pContext->coreaudio.component == NULL) {
+ #if !defined(MA_NO_RUNTIME_LINKING) && !defined(MA_APPLE_MOBILE)
+ ma_dlclose(pContext, pContext->coreaudio.hAudioUnit);
+ ma_dlclose(pContext, pContext->coreaudio.hCoreAudio);
+ ma_dlclose(pContext, pContext->coreaudio.hCoreFoundation);
+ #endif
+ return MA_FAILED_TO_INIT_BACKEND;
+ }
+ }
+
+#if !defined(MA_APPLE_MOBILE)
+ result = ma_context__init_device_tracking__coreaudio(pContext);
+ if (result != MA_SUCCESS) {
+ #if !defined(MA_NO_RUNTIME_LINKING) && !defined(MA_APPLE_MOBILE)
+ ma_dlclose(pContext, pContext->coreaudio.hAudioUnit);
+ ma_dlclose(pContext, pContext->coreaudio.hCoreAudio);
+ ma_dlclose(pContext, pContext->coreaudio.hCoreFoundation);
+ #endif
+ return result;
+ }
+#endif
+
+ pContext->coreaudio.noAudioSessionDeactivate = pConfig->coreaudio.noAudioSessionDeactivate;
+
+ pCallbacks->onContextInit = ma_context_init__coreaudio;
+ pCallbacks->onContextUninit = ma_context_uninit__coreaudio;
+ pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__coreaudio;
+ pCallbacks->onContextGetDeviceInfo = ma_context_get_device_info__coreaudio;
+ pCallbacks->onDeviceInit = ma_device_init__coreaudio;
+ pCallbacks->onDeviceUninit = ma_device_uninit__coreaudio;
+ pCallbacks->onDeviceStart = ma_device_start__coreaudio;
+ pCallbacks->onDeviceStop = ma_device_stop__coreaudio;
+ pCallbacks->onDeviceRead = NULL;
+ pCallbacks->onDeviceWrite = NULL;
+ pCallbacks->onDeviceDataLoop = NULL;
+
+ return MA_SUCCESS;
+}
+#endif /* Core Audio */
+
+
+
+/******************************************************************************
+
+sndio Backend
+
+******************************************************************************/
+#ifdef MA_HAS_SNDIO
+#include <fcntl.h>
+
+/*
+Only supporting OpenBSD. This did not work very well at all on FreeBSD when I tried it. Not sure if this is due
+to miniaudio's implementation or if it's some kind of system configuration issue, but basically the default device
+just doesn't emit any sound, or at times you'll hear tiny pieces. I will consider enabling this when there's
+demand for it or if I can get it tested and debugged more thoroughly.
+*/
+#if 0
+#if defined(__NetBSD__) || defined(__OpenBSD__)
+#include <sys/audioio.h>
+#endif
+#if defined(__FreeBSD__) || defined(__DragonFly__)
+#include <sys/soundcard.h>
+#endif
+#endif
+
+#define MA_SIO_DEVANY "default"
+#define MA_SIO_PLAY 1
+#define MA_SIO_REC 2
+#define MA_SIO_NENC 8
+#define MA_SIO_NCHAN 8
+#define MA_SIO_NRATE 16
+#define MA_SIO_NCONF 4
+
+/*
+sndio is loaded at runtime (see ma_context_init__sndio), so the declarations below mirror the
+parts of sndio.h that miniaudio needs. These structs are passed directly to the dynamically
+loaded sio_*() functions, so their layout must remain binary-compatible with libsndio.
+*/
+struct ma_sio_hdl; /* <-- Opaque */
+
+/* Mirror of sndio's struct sio_par: stream parameters negotiated via sio_setpar()/sio_getpar(). */
+struct ma_sio_par
+{
+    unsigned int bits;      /* Bits per sample. */
+    unsigned int bps;       /* Bytes per sample. */
+    unsigned int sig;       /* 1 = signed, 0 = unsigned. */
+    unsigned int le;        /* 1 = little endian, 0 = big endian. */
+    unsigned int msb;       /* MSB alignment flag - see sio_par(3); only consulted by the format mapping. */
+    unsigned int rchan;     /* Capture (record) channel count. */
+    unsigned int pchan;     /* Playback channel count. */
+    unsigned int rate;      /* Sample rate in Hz. */
+    unsigned int bufsz;     /* Total buffer size in frames (unused by miniaudio). */
+    unsigned int xrun;      /* Under/overrun policy (unused by miniaudio). */
+    unsigned int round;     /* Block (period) size in frames. */
+    unsigned int appbufsz;  /* Application buffer size in frames; periods = appbufsz / round. */
+    int __pad[3];           /* Reserved padding required for ABI compatibility. */
+    unsigned int __magic;   /* Internal sndio consistency marker. */
+};
+
+/* Mirror of sndio's struct sio_enc: one supported sample encoding. */
+struct ma_sio_enc
+{
+    unsigned int bits;
+    unsigned int bps;
+    unsigned int sig;
+    unsigned int le;
+    unsigned int msb;
+};
+
+/* Mirror of sndio's struct sio_conf: bitmasks indexing into the tables in ma_sio_cap. */
+struct ma_sio_conf
+{
+    unsigned int enc;       /* Bitmask into ma_sio_cap.enc[]. */
+    unsigned int rchan;     /* Bitmask into ma_sio_cap.rchan[]. */
+    unsigned int pchan;     /* Bitmask into ma_sio_cap.pchan[]. */
+    unsigned int rate;      /* Bitmask into ma_sio_cap.rate[]. */
+};
+
+/* Mirror of sndio's struct sio_cap: device capabilities reported by sio_getcap(). */
+struct ma_sio_cap
+{
+    struct ma_sio_enc enc[MA_SIO_NENC];     /* Supported encodings. */
+    unsigned int rchan[MA_SIO_NCHAN];       /* Supported capture channel counts. */
+    unsigned int pchan[MA_SIO_NCHAN];       /* Supported playback channel counts. */
+    unsigned int rate[MA_SIO_NRATE];        /* Supported sample rates. */
+    int __pad[7];                           /* Reserved padding required for ABI compatibility. */
+    unsigned int nconf;                     /* Number of valid entries in confs[]. */
+    struct ma_sio_conf confs[MA_SIO_NCONF]; /* Valid encoding/channel/rate combinations. */
+};
+
+/* Function pointer types for the sio_*() API, resolved with ma_dlsym() in ma_context_init__sndio(). */
+typedef struct ma_sio_hdl* (* ma_sio_open_proc)   (const char*, unsigned int, int);
+typedef void               (* ma_sio_close_proc)  (struct ma_sio_hdl*);
+typedef int                (* ma_sio_setpar_proc) (struct ma_sio_hdl*, struct ma_sio_par*);
+typedef int                (* ma_sio_getpar_proc) (struct ma_sio_hdl*, struct ma_sio_par*);
+typedef int                (* ma_sio_getcap_proc) (struct ma_sio_hdl*, struct ma_sio_cap*);
+typedef size_t             (* ma_sio_write_proc)  (struct ma_sio_hdl*, const void*, size_t);
+typedef size_t             (* ma_sio_read_proc)   (struct ma_sio_hdl*, void*, size_t);
+typedef int                (* ma_sio_start_proc)  (struct ma_sio_hdl*);
+typedef int                (* ma_sio_stop_proc)   (struct ma_sio_hdl*);
+typedef int                (* ma_sio_initpar_proc)(struct ma_sio_par*);
+
+static ma_uint32 ma_get_standard_sample_rate_priority_index__sndio(ma_uint32 sampleRate) /* Lower = higher priority */
+{
+    /* Looks up sampleRate in the global priority table. Returns (ma_uint32)-1 if it is not a standard rate. */
+    ma_uint32 iRate;
+
+    for (iRate = 0; iRate < ma_countof(g_maStandardSampleRatePriorities); iRate += 1) {
+        if (sampleRate == g_maStandardSampleRatePriorities[iRate]) {
+            return iRate;
+        }
+    }
+
+    return (ma_uint32)-1;
+}
+
+static ma_format ma_format_from_sio_enc__sndio(unsigned int bits, unsigned int bps, unsigned int sig, unsigned int le, unsigned int msb)
+{
+    /*
+    Maps an sndio encoding description onto a miniaudio format. Anything that cannot be
+    represented maps to ma_format_unknown.
+    */
+
+    /* Only native-endian encodings are supported right now. */
+    if ((ma_is_little_endian() && le == 0) || (ma_is_big_endian() && le == 1)) {
+        return ma_format_unknown;
+    }
+
+    switch (bps) {
+        case 1:
+        {
+            if (bits == 8 && sig == 0) {
+                return ma_format_u8;
+            }
+        } break;
+
+        case 2:
+        {
+            if (bits == 16 && sig == 1) {
+                return ma_format_s16;
+            }
+        } break;
+
+        case 3:
+        {
+            if (bits == 24 && sig == 1) {
+                return ma_format_s24;
+            }
+        } break;
+
+        case 4:
+        {
+            /* 24-in-32 (bits == 24, msb == 0) is deliberately unmapped for now. */
+            if (bits == 24 && sig == 1 && msb == 0) {
+                /*return ma_format_s24_32;*/
+            }
+            if (bits == 32 && sig == 1) {
+                return ma_format_s32;
+            }
+        } break;
+
+        default: break;
+    }
+
+    return ma_format_unknown;
+}
+
+static ma_format ma_find_best_format_from_sio_cap__sndio(struct ma_sio_cap* caps)
+{
+    /*
+    Returns the highest-priority miniaudio format advertised by the device, or
+    ma_format_unknown if no supported encoding is found.
+    */
+    ma_format best;
+    unsigned int iConf;
+    unsigned int iEnc;
+
+    MA_ASSERT(caps != NULL);
+
+    best = ma_format_unknown;
+    for (iConf = 0; iConf < caps->nconf; iConf += 1) {
+        for (iEnc = 0; iEnc < MA_SIO_NENC; iEnc += 1) {
+            ma_format fmt;
+
+            /* Skip encodings that are not part of this configuration. */
+            if ((caps->confs[iConf].enc & (1UL << iEnc)) == 0) {
+                continue;
+            }
+
+            fmt = ma_format_from_sio_enc__sndio(caps->enc[iEnc].bits, caps->enc[iEnc].bps, caps->enc[iEnc].sig, caps->enc[iEnc].le, caps->enc[iEnc].msb);
+            if (fmt == ma_format_unknown) {
+                continue;   /* Format not supported by miniaudio. */
+            }
+
+            /* Lower priority index = better format. */
+            if (best == ma_format_unknown || ma_get_format_priority_index(best) > ma_get_format_priority_index(fmt)) {
+                best = fmt;
+            }
+        }
+    }
+
+    return best;
+}
+
+/*
+Returns the largest channel count advertised by the device among configurations whose
+encoding maps to requiredFormat. Returns 0 if no configuration supports the format.
+*/
+static ma_uint32 ma_find_best_channels_from_sio_cap__sndio(struct ma_sio_cap* caps, ma_device_type deviceType, ma_format requiredFormat)
+{
+    ma_uint32 maxChannels;
+    unsigned int iConfig;
+
+    MA_ASSERT(caps != NULL);
+    MA_ASSERT(requiredFormat != ma_format_unknown);
+
+    /* Just pick whatever configuration has the most channels. */
+    maxChannels = 0;
+    for (iConfig = 0; iConfig < caps->nconf; iConfig += 1) {
+        /* The encoding should be of requiredFormat. */
+        unsigned int iEncoding;
+        for (iEncoding = 0; iEncoding < MA_SIO_NENC; iEncoding += 1) {
+            unsigned int iChannel;
+            unsigned int bits;
+            unsigned int bps;
+            unsigned int sig;
+            unsigned int le;
+            unsigned int msb;
+            ma_format format;
+
+            /* Skip encodings that are not part of this configuration. */
+            if ((caps->confs[iConfig].enc & (1UL << iEncoding)) == 0) {
+                continue;
+            }
+
+            bits = caps->enc[iEncoding].bits;
+            bps = caps->enc[iEncoding].bps;
+            sig = caps->enc[iEncoding].sig;
+            le = caps->enc[iEncoding].le;
+            msb = caps->enc[iEncoding].msb;
+            format = ma_format_from_sio_enc__sndio(bits, bps, sig, le, msb);
+            if (format != requiredFormat) {
+                continue;
+            }
+
+            /* Getting here means the format is supported. Iterate over each channel count and grab the biggest one. */
+            for (iChannel = 0; iChannel < MA_SIO_NCHAN; iChannel += 1) {
+                unsigned int chan = 0;
+                unsigned int channels;
+
+                /* chan is a bitmask into the pchan/rchan tables, direction-dependent. */
+                if (deviceType == ma_device_type_playback) {
+                    chan = caps->confs[iConfig].pchan;
+                } else {
+                    chan = caps->confs[iConfig].rchan;
+                }
+
+                if ((chan & (1UL << iChannel)) == 0) {
+                    continue;
+                }
+
+                if (deviceType == ma_device_type_playback) {
+                    channels = caps->pchan[iChannel];
+                } else {
+                    channels = caps->rchan[iChannel];
+                }
+
+                if (maxChannels < channels) {
+                    maxChannels = channels;
+                }
+            }
+        }
+    }
+
+    return maxChannels;
+}
+
+/*
+Returns the best (highest-priority standard) sample rate supported by the device for the
+given format/channel pair. If none of the advertised rates is a standard rate, falls back
+to the first rate that was iterated; returns 0 only if nothing matched at all.
+*/
+static ma_uint32 ma_find_best_sample_rate_from_sio_cap__sndio(struct ma_sio_cap* caps, ma_device_type deviceType, ma_format requiredFormat, ma_uint32 requiredChannels)
+{
+    ma_uint32 firstSampleRate;
+    ma_uint32 bestSampleRate;
+    unsigned int iConfig;
+
+    MA_ASSERT(caps != NULL);
+    MA_ASSERT(requiredFormat != ma_format_unknown);
+    MA_ASSERT(requiredChannels > 0);
+    MA_ASSERT(requiredChannels <= MA_MAX_CHANNELS);
+
+    firstSampleRate = 0; /* <-- If the device does not support a standard rate we'll fall back to the first one that's found. */
+    bestSampleRate  = 0;
+
+    for (iConfig = 0; iConfig < caps->nconf; iConfig += 1) {
+        /* The encoding should be of requiredFormat. */
+        unsigned int iEncoding;
+        for (iEncoding = 0; iEncoding < MA_SIO_NENC; iEncoding += 1) {
+            unsigned int iChannel;
+            unsigned int bits;
+            unsigned int bps;
+            unsigned int sig;
+            unsigned int le;
+            unsigned int msb;
+            ma_format format;
+
+            /* Skip encodings that are not part of this configuration. */
+            if ((caps->confs[iConfig].enc & (1UL << iEncoding)) == 0) {
+                continue;
+            }
+
+            bits = caps->enc[iEncoding].bits;
+            bps = caps->enc[iEncoding].bps;
+            sig = caps->enc[iEncoding].sig;
+            le = caps->enc[iEncoding].le;
+            msb = caps->enc[iEncoding].msb;
+            format = ma_format_from_sio_enc__sndio(bits, bps, sig, le, msb);
+            if (format != requiredFormat) {
+                continue;
+            }
+
+            /* Getting here means the format is supported. Iterate over each channel count and grab the biggest one. */
+            for (iChannel = 0; iChannel < MA_SIO_NCHAN; iChannel += 1) {
+                unsigned int chan = 0;
+                unsigned int channels;
+                unsigned int iRate;
+
+                if (deviceType == ma_device_type_playback) {
+                    chan = caps->confs[iConfig].pchan;
+                } else {
+                    chan = caps->confs[iConfig].rchan;
+                }
+
+                if ((chan & (1UL << iChannel)) == 0) {
+                    continue;
+                }
+
+                if (deviceType == ma_device_type_playback) {
+                    channels = caps->pchan[iChannel];
+                } else {
+                    channels = caps->rchan[iChannel];
+                }
+
+                if (channels != requiredChannels) {
+                    continue;
+                }
+
+                /* Getting here means we have found a compatible encoding/channel pair. */
+                /* NOTE(review): unlike ma_context_get_device_info__sndio, the confs[iConfig].rate
+                bitmask is not consulted here - every entry of caps->rate[] is considered. Confirm
+                this is intended. */
+                for (iRate = 0; iRate < MA_SIO_NRATE; iRate += 1) {
+                    ma_uint32 rate = (ma_uint32)caps->rate[iRate];
+                    ma_uint32 ratePriority;
+
+                    if (firstSampleRate == 0) {
+                        firstSampleRate = rate;
+                    }
+
+                    /* Disregard this rate if it's not a standard one. */
+                    ratePriority = ma_get_standard_sample_rate_priority_index__sndio(rate);
+                    if (ratePriority == (ma_uint32)-1) {
+                        continue;
+                    }
+
+                    /* When bestSampleRate is still 0 its priority lookup returns (ma_uint32)-1, so any standard rate wins. */
+                    if (ma_get_standard_sample_rate_priority_index__sndio(bestSampleRate) > ratePriority) { /* Lower = better. */
+                        bestSampleRate = rate;
+                    }
+                }
+            }
+        }
+    }
+
+    /* If a standard sample rate was not found just fall back to the first one that was iterated. */
+    if (bestSampleRate == 0) {
+        bestSampleRate = firstSampleRate;
+    }
+
+    return bestSampleRate;
+}
+
+
+/*
+Enumerates sndio devices. sndio has no device enumeration API, so only the default
+playback and capture devices are reported; each direction is probed by opening it.
+The callback returning MA_FALSE terminates enumeration early.
+*/
+static ma_result ma_context_enumerate_devices__sndio(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+{
+    ma_bool32 isTerminating = MA_FALSE;
+    struct ma_sio_hdl* handle;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(callback != NULL);
+
+    /* Playback. */
+    if (!isTerminating) {
+        handle = ((ma_sio_open_proc)pContext->sndio.sio_open)(MA_SIO_DEVANY, MA_SIO_PLAY, 0);
+        if (handle != NULL) {
+            /* Supports playback. */
+            ma_device_info deviceInfo;
+            MA_ZERO_OBJECT(&deviceInfo);
+            ma_strcpy_s(deviceInfo.id.sndio, sizeof(deviceInfo.id.sndio), MA_SIO_DEVANY);
+            ma_strcpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME);
+
+            isTerminating = !callback(pContext, ma_device_type_playback, &deviceInfo, pUserData);
+
+            ((ma_sio_close_proc)pContext->sndio.sio_close)(handle);
+        }
+    }
+
+    /* Capture. */
+    if (!isTerminating) {
+        handle = ((ma_sio_open_proc)pContext->sndio.sio_open)(MA_SIO_DEVANY, MA_SIO_REC, 0);
+        if (handle != NULL) {
+            /* Supports capture. Use MA_SIO_DEVANY for the ID, consistent with the playback path above. */
+            ma_device_info deviceInfo;
+            MA_ZERO_OBJECT(&deviceInfo);
+            ma_strcpy_s(deviceInfo.id.sndio, sizeof(deviceInfo.id.sndio), MA_SIO_DEVANY);
+            ma_strcpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME);
+
+            isTerminating = !callback(pContext, ma_device_type_capture, &deviceInfo, pUserData);
+
+            ((ma_sio_close_proc)pContext->sndio.sio_close)(handle);
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+/*
+Retrieves the native data formats for an sndio device. The device is opened, its
+capabilities queried with sio_getcap(), and every supported (format, channels, rate)
+combination is appended to pDeviceInfo. Returns MA_NO_DEVICE if the device can't be opened.
+*/
+static ma_result ma_context_get_device_info__sndio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo)
+{
+    char devid[256];
+    struct ma_sio_hdl* handle;
+    struct ma_sio_cap caps;
+    unsigned int iConfig;
+
+    MA_ASSERT(pContext != NULL);
+
+    /* We need to open the device before we can get information about it. */
+    if (pDeviceID == NULL) {
+        ma_strcpy_s(devid, sizeof(devid), MA_SIO_DEVANY);
+        ma_strcpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), (deviceType == ma_device_type_playback) ? MA_DEFAULT_PLAYBACK_DEVICE_NAME : MA_DEFAULT_CAPTURE_DEVICE_NAME);
+    } else {
+        ma_strcpy_s(devid, sizeof(devid), pDeviceID->sndio);
+        ma_strcpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), devid);
+    }
+
+    handle = ((ma_sio_open_proc)pContext->sndio.sio_open)(devid, (deviceType == ma_device_type_playback) ? MA_SIO_PLAY : MA_SIO_REC, 0);
+    if (handle == NULL) {
+        return MA_NO_DEVICE;
+    }
+
+    if (((ma_sio_getcap_proc)pContext->sndio.sio_getcap)(handle, &caps) == 0) {
+        ((ma_sio_close_proc)pContext->sndio.sio_close)(handle);  /* <-- Fix: the handle was previously leaked on this failure path. */
+        return MA_ERROR;
+    }
+
+    pDeviceInfo->nativeDataFormatCount = 0;
+
+    for (iConfig = 0; iConfig < caps.nconf; iConfig += 1) {
+        /*
+        The main thing we care about is that the encoding is supported by miniaudio. If it is, we want to give
+        preference to some formats over others.
+        */
+        unsigned int iEncoding;
+        unsigned int iChannel;
+        unsigned int iRate;
+
+        for (iEncoding = 0; iEncoding < MA_SIO_NENC; iEncoding += 1) {
+            unsigned int bits;
+            unsigned int bps;
+            unsigned int sig;
+            unsigned int le;
+            unsigned int msb;
+            ma_format format;
+
+            /* Skip encodings that are not part of this configuration. */
+            if ((caps.confs[iConfig].enc & (1UL << iEncoding)) == 0) {
+                continue;
+            }
+
+            bits = caps.enc[iEncoding].bits;
+            bps = caps.enc[iEncoding].bps;
+            sig = caps.enc[iEncoding].sig;
+            le = caps.enc[iEncoding].le;
+            msb = caps.enc[iEncoding].msb;
+            format = ma_format_from_sio_enc__sndio(bits, bps, sig, le, msb);
+            if (format == ma_format_unknown) {
+                continue; /* Format not supported. */
+            }
+
+            /* Channels. */
+            for (iChannel = 0; iChannel < MA_SIO_NCHAN; iChannel += 1) {
+                unsigned int chan = 0;
+                unsigned int channels;
+
+                if (deviceType == ma_device_type_playback) {
+                    chan = caps.confs[iConfig].pchan;
+                } else {
+                    chan = caps.confs[iConfig].rchan;
+                }
+
+                if ((chan & (1UL << iChannel)) == 0) {
+                    continue;
+                }
+
+                if (deviceType == ma_device_type_playback) {
+                    channels = caps.pchan[iChannel];
+                } else {
+                    channels = caps.rchan[iChannel];
+                }
+
+                /* Sample Rates. */
+                for (iRate = 0; iRate < MA_SIO_NRATE; iRate += 1) {
+                    if ((caps.confs[iConfig].rate & (1UL << iRate)) != 0) {
+                        ma_device_info_add_native_data_format(pDeviceInfo, format, channels, caps.rate[iRate], 0);
+                    }
+                }
+            }
+        }
+    }
+
+    ((ma_sio_close_proc)pContext->sndio.sio_close)(handle);
+    return MA_SUCCESS;
+}
+
+/* Closes the sndio handle(s) owned by the device. */
+static ma_result ma_device_uninit__sndio(ma_device* pDevice)
+{
+    MA_ASSERT(pDevice != NULL);
+
+    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+        ((ma_sio_close_proc)pDevice->pContext->sndio.sio_close)((struct ma_sio_hdl*)pDevice->sndio.handleCapture);
+    }
+
+    /*
+    Fix: this branch previously tested for ma_device_type_capture (a copy-paste of the branch
+    above), which leaked the playback handle on playback-only devices and closed an invalid
+    handle on capture-only devices. It must test for playback, matching ma_device_start__sndio.
+    */
+    if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+        ((ma_sio_close_proc)pDevice->pContext->sndio.sio_close)((struct ma_sio_hdl*)pDevice->sndio.handlePlayback);
+    }
+
+    return MA_SUCCESS;
+}
+
+/*
+Opens and configures a single sndio stream for one direction (capture OR playback; duplex
+devices call this twice via ma_device_init__sndio). On success the opened handle is stored
+in pDevice->sndio and pDescriptor is updated with the format/channels/rate/period values
+the device actually accepted.
+*/
+static ma_result ma_device_init_handle__sndio(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptor, ma_device_type deviceType)
+{
+    const char* pDeviceName;
+    ma_ptr handle;
+    int openFlags = 0;
+    struct ma_sio_cap caps;
+    struct ma_sio_par par;
+    const ma_device_id* pDeviceID;
+    ma_format format;
+    ma_uint32 channels;
+    ma_uint32 sampleRate;
+    ma_format internalFormat;
+    ma_uint32 internalChannels;
+    ma_uint32 internalSampleRate;
+    ma_uint32 internalPeriodSizeInFrames;
+    ma_uint32 internalPeriods;
+
+    MA_ASSERT(pConfig != NULL);
+    MA_ASSERT(deviceType != ma_device_type_duplex);  /* Each direction is initialized separately. */
+    MA_ASSERT(pDevice != NULL);
+
+    if (deviceType == ma_device_type_capture) {
+        openFlags = MA_SIO_REC;
+    } else {
+        openFlags = MA_SIO_PLAY;
+    }
+
+    /* The descriptor carries the caller's requested parameters; 0/unknown means "pick for me". */
+    pDeviceID = pDescriptor->pDeviceID;
+    format = pDescriptor->format;
+    channels = pDescriptor->channels;
+    sampleRate = pDescriptor->sampleRate;
+
+    pDeviceName = MA_SIO_DEVANY;
+    if (pDeviceID != NULL) {
+        pDeviceName = pDeviceID->sndio;
+    }
+
+    handle = (ma_ptr)((ma_sio_open_proc)pDevice->pContext->sndio.sio_open)(pDeviceName, openFlags, 0);
+    if (handle == NULL) {
+        ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[sndio] Failed to open device.");
+        return MA_FAILED_TO_OPEN_BACKEND_DEVICE;
+    }
+
+    /* We need to retrieve the device caps to determine the most appropriate format to use. */
+    if (((ma_sio_getcap_proc)pDevice->pContext->sndio.sio_getcap)((struct ma_sio_hdl*)handle, &caps) == 0) {
+        ((ma_sio_close_proc)pDevice->pContext->sndio.sio_close)((struct ma_sio_hdl*)handle);
+        ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[sndio] Failed to retrieve device caps.");
+        return MA_ERROR;
+    }
+
+    /*
+    Note: sndio reports a huge range of available channels. This is inconvenient for us because there's no real
+    way, as far as I can tell, to get the _actual_ channel count of the device. I'm therefore restricting this
+    to the requested channels, regardless of whether or not the default channel count is requested.
+
+    For hardware devices, I'm suspecting only a single channel count will be reported and we can safely use the
+    value returned by ma_find_best_channels_from_sio_cap__sndio().
+    */
+    /* NOTE(review): the capture and playback branches below are currently identical - presumably
+    kept separate for future divergence; confirm before merging them. */
+    if (deviceType == ma_device_type_capture) {
+        if (format == ma_format_unknown) {
+            format = ma_find_best_format_from_sio_cap__sndio(&caps);
+        }
+
+        /* "rsnd/N" names are raw hardware devices; trust their reported channel counts. */
+        if (channels == 0) {
+            if (strlen(pDeviceName) > strlen("rsnd/") && strncmp(pDeviceName, "rsnd/", strlen("rsnd/")) == 0) {
+                channels = ma_find_best_channels_from_sio_cap__sndio(&caps, deviceType, format);
+            } else {
+                channels = MA_DEFAULT_CHANNELS;
+            }
+        }
+    } else {
+        if (format == ma_format_unknown) {
+            format = ma_find_best_format_from_sio_cap__sndio(&caps);
+        }
+
+        if (channels == 0) {
+            if (strlen(pDeviceName) > strlen("rsnd/") && strncmp(pDeviceName, "rsnd/", strlen("rsnd/")) == 0) {
+                channels = ma_find_best_channels_from_sio_cap__sndio(&caps, deviceType, format);
+            } else {
+                channels = MA_DEFAULT_CHANNELS;
+            }
+        }
+    }
+
+    if (sampleRate == 0) {
+        /* NOTE(review): this passes pConfig->deviceType (which may be ma_device_type_duplex)
+        rather than the per-direction deviceType used elsewhere in this function - confirm intended. */
+        sampleRate = ma_find_best_sample_rate_from_sio_cap__sndio(&caps, pConfig->deviceType, format, channels);
+    }
+
+    /* Fill out the requested parameters and negotiate them with sio_setpar()/sio_getpar(). */
+    ((ma_sio_initpar_proc)pDevice->pContext->sndio.sio_initpar)(&par);
+    par.msb = 0;
+    par.le = ma_is_little_endian();
+
+    switch (format) {
+        case ma_format_u8:
+        {
+            par.bits = 8;
+            par.bps = 1;
+            par.sig = 0;
+        } break;
+
+        case ma_format_s24:
+        {
+            par.bits = 24;
+            par.bps = 3;
+            par.sig = 1;
+        } break;
+
+        case ma_format_s32:
+        {
+            par.bits = 32;
+            par.bps = 4;
+            par.sig = 1;
+        } break;
+
+        case ma_format_s16:
+        case ma_format_f32:
+        case ma_format_unknown:
+        default:
+        {
+            /* s16 is the fallback for anything sndio can't represent directly (f32, unknown). */
+            par.bits = 16;
+            par.bps = 2;
+            par.sig = 1;
+        } break;
+    }
+
+    if (deviceType == ma_device_type_capture) {
+        par.rchan = channels;
+    } else {
+        par.pchan = channels;
+    }
+
+    par.rate = sampleRate;
+
+    internalPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_descriptor(pDescriptor, par.rate, pConfig->performanceProfile);
+
+    /* round = period size; appbufsz = total buffer, so periods = appbufsz / round. */
+    par.round = internalPeriodSizeInFrames;
+    par.appbufsz = par.round * pDescriptor->periodCount;
+
+    if (((ma_sio_setpar_proc)pDevice->pContext->sndio.sio_setpar)((struct ma_sio_hdl*)handle, &par) == 0) {
+        ((ma_sio_close_proc)pDevice->pContext->sndio.sio_close)((struct ma_sio_hdl*)handle);
+        ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[sndio] Failed to set buffer size.");
+        return MA_ERROR;
+    }
+
+    /* sio_getpar() reports what the device actually accepted, which may differ from the request. */
+    if (((ma_sio_getpar_proc)pDevice->pContext->sndio.sio_getpar)((struct ma_sio_hdl*)handle, &par) == 0) {
+        ((ma_sio_close_proc)pDevice->pContext->sndio.sio_close)((struct ma_sio_hdl*)handle);
+        ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[sndio] Failed to retrieve buffer size.");
+        return MA_ERROR;
+    }
+
+    internalFormat = ma_format_from_sio_enc__sndio(par.bits, par.bps, par.sig, par.le, par.msb);
+    internalChannels = (deviceType == ma_device_type_capture) ? par.rchan : par.pchan;
+    internalSampleRate = par.rate;
+    internalPeriods = par.appbufsz / par.round;
+    internalPeriodSizeInFrames = par.round;
+
+    if (deviceType == ma_device_type_capture) {
+        pDevice->sndio.handleCapture = handle;
+    } else {
+        pDevice->sndio.handlePlayback = handle;
+    }
+
+    /* Report the negotiated parameters back to the caller. */
+    pDescriptor->format = internalFormat;
+    pDescriptor->channels = internalChannels;
+    pDescriptor->sampleRate = internalSampleRate;
+    ma_channel_map_init_standard(ma_standard_channel_map_sndio, pDescriptor->channelMap, ma_countof(pDescriptor->channelMap), internalChannels);
+    pDescriptor->periodSizeInFrames = internalPeriodSizeInFrames;
+    pDescriptor->periodCount = internalPeriods;
+
+    return MA_SUCCESS;
+}
+
+/* Initializes an sndio device: opens a capture handle, a playback handle, or both for duplex. */
+static ma_result ma_device_init__sndio(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture)
+{
+    ma_result result;
+
+    MA_ASSERT(pDevice != NULL);
+
+    MA_ZERO_OBJECT(&pDevice->sndio);
+
+    /* sndio has no loopback concept. */
+    if (pConfig->deviceType == ma_device_type_loopback) {
+        return MA_DEVICE_TYPE_NOT_SUPPORTED;
+    }
+
+    if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
+        result = ma_device_init_handle__sndio(pDevice, pConfig, pDescriptorCapture, ma_device_type_capture);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+    }
+
+    if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+        result = ma_device_init_handle__sndio(pDevice, pConfig, pDescriptorPlayback, ma_device_type_playback);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+/* Starts the sndio stream(s). Capture starts immediately; playback begins once data is written. */
+static ma_result ma_device_start__sndio(ma_device* pDevice)
+{
+    ma_sio_start_proc onStart;
+
+    MA_ASSERT(pDevice != NULL);
+
+    onStart = (ma_sio_start_proc)pDevice->pContext->sndio.sio_start;
+
+    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+        onStart((struct ma_sio_hdl*)pDevice->sndio.handleCapture);
+    }
+
+    if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+        onStart((struct ma_sio_hdl*)pDevice->sndio.handlePlayback); /* <-- Doesn't actually playback until data is written. */
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_device_stop__sndio(ma_device* pDevice)
+{
+    ma_sio_stop_proc onStop;
+
+    MA_ASSERT(pDevice != NULL);
+
+    /*
+    From the documentation:
+
+        The sio_stop() function puts the audio subsystem in the same state as before sio_start() is called. It stops recording, drains the play buffer and then
+        stops playback. If samples to play are queued but playback hasn't started yet then playback is forced immediately; playback will actually stop once the
+        buffer is drained. In no case are samples in the play buffer discarded.
+
+    Therefore, sio_stop() performs all of the necessary draining for us.
+    */
+    onStop = (ma_sio_stop_proc)pDevice->pContext->sndio.sio_stop;
+
+    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+        onStop((struct ma_sio_hdl*)pDevice->sndio.handleCapture);
+    }
+
+    if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+        onStop((struct ma_sio_hdl*)pDevice->sndio.handlePlayback);
+    }
+
+    return MA_SUCCESS;
+}
+
+/*
+Writes frameCount frames to the playback stream. sio_write() blocks until everything has been
+written (or an error occurs), so on success the full frameCount is reported as written.
+*/
+static ma_result ma_device_write__sndio(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten)
+{
+    size_t bytesWritten;   /* <-- Fix: sio_write() returns size_t; storing it in an int truncated/misinterpreted the value. */
+
+    if (pFramesWritten != NULL) {
+        *pFramesWritten = 0;
+    }
+
+    /* sio_write() returns the number of bytes written; 0 indicates an error. */
+    bytesWritten = ((ma_sio_write_proc)pDevice->pContext->sndio.sio_write)((struct ma_sio_hdl*)pDevice->sndio.handlePlayback, pPCMFrames, frameCount * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels));
+    if (bytesWritten == 0) {
+        ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[sndio] Failed to send data from the client to the device.");
+        return MA_IO_ERROR;
+    }
+
+    if (pFramesWritten != NULL) {
+        *pFramesWritten = frameCount;
+    }
+
+    return MA_SUCCESS;
+}
+
+/*
+Reads frameCount frames from the capture stream. sio_read() blocks until data is available,
+so on success the full frameCount is reported as read.
+*/
+static ma_result ma_device_read__sndio(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead)
+{
+    size_t bytesRead;   /* <-- Fix: sio_read() returns size_t; storing it in an int truncated/misinterpreted the value. */
+
+    if (pFramesRead != NULL) {
+        *pFramesRead = 0;
+    }
+
+    /* sio_read() returns the number of bytes read; 0 indicates an error. */
+    bytesRead = ((ma_sio_read_proc)pDevice->pContext->sndio.sio_read)((struct ma_sio_hdl*)pDevice->sndio.handleCapture, pPCMFrames, frameCount * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels));
+    if (bytesRead == 0) {
+        ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[sndio] Failed to read data from the device to be sent to the client.");  /* <-- Fix: message previously said "to the device". */
+        return MA_IO_ERROR;
+    }
+
+    if (pFramesRead != NULL) {
+        *pFramesRead = frameCount;
+    }
+
+    return MA_SUCCESS;
+}
+
+/* Tears down the sndio context. Nothing is allocated per-context, so this is a no-op. */
+static ma_result ma_context_uninit__sndio(ma_context* pContext)
+{
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pContext->backend == ma_backend_sndio);
+
+    /* NOTE(review): when runtime linking is used, the sndioSO handle opened in
+    ma_context_init__sndio() is not ma_dlclose()'d here - confirm whether keeping the
+    library loaded for the process lifetime is intentional. */
+    (void)pContext;
+    return MA_SUCCESS;
+}
+
+/*
+Initializes the sndio backend context. With runtime linking enabled (the default), libsndio
+is dlopen()'d and each sio_*() symbol is resolved with ma_dlsym(); otherwise the symbols are
+taken directly from compile-time linkage. On success the backend callback table is filled in.
+*/
+static ma_result ma_context_init__sndio(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks)
+{
+#ifndef MA_NO_RUNTIME_LINKING
+    /* Candidate shared object names to try in order. */
+    const char* libsndioNames[] = {
+        "libsndio.so"
+    };
+    size_t i;
+
+    for (i = 0; i < ma_countof(libsndioNames); ++i) {
+        pContext->sndio.sndioSO = ma_dlopen(pContext, libsndioNames[i]);
+        if (pContext->sndio.sndioSO != NULL) {
+            break;
+        }
+    }
+
+    if (pContext->sndio.sndioSO == NULL) {
+        return MA_NO_BACKEND;   /* libsndio is not installed. */
+    }
+
+    /* Resolve the full sio_*() API from the loaded library. */
+    pContext->sndio.sio_open    = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_open");
+    pContext->sndio.sio_close   = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_close");
+    pContext->sndio.sio_setpar  = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_setpar");
+    pContext->sndio.sio_getpar  = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_getpar");
+    pContext->sndio.sio_getcap  = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_getcap");
+    pContext->sndio.sio_write   = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_write");
+    pContext->sndio.sio_read    = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_read");
+    pContext->sndio.sio_start   = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_start");
+    pContext->sndio.sio_stop    = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_stop");
+    pContext->sndio.sio_initpar = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_initpar");
+#else
+    /* Compile-time linking: take the symbols directly. */
+    pContext->sndio.sio_open    = sio_open;
+    pContext->sndio.sio_close   = sio_close;
+    pContext->sndio.sio_setpar  = sio_setpar;
+    pContext->sndio.sio_getpar  = sio_getpar;
+    pContext->sndio.sio_getcap  = sio_getcap;
+    pContext->sndio.sio_write   = sio_write;
+    pContext->sndio.sio_read    = sio_read;
+    pContext->sndio.sio_start   = sio_start;
+    pContext->sndio.sio_stop    = sio_stop;
+    pContext->sndio.sio_initpar = sio_initpar;
+#endif
+
+    /* Register the backend callbacks. sndio uses the blocking read/write model, so there is no data loop. */
+    pCallbacks->onContextInit             = ma_context_init__sndio;
+    pCallbacks->onContextUninit           = ma_context_uninit__sndio;
+    pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__sndio;
+    pCallbacks->onContextGetDeviceInfo    = ma_context_get_device_info__sndio;
+    pCallbacks->onDeviceInit              = ma_device_init__sndio;
+    pCallbacks->onDeviceUninit            = ma_device_uninit__sndio;
+    pCallbacks->onDeviceStart             = ma_device_start__sndio;
+    pCallbacks->onDeviceStop              = ma_device_stop__sndio;
+    pCallbacks->onDeviceRead              = ma_device_read__sndio;
+    pCallbacks->onDeviceWrite             = ma_device_write__sndio;
+    pCallbacks->onDeviceDataLoop          = NULL;
+
+    (void)pConfig;
+    return MA_SUCCESS;
+}
+#endif /* sndio */
+
+
+
+/******************************************************************************
+
+audio(4) Backend
+
+******************************************************************************/
+#ifdef MA_HAS_AUDIO4
+#include <fcntl.h>
+#include <poll.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <sys/audioio.h>
+
+#if defined(__OpenBSD__)
+ #include <sys/param.h>
+ #if defined(OpenBSD) && OpenBSD >= 201709
+ #define MA_AUDIO4_USE_NEW_API
+ #endif
+#endif
+
+/* Builds "<base><deviceIndex>" into id (e.g. "/dev/audio" + 0 -> "/dev/audio0"). idSize must exceed strlen(base). */
+static void ma_construct_device_id__audio4(char* id, size_t idSize, const char* base, int deviceIndex)
+{
+    size_t prefixLen;
+
+    MA_ASSERT(id != NULL);
+    MA_ASSERT(idSize > 0);
+    MA_ASSERT(deviceIndex >= 0);
+
+    prefixLen = strlen(base);
+    MA_ASSERT(idSize > prefixLen);
+
+    /* Copy the base, then append the decimal index immediately after it. */
+    ma_strcpy_s(id, idSize, base);
+    ma_itoa_s(deviceIndex, id + prefixLen, idSize - prefixLen, 10);
+}
+
+/* Parses the numeric suffix out of an id of the form "<base><index>". Returns MA_ERROR if id does not match. */
+static ma_result ma_extract_device_index_from_id__audio4(const char* id, const char* base, int* pIndexOut)
+{
+    size_t baseLen;
+    const char* pSuffix;
+
+    MA_ASSERT(id != NULL);
+    MA_ASSERT(base != NULL);
+    MA_ASSERT(pIndexOut != NULL);
+
+    /* The id must be strictly longer than the base and must begin with it. */
+    baseLen = strlen(base);
+    if (strlen(id) <= baseLen || strncmp(id, base, baseLen) != 0) {
+        return MA_ERROR;
+    }
+
+    pSuffix = id + baseLen;
+    if (pSuffix[0] == '\0') {
+        return MA_ERROR; /* No index specified in the ID. */
+    }
+
+    if (pIndexOut) {
+        *pIndexOut = atoi(pSuffix);
+    }
+
+    return MA_SUCCESS;
+}
+
+
+#if !defined(MA_AUDIO4_USE_NEW_API) /* Old API */
+/*
+Converts an audio(4) encoding/precision pair to a miniaudio format. Only 8-bit unsigned
+linear and native-endian signed linear (16/24/32-bit) are supported; everything else maps
+to ma_format_unknown.
+*/
+static ma_format ma_format_from_encoding__audio4(unsigned int encoding, unsigned int precision)
+{
+    /* Fix: AUDIO_ENCODING_ULINEAR was previously tested twice in this condition; the duplicate has been removed. */
+    if (precision == 8 && (encoding == AUDIO_ENCODING_ULINEAR || encoding == AUDIO_ENCODING_ULINEAR_LE || encoding == AUDIO_ENCODING_ULINEAR_BE)) {
+        return ma_format_u8;
+    } else {
+        if (ma_is_little_endian() && encoding == AUDIO_ENCODING_SLINEAR_LE) {
+            if (precision == 16) {
+                return ma_format_s16;
+            } else if (precision == 24) {
+                return ma_format_s24;
+            } else if (precision == 32) {
+                return ma_format_s32;
+            }
+        } else if (ma_is_big_endian() && encoding == AUDIO_ENCODING_SLINEAR_BE) {
+            if (precision == 16) {
+                return ma_format_s16;
+            } else if (precision == 24) {
+                return ma_format_s24;
+            } else if (precision == 32) {
+                return ma_format_s32;
+            }
+        }
+    }
+
+    return ma_format_unknown; /* Encoding not supported. */
+}
+
+static void ma_encoding_from_format__audio4(ma_format format, unsigned int* pEncoding, unsigned int* pPrecision)
+{
+ MA_ASSERT(pEncoding != NULL);
+ MA_ASSERT(pPrecision != NULL);
+
+ switch (format)
+ {
+ case ma_format_u8: