From f17b92e32e2edae3692d5ea87844fbdf4a779952 Mon Sep 17 00:00:00 2001 From: hgn Date: Sun, 18 Jul 2021 12:52:29 +0100 Subject: [PATCH] init --- .gitignore | 46 + build.sh | 57 + dr_soft/miniaudio.h | 42923 ++++++++++++++++++++++++++++++++++++++ fishladder.c | 29 + gl/KHR/khrplatform.h | 290 + gl/glad.c | 3814 ++++ gl/glad/glad.h | 2509 +++ gl/glfw3.h | 5873 ++++++ gl/glfw3native.h | 525 + stb/stb_vorbis.h | 5558 +++++ steam/steamworks_thin.h | 848 + vg/config.h | 17 + vg/vg.c | 0 vg/vg.h | 346 + vg/vg_audio.h | 693 + vg/vg_io.h | 105 + vg/vg_platform.h | 91 + 17 files changed, 63724 insertions(+) create mode 100755 .gitignore create mode 100755 build.sh create mode 100644 dr_soft/miniaudio.h create mode 100644 fishladder.c create mode 100644 gl/KHR/khrplatform.h create mode 100644 gl/glad.c create mode 100644 gl/glad/glad.h create mode 100755 gl/glfw3.h create mode 100755 gl/glfw3native.h create mode 100644 stb/stb_vorbis.h create mode 100644 steam/steamworks_thin.h create mode 100644 vg/config.h create mode 100644 vg/vg.c create mode 100644 vg/vg.h create mode 100644 vg/vg_audio.h create mode 100644 vg/vg_io.h create mode 100644 vg/vg_platform.h diff --git a/.gitignore b/.gitignore new file mode 100755 index 0000000..7913c0a --- /dev/null +++ b/.gitignore @@ -0,0 +1,46 @@ +# Gitignore for MMV project. +# Whitelist mode + +# Ignore all but directories +* +!*/ + +./build.linux + +# ALLOW ============================ +!.gitattributes +!.gitignore + +# Code sources _____________________ +# C source files +!*.c +!*.h + +# Blender projects +!*.blend + +# GLSL shader source files +!*.fs +!*.vs +!*.gls + +# Python source files +!*.py + +# Build scripts +!*.sh +!*.bat + +# Compiled resources _______________ +# MMV proprietary files +!*.vmd +!*.vma +!*.cfg +!*.vmv + +# Other game assets (3rd party) +!*.bdf +!*.png +!*.ogg +!*.txt +!*.tga diff --git a/build.sh b/build.sh new file mode 100755 index 0000000..f21790a --- /dev/null +++ b/build.sh @@ -0,0 +1,57 @@ +# Copyright (C) 2021 Harry Godden (hgn) - All Rights Reserved + +src="fishladder.c" +target="fishladder" +lib="-I. -L./lib -L./" +libs="-lGL -lglfw -lX11 -lXxf86vm -lXrandr -lm -lpthread -lXi -ldl -l:steam/libsteam_api.so" +flags="-fsanitize=address -ggdb3" + +run_after=false +do_build=true + +while (( "$#" )); do + case $1 in + -r|--release) + flags="-O3" + echo "Release mode" + ;; + -p|--play) + run_after=true + echo "& Run" + ;; + -n|--nobuild) + do_build=false + echo "no-build" + ;; + *) + echo "Unkown param: $1" + exit 1 + ;; + esac + shift +done + +# Main build +if [ "$do_build" = true ]; then + gcc -Wall -Wstrict-aliasing=3 $lib $flags $src gl/glad.c -o $target $libs -Wl,-rpath=./ $defines + + if [ $? -ne 0 ]; then + echo "GCC build failed" + exit 1 + fi + + echo "Build succeeded" +fi + +# Directories to initialize +mkdir build.linux/cfg -p +cp $target ./build.linux/$target +cp ./steam/libsteam_api.so ./build.linux/libsteam_api.so + +if [ "$run_after" = true ]; then + echo "Playing" + + cd ./build.linux/ + ./$target + cd ./../ +fi diff --git a/dr_soft/miniaudio.h b/dr_soft/miniaudio.h new file mode 100644 index 0000000..84bc1e2 --- /dev/null +++ b/dr_soft/miniaudio.h @@ -0,0 +1,42923 @@ +/* +Audio playback and capture library. Choice of public domain or MIT-0. See license statements at the end of this file. 
+miniaudio - v0.10.4 - 2020-04-12 + +David Reid - davidreidsoftware@gmail.com + +Website: https://miniaud.io +GitHub: https://github.com/dr-soft/miniaudio +*/ + +/* +RELEASE NOTES - VERSION 0.10.x +============================== +Version 0.10 includes major API changes and refactoring, mostly concerned with the data conversion system. Data conversion is performed internally to convert +audio data between the format requested when initializing the `ma_device` object and the format of the internal device used by the backend. The same applies +to the `ma_decoder` object. The previous design has several design flaws and missing features which necessitated a complete redesign. + + +Changes to Data Conversion +-------------------------- +The previous data conversion system used callbacks to deliver input data for conversion. This design works well in some specific situations, but in other +situations it has some major readability and maintenance issues. The decision was made to replace this with a more iterative approach where you just pass in a +pointer to the input data directly rather than dealing with a callback. + +The following are the data conversion APIs that have been removed and their replacements: + + - ma_format_converter -> ma_convert_pcm_frames_format() + - ma_channel_router -> ma_channel_converter + - ma_src -> ma_resampler + - ma_pcm_converter -> ma_data_converter + +The previous conversion APIs accepted a callback in their configs. There are no longer any callbacks to deal with. Instead you just pass the data into the +`*_process_pcm_frames()` function as a pointer to a buffer. + +The simplest aspect of data conversion is sample format conversion. To convert between two formats, just call `ma_convert_pcm_frames_format()`. Channel +conversion is also simple which you can do with `ma_channel_converter` via `ma_channel_converter_process_pcm_frames()`. + +Resampling is more complicated because the number of output frames that are processed is different to the number of input frames that are consumed. When you +call `ma_resampler_process_pcm_frames()` you need to pass in the number of input frames available for processing and the number of output frames you want to +output. Upon returning they will receive the number of input frames that were consumed and the number of output frames that were generated. + +The `ma_data_converter` API is a wrapper around format, channel and sample rate conversion and handles all of the data conversion you'll need which probably +makes it the best option if you need to do data conversion. + +In addition to changes to the API design, a few other changes have been made to the data conversion pipeline: + + - The sinc resampler has been removed. This was completely broken and never actually worked properly. + - The linear resampler now uses low-pass filtering to remove aliasing. The quality of the low-pass filter can be controlled via the resampler config with the + `lpfOrder` option, which has a maximum value of MA_MAX_FILTER_ORDER. + - Data conversion now supports s16 natively which runs through a fixed point pipeline. Previously everything needed to be converted to floating point before + processing, whereas now both s16 and f32 are natively supported. Other formats still require conversion to either s16 or f32 prior to processing, however + `ma_data_converter` will handle this for you. 
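+
+As a rough sketch of how the replacement APIs are called, here is the simplest case - sample format conversion with `ma_convert_pcm_frames_format()` - using
+arbitrary buffer sizes and frame counts chosen purely for the example. See the declaration further down this file for the full parameter list.
+
+    ```c
+    // Convert 1024 interleaved stereo frames from f32 to s16. The output buffer must be large enough to hold
+    // frameCount * channels samples of the output format.
+    float    framesIn [1024 * 2];
+    ma_int16 framesOut[1024 * 2];
+
+    ma_convert_pcm_frames_format(framesOut, ma_format_s16, framesIn, ma_format_f32, 1024, 2, ma_dither_mode_none);
+    ```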
+ + +Custom Memory Allocators +------------------------ +miniaudio has always supported macro level customization for memory allocation via MA_MALLOC, MA_REALLOC and MA_FREE, however some scenarios require more +flexibility by allowing a user data pointer to be passed to the custom allocation routines. Support for this has been added to version 0.10 via the +`ma_allocation_callbacks` structure. Anything making use of heap allocations has been updated to accept this new structure. + +The `ma_context_config` structure has been updated with a new member called `allocationCallbacks`. Leaving this set to it's defaults returned by +`ma_context_config_init()` will cause it to use MA_MALLOC, MA_REALLOC and MA_FREE. Likewise, The `ma_decoder_config` structure has been updated in the same +way, and leaving everything as-is after `ma_decoder_config_init()` will cause it to use the same defaults. + +The following APIs have been updated to take a pointer to a `ma_allocation_callbacks` object. Setting this parameter to NULL will cause it to use defaults. +Otherwise they will use the relevant callback in the structure. + + - ma_malloc() + - ma_realloc() + - ma_free() + - ma_aligned_malloc() + - ma_aligned_free() + - ma_rb_init() / ma_rb_init_ex() + - ma_pcm_rb_init() / ma_pcm_rb_init_ex() + +Note that you can continue to use MA_MALLOC, MA_REALLOC and MA_FREE as per normal. These will continue to be used by default if you do not specify custom +allocation callbacks. + + +Buffer and Period Configuration Changes +--------------------------------------- +The way in which the size of the internal buffer and periods are specified in the device configuration have changed. In previous versions, the config variables +`bufferSizeInFrames` and `bufferSizeInMilliseconds` defined the size of the entire buffer, with the size of a period being the size of this variable divided by +the period count. This became confusing because people would expect the value of `bufferSizeInFrames` or `bufferSizeInMilliseconds` to independantly determine +latency, when in fact it was that value divided by the period count that determined it. These variables have been removed and replaced with new ones called +`periodSizeInFrames` and `periodSizeInMilliseconds`. + +These new configuration variables work in the same way as their predecessors in that if one is set to 0, the other will be used, but the main difference is +that you now set these to you desired latency rather than the size of the entire buffer. The benefit of this is that it's much easier and less confusing to +configure latency. + +The following unused APIs have been removed: + + ma_get_default_buffer_size_in_milliseconds() + ma_get_default_buffer_size_in_frames() + +The following macros have been removed: + + MA_BASE_BUFFER_SIZE_IN_MILLISECONDS_LOW_LATENCY + MA_BASE_BUFFER_SIZE_IN_MILLISECONDS_CONSERVATIVE + + +Other API Changes +----------------- +Other less major API changes have also been made in version 0.10. + +`ma_device_set_stop_callback()` has been removed. If you require a stop callback, you must now set it via the device config just like the data callback. + +The `ma_sine_wave` API has been replaced with a more general API called `ma_waveform`. This supports generation of different types of waveforms, including +sine, square, triangle and sawtooth. Use `ma_waveform_init()` in place of `ma_sine_wave_init()` to initialize the waveform object. This takes a configuration +object called `ma_waveform_config` which defines the properties of the waveform. 
Use `ma_waveform_config_init()` to initialize a `ma_waveform_config` object. +Use `ma_waveform_read_pcm_frames()` in place of `ma_sine_wave_read_f32()` and `ma_sine_wave_read_f32_ex()`. + +`ma_convert_frames()` and `ma_convert_frames_ex()` have been changed. Both of these functions now take a new parameter called `frameCountOut` which specifies +the size of the output buffer in PCM frames. This has been added for safety. In addition to this, the parameters for `ma_convert_frames_ex()` have changed to +take a pointer to a `ma_data_converter_config` object to specify the input and output formats to convert between. This was done to make it more flexible, to +prevent the parameter list getting too long, and to prevent API breakage whenever a new conversion property is added. + +`ma_calculate_frame_count_after_src()` has been renamed to `ma_calculate_frame_count_after_resampling()` for consistency with the new `ma_resampler` API. + + +Filters +------- +The following filters have been added: + + |-------------|-------------------------------------------------------------------| + | API | Description | + |-------------|-------------------------------------------------------------------| + | ma_biquad | Biquad filter (transposed direct form 2) | + | ma_lpf1 | First order low-pass filter | + | ma_lpf2 | Second order low-pass filter | + | ma_lpf | High order low-pass filter (Butterworth) | + | ma_hpf1 | First order high-pass filter | + | ma_hpf2 | Second order high-pass filter | + | ma_hpf | High order high-pass filter (Butterworth) | + | ma_bpf2 | Second order band-pass filter | + | ma_bpf | High order band-pass filter | + | ma_peak2 | Second order peaking filter | + | ma_notch2 | Second order notching filter | + | ma_loshelf2 | Second order low shelf filter | + | ma_hishelf2 | Second order high shelf filter | + |-------------|-------------------------------------------------------------------| + +These filters all support 32-bit floating point and 16-bit signed integer formats natively. Other formats need to be converted beforehand. + + +Sine, Square, Triangle and Sawtooth Waveforms +--------------------------------------------- +Previously miniaudio supported only sine wave generation. This has now been generalized to support sine, square, triangle and sawtooth waveforms. The old +`ma_sine_wave` API has been removed and replaced with the `ma_waveform` API. Use `ma_waveform_config_init()` to initialize a config object, and then pass it +into `ma_waveform_init()`. Then use `ma_waveform_read_pcm_frames()` to read PCM data. + + +Noise Generation +---------------- +A noise generation API has been added. This is used via the `ma_noise` API. Currently white, pink and Brownian noise is supported. The `ma_noise` API is +similar to the waveform API. Use `ma_noise_config_init()` to initialize a config object, and then pass it into `ma_noise_init()` to initialize a `ma_noise` +object. Then use `ma_noise_read_pcm_frames()` to read PCM data. + + +Miscellaneous Changes +--------------------- +The MA_NO_STDIO option has been removed. This would disable file I/O APIs, however this has proven to be too hard to maintain for it's perceived value and was +therefore removed. + +Internal functions have all been made static where possible. If you get warnings about unused functions, please submit a bug report. + +The `ma_device` structure is no longer defined as being aligned to MA_SIMD_ALIGNMENT. 
This resulted in a possible crash when allocating a `ma_device` object on +the heap, but not aligning it to MA_SIMD_ALIGNMENT. This crash would happen due to the compiler seeing the alignment specified on the structure and assuming it +was always aligned as such and thinking it was safe to emit alignment-dependant SIMD instructions. Since miniaudio's philosophy is for things to just work, +this has been removed from all structures. + +Results codes have been overhauled. Unnecessary result codes have been removed, and some have been renumbered for organisation purposes. If you are are binding +maintainer you will need to update your result codes. Support has also been added for retrieving a human readable description of a given result code via the +`ma_result_description()` API. + +ALSA: The automatic format conversion, channel conversion and resampling performed by ALSA is now disabled by default as they were causing some compatibility +issues with certain devices and configurations. These can be individually enabled via the device config: + + ```c + deviceConfig.alsa.noAutoFormat = MA_TRUE; + deviceConfig.alsa.noAutoChannels = MA_TRUE; + deviceConfig.alsa.noAutoResample = MA_TRUE; + ``` +*/ + + +/* +Introduction +============ +miniaudio is a single file library for audio playback and capture. To use it, do the following in one .c file: + + ```c + #define MINIAUDIO_IMPLEMENTATION + #include "miniaudio.h + ``` + +You can #include miniaudio.h in other parts of the program just like any other header. + +miniaudio uses the concept of a "device" as the abstraction for physical devices. The idea is that you choose a physical device to emit or capture audio from, +and then move data to/from the device when miniaudio tells you to. Data is delivered to and from devices asynchronously via a callback which you specify when +initializing the device. + +When initializing the device you first need to configure it. The device configuration allows you to specify things like the format of the data delivered via +the callback, the size of the internal buffer and the ID of the device you want to emit or capture audio from. + +Once you have the device configuration set up you can initialize the device. When initializing a device you need to allocate memory for the device object +beforehand. This gives the application complete control over how the memory is allocated. In the example below we initialize a playback device on the stack, +but you could allocate it on the heap if that suits your situation better. + + ```c + void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount) + { + // In playback mode copy data to pOutput. In capture mode read data from pInput. In full-duplex mode, both pOutput and pInput will be valid and you can + // move data from pInput into pOutput. Never process more than frameCount frames. + } + + ... + + ma_device_config config = ma_device_config_init(ma_device_type_playback); + config.playback.format = MY_FORMAT; + config.playback.channels = MY_CHANNEL_COUNT; + config.sampleRate = MY_SAMPLE_RATE; + config.dataCallback = data_callback; + config.pUserData = pMyCustomData; // Can be accessed from the device object (device.pUserData). + + ma_device device; + if (ma_device_init(NULL, &config, &device) != MA_SUCCESS) { + ... An error occurred ... + } + + ma_device_start(&device); // The device is sleeping by default so you'll need to start it manually. + + ... + + ma_device_uninit(&device); // This will stop the device so no need to do that manually. 
+ ``` + +In the example above, `data_callback()` is where audio data is written and read from the device. The idea is in playback mode you cause sound to be emitted +from the speakers by writing audio data to the output buffer (`pOutput` in the example). In capture mode you read data from the input buffer (`pInput`) to +extract sound captured by the microphone. The `frameCount` parameter tells you how many frames can be written to the output buffer and read from the input +buffer. A "frame" is one sample for each channel. For example, in a stereo stream (2 channels), one frame is 2 samples: one for the left, one for the right. +The channel count is defined by the device config. The size in bytes of an individual sample is defined by the sample format which is also specified in the +device config. Multi-channel audio data is always interleaved, which means the samples for each frame are stored next to each other in memory. For example, in +a stereo stream the first pair of samples will be the left and right samples for the first frame, the second pair of samples will be the left and right samples +for the second frame, etc. + +The configuration of the device is defined by the `ma_device_config` structure. The config object is always initialized with `ma_device_config_init()`. It's +important to always initialize the config with this function as it initializes it with logical defaults and ensures your program doesn't break when new members +are added to the `ma_device_config` structure. The example above uses a fairly simple and standard device configuration. The call to `ma_device_config_init()` +takes a single parameter, which is whether or not the device is a playback, capture, duplex or loopback device (loopback devices are not supported on all +backends). The `config.playback.format` member sets the sample format which can be one of the following (all formats are native-endian): + + |---------------|----------------------------------------|---------------------------| + | Symbol | Description | Range | + |---------------|----------------------------------------|---------------------------| + | ma_format_f32 | 32-bit floating point | [-1, 1] | + | ma_format_s16 | 16-bit signed integer | [-32768, 32767] | + | ma_format_s24 | 24-bit signed integer (tightly packed) | [-8388608, 8388607] | + | ma_format_s32 | 32-bit signed integer | [-2147483648, 2147483647] | + | ma_format_u8 | 8-bit unsigned integer | [0, 255] | + |---------------|----------------------------------------|---------------------------| + +The `config.playback.channels` member sets the number of channels to use with the device. The channel count cannot exceed MA_MAX_CHANNELS. The +`config.sampleRate` member sets the sample rate (which must be the same for both playback and capture in full-duplex configurations). This is usually set to +44100 or 48000, but can be set to anything. It's recommended to keep this between 8000 and 384000, however. + +Note that leaving the format, channel count and/or sample rate at their default values will result in the internal device's native configuration being used +which is useful if you want to avoid the overhead of miniaudio's automatic data conversion. + +In addition to the sample format, channel count and sample rate, the data callback and user data pointer are also set via the config. 
The user data pointer is +not passed into the callback as a parameter, but is instead set to the `pUserData` member of `ma_device` which you can access directly since all miniaudio +structures are transparent. + +Initializing the device is done with `ma_device_init()`. This will return a result code telling you what went wrong, if anything. On success it will return +`MA_SUCCESS`. After initialization is complete the device will be in a stopped state. To start it, use `ma_device_start()`. Uninitializing the device will stop +it, which is what the example above does, but you can also stop the device with `ma_device_stop()`. To resume the device simply call `ma_device_start()` again. +Note that it's important to never stop or start the device from inside the callback. This will result in a deadlock. Instead you set a variable or signal an +event indicating that the device needs to stop and handle it in a different thread. The following APIs must never be called inside the callback: + + ma_device_init() + ma_device_init_ex() + ma_device_uninit() + ma_device_start() + ma_device_stop() + +You must never try uninitializing and reinitializing a device inside the callback. You must also never try to stop and start it from inside the callback. There +are a few other things you shouldn't do in the callback depending on your requirements, however this isn't so much a thread-safety thing, but rather a real- +time processing thing which is beyond the scope of this introduction. + +The example above demonstrates the initialization of a playback device, but it works exactly the same for capture. All you need to do is change the device type +from `ma_device_type_playback` to `ma_device_type_capture` when setting up the config, like so: + + ```c + ma_device_config config = ma_device_config_init(ma_device_type_capture); + config.capture.format = MY_FORMAT; + config.capture.channels = MY_CHANNEL_COUNT; + ``` + +In the data callback you just read from the input buffer (`pInput` in the example above) and leave the output buffer alone (it will be set to NULL when the +device type is set to `ma_device_type_capture`). + +These are the available device types and how you should handle the buffers in the callback: + + |-------------------------|--------------------------------------------------------| + | Device Type | Callback Behavior | + |-------------------------|--------------------------------------------------------| + | ma_device_type_playback | Write to output buffer, leave input buffer untouched. | + | ma_device_type_capture | Read from input buffer, leave output buffer untouched. | + | ma_device_type_duplex | Read from input buffer, write to output buffer. | + | ma_device_type_loopback | Read from input buffer, leave output buffer untouched. | + |-------------------------|--------------------------------------------------------| + +You will notice in the example above that the sample format and channel count is specified separately for playback and capture. This is to support different +data formats between the playback and capture devices in a full-duplex system. An example may be that you want to capture audio data as a monaural stream (one +channel), but output sound to a stereo speaker system. Note that if you use different formats between playback and capture in a full-duplex configuration you +will need to convert the data yourself. There are functions available to help you do this which will be explained later. 
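+
+As a minimal sketch of the duplex row in the table above, a pass-through callback could look like the following. It assumes the capture and playback sides
+were configured with the same format and channel count; if they differ you will need to convert the data yourself as described above.
+
+    ```c
+    void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
+    {
+        // Straight pass-through: copy the captured frames to the playback buffer. memcpy() is from <string.h>.
+        ma_uint32 bytesPerFrame = ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels);
+        memcpy(pOutput, pInput, frameCount * bytesPerFrame);
+    }
+    ```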
+ +The example above did not specify a physical device to connect to which means it will use the operating system's default device. If you have multiple physical +devices connected and you want to use a specific one you will need to specify the device ID in the configuration, like so: + + ``` + config.playback.pDeviceID = pMyPlaybackDeviceID; // Only if requesting a playback or duplex device. + config.capture.pDeviceID = pMyCaptureDeviceID; // Only if requesting a capture, duplex or loopback device. + ``` + +To retrieve the device ID you will need to perform device enumeration, however this requires the use of a new concept called the "context". Conceptually +speaking the context sits above the device. There is one context to many devices. The purpose of the context is to represent the backend at a more global level +and to perform operations outside the scope of an individual device. Mainly it is used for performing run-time linking against backend libraries, initializing +backends and enumerating devices. The example below shows how to enumerate devices. + + ```c + ma_context context; + if (ma_context_init(NULL, 0, NULL, &context) != MA_SUCCESS) { + // Error. + } + + ma_device_info* pPlaybackDeviceInfos; + ma_uint32 playbackDeviceCount; + ma_device_info* pCaptureDeviceInfos; + ma_uint32 captureDeviceCount; + if (ma_context_get_devices(&context, &pPlaybackDeviceInfos, &playbackDeviceCount, &pCaptureDeviceInfos, &captureDeviceCount) != MA_SUCCESS) { + // Error. + } + + // Loop over each device info and do something with it. Here we just print the name with their index. You may want to give the user the + // opportunity to choose which device they'd prefer. + for (ma_uint32 iDevice = 0; iDevice < playbackDeviceCount; iDevice += 1) { + printf("%d - %s\n", iDevice, pPlaybackDeviceInfos[iDevice].name); + } + + ma_device_config config = ma_device_config_init(ma_device_type_playback); + config.playback.pDeviceID = &pPlaybackDeviceInfos[chosenPlaybackDeviceIndex].id; + config.playback.format = MY_FORMAT; + config.playback.channels = MY_CHANNEL_COUNT; + config.sampleRate = MY_SAMPLE_RATE; + config.dataCallback = data_callback; + config.pUserData = pMyCustomData; + + ma_device device; + if (ma_device_init(&context, &config, &device) != MA_SUCCESS) { + // Error + } + + ... + + ma_device_uninit(&device); + ma_context_uninit(&context); + ``` + +The first thing we do in this example is initialize a `ma_context` object with `ma_context_init()`. The first parameter is a pointer to a list of `ma_backend` +values which are used to override the default backend priorities. When this is NULL, as in this example, miniaudio's default priorities are used. The second +parameter is the number of backends listed in the array pointed to by the first parameter. The third parameter is a pointer to a `ma_context_config` object +which can be NULL, in which case defaults are used. The context configuration is used for setting the logging callback, custom memory allocation callbacks, +user-defined data and some backend-specific configurations. + +Once the context has been initialized you can enumerate devices. In the example above we use the simpler `ma_context_get_devices()`, however you can also use a +callback for handling devices by using `ma_context_enumerate_devices()`. When using `ma_context_get_devices()` you provide a pointer to a pointer that will, +upon output, be set to a pointer to a buffer containing a list of `ma_device_info` structures. 
You also provide a pointer to an unsigned integer that will +receive the number of items in the returned buffer. Do not free the returned buffers as their memory is managed internally by miniaudio. + +The `ma_device_info` structure contains an `id` member which is the ID you pass to the device config. It also contains the name of the device which is useful +for presenting a list of devices to the user via the UI. + +When creating your own context you will want to pass it to `ma_device_init()` when initializing the device. Passing in NULL, like we do in the first example, +will result in miniaudio creating the context for you, which you don't want to do since you've already created a context. Note that internally the context is +only tracked by it's pointer which means you must not change the location of the `ma_context` object. If this is an issue, consider using `malloc()` to +allocate memory for the context. + + + +Building +======== +miniaudio should work cleanly out of the box without the need to download or install any dependencies. See below for platform-specific details. + + +Windows +------- +The Windows build should compile cleanly on all popular compilers without the need to configure any include paths nor link to any libraries. + +macOS and iOS +------------- +The macOS build should compile cleanly without the need to download any dependencies nor link to any libraries or frameworks. The iOS build needs to be +compiled as Objective-C (sorry) and will need to link the relevant frameworks but should Just Work with Xcode. Compiling through the command line requires +linking to -lpthread and -lm. + +Linux +----- +The Linux build only requires linking to -ldl, -lpthread and -lm. You do not need any development packages. + +BSD +--- +The BSD build only requires linking to -lpthread and -lm. NetBSD uses audio(4), OpenBSD uses sndio and FreeBSD uses OSS. + +Android +------- +AAudio is the highest priority backend on Android. This should work out of the box without needing any kind of compiler configuration. Support for AAudio +starts with Android 8 which means older versions will fall back to OpenSL|ES which requires API level 16+. + +Emscripten +---------- +The Emscripten build emits Web Audio JavaScript directly and should Just Work without any configuration. You cannot use -std=c* compiler flags, nor -ansi. + + +Build Options +------------- +#define these options before including miniaudio.h. + +#define MA_NO_WASAPI + Disables the WASAPI backend. + +#define MA_NO_DSOUND + Disables the DirectSound backend. + +#define MA_NO_WINMM + Disables the WinMM backend. + +#define MA_NO_ALSA + Disables the ALSA backend. + +#define MA_NO_PULSEAUDIO + Disables the PulseAudio backend. + +#define MA_NO_JACK + Disables the JACK backend. + +#define MA_NO_COREAUDIO + Disables the Core Audio backend. + +#define MA_NO_SNDIO + Disables the sndio backend. + +#define MA_NO_AUDIO4 + Disables the audio(4) backend. + +#define MA_NO_OSS + Disables the OSS backend. + +#define MA_NO_AAUDIO + Disables the AAudio backend. + +#define MA_NO_OPENSL + Disables the OpenSL|ES backend. + +#define MA_NO_WEBAUDIO + Disables the Web Audio backend. + +#define MA_NO_NULL + Disables the null backend. + +#define MA_NO_DECODING + Disables the decoding APIs. + +#define MA_NO_DEVICE_IO + Disables playback and recording. This will disable ma_context and ma_device APIs. This is useful if you only want to use miniaudio's data conversion and/or + decoding APIs. + +#define MA_NO_SSE2 + Disables SSE2 optimizations. 
+ +#define MA_NO_AVX2 + Disables AVX2 optimizations. + +#define MA_NO_AVX512 + Disables AVX-512 optimizations. + +#define MA_NO_NEON + Disables NEON optimizations. + +#define MA_LOG_LEVEL + Sets the logging level. Set level to one of the following: + MA_LOG_LEVEL_VERBOSE + MA_LOG_LEVEL_INFO + MA_LOG_LEVEL_WARNING + MA_LOG_LEVEL_ERROR + +#define MA_DEBUG_OUTPUT + Enable printf() debug output. + +#define MA_COINIT_VALUE + Windows only. The value to pass to internal calls to CoInitializeEx(). Defaults to COINIT_MULTITHREADED. + +#define MA_API + Controls how public APIs should be decorated. Defaults to `extern`. + +#define MA_DLL + If set, configures MA_API to either import or export APIs depending on whether or not the implementation is being defined. If defining the implementation, + MA_API will be configured to export. Otherwise it will be configured to import. This has no effect if MA_API is defined externally. + + + + +Definitions +=========== +This section defines common terms used throughout miniaudio. Unfortunately there is often ambiguity in the use of terms throughout the audio space, so this +section is intended to clarify how miniaudio uses each term. + +Sample +------ +A sample is a single unit of audio data. If the sample format is f32, then one sample is one 32-bit floating point number. + +Frame / PCM Frame +----------------- +A frame is a group of samples equal to the number of channels. For a stereo stream a frame is 2 samples, a mono frame is 1 sample, a 5.1 surround sound frame +is 6 samples, etc. The terms "frame" and "PCM frame" are the same thing in miniaudio. Note that this is different to a compressed frame. If ever miniaudio +needs to refer to a compressed frame, such as a FLAC frame, it will always clarify what it's referring to with something like "FLAC frame". + +Channel +------- +A stream of monaural audio that is emitted from an individual speaker in a speaker system, or received from an individual microphone in a microphone system. A +stereo stream has two channels (a left channel, and a right channel), a 5.1 surround sound system has 6 channels, etc. Some audio systems refer to a channel as +a complex audio stream that's mixed with other channels to produce the final mix - this is completely different to miniaudio's use of the term "channel" and +should not be confused. + +Sample Rate +----------- +The sample rate in miniaudio is always expressed in Hz, such as 44100, 48000, etc. It's the number of PCM frames that are processed per second. + +Formats +------- +Throughout miniaudio you will see references to different sample formats: + + |---------------|----------------------------------------|---------------------------| + | Symbol | Description | Range | + |---------------|----------------------------------------|---------------------------| + | ma_format_f32 | 32-bit floating point | [-1, 1] | + | ma_format_s16 | 16-bit signed integer | [-32768, 32767] | + | ma_format_s24 | 24-bit signed integer (tightly packed) | [-8388608, 8388607] | + | ma_format_s32 | 32-bit signed integer | [-2147483648, 2147483647] | + | ma_format_u8 | 8-bit unsigned integer | [0, 255] | + |---------------|----------------------------------------|---------------------------| + +All formats are native-endian. + + + +Decoding +======== +The `ma_decoder` API is used for reading audio files. To enable a decoder you must #include the header of the relevant backend library before the +implementation of miniaudio. 
You can find copies of these in the "extras" folder in the miniaudio repository (https://github.com/dr-soft/miniaudio). + +The table below are the supported decoding backends: + + |--------|-----------------| + | Type | Backend Library | + |--------|-----------------| + | WAV | dr_wav.h | + | FLAC | dr_flac.h | + | MP3 | dr_mp3.h | + | Vorbis | stb_vorbis.c | + |--------|-----------------| + +The code below is an example of how to enable decoding backends: + + ```c + #include "dr_flac.h" // Enables FLAC decoding. + #include "dr_mp3.h" // Enables MP3 decoding. + #include "dr_wav.h" // Enables WAV decoding. + + #define MINIAUDIO_IMPLEMENTATION + #include "miniaudio.h" + ``` + +A decoder can be initialized from a file with `ma_decoder_init_file()`, a block of memory with `ma_decoder_init_memory()`, or from data delivered via callbacks +with `ma_decoder_init()`. Here is an example for loading a decoder from a file: + + ```c + ma_decoder decoder; + ma_result result = ma_decoder_init_file("MySong.mp3", NULL, &decoder); + if (result != MA_SUCCESS) { + return false; // An error occurred. + } + + ... + + ma_decoder_uninit(&decoder); + ``` + +When initializing a decoder, you can optionally pass in a pointer to a ma_decoder_config object (the NULL argument in the example above) which allows you to +configure the output format, channel count, sample rate and channel map: + + ```c + ma_decoder_config config = ma_decoder_config_init(ma_format_f32, 2, 48000); + ``` + +When passing in NULL for decoder config in `ma_decoder_init*()`, the output format will be the same as that defined by the decoding backend. + +Data is read from the decoder as PCM frames: + + ```c + ma_uint64 framesRead = ma_decoder_read_pcm_frames(pDecoder, pFrames, framesToRead); + ``` + +You can also seek to a specific frame like so: + + ```c + ma_result result = ma_decoder_seek_to_pcm_frame(pDecoder, targetFrame); + if (result != MA_SUCCESS) { + return false; // An error occurred. + } + ``` + +When loading a decoder, miniaudio uses a trial and error technique to find the appropriate decoding backend. This can be unnecessarily inefficient if the type +is already known. In this case you can use the `_wav`, `_mp3`, etc. varients of the aforementioned initialization APIs: + + ```c + ma_decoder_init_wav() + ma_decoder_init_mp3() + ma_decoder_init_memory_wav() + ma_decoder_init_memory_mp3() + ma_decoder_init_file_wav() + ma_decoder_init_file_mp3() + etc. + ``` + +The `ma_decoder_init_file()` API will try using the file extension to determine which decoding backend to prefer. + + + +Encoding +======== +The `ma_encoding` API is used for writing audio files. To enable an encoder you must #include the header of the relevant backend library before the +implementation of miniaudio. You can find copies of these in the "extras" folder in the miniaudio repository (https://github.com/dr-soft/miniaudio). + +The table below are the supported encoding backends: + + |--------|-----------------| + | Type | Backend Library | + |--------|-----------------| + | WAV | dr_wav.h | + |--------|-----------------| + +The code below is an example of how to enable encoding backends: + + ```c + #include "dr_wav.h" // Enables WAV encoding. + + #define MINIAUDIO_IMPLEMENTATION + #include "miniaudio.h" + ``` + +An encoder can be initialized to write to a file with `ma_encoder_init_file()` or from data delivered via callbacks with `ma_encoder_init()`. Below is an +example for initializing an encoder to output to a file. 
+ + ```c + ma_encoder_config config = ma_encoder_config_init(ma_resource_format_wav, FORMAT, CHANNELS, SAMPLE_RATE); + ma_encoder encoder; + ma_result result = ma_encoder_init_file("my_file.wav", &config, &encoder); + if (result != MA_SUCCESS) { + // Error + } + + ... + + ma_encoder_uninit(&encoder); + ``` + +When initializing an encoder you must specify a config which is initialized with `ma_encoder_config_init()`. Here you must specify the file type, the output +sample format, output channel count and output sample rate. The following file types are supported: + + |------------------------|-------------| + | Enum | Description | + |------------------------|-------------| + | ma_resource_format_wav | WAV | + |------------------------|-------------| + +If the format, channel count or sample rate is not supported by the output file type an error will be returned. The encoder will not perform data conversion so +you will need to convert it before outputting any audio data. To output audio data, use `ma_encoder_write_pcm_frames()`, like in the example below: + + ```c + framesWritten = ma_encoder_write_pcm_frames(&encoder, pPCMFramesToWrite, framesToWrite); + ``` + +Encoders must be uninitialized with `ma_encoder_uninit()`. + + + +Sample Format Conversion +======================== +Conversion between sample formats is achieved with the `ma_pcm_*_to_*()`, `ma_pcm_convert()` and `ma_convert_pcm_frames_format()` APIs. Use `ma_pcm_*_to_*()` +to convert between two specific formats. Use `ma_pcm_convert()` to convert based on a `ma_format` variable. Use `ma_convert_pcm_frames_format()` to convert +PCM frames where you want to specify the frame count and channel count as a variable instead of the total sample count. + +Dithering +--------- +Dithering can be set using the ditherMode parameter. + +The different dithering modes include the following, in order of efficiency: + + |-----------|--------------------------| + | Type | Enum Token | + |-----------|--------------------------| + | None | ma_dither_mode_none | + | Rectangle | ma_dither_mode_rectangle | + | Triangle | ma_dither_mode_triangle | + |-----------|--------------------------| + +Note that even if the dither mode is set to something other than `ma_dither_mode_none`, it will be ignored for conversions where dithering is not needed. +Dithering is available for the following conversions: + + s16 -> u8 + s24 -> u8 + s32 -> u8 + f32 -> u8 + s24 -> s16 + s32 -> s16 + f32 -> s16 + +Note that it is not an error to pass something other than ma_dither_mode_none for conversions where dither is not used. It will just be ignored. + + + +Channel Conversion +================== +Channel conversion is used for channel rearrangement and conversion from one channel count to another. The `ma_channel_converter` API is used for channel +conversion. Below is an example of initializing a simple channel converter which converts from mono to stereo. + + ```c + ma_channel_converter_config config = ma_channel_converter_config_init(ma_format, 1, NULL, 2, NULL, ma_channel_mix_mode_default, NULL); + result = ma_channel_converter_init(&config, &converter); + if (result != MA_SUCCESS) { + // Error. + } + ``` + +To perform the conversion simply call `ma_channel_converter_process_pcm_frames()` like so: + + ```c + ma_result result = ma_channel_converter_process_pcm_frames(&converter, pFramesOut, pFramesIn, frameCount); + if (result != MA_SUCCESS) { + // Error. + } + ``` + +It is up to the caller to ensure the output buffer is large enough to accomodate the new PCM frames. 
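+
+A simple way to size that output buffer is to multiply the frame count by the size of one output frame, as in the sketch below, which assumes the
+mono-to-stereo f32 setup from the example above:
+
+    ```c
+    // One output frame is channelsOut * bytes-per-sample bytes, so for 2 channels of f32 output:
+    size_t outputBufferSizeInBytes = (size_t)frameCount * ma_get_bytes_per_frame(ma_format_f32, 2);
+    ```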
+ +The only formats supported are `ma_format_s16` and `ma_format_f32`. If you need another format you need to convert your data manually which you can do with +`ma_pcm_convert()`, etc. + +Input and output PCM frames are always interleaved. Deinterleaved layouts are not supported. + + +Channel Mapping +--------------- +In addition to converting from one channel count to another, like the example above, The channel converter can also be used to rearrange channels. When +initializing the channel converter, you can optionally pass in channel maps for both the input and output frames. If the channel counts are the same, and each +channel map contains the same channel positions with the exception that they're in a different order, a simple shuffling of the channels will be performed. If, +however, there is not a 1:1 mapping of channel positions, or the channel counts differ, the input channels will be mixed based on a mixing mode which is +specified when initializing the `ma_channel_converter_config` object. + +When converting from mono to multi-channel, the mono channel is simply copied to each output channel. When going the other way around, the audio of each output +channel is simply averaged and copied to the mono channel. + +In more complicated cases blending is used. The `ma_channel_mix_mode_simple` mode will drop excess channels and silence extra channels. For example, converting +from 4 to 2 channels, the 3rd and 4th channels will be dropped, whereas converting from 2 to 4 channels will put silence into the 3rd and 4th channels. + +The `ma_channel_mix_mode_rectangle` mode uses spacial locality based on a rectangle to compute a simple distribution between input and output. Imagine sitting +in the middle of a room, with speakers on the walls representing channel positions. The MA_CHANNEL_FRONT_LEFT position can be thought of as being in the corner +of the front and left walls. + +Finally, the `ma_channel_mix_mode_custom_weights` mode can be used to use custom user-defined weights. Custom weights can be passed in as the last parameter of +`ma_channel_converter_config_init()`. + +Predefined channel maps can be retrieved with `ma_get_standard_channel_map()`. This takes a `ma_standard_channel_map` enum as it's first parameter, which can +be one of the following: + + |-----------------------------------|-----------------------------------------------------------| + | Name | Description | + |-----------------------------------|-----------------------------------------------------------| + | ma_standard_channel_map_default | Default channel map used by miniaudio. See below. | + | ma_standard_channel_map_microsoft | Channel map used by Microsoft's bitfield channel maps. | + | ma_standard_channel_map_alsa | Default ALSA channel map. | + | ma_standard_channel_map_rfc3551 | RFC 3551. Based on AIFF. | + | ma_standard_channel_map_flac | FLAC channel map. | + | ma_standard_channel_map_vorbis | Vorbis channel map. | + | ma_standard_channel_map_sound4 | FreeBSD's sound(4). | + | ma_standard_channel_map_sndio | sndio channel map. 
www.sndio.org/tips.html | + | ma_standard_channel_map_webaudio | https://webaudio.github.io/web-audio-api/#ChannelOrdering | + |-----------------------------------|-----------------------------------------------------------| + +Below are the channel maps used by default in miniaudio (ma_standard_channel_map_default): + + |---------------|------------------------------| + | Channel Count | Mapping | + |---------------|------------------------------| + | 1 (Mono) | 0: MA_CHANNEL_MONO | + |---------------|------------------------------| + | 2 (Stereo) | 0: MA_CHANNEL_FRONT_LEFT | + | | 1: MA_CHANNEL_FRONT_RIGHT | + |---------------|------------------------------| + | 3 | 0: MA_CHANNEL_FRONT_LEFT | + | | 1: MA_CHANNEL_FRONT_RIGHT | + | | 2: MA_CHANNEL_FRONT_CENTER | + |---------------|------------------------------| + | 4 (Surround) | 0: MA_CHANNEL_FRONT_LEFT | + | | 1: MA_CHANNEL_FRONT_RIGHT | + | | 2: MA_CHANNEL_FRONT_CENTER | + | | 3: MA_CHANNEL_BACK_CENTER | + |---------------|------------------------------| + | 5 | 0: MA_CHANNEL_FRONT_LEFT | + | | 1: MA_CHANNEL_FRONT_RIGHT | + | | 2: MA_CHANNEL_FRONT_CENTER | + | | 3: MA_CHANNEL_BACK_LEFT | + | | 4: MA_CHANNEL_BACK_RIGHT | + |---------------|------------------------------| + | 6 (5.1) | 0: MA_CHANNEL_FRONT_LEFT | + | | 1: MA_CHANNEL_FRONT_RIGHT | + | | 2: MA_CHANNEL_FRONT_CENTER | + | | 3: MA_CHANNEL_LFE | + | | 4: MA_CHANNEL_SIDE_LEFT | + | | 5: MA_CHANNEL_SIDE_RIGHT | + |---------------|------------------------------| + | 7 | 0: MA_CHANNEL_FRONT_LEFT | + | | 1: MA_CHANNEL_FRONT_RIGHT | + | | 2: MA_CHANNEL_FRONT_CENTER | + | | 3: MA_CHANNEL_LFE | + | | 4: MA_CHANNEL_BACK_CENTER | + | | 4: MA_CHANNEL_SIDE_LEFT | + | | 5: MA_CHANNEL_SIDE_RIGHT | + |---------------|------------------------------| + | 8 (7.1) | 0: MA_CHANNEL_FRONT_LEFT | + | | 1: MA_CHANNEL_FRONT_RIGHT | + | | 2: MA_CHANNEL_FRONT_CENTER | + | | 3: MA_CHANNEL_LFE | + | | 4: MA_CHANNEL_BACK_LEFT | + | | 5: MA_CHANNEL_BACK_RIGHT | + | | 6: MA_CHANNEL_SIDE_LEFT | + | | 7: MA_CHANNEL_SIDE_RIGHT | + |---------------|------------------------------| + | Other | All channels set to 0. This | + | | is equivalent to the same | + | | mapping as the device. | + |---------------|------------------------------| + + + +Resampling +========== +Resampling is achieved with the `ma_resampler` object. To create a resampler object, do something like the following: + + ```c + ma_resampler_config config = ma_resampler_config_init(ma_format_s16, channels, sampleRateIn, sampleRateOut, ma_resample_algorithm_linear); + ma_resampler resampler; + ma_result result = ma_resampler_init(&config, &resampler); + if (result != MA_SUCCESS) { + // An error occurred... + } + ``` + +Do the following to uninitialize the resampler: + + ```c + ma_resampler_uninit(&resampler); + ``` + +The following example shows how data can be processed + + ```c + ma_uint64 frameCountIn = 1000; + ma_uint64 frameCountOut = 2000; + ma_result result = ma_resampler_process_pcm_frames(&resampler, pFramesIn, &frameCountIn, pFramesOut, &frameCountOut); + if (result != MA_SUCCESS) { + // An error occurred... + } + + // At this point, frameCountIn contains the number of input frames that were consumed and frameCountOut contains the number of output frames written. + ``` + +To initialize the resampler you first need to set up a config (`ma_resampler_config`) with `ma_resampler_config_init()`. You need to specify the sample format +you want to use, the number of channels, the input and output sample rate, and the algorithm. 
+ +The sample format can be either `ma_format_s16` or `ma_format_f32`. If you need a different format you will need to perform pre- and post-conversions yourself +where necessary. Note that the format is the same for both input and output. The format cannot be changed after initialization. + +The resampler supports multiple channels and is always interleaved (both input and output). The channel count cannot be changed after initialization. + +The sample rates can be anything other than zero, and are always specified in hertz. They should be set to something like 44100, etc. The sample rate is the +only configuration property that can be changed after initialization. + +The miniaudio resampler supports multiple algorithms: + + |-----------|------------------------------| + | Algorithm | Enum Token | + |-----------|------------------------------| + | Linear | ma_resample_algorithm_linear | + | Speex | ma_resample_algorithm_speex | + |-----------|------------------------------| + +Because Speex is not public domain it is strictly opt-in and the code is stored in separate files. if you opt-in to the Speex backend you will need to consider +it's license, the text of which can be found in it's source files in "extras/speex_resampler". Details on how to opt-in to the Speex resampler is explained in +the Speex Resampler section below. + +The algorithm cannot be changed after initialization. + +Processing always happens on a per PCM frame basis and always assumes interleaved input and output. De-interleaved processing is not supported. To process +frames, use `ma_resampler_process_pcm_frames()`. On input, this function takes the number of output frames you can fit in the output buffer and the number of +input frames contained in the input buffer. On output these variables contain the number of output frames that were written to the output buffer and the +number of input frames that were consumed in the process. You can pass in NULL for the input buffer in which case it will be treated as an infinitely large +buffer of zeros. The output buffer can also be NULL, in which case the processing will be treated as seek. + +The sample rate can be changed dynamically on the fly. You can change this with explicit sample rates with `ma_resampler_set_rate()` and also with a decimal +ratio with `ma_resampler_set_rate_ratio()`. The ratio is in/out. + +Sometimes it's useful to know exactly how many input frames will be required to output a specific number of frames. You can calculate this with +`ma_resampler_get_required_input_frame_count()`. Likewise, it's sometimes useful to know exactly how many frames would be output given a certain number of +input frames. You can do this with `ma_resampler_get_expected_output_frame_count()`. + +Due to the nature of how resampling works, the resampler introduces some latency. This can be retrieved in terms of both the input rate and the output rate +with `ma_resampler_get_input_latency()` and `ma_resampler_get_output_latency()`. + + +Resampling Algorithms +--------------------- +The choice of resampling algorithm depends on your situation and requirements. The linear resampler is the most efficient and has the least amount of latency, +but at the expense of poorer quality. The Speex resampler is higher quality, but slower with more latency. It also performs several heap allocations internally +for memory management. + + +Linear Resampling +----------------- +The linear resampler is the fastest, but comes at the expense of poorer quality. 
There is, however, some control over the quality of the linear resampler which +may make it a suitable option depending on your requirements. + +The linear resampler performs low-pass filtering before or after downsampling or upsampling, depending on the sample rates you're converting between. When +decreasing the sample rate, the low-pass filter will be applied before downsampling. When increasing the rate it will be performed after upsampling. By default +a fourth order low-pass filter will be applied. This can be configured via the `lpfOrder` configuration variable. Setting this to 0 will disable filtering. + +The low-pass filter has a cutoff frequency which defaults to half the sample rate of the lowest of the input and output sample rates (Nyquist Frequency). This +can be controlled with the `lpfNyquistFactor` config variable. This defaults to 1, and should be in the range of 0..1, although a value of 0 does not make +sense and should be avoided. A value of 1 will use the Nyquist Frequency as the cutoff. A value of 0.5 will use half the Nyquist Frequency as the cutoff, etc. +Values less than 1 will result in more washed out sound due to more of the higher frequencies being removed. This config variable has no impact on performance +and is a purely perceptual configuration. + +The API for the linear resampler is the same as the main resampler API, only it's called `ma_linear_resampler`. + + +Speex Resampling +---------------- +The Speex resampler is made up of third party code which is released under the BSD license. Because it is licensed differently to miniaudio, which is public +domain, it is strictly opt-in and all of it's code is stored in separate files. If you opt-in to the Speex resampler you must consider the license text in it's +source files. To opt-in, you must first #include the following file before the implementation of miniaudio.h: + + #include "extras/speex_resampler/ma_speex_resampler.h" + +Both the header and implementation is contained within the same file. The implementation can be included in your program like so: + + #define MINIAUDIO_SPEEX_RESAMPLER_IMPLEMENTATION + #include "extras/speex_resampler/ma_speex_resampler.h" + +Note that even if you opt-in to the Speex backend, miniaudio won't use it unless you explicitly ask for it in the respective config of the object you are +initializing. If you try to use the Speex resampler without opting in, initialization of the `ma_resampler` object will fail with `MA_NO_BACKEND`. + +The only configuration option to consider with the Speex resampler is the `speex.quality` config variable. This is a value between 0 and 10, with 0 being +the fastest with the poorest quality and 10 being the slowest with the highest quality. The default value is 3. + + + +General Data Conversion +======================= +The `ma_data_converter` API can be used to wrap sample format conversion, channel conversion and resampling into one operation. This is what miniaudio uses +internally to convert between the format requested when the device was initialized and the format of the backend's native device. The API for general data +conversion is very similar to the resampling API. Create a `ma_data_converter` object like this: + + ```c + ma_data_converter_config config = ma_data_converter_config_init(inputFormat, outputFormat, inputChannels, outputChannels, inputSampleRate, outputSampleRate); + ma_data_converter converter; + ma_result result = ma_data_converter_init(&config, &converter); + if (result != MA_SUCCESS) { + // An error occurred... 
+ } + ``` + +In the example above we use `ma_data_converter_config_init()` to initialize the config, however there's many more properties that can be configured, such as +channel maps and resampling quality. Something like the following may be more suitable depending on your requirements: + + ```c + ma_data_converter_config config = ma_data_converter_config_init_default(); + config.formatIn = inputFormat; + config.formatOut = outputFormat; + config.channelsIn = inputChannels; + config.channelsOut = outputChannels; + config.sampleRateIn = inputSampleRate; + config.sampleRateOut = outputSampleRate; + ma_get_standard_channel_map(ma_standard_channel_map_flac, config.channelCountIn, config.channelMapIn); + config.resampling.linear.lpfOrder = MA_MAX_FILTER_ORDER; + ``` + +Do the following to uninitialize the data converter: + + ```c + ma_data_converter_uninit(&converter); + ``` + +The following example shows how data can be processed + + ```c + ma_uint64 frameCountIn = 1000; + ma_uint64 frameCountOut = 2000; + ma_result result = ma_data_converter_process_pcm_frames(&converter, pFramesIn, &frameCountIn, pFramesOut, &frameCountOut); + if (result != MA_SUCCESS) { + // An error occurred... + } + + // At this point, frameCountIn contains the number of input frames that were consumed and frameCountOut contains the number of output frames written. + ``` + +The data converter supports multiple channels and is always interleaved (both input and output). The channel count cannot be changed after initialization. + +Sample rates can be anything other than zero, and are always specified in hertz. They should be set to something like 44100, etc. The sample rate is the only +configuration property that can be changed after initialization, but only if the `resampling.allowDynamicSampleRate` member of `ma_data_converter_config` is +set to MA_TRUE. To change the sample rate, use `ma_data_converter_set_rate()` or `ma_data_converter_set_rate_ratio()`. The ratio must be in/out. The resampling +algorithm cannot be changed after initialization. + +Processing always happens on a per PCM frame basis and always assumes interleaved input and output. De-interleaved processing is not supported. To process +frames, use `ma_data_converter_process_pcm_frames()`. On input, this function takes the number of output frames you can fit in the output buffer and the number +of input frames contained in the input buffer. On output these variables contain the number of output frames that were written to the output buffer and the +number of input frames that were consumed in the process. You can pass in NULL for the input buffer in which case it will be treated as an infinitely large +buffer of zeros. The output buffer can also be NULL, in which case the processing will be treated as seek. + +Sometimes it's useful to know exactly how many input frames will be required to output a specific number of frames. You can calculate this with +`ma_data_converter_get_required_input_frame_count()`. Likewise, it's sometimes useful to know exactly how many frames would be output given a certain number of +input frames. You can do this with `ma_data_converter_get_expected_output_frame_count()`. + +Due to the nature of how resampling works, the data converter introduces some latency if resampling is required. This can be retrieved in terms of both the +input rate and the output rate with `ma_data_converter_get_input_latency()` and `ma_data_converter_get_output_latency()`. 
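+
+As a rough sketch of how the frame count helpers above can be used (check the declarations further down this file for the exact signatures), you might size
+the input buffer like so before processing:
+
+    ```c
+    // Ask the converter how many input frames are needed to produce 1024 output frames, then make sure the
+    // input buffer passed to ma_data_converter_process_pcm_frames() holds at least that many frames.
+    ma_uint64 outputFramesWanted = 1024;
+    ma_uint64 inputFramesNeeded  = ma_data_converter_get_required_input_frame_count(&converter, outputFramesWanted);
+    ```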
+ + + +Filtering +========= + +Biquad Filtering +---------------- +Biquad filtering is achieved with the `ma_biquad` API. Example: + + ```c + ma_biquad_config config = ma_biquad_config_init(ma_format_f32, channels, b0, b1, b2, a0, a1, a2); + ma_result result = ma_biquad_init(&config, &biquad); + if (result != MA_SUCCESS) { + // Error. + } + + ... + + ma_biquad_process_pcm_frames(&biquad, pFramesOut, pFramesIn, frameCount); + ``` + +Biquad filtering is implemented using transposed direct form 2. The numerator coefficients are b0, b1 and b2, and the denominator coefficients are a0, a1 and +a2. The a0 coefficient is required and coefficients must not be pre-normalized. + +Supported formats are `ma_format_s16` and `ma_format_f32`. If you need to use a different format you need to convert it yourself beforehand. When using +`ma_format_s16` the biquad filter will use fixed point arithmetic. When using `ma_format_f32`, floating point arithmetic will be used. + +Input and output frames are always interleaved. + +Filtering can be applied in-place by passing in the same pointer for both the input and output buffers, like so: + + ```c + ma_biquad_process_pcm_frames(&biquad, pMyData, pMyData, frameCount); + ``` + +If you need to change the values of the coefficients, but maintain the values in the registers you can do so with `ma_biquad_reinit()`. This is useful if you +need to change the properties of the filter while keeping the values of registers valid to avoid glitching. Do not use `ma_biquad_init()` for this as it will +do a full initialization which involves clearing the registers to 0. Note that changing the format or channel count after initialization is invalid and will +result in an error. + + +Low-Pass Filtering +------------------ +Low-pass filtering is achieved with the following APIs: + + |---------|------------------------------------------| + | API | Description | + |---------|------------------------------------------| + | ma_lpf1 | First order low-pass filter | + | ma_lpf2 | Second order low-pass filter | + | ma_lpf | High order low-pass filter (Butterworth) | + |---------|------------------------------------------| + +Low-pass filter example: + + ```c + ma_lpf_config config = ma_lpf_config_init(ma_format_f32, channels, sampleRate, cutoffFrequency, order); + ma_result result = ma_lpf_init(&config, &lpf); + if (result != MA_SUCCESS) { + // Error. + } + + ... + + ma_lpf_process_pcm_frames(&lpf, pFramesOut, pFramesIn, frameCount); + ``` + +Supported formats are `ma_format_s16` and` ma_format_f32`. If you need to use a different format you need to convert it yourself beforehand. Input and output +frames are always interleaved. + +Filtering can be applied in-place by passing in the same pointer for both the input and output buffers, like so: + + ```c + ma_lpf_process_pcm_frames(&lpf, pMyData, pMyData, frameCount); + ``` + +The maximum filter order is limited to MA_MAX_FILTER_ORDER which is set to 8. If you need more, you can chain first and second order filters together. + + ```c + for (iFilter = 0; iFilter < filterCount; iFilter += 1) { + ma_lpf2_process_pcm_frames(&lpf2[iFilter], pMyData, pMyData, frameCount); + } + ``` + +If you need to change the configuration of the filter, but need to maintain the state of internal registers you can do so with `ma_lpf_reinit()`. This may be +useful if you need to change the sample rate and/or cutoff frequency dynamically while maintaing smooth transitions. 
Note that changing the format or channel
+count after initialization is invalid and will result in an error.
+
+The `ma_lpf` object supports a configurable order, but if you only need a first order filter you may want to consider using `ma_lpf1`. Likewise, if you only
+need a second order filter you can use `ma_lpf2`. The advantage of this is that they're lighter weight and a bit more efficient.
+
+If an even filter order is specified, a series of second order filters will be processed in a chain. If an odd filter order is specified, a first order filter
+will be applied, followed by a series of second order filters in a chain.
+
+
+High-Pass Filtering
+-------------------
+High-pass filtering is achieved with the following APIs:
+
+    |---------|-------------------------------------------|
+    | API     | Description                               |
+    |---------|-------------------------------------------|
+    | ma_hpf1 | First order high-pass filter              |
+    | ma_hpf2 | Second order high-pass filter             |
+    | ma_hpf  | High order high-pass filter (Butterworth) |
+    |---------|-------------------------------------------|
+
+High-pass filters work exactly the same as low-pass filters, only the APIs are called `ma_hpf1`, `ma_hpf2` and `ma_hpf`. See example code for low-pass filters
+for example usage.
+
+
+Band-Pass Filtering
+-------------------
+Band-pass filtering is achieved with the following APIs:
+
+    |---------|-------------------------------|
+    | API     | Description                   |
+    |---------|-------------------------------|
+    | ma_bpf2 | Second order band-pass filter |
+    | ma_bpf  | High order band-pass filter   |
+    |---------|-------------------------------|
+
+Band-pass filters work exactly the same as low-pass filters, only the APIs are called `ma_bpf2` and `ma_bpf`. See example code for low-pass filters for example
+usage. Note that the order for band-pass filters must be an even number which means there is no first order band-pass filter, unlike low-pass and high-pass
+filters.
+
+
+Notch Filtering
+---------------
+Notch filtering is achieved with the following APIs:
+
+    |-----------|------------------------------------------|
+    | API       | Description                              |
+    |-----------|------------------------------------------|
+    | ma_notch2 | Second order notching filter             |
+    |-----------|------------------------------------------|
+
+
+Peaking EQ Filtering
+--------------------
+Peaking filtering is achieved with the following APIs:
+
+    |----------|------------------------------------------|
+    | API      | Description                              |
+    |----------|------------------------------------------|
+    | ma_peak2 | Second order peaking filter              |
+    |----------|------------------------------------------|
+
+
+Low Shelf Filtering
+-------------------
+Low shelf filtering is achieved with the following APIs:
+
+    |-------------|------------------------------------------|
+    | API         | Description                              |
+    |-------------|------------------------------------------|
+    | ma_loshelf2 | Second order low shelf filter            |
+    |-------------|------------------------------------------|
+
+Where a high-pass filter is used to eliminate lower frequencies, a low shelf filter can be used to just turn them down rather than eliminate them entirely.
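+
+Usage follows the same config/init/process pattern as the other filters. The following is a minimal sketch for a second order low shelf filter; the gain,
+slope and frequency values are placeholders you would choose yourself:
+
+    ```c
+    ma_loshelf2_config config = ma_loshelf2_config_init(ma_format_f32, channels, sampleRate, gainDB, shelfSlope, frequency);
+    ma_loshelf2 loshelf;
+    ma_result result = ma_loshelf2_init(&config, &loshelf);
+    if (result != MA_SUCCESS) {
+        // Error.
+    }
+
+    ...
+
+    ma_loshelf2_process_pcm_frames(&loshelf, pFramesOut, pFramesIn, frameCount);
+    ```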
+ + +High Shelf Filtering +-------------------- +High shelf filtering is achieved with the following APIs: + + |-------------|------------------------------------------| + | API | Description | + |-------------|------------------------------------------| + | ma_hishelf2 | Second order high shelf filter | + |-------------|------------------------------------------| + +The high shelf filter has the same API as the low shelf filter, only you would use `ma_hishelf` instead of `ma_loshelf`. Where a low shelf filter is used to +adjust the volume of low frequencies, the high shelf filter does the same thing for high frequencies. + + + + +Waveform and Noise Generation +============================= + +Waveforms +--------- +miniaudio supports generation of sine, square, triangle and sawtooth waveforms. This is achieved with the `ma_waveform` API. Example: + + ```c + ma_waveform_config config = ma_waveform_config_init(FORMAT, CHANNELS, SAMPLE_RATE, ma_waveform_type_sine, amplitude, frequency); + + ma_waveform waveform; + ma_result result = ma_waveform_init(&config, &waveform); + if (result != MA_SUCCESS) { + // Error. + } + + ... + + ma_waveform_read_pcm_frames(&waveform, pOutput, frameCount); + ``` + +The amplitude, frequency and sample rate can be changed dynamically with `ma_waveform_set_amplitude()`, `ma_waveform_set_frequency()` and +`ma_waveform_set_sample_rate()` respectively. + +You can reverse the waveform by setting the amplitude to a negative value. You can use this to control whether or not a sawtooth has a positive or negative +ramp, for example. + +Below are the supported waveform types: + + |---------------------------| + | Enum Name | + |---------------------------| + | ma_waveform_type_sine | + | ma_waveform_type_square | + | ma_waveform_type_triangle | + | ma_waveform_type_sawtooth | + |---------------------------| + + + +Noise +----- +miniaudio supports generation of white, pink and brownian noise via the `ma_noise` API. Example: + + ```c + ma_noise_config config = ma_noise_config_init(FORMAT, CHANNELS, ma_noise_type_white, SEED, amplitude); + + ma_noise noise; + ma_result result = ma_noise_init(&config, &noise); + if (result != MA_SUCCESS) { + // Error. + } + + ... + + ma_noise_read_pcm_frames(&noise, pOutput, frameCount); + ``` + +The noise API uses simple LCG random number generation. It supports a custom seed which is useful for things like automated testing requiring reproducibility. +Setting the seed to zero will default to MA_DEFAULT_LCG_SEED. + +By default, the noise API will use different values for different channels. So, for example, the left side in a stereo stream will be different to the right +side. To instead have each channel use the same random value, set the `duplicateChannels` member of the noise config to true, like so: + + ```c + config.duplicateChannels = MA_TRUE; + ``` + +Below are the supported noise types. + + |------------------------| + | Enum Name | + |------------------------| + | ma_noise_type_white | + | ma_noise_type_pink | + | ma_noise_type_brownian | + |------------------------| + + + +Ring Buffers +============ +miniaudio supports lock free (single producer, single consumer) ring buffers which are exposed via the `ma_rb` and `ma_pcm_rb` APIs. The `ma_rb` API operates +on bytes, whereas the `ma_pcm_rb` operates on PCM frames. They are otherwise identical as `ma_pcm_rb` is just a wrapper around `ma_rb`. + +Unlike most other APIs in miniaudio, ring buffers support both interleaved and deinterleaved streams. 
The caller can also allocate their own backing memory for
+the ring buffer to use internally for added flexibility. Otherwise the ring buffer will manage its internal memory for you.
+
+The examples below use the PCM frame variant of the ring buffer since that's most likely the one you will want to use. To initialize a ring buffer, do
+something like the following:
+
+    ```c
+    ma_pcm_rb rb;
+    ma_result result = ma_pcm_rb_init(FORMAT, CHANNELS, BUFFER_SIZE_IN_FRAMES, NULL, NULL, &rb);
+    if (result != MA_SUCCESS) {
+        // Error
+    }
+    ```
+
+The `ma_pcm_rb_init()` function takes the sample format and channel count as parameters because it's the PCM variant of the ring buffer API. For the regular
+ring buffer that operates on bytes you would call `ma_rb_init()` which leaves these out and just takes the size of the buffer in bytes instead of frames. The
+fourth parameter is an optional pre-allocated buffer and the fifth parameter is a pointer to a `ma_allocation_callbacks` structure for custom memory allocation
+routines. Passing in NULL for this results in MA_MALLOC() and MA_FREE() being used.
+
+Use `ma_pcm_rb_init_ex()` if you need a deinterleaved buffer. The data for each sub-buffer is offset from the others based on the stride. To manage your sub-
+buffers you can use `ma_pcm_rb_get_subbuffer_stride()`, `ma_pcm_rb_get_subbuffer_offset()` and `ma_pcm_rb_get_subbuffer_ptr()`.
+
+Use `ma_pcm_rb_acquire_read()` and `ma_pcm_rb_acquire_write()` to retrieve a pointer to a section of the ring buffer. You specify the number of frames you
+need, and on output it will be set to what was actually acquired. If the read or write pointer is positioned such that the number of frames requested will
+require a loop, it will be clamped to the end of the buffer. Therefore, the number of frames you're given may be less than the number you requested.
+
+After calling `ma_pcm_rb_acquire_read()` or `ma_pcm_rb_acquire_write()`, you do your work on the buffer and then "commit" it with `ma_pcm_rb_commit_read()` or
+`ma_pcm_rb_commit_write()`. This is where the read/write pointers are updated. When you commit you need to pass in the buffer that was returned by the earlier
+call to `ma_pcm_rb_acquire_read()` or `ma_pcm_rb_acquire_write()`, which is only used for validation. The number of frames passed to `ma_pcm_rb_commit_read()`
+and `ma_pcm_rb_commit_write()` is what's used to increment the pointers.
+
+If you want to correct for drift between the write pointer and the read pointer you can use a combination of `ma_pcm_rb_pointer_distance()`,
+`ma_pcm_rb_seek_read()` and `ma_pcm_rb_seek_write()`. Note that you can only move the pointers forward, and you should only move the read pointer forward from
+the consumer thread, and the write pointer forward from the producer thread. If there is too much space between the pointers, move the read pointer forward. If
+there is too little space between the pointers, move the write pointer forward.
+
+You can use a ring buffer at the byte level instead of the PCM frame level by using the `ma_rb` API. This is exactly the same, only you will use the `ma_rb`
+functions instead of `ma_pcm_rb` and instead of frame counts you'll pass around byte counts.
+
+The maximum size of the buffer in bytes is 0x7FFFFFFF-(MA_SIMD_ALIGNMENT-1) due to the most significant bit being used to encode a flag and the internally
+managed buffers always being aligned to MA_SIMD_ALIGNMENT.
+
+Note that the ring buffer is only thread safe when used by a single consumer thread and single producer thread.
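+
+As a rough sketch of the acquire/commit pattern described above (reusing the `rb` object from the initialization example, and assuming the PCM variants take
+frame counts in the same way the `ma_rb` functions declared later in this file take byte counts):
+
+    ```c
+    // Producer thread: acquire up to 256 frames worth of space, write into it, then commit what was actually filled.
+    ma_uint32 framesToWrite = 256;
+    void* pWriteBuffer;
+    if (ma_pcm_rb_acquire_write(&rb, &framesToWrite, &pWriteBuffer) == MA_SUCCESS) {
+        // framesToWrite now holds the number of frames actually acquired, which may be less than requested.
+        // ... fill pWriteBuffer with framesToWrite frames of audio here ...
+        ma_pcm_rb_commit_write(&rb, framesToWrite, pWriteBuffer);
+    }
+    ```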
+
+
+
+Backends
+========
+The following backends are supported by miniaudio.
+
+    |-------------|-----------------------|--------------------------------------------------------|
+    | Name        | Enum Name             | Supported Operating Systems                            |
+    |-------------|-----------------------|--------------------------------------------------------|
+    | WASAPI      | ma_backend_wasapi     | Windows Vista+                                         |
+    | DirectSound | ma_backend_dsound     | Windows XP+                                            |
+    | WinMM       | ma_backend_winmm      | Windows XP+ (may work on older versions, but untested) |
+    | Core Audio  | ma_backend_coreaudio  | macOS, iOS                                             |
+    | ALSA        | ma_backend_alsa       | Linux                                                  |
+    | PulseAudio  | ma_backend_pulseaudio | Cross Platform (disabled on Windows, BSD and Android)  |
+    | JACK        | ma_backend_jack       | Cross Platform (disabled on BSD and Android)           |
+    | sndio       | ma_backend_sndio      | OpenBSD                                                |
+    | audio(4)    | ma_backend_audio4     | NetBSD, OpenBSD                                        |
+    | OSS         | ma_backend_oss        | FreeBSD                                                |
+    | AAudio      | ma_backend_aaudio     | Android 8+                                             |
+    | OpenSL|ES   | ma_backend_opensl     | Android (API level 16+)                                |
+    | Web Audio   | ma_backend_webaudio   | Web (via Emscripten)                                   |
+    | Null        | ma_backend_null       | Cross Platform (not used on Web)                       |
+    |-------------|-----------------------|--------------------------------------------------------|
+
+Some backends have nuances you may want to be aware of.
+
+WASAPI
+------
+- Low-latency shared mode will be disabled when using an application-defined sample rate which is different to the device's native sample rate. To work around
+  this, set wasapi.noAutoConvertSRC to true in the device config. This is due to IAudioClient3_InitializeSharedAudioStream() failing when the
+  AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM flag is specified. Setting wasapi.noAutoConvertSRC will result in miniaudio's lower quality internal resampler being used
+  instead which will in turn enable the use of low-latency shared mode.
+
+PulseAudio
+----------
+- If you experience bad glitching/noise on Arch Linux, consider this fix from the Arch wiki:
+  https://wiki.archlinux.org/index.php/PulseAudio/Troubleshooting#Glitches,_skips_or_crackling
+  Alternatively, consider using a different backend such as ALSA.
+
+Android
+-------
+- To capture audio on Android, remember to add the RECORD_AUDIO permission to your manifest:
+      <uses-permission android:name="android.permission.RECORD_AUDIO" />
+- With OpenSL|ES, only a single ma_context can be active at any given time. This is due to a limitation with OpenSL|ES.
+- With AAudio, only default devices are enumerated. This is due to AAudio not having an enumeration API (devices are enumerated through Java). You can however
+  perform your own device enumeration through Java and then set the ID in the ma_device_id structure (ma_device_id.aaudio) and pass it to ma_device_init().
+- The backend API will perform resampling where possible. The reason for this as opposed to using miniaudio's built-in resampler is to take advantage of any
+  potential device-specific optimizations the driver may implement.
+
+UWP
+---
+- UWP only supports default playback and capture devices.
+- UWP requires the Microphone capability to be enabled in the application's manifest (Package.appxmanifest):
+
+      ...
+      <Capabilities>
+          <DeviceCapability Name="microphone" />
+      </Capabilities>
+
+
+Web Audio / Emscripten
+----------------------
+- You cannot use -std=c* compiler flags, nor -ansi. This only applies to the Emscripten build.
+- The first time a context is initialized it will create a global object called "miniaudio" whose primary purpose is to act as a factory for device objects.
+- Currently the Web Audio backend uses ScriptProcessorNodes, but this may need to change later as they've been deprecated.
+- Google has implemented a policy in their browsers that prevents automatic media output without first receiving some kind of user input. The following web page
+  has additional details: https://developers.google.com/web/updates/2017/09/autoplay-policy-changes. Starting the device may fail if you try to start playback
+  without first handling some kind of user input.
+
+
+
+Miscellaneous Notes
+===================
+- Automatic stream routing is enabled on a per-backend basis. Support is explicitly enabled for WASAPI and Core Audio, however other backends such as
+  PulseAudio may naturally support it, though not all have been tested.
+- The contents of the output buffer passed into the data callback will always be pre-initialized to zero unless the noPreZeroedOutputBuffer config variable in
+  ma_device_config is set to true, in which case it'll be undefined which will require you to write something to the entire buffer.
+- By default miniaudio will automatically clip samples. This only applies when the playback sample format is configured as ma_format_f32. If you are doing
+  clipping yourself, you can disable this overhead by setting noClip to true in the device config.
+- The sndio backend is currently only enabled on OpenBSD builds.
+- The audio(4) backend is supported on OpenBSD, but you may need to disable sndiod before you can use it.
+- Note that GCC and Clang require "-msse2", "-mavx2", etc. for SIMD optimizations.
+*/
+
+#ifndef miniaudio_h
+#define miniaudio_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined(_MSC_VER) && !defined(__clang__)
+    #pragma warning(push)
+    #pragma warning(disable:4201)   /* nonstandard extension used: nameless struct/union */
+    #pragma warning(disable:4324)   /* structure was padded due to alignment specifier */
+#else
+    #pragma GCC diagnostic push
+    #pragma GCC diagnostic ignored "-Wpedantic" /* For ISO C99 doesn't support unnamed structs/unions [-Wpedantic] */
+    #if defined(__clang__)
+        #pragma GCC diagnostic ignored "-Wc11-extensions" /* anonymous unions are a C11 extension */
+    #endif
+#endif
+
+/* Platform/backend detection. */
+#ifdef _WIN32
+    #define MA_WIN32
+    #if defined(WINAPI_FAMILY) && (WINAPI_FAMILY == WINAPI_FAMILY_PC_APP || WINAPI_FAMILY == WINAPI_FAMILY_PHONE_APP)
+        #define MA_WIN32_UWP
+    #else
+        #define MA_WIN32_DESKTOP
+    #endif
+#else
+    #define MA_POSIX
+
+    /* We only use multi-threading with the device IO API, so no need to include these headers otherwise. */
+#if !defined(MA_NO_DEVICE_IO)
+    #include <pthread.h>    /* Unfortunate #include, but needed for pthread_t, pthread_mutex_t and pthread_cond_t types. */
+    #include <semaphore.h>
+#endif
+
+    #ifdef __unix__
+        #define MA_UNIX
+        #if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)
+            #define MA_BSD
+        #endif
+    #endif
+    #ifdef __linux__
+        #define MA_LINUX
+    #endif
+    #ifdef __APPLE__
+        #define MA_APPLE
+    #endif
+    #ifdef __ANDROID__
+        #define MA_ANDROID
+    #endif
+    #ifdef __EMSCRIPTEN__
+        #define MA_EMSCRIPTEN
+    #endif
+#endif
+
+#include <stddef.h> /* For size_t. */
+
+/* Sized types. Prefer built-in types. Fall back to stdint.
*/ +#ifdef _MSC_VER + #if defined(__clang__) + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wlanguage-extension-token" + #pragma GCC diagnostic ignored "-Wlong-long" + #pragma GCC diagnostic ignored "-Wc++11-long-long" + #endif + typedef signed __int8 ma_int8; + typedef unsigned __int8 ma_uint8; + typedef signed __int16 ma_int16; + typedef unsigned __int16 ma_uint16; + typedef signed __int32 ma_int32; + typedef unsigned __int32 ma_uint32; + typedef signed __int64 ma_int64; + typedef unsigned __int64 ma_uint64; + #if defined(__clang__) + #pragma GCC diagnostic pop + #endif +#else + #define MA_HAS_STDINT + #include + typedef int8_t ma_int8; + typedef uint8_t ma_uint8; + typedef int16_t ma_int16; + typedef uint16_t ma_uint16; + typedef int32_t ma_int32; + typedef uint32_t ma_uint32; + typedef int64_t ma_int64; + typedef uint64_t ma_uint64; +#endif + +#ifdef MA_HAS_STDINT + typedef uintptr_t ma_uintptr; +#else + #if defined(_WIN32) + #if defined(_WIN64) + typedef ma_uint64 ma_uintptr; + #else + typedef ma_uint32 ma_uintptr; + #endif + #elif defined(__GNUC__) + #if defined(__LP64__) + typedef ma_uint64 ma_uintptr; + #else + typedef ma_uint32 ma_uintptr; + #endif + #else + typedef ma_uint64 ma_uintptr; /* Fallback. */ + #endif +#endif + +typedef ma_uint8 ma_bool8; +typedef ma_uint32 ma_bool32; +#define MA_TRUE 1 +#define MA_FALSE 0 + +typedef void* ma_handle; +typedef void* ma_ptr; +typedef void (* ma_proc)(void); + +#if defined(_MSC_VER) && !defined(_WCHAR_T_DEFINED) +typedef ma_uint16 wchar_t; +#endif + +/* Define NULL for some compilers. */ +#ifndef NULL +#define NULL 0 +#endif + +#if defined(SIZE_MAX) + #define MA_SIZE_MAX SIZE_MAX +#else + #define MA_SIZE_MAX 0xFFFFFFFF /* When SIZE_MAX is not defined by the standard library just default to the maximum 32-bit unsigned integer. */ +#endif + + +#ifdef _MSC_VER + #define MA_INLINE __forceinline +#elif defined(__GNUC__) + /* + I've had a bug report where GCC is emitting warnings about functions possibly not being inlineable. This warning happens when + the __attribute__((always_inline)) attribute is defined without an "inline" statement. I think therefore there must be some + case where "__inline__" is not always defined, thus the compiler emitting these warnings. When using -std=c89 or -ansi on the + command line, we cannot use the "inline" keyword and instead need to use "__inline__". In an attempt to work around this issue + I am using "__inline__" only when we're compiling in strict ANSI mode. + */ + #if defined(__STRICT_ANSI__) + #define MA_INLINE __inline__ __attribute__((always_inline)) + #else + #define MA_INLINE inline __attribute__((always_inline)) + #endif +#else + #define MA_INLINE +#endif + +#if !defined(MA_API) + #if defined(MA_DLL) + #if defined(_WIN32) + #define MA_DLL_IMPORT __declspec(dllimport) + #define MA_DLL_EXPORT __declspec(dllexport) + #define MA_DLL_PRIVATE static + #else + #if defined(__GNUC__) && __GNUC__ >= 4 + #define MA_DLL_IMPORT __attribute__((visibility("default"))) + #define MA_DLL_EXPORT __attribute__((visibility("default"))) + #define MA_DLL_PRIVATE __attribute__((visibility("hidden"))) + #else + #define MA_DLL_IMPORT + #define MA_DLL_EXPORT + #define MA_DLL_PRIVATE static + #endif + #endif + + #if defined(MINIAUDIO_IMPLEMENTATION) || defined(MA_IMPLEMENTATION) + #define MA_API MA_DLL_EXPORT + #else + #define MA_API MA_DLL_IMPORT + #endif + #define MA_PRIVATE MA_DLL_PRIVATE + #else + #define MA_API extern + #define MA_PRIVATE static + #endif +#endif + +/* SIMD alignment in bytes. 
Currently set to 64 bytes in preparation for future AVX-512 optimizations. */ +#define MA_SIMD_ALIGNMENT 64 + + +/* Logging levels */ +#define MA_LOG_LEVEL_VERBOSE 4 +#define MA_LOG_LEVEL_INFO 3 +#define MA_LOG_LEVEL_WARNING 2 +#define MA_LOG_LEVEL_ERROR 1 + +#ifndef MA_LOG_LEVEL +#define MA_LOG_LEVEL MA_LOG_LEVEL_ERROR +#endif + +typedef struct ma_context ma_context; +typedef struct ma_device ma_device; + +typedef ma_uint8 ma_channel; +#define MA_CHANNEL_NONE 0 +#define MA_CHANNEL_MONO 1 +#define MA_CHANNEL_FRONT_LEFT 2 +#define MA_CHANNEL_FRONT_RIGHT 3 +#define MA_CHANNEL_FRONT_CENTER 4 +#define MA_CHANNEL_LFE 5 +#define MA_CHANNEL_BACK_LEFT 6 +#define MA_CHANNEL_BACK_RIGHT 7 +#define MA_CHANNEL_FRONT_LEFT_CENTER 8 +#define MA_CHANNEL_FRONT_RIGHT_CENTER 9 +#define MA_CHANNEL_BACK_CENTER 10 +#define MA_CHANNEL_SIDE_LEFT 11 +#define MA_CHANNEL_SIDE_RIGHT 12 +#define MA_CHANNEL_TOP_CENTER 13 +#define MA_CHANNEL_TOP_FRONT_LEFT 14 +#define MA_CHANNEL_TOP_FRONT_CENTER 15 +#define MA_CHANNEL_TOP_FRONT_RIGHT 16 +#define MA_CHANNEL_TOP_BACK_LEFT 17 +#define MA_CHANNEL_TOP_BACK_CENTER 18 +#define MA_CHANNEL_TOP_BACK_RIGHT 19 +#define MA_CHANNEL_AUX_0 20 +#define MA_CHANNEL_AUX_1 21 +#define MA_CHANNEL_AUX_2 22 +#define MA_CHANNEL_AUX_3 23 +#define MA_CHANNEL_AUX_4 24 +#define MA_CHANNEL_AUX_5 25 +#define MA_CHANNEL_AUX_6 26 +#define MA_CHANNEL_AUX_7 27 +#define MA_CHANNEL_AUX_8 28 +#define MA_CHANNEL_AUX_9 29 +#define MA_CHANNEL_AUX_10 30 +#define MA_CHANNEL_AUX_11 31 +#define MA_CHANNEL_AUX_12 32 +#define MA_CHANNEL_AUX_13 33 +#define MA_CHANNEL_AUX_14 34 +#define MA_CHANNEL_AUX_15 35 +#define MA_CHANNEL_AUX_16 36 +#define MA_CHANNEL_AUX_17 37 +#define MA_CHANNEL_AUX_18 38 +#define MA_CHANNEL_AUX_19 39 +#define MA_CHANNEL_AUX_20 40 +#define MA_CHANNEL_AUX_21 41 +#define MA_CHANNEL_AUX_22 42 +#define MA_CHANNEL_AUX_23 43 +#define MA_CHANNEL_AUX_24 44 +#define MA_CHANNEL_AUX_25 45 +#define MA_CHANNEL_AUX_26 46 +#define MA_CHANNEL_AUX_27 47 +#define MA_CHANNEL_AUX_28 48 +#define MA_CHANNEL_AUX_29 49 +#define MA_CHANNEL_AUX_30 50 +#define MA_CHANNEL_AUX_31 51 +#define MA_CHANNEL_LEFT MA_CHANNEL_FRONT_LEFT +#define MA_CHANNEL_RIGHT MA_CHANNEL_FRONT_RIGHT +#define MA_CHANNEL_POSITION_COUNT (MA_CHANNEL_AUX_31 + 1) + + +typedef int ma_result; +#define MA_SUCCESS 0 +#define MA_ERROR -1 /* A generic error. 
*/ +#define MA_INVALID_ARGS -2 +#define MA_INVALID_OPERATION -3 +#define MA_OUT_OF_MEMORY -4 +#define MA_OUT_OF_RANGE -5 +#define MA_ACCESS_DENIED -6 +#define MA_DOES_NOT_EXIST -7 +#define MA_ALREADY_EXISTS -8 +#define MA_TOO_MANY_OPEN_FILES -9 +#define MA_INVALID_FILE -10 +#define MA_TOO_BIG -11 +#define MA_PATH_TOO_LONG -12 +#define MA_NAME_TOO_LONG -13 +#define MA_NOT_DIRECTORY -14 +#define MA_IS_DIRECTORY -15 +#define MA_DIRECTORY_NOT_EMPTY -16 +#define MA_END_OF_FILE -17 +#define MA_NO_SPACE -18 +#define MA_BUSY -19 +#define MA_IO_ERROR -20 +#define MA_INTERRUPT -21 +#define MA_UNAVAILABLE -22 +#define MA_ALREADY_IN_USE -23 +#define MA_BAD_ADDRESS -24 +#define MA_BAD_SEEK -25 +#define MA_BAD_PIPE -26 +#define MA_DEADLOCK -27 +#define MA_TOO_MANY_LINKS -28 +#define MA_NOT_IMPLEMENTED -29 +#define MA_NO_MESSAGE -30 +#define MA_BAD_MESSAGE -31 +#define MA_NO_DATA_AVAILABLE -32 +#define MA_INVALID_DATA -33 +#define MA_TIMEOUT -34 +#define MA_NO_NETWORK -35 +#define MA_NOT_UNIQUE -36 +#define MA_NOT_SOCKET -37 +#define MA_NO_ADDRESS -38 +#define MA_BAD_PROTOCOL -39 +#define MA_PROTOCOL_UNAVAILABLE -40 +#define MA_PROTOCOL_NOT_SUPPORTED -41 +#define MA_PROTOCOL_FAMILY_NOT_SUPPORTED -42 +#define MA_ADDRESS_FAMILY_NOT_SUPPORTED -43 +#define MA_SOCKET_NOT_SUPPORTED -44 +#define MA_CONNECTION_RESET -45 +#define MA_ALREADY_CONNECTED -46 +#define MA_NOT_CONNECTED -47 +#define MA_CONNECTION_REFUSED -48 +#define MA_NO_HOST -49 +#define MA_IN_PROGRESS -50 +#define MA_CANCELLED -51 +#define MA_MEMORY_ALREADY_MAPPED -52 +#define MA_AT_END -53 + +/* General miniaudio-specific errors. */ +#define MA_FORMAT_NOT_SUPPORTED -100 +#define MA_DEVICE_TYPE_NOT_SUPPORTED -101 +#define MA_SHARE_MODE_NOT_SUPPORTED -102 +#define MA_NO_BACKEND -103 +#define MA_NO_DEVICE -104 +#define MA_API_NOT_FOUND -105 +#define MA_INVALID_DEVICE_CONFIG -106 + +/* State errors. */ +#define MA_DEVICE_NOT_INITIALIZED -200 +#define MA_DEVICE_ALREADY_INITIALIZED -201 +#define MA_DEVICE_NOT_STARTED -202 +#define MA_DEVICE_NOT_STOPPED -203 + +/* Operation errors. */ +#define MA_FAILED_TO_INIT_BACKEND -300 +#define MA_FAILED_TO_OPEN_BACKEND_DEVICE -301 +#define MA_FAILED_TO_START_BACKEND_DEVICE -302 +#define MA_FAILED_TO_STOP_BACKEND_DEVICE -303 + + +/* Standard sample rates. */ +#define MA_SAMPLE_RATE_8000 8000 +#define MA_SAMPLE_RATE_11025 11025 +#define MA_SAMPLE_RATE_16000 16000 +#define MA_SAMPLE_RATE_22050 22050 +#define MA_SAMPLE_RATE_24000 24000 +#define MA_SAMPLE_RATE_32000 32000 +#define MA_SAMPLE_RATE_44100 44100 +#define MA_SAMPLE_RATE_48000 48000 +#define MA_SAMPLE_RATE_88200 88200 +#define MA_SAMPLE_RATE_96000 96000 +#define MA_SAMPLE_RATE_176400 176400 +#define MA_SAMPLE_RATE_192000 192000 +#define MA_SAMPLE_RATE_352800 352800 +#define MA_SAMPLE_RATE_384000 384000 + +#define MA_MIN_CHANNELS 1 +#define MA_MAX_CHANNELS 32 +#define MA_MIN_SAMPLE_RATE MA_SAMPLE_RATE_8000 +#define MA_MAX_SAMPLE_RATE MA_SAMPLE_RATE_384000 + +#ifndef MA_MAX_FILTER_ORDER +#define MA_MAX_FILTER_ORDER 8 +#endif + +typedef enum +{ + ma_stream_format_pcm = 0 +} ma_stream_format; + +typedef enum +{ + ma_stream_layout_interleaved = 0, + ma_stream_layout_deinterleaved +} ma_stream_layout; + +typedef enum +{ + ma_dither_mode_none = 0, + ma_dither_mode_rectangle, + ma_dither_mode_triangle +} ma_dither_mode; + +typedef enum +{ + /* + I like to keep these explicitly defined because they're used as a key into a lookup table. When items are + added to this, make sure there are no gaps and that they're added to the lookup table in ma_get_bytes_per_sample(). 
+ */ + ma_format_unknown = 0, /* Mainly used for indicating an error, but also used as the default for the output format for decoders. */ + ma_format_u8 = 1, + ma_format_s16 = 2, /* Seems to be the most widely supported format. */ + ma_format_s24 = 3, /* Tightly packed. 3 bytes per sample. */ + ma_format_s32 = 4, + ma_format_f32 = 5, + ma_format_count +} ma_format; + +typedef enum +{ + ma_channel_mix_mode_rectangular = 0, /* Simple averaging based on the plane(s) the channel is sitting on. */ + ma_channel_mix_mode_simple, /* Drop excess channels; zeroed out extra channels. */ + ma_channel_mix_mode_custom_weights, /* Use custom weights specified in ma_channel_router_config. */ + ma_channel_mix_mode_planar_blend = ma_channel_mix_mode_rectangular, + ma_channel_mix_mode_default = ma_channel_mix_mode_planar_blend +} ma_channel_mix_mode; + +typedef enum +{ + ma_standard_channel_map_microsoft, + ma_standard_channel_map_alsa, + ma_standard_channel_map_rfc3551, /* Based off AIFF. */ + ma_standard_channel_map_flac, + ma_standard_channel_map_vorbis, + ma_standard_channel_map_sound4, /* FreeBSD's sound(4). */ + ma_standard_channel_map_sndio, /* www.sndio.org/tips.html */ + ma_standard_channel_map_webaudio = ma_standard_channel_map_flac, /* https://webaudio.github.io/web-audio-api/#ChannelOrdering. Only 1, 2, 4 and 6 channels are defined, but can fill in the gaps with logical assumptions. */ + ma_standard_channel_map_default = ma_standard_channel_map_microsoft +} ma_standard_channel_map; + +typedef enum +{ + ma_performance_profile_low_latency = 0, + ma_performance_profile_conservative +} ma_performance_profile; + + +typedef struct +{ + void* pUserData; + void* (* onMalloc)(size_t sz, void* pUserData); + void* (* onRealloc)(void* p, size_t sz, void* pUserData); + void (* onFree)(void* p, void* pUserData); +} ma_allocation_callbacks; + + +/************************************************************************************************************************************************************** + +Biquad Filtering + +**************************************************************************************************************************************************************/ +typedef union +{ + float f32; + ma_int32 s32; +} ma_biquad_coefficient; + +typedef struct +{ + ma_format format; + ma_uint32 channels; + double b0; + double b1; + double b2; + double a0; + double a1; + double a2; +} ma_biquad_config; + +MA_API ma_biquad_config ma_biquad_config_init(ma_format format, ma_uint32 channels, double b0, double b1, double b2, double a0, double a1, double a2); + +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_biquad_coefficient b0; + ma_biquad_coefficient b1; + ma_biquad_coefficient b2; + ma_biquad_coefficient a1; + ma_biquad_coefficient a2; + ma_biquad_coefficient r1[MA_MAX_CHANNELS]; + ma_biquad_coefficient r2[MA_MAX_CHANNELS]; +} ma_biquad; + +MA_API ma_result ma_biquad_init(const ma_biquad_config* pConfig, ma_biquad* pBQ); +MA_API ma_result ma_biquad_reinit(const ma_biquad_config* pConfig, ma_biquad* pBQ); +MA_API ma_result ma_biquad_process_pcm_frames(ma_biquad* pBQ, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); +MA_API ma_uint32 ma_biquad_get_latency(ma_biquad* pBQ); + + +/************************************************************************************************************************************************************** + +Low-Pass Filtering + 
+**************************************************************************************************************************************************************/ +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + double cutoffFrequency; + double q; +} ma_lpf1_config, ma_lpf2_config; + +MA_API ma_lpf1_config ma_lpf1_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency); +MA_API ma_lpf2_config ma_lpf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, double q); + +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_biquad_coefficient a; + ma_biquad_coefficient r1[MA_MAX_CHANNELS]; +} ma_lpf1; + +MA_API ma_result ma_lpf1_init(const ma_lpf1_config* pConfig, ma_lpf1* pLPF); +MA_API ma_result ma_lpf1_reinit(const ma_lpf1_config* pConfig, ma_lpf1* pLPF); +MA_API ma_result ma_lpf1_process_pcm_frames(ma_lpf1* pLPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); +MA_API ma_uint32 ma_lpf1_get_latency(ma_lpf1* pLPF); + +typedef struct +{ + ma_biquad bq; /* The second order low-pass filter is implemented as a biquad filter. */ +} ma_lpf2; + +MA_API ma_result ma_lpf2_init(const ma_lpf2_config* pConfig, ma_lpf2* pLPF); +MA_API ma_result ma_lpf2_reinit(const ma_lpf2_config* pConfig, ma_lpf2* pLPF); +MA_API ma_result ma_lpf2_process_pcm_frames(ma_lpf2* pLPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); +MA_API ma_uint32 ma_lpf2_get_latency(ma_lpf2* pLPF); + + +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + double cutoffFrequency; + ma_uint32 order; /* If set to 0, will be treated as a passthrough (no filtering will be applied). */ +} ma_lpf_config; + +MA_API ma_lpf_config ma_lpf_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order); + +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_uint32 lpf1Count; + ma_uint32 lpf2Count; + ma_lpf1 lpf1[1]; + ma_lpf2 lpf2[MA_MAX_FILTER_ORDER/2]; +} ma_lpf; + +MA_API ma_result ma_lpf_init(const ma_lpf_config* pConfig, ma_lpf* pLPF); +MA_API ma_result ma_lpf_reinit(const ma_lpf_config* pConfig, ma_lpf* pLPF); +MA_API ma_result ma_lpf_process_pcm_frames(ma_lpf* pLPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); +MA_API ma_uint32 ma_lpf_get_latency(ma_lpf* pLPF); + + +/************************************************************************************************************************************************************** + +High-Pass Filtering + +**************************************************************************************************************************************************************/ +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + double cutoffFrequency; + double q; +} ma_hpf1_config, ma_hpf2_config; + +MA_API ma_hpf1_config ma_hpf1_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency); +MA_API ma_hpf2_config ma_hpf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, double q); + +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_biquad_coefficient a; + ma_biquad_coefficient r1[MA_MAX_CHANNELS]; +} ma_hpf1; + +MA_API ma_result ma_hpf1_init(const ma_hpf1_config* pConfig, ma_hpf1* pHPF); +MA_API ma_result ma_hpf1_reinit(const ma_hpf1_config* pConfig, ma_hpf1* pHPF); +MA_API ma_result 
ma_hpf1_process_pcm_frames(ma_hpf1* pHPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); +MA_API ma_uint32 ma_hpf1_get_latency(ma_hpf1* pHPF); + +typedef struct +{ + ma_biquad bq; /* The second order high-pass filter is implemented as a biquad filter. */ +} ma_hpf2; + +MA_API ma_result ma_hpf2_init(const ma_hpf2_config* pConfig, ma_hpf2* pHPF); +MA_API ma_result ma_hpf2_reinit(const ma_hpf2_config* pConfig, ma_hpf2* pHPF); +MA_API ma_result ma_hpf2_process_pcm_frames(ma_hpf2* pHPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); +MA_API ma_uint32 ma_hpf2_get_latency(ma_hpf2* pHPF); + + +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + double cutoffFrequency; + ma_uint32 order; /* If set to 0, will be treated as a passthrough (no filtering will be applied). */ +} ma_hpf_config; + +MA_API ma_hpf_config ma_hpf_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order); + +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_uint32 hpf1Count; + ma_uint32 hpf2Count; + ma_hpf1 hpf1[1]; + ma_hpf2 hpf2[MA_MAX_FILTER_ORDER/2]; +} ma_hpf; + +MA_API ma_result ma_hpf_init(const ma_hpf_config* pConfig, ma_hpf* pHPF); +MA_API ma_result ma_hpf_reinit(const ma_hpf_config* pConfig, ma_hpf* pHPF); +MA_API ma_result ma_hpf_process_pcm_frames(ma_hpf* pHPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); +MA_API ma_uint32 ma_hpf_get_latency(ma_hpf* pHPF); + + +/************************************************************************************************************************************************************** + +Band-Pass Filtering + +**************************************************************************************************************************************************************/ +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + double cutoffFrequency; + double q; +} ma_bpf2_config; + +MA_API ma_bpf2_config ma_bpf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, double q); + +typedef struct +{ + ma_biquad bq; /* The second order band-pass filter is implemented as a biquad filter. */ +} ma_bpf2; + +MA_API ma_result ma_bpf2_init(const ma_bpf2_config* pConfig, ma_bpf2* pBPF); +MA_API ma_result ma_bpf2_reinit(const ma_bpf2_config* pConfig, ma_bpf2* pBPF); +MA_API ma_result ma_bpf2_process_pcm_frames(ma_bpf2* pBPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); +MA_API ma_uint32 ma_bpf2_get_latency(ma_bpf2* pBPF); + + +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + double cutoffFrequency; + ma_uint32 order; /* If set to 0, will be treated as a passthrough (no filtering will be applied). 
*/ +} ma_bpf_config; + +MA_API ma_bpf_config ma_bpf_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order); + +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_uint32 bpf2Count; + ma_bpf2 bpf2[MA_MAX_FILTER_ORDER/2]; +} ma_bpf; + +MA_API ma_result ma_bpf_init(const ma_bpf_config* pConfig, ma_bpf* pBPF); +MA_API ma_result ma_bpf_reinit(const ma_bpf_config* pConfig, ma_bpf* pBPF); +MA_API ma_result ma_bpf_process_pcm_frames(ma_bpf* pBPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); +MA_API ma_uint32 ma_bpf_get_latency(ma_bpf* pBPF); + + +/************************************************************************************************************************************************************** + +Notching Filter + +**************************************************************************************************************************************************************/ +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + double q; + double frequency; +} ma_notch2_config; + +MA_API ma_notch2_config ma_notch2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double q, double frequency); + +typedef struct +{ + ma_biquad bq; +} ma_notch2; + +MA_API ma_result ma_notch2_init(const ma_notch2_config* pConfig, ma_notch2* pFilter); +MA_API ma_result ma_notch2_reinit(const ma_notch2_config* pConfig, ma_notch2* pFilter); +MA_API ma_result ma_notch2_process_pcm_frames(ma_notch2* pFilter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); +MA_API ma_uint32 ma_notch2_get_latency(ma_notch2* pFilter); + + +/************************************************************************************************************************************************************** + +Peaking EQ Filter + +**************************************************************************************************************************************************************/ +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + double gainDB; + double q; + double frequency; +} ma_peak2_config; + +MA_API ma_peak2_config ma_peak2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double q, double frequency); + +typedef struct +{ + ma_biquad bq; +} ma_peak2; + +MA_API ma_result ma_peak2_init(const ma_peak2_config* pConfig, ma_peak2* pFilter); +MA_API ma_result ma_peak2_reinit(const ma_peak2_config* pConfig, ma_peak2* pFilter); +MA_API ma_result ma_peak2_process_pcm_frames(ma_peak2* pFilter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); +MA_API ma_uint32 ma_peak2_get_latency(ma_peak2* pFilter); + + +/************************************************************************************************************************************************************** + +Low Shelf Filter + +**************************************************************************************************************************************************************/ +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + double gainDB; + double shelfSlope; + double frequency; +} ma_loshelf2_config; + +MA_API ma_loshelf2_config ma_loshelf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double shelfSlope, double frequency); + +typedef struct +{ + ma_biquad bq; +} ma_loshelf2; + +MA_API ma_result ma_loshelf2_init(const ma_loshelf2_config* pConfig, 
ma_loshelf2* pFilter); +MA_API ma_result ma_loshelf2_reinit(const ma_loshelf2_config* pConfig, ma_loshelf2* pFilter); +MA_API ma_result ma_loshelf2_process_pcm_frames(ma_loshelf2* pFilter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); +MA_API ma_uint32 ma_loshelf2_get_latency(ma_loshelf2* pFilter); + + +/************************************************************************************************************************************************************** + +High Shelf Filter + +**************************************************************************************************************************************************************/ +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + double gainDB; + double shelfSlope; + double frequency; +} ma_hishelf2_config; + +MA_API ma_hishelf2_config ma_hishelf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double shelfSlope, double frequency); + +typedef struct +{ + ma_biquad bq; +} ma_hishelf2; + +MA_API ma_result ma_hishelf2_init(const ma_hishelf2_config* pConfig, ma_hishelf2* pFilter); +MA_API ma_result ma_hishelf2_reinit(const ma_hishelf2_config* pConfig, ma_hishelf2* pFilter); +MA_API ma_result ma_hishelf2_process_pcm_frames(ma_hishelf2* pFilter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); +MA_API ma_uint32 ma_hishelf2_get_latency(ma_hishelf2* pFilter); + + + +/************************************************************************************************************************************************************ +************************************************************************************************************************************************************* + +DATA CONVERSION +=============== + +This section contains the APIs for data conversion. You will find everything here for channel mapping, sample format conversion, resampling, etc. + +************************************************************************************************************************************************************* +************************************************************************************************************************************************************/ + +/************************************************************************************************************************************************************** + +Resampling + +**************************************************************************************************************************************************************/ +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRateIn; + ma_uint32 sampleRateOut; + ma_uint32 lpfOrder; /* The low-pass filter order. Setting this to 0 will disable low-pass filtering. */ + double lpfNyquistFactor; /* 0..1. Defaults to 1. 1 = Half the sampling frequency (Nyquist Frequency), 0.5 = Quarter the sampling frequency (half Nyquest Frequency), etc. */ +} ma_linear_resampler_config; + +MA_API ma_linear_resampler_config ma_linear_resampler_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut); + +typedef struct +{ + ma_linear_resampler_config config; + ma_uint32 inAdvanceInt; + ma_uint32 inAdvanceFrac; + ma_uint32 inTimeInt; + ma_uint32 inTimeFrac; + union + { + float f32[MA_MAX_CHANNELS]; + ma_int16 s16[MA_MAX_CHANNELS]; + } x0; /* The previous input frame. 
*/ + union + { + float f32[MA_MAX_CHANNELS]; + ma_int16 s16[MA_MAX_CHANNELS]; + } x1; /* The next input frame. */ + ma_lpf lpf; +} ma_linear_resampler; + +MA_API ma_result ma_linear_resampler_init(const ma_linear_resampler_config* pConfig, ma_linear_resampler* pResampler); +MA_API void ma_linear_resampler_uninit(ma_linear_resampler* pResampler); +MA_API ma_result ma_linear_resampler_process_pcm_frames(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut); +MA_API ma_result ma_linear_resampler_set_rate(ma_linear_resampler* pResampler, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut); +MA_API ma_result ma_linear_resampler_set_rate_ratio(ma_linear_resampler* pResampler, float ratioInOut); +MA_API ma_uint64 ma_linear_resampler_get_required_input_frame_count(ma_linear_resampler* pResampler, ma_uint64 outputFrameCount); +MA_API ma_uint64 ma_linear_resampler_get_expected_output_frame_count(ma_linear_resampler* pResampler, ma_uint64 inputFrameCount); +MA_API ma_uint64 ma_linear_resampler_get_input_latency(ma_linear_resampler* pResampler); +MA_API ma_uint64 ma_linear_resampler_get_output_latency(ma_linear_resampler* pResampler); + +typedef enum +{ + ma_resample_algorithm_linear = 0, /* Fastest, lowest quality. Optional low-pass filtering. Default. */ + ma_resample_algorithm_speex +} ma_resample_algorithm; + +typedef struct +{ + ma_format format; /* Must be either ma_format_f32 or ma_format_s16. */ + ma_uint32 channels; + ma_uint32 sampleRateIn; + ma_uint32 sampleRateOut; + ma_resample_algorithm algorithm; + struct + { + ma_uint32 lpfOrder; + double lpfNyquistFactor; + } linear; + struct + { + int quality; /* 0 to 10. Defaults to 3. */ + } speex; +} ma_resampler_config; + +MA_API ma_resampler_config ma_resampler_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut, ma_resample_algorithm algorithm); + +typedef struct +{ + ma_resampler_config config; + union + { + ma_linear_resampler linear; + struct + { + void* pSpeexResamplerState; /* SpeexResamplerState* */ + } speex; + } state; +} ma_resampler; + +/* +Initializes a new resampler object from a config. +*/ +MA_API ma_result ma_resampler_init(const ma_resampler_config* pConfig, ma_resampler* pResampler); + +/* +Uninitializes a resampler. +*/ +MA_API void ma_resampler_uninit(ma_resampler* pResampler); + +/* +Converts the given input data. + +Both the input and output frames must be in the format specified in the config when the resampler was initilized. + +On input, [pFrameCountOut] contains the number of output frames to process. On output it contains the number of output frames that +were actually processed, which may be less than the requested amount which will happen if there's not enough input data. You can use +ma_resampler_get_expected_output_frame_count() to know how many output frames will be processed for a given number of input frames. + +On input, [pFrameCountIn] contains the number of input frames contained in [pFramesIn]. On output it contains the number of whole +input frames that were actually processed. You can use ma_resampler_get_required_input_frame_count() to know how many input frames +you should provide for a given number of output frames. [pFramesIn] can be NULL, in which case zeroes will be used instead. + +If [pFramesOut] is NULL, a seek is performed. In this case, if [pFrameCountOut] is not NULL it will seek by the specified number of +output frames. 
Otherwise, if [pFrameCountOut] is NULL and [pFrameCountIn] is not NULL, it will seek by the specified number of input
+frames. When seeking, [pFramesIn] is allowed to be NULL, in which case the internal timing state will be updated, but no input will be
+processed. In this case, any internal filter state will be updated as if zeroes were passed in.
+
+It is an error for [pFramesOut] to be non-NULL and [pFrameCountOut] to be NULL.
+
+It is an error for both [pFrameCountOut] and [pFrameCountIn] to be NULL.
+*/
+MA_API ma_result ma_resampler_process_pcm_frames(ma_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut);
+
+
+/*
+Sets the input and output sample rate.
+*/
+MA_API ma_result ma_resampler_set_rate(ma_resampler* pResampler, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut);
+
+/*
+Sets the input and output sample rate as a ratio.
+
+The ratio is in/out.
+*/
+MA_API ma_result ma_resampler_set_rate_ratio(ma_resampler* pResampler, float ratio);
+
+
+/*
+Calculates the number of whole input frames that would need to be read from the client in order to output the specified
+number of output frames.
+
+The returned value does not include cached input frames. It only returns the number of extra frames that would need to be
+read from the input buffer in order to output the specified number of output frames.
+*/
+MA_API ma_uint64 ma_resampler_get_required_input_frame_count(ma_resampler* pResampler, ma_uint64 outputFrameCount);
+
+/*
+Calculates the number of whole output frames that would be output after fully reading and consuming the specified number of
+input frames.
+*/
+MA_API ma_uint64 ma_resampler_get_expected_output_frame_count(ma_resampler* pResampler, ma_uint64 inputFrameCount);
+
+
+/*
+Retrieves the latency introduced by the resampler in input frames.
+*/
+MA_API ma_uint64 ma_resampler_get_input_latency(ma_resampler* pResampler);
+
+/*
+Retrieves the latency introduced by the resampler in output frames.
+*/
+MA_API ma_uint64 ma_resampler_get_output_latency(ma_resampler* pResampler);
+
+
+
+/**************************************************************************************************************************************************************
+
+Channel Conversion
+
+**************************************************************************************************************************************************************/
+typedef struct
+{
+    ma_format format;
+    ma_uint32 channelsIn;
+    ma_uint32 channelsOut;
+    ma_channel channelMapIn[MA_MAX_CHANNELS];
+    ma_channel channelMapOut[MA_MAX_CHANNELS];
+    ma_channel_mix_mode mixingMode;
+    float weights[MA_MAX_CHANNELS][MA_MAX_CHANNELS];  /* [in][out]. Only used when mixingMode is set to ma_channel_mix_mode_custom_weights.
*/ +} ma_channel_converter_config; + +MA_API ma_channel_converter_config ma_channel_converter_config_init(ma_format format, ma_uint32 channelsIn, const ma_channel channelMapIn[MA_MAX_CHANNELS], ma_uint32 channelsOut, const ma_channel channelMapOut[MA_MAX_CHANNELS], ma_channel_mix_mode mixingMode); + +typedef struct +{ + ma_format format; + ma_uint32 channelsIn; + ma_uint32 channelsOut; + ma_channel channelMapIn[MA_MAX_CHANNELS]; + ma_channel channelMapOut[MA_MAX_CHANNELS]; + ma_channel_mix_mode mixingMode; + union + { + float f32[MA_MAX_CHANNELS][MA_MAX_CHANNELS]; + ma_int32 s16[MA_MAX_CHANNELS][MA_MAX_CHANNELS]; + } weights; + ma_bool32 isPassthrough : 1; + ma_bool32 isSimpleShuffle : 1; + ma_bool32 isSimpleMonoExpansion : 1; + ma_bool32 isStereoToMono : 1; + ma_uint8 shuffleTable[MA_MAX_CHANNELS]; +} ma_channel_converter; + +MA_API ma_result ma_channel_converter_init(const ma_channel_converter_config* pConfig, ma_channel_converter* pConverter); +MA_API void ma_channel_converter_uninit(ma_channel_converter* pConverter); +MA_API ma_result ma_channel_converter_process_pcm_frames(ma_channel_converter* pConverter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); + + +/************************************************************************************************************************************************************** + +Data Conversion + +**************************************************************************************************************************************************************/ +typedef struct +{ + ma_format formatIn; + ma_format formatOut; + ma_uint32 channelsIn; + ma_uint32 channelsOut; + ma_uint32 sampleRateIn; + ma_uint32 sampleRateOut; + ma_channel channelMapIn[MA_MAX_CHANNELS]; + ma_channel channelMapOut[MA_MAX_CHANNELS]; + ma_dither_mode ditherMode; + ma_channel_mix_mode channelMixMode; + float channelWeights[MA_MAX_CHANNELS][MA_MAX_CHANNELS]; /* [in][out]. Only used when channelMixMode is set to ma_channel_mix_mode_custom_weights. 
*/ + struct + { + ma_resample_algorithm algorithm; + ma_bool32 allowDynamicSampleRate; + struct + { + ma_uint32 lpfOrder; + double lpfNyquistFactor; + } linear; + struct + { + int quality; + } speex; + } resampling; +} ma_data_converter_config; + +MA_API ma_data_converter_config ma_data_converter_config_init_default(void); +MA_API ma_data_converter_config ma_data_converter_config_init(ma_format formatIn, ma_format formatOut, ma_uint32 channelsIn, ma_uint32 channelsOut, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut); + +typedef struct +{ + ma_data_converter_config config; + ma_channel_converter channelConverter; + ma_resampler resampler; + ma_bool32 hasPreFormatConversion : 1; + ma_bool32 hasPostFormatConversion : 1; + ma_bool32 hasChannelConverter : 1; + ma_bool32 hasResampler : 1; + ma_bool32 isPassthrough : 1; +} ma_data_converter; + +MA_API ma_result ma_data_converter_init(const ma_data_converter_config* pConfig, ma_data_converter* pConverter); +MA_API void ma_data_converter_uninit(ma_data_converter* pConverter); +MA_API ma_result ma_data_converter_process_pcm_frames(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut); +MA_API ma_result ma_data_converter_set_rate(ma_data_converter* pConverter, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut); +MA_API ma_result ma_data_converter_set_rate_ratio(ma_data_converter* pConverter, float ratioInOut); +MA_API ma_uint64 ma_data_converter_get_required_input_frame_count(ma_data_converter* pConverter, ma_uint64 outputFrameCount); +MA_API ma_uint64 ma_data_converter_get_expected_output_frame_count(ma_data_converter* pConverter, ma_uint64 inputFrameCount); +MA_API ma_uint64 ma_data_converter_get_input_latency(ma_data_converter* pConverter); +MA_API ma_uint64 ma_data_converter_get_output_latency(ma_data_converter* pConverter); + + +/************************************************************************************************************************************************************ + +Format Conversion + +************************************************************************************************************************************************************/ +MA_API void ma_pcm_u8_to_s16(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_u8_to_s24(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_u8_to_s32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_u8_to_f32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_s16_to_u8(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_s16_to_s24(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_s16_to_s32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_s16_to_f32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_s24_to_u8(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_s24_to_s16(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_s24_to_s32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_s24_to_f32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_s32_to_u8(void* pOut, const void* pIn, ma_uint64 count, 
ma_dither_mode ditherMode); +MA_API void ma_pcm_s32_to_s16(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_s32_to_s24(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_s32_to_f32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_f32_to_u8(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_f32_to_s16(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_f32_to_s24(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_f32_to_s32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_convert(void* pOut, ma_format formatOut, const void* pIn, ma_format formatIn, ma_uint64 sampleCount, ma_dither_mode ditherMode); +MA_API void ma_convert_pcm_frames_format(void* pOut, ma_format formatOut, const void* pIn, ma_format formatIn, ma_uint64 frameCount, ma_uint32 channels, ma_dither_mode ditherMode); + +/* +Deinterleaves an interleaved buffer. +*/ +MA_API void ma_deinterleave_pcm_frames(ma_format format, ma_uint32 channels, ma_uint64 frameCount, const void* pInterleavedPCMFrames, void** ppDeinterleavedPCMFrames); + +/* +Interleaves a group of deinterleaved buffers. +*/ +MA_API void ma_interleave_pcm_frames(ma_format format, ma_uint32 channels, ma_uint64 frameCount, const void** ppDeinterleavedPCMFrames, void* pInterleavedPCMFrames); + +/************************************************************************************************************************************************************ + +Channel Maps + +************************************************************************************************************************************************************/ + +/* +Helper for retrieving a standard channel map. +*/ +MA_API void ma_get_standard_channel_map(ma_standard_channel_map standardChannelMap, ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS]); + +/* +Copies a channel map. +*/ +MA_API void ma_channel_map_copy(ma_channel* pOut, const ma_channel* pIn, ma_uint32 channels); + + +/* +Determines whether or not a channel map is valid. + +A blank channel map is valid (all channels set to MA_CHANNEL_NONE). The way a blank channel map is handled is context specific, but +is usually treated as a passthrough. + +Invalid channel maps: + - A channel map with no channels + - A channel map with more than one channel and a mono channel +*/ +MA_API ma_bool32 ma_channel_map_valid(ma_uint32 channels, const ma_channel channelMap[MA_MAX_CHANNELS]); + +/* +Helper for comparing two channel maps for equality. + +This assumes the channel count is the same between the two. +*/ +MA_API ma_bool32 ma_channel_map_equal(ma_uint32 channels, const ma_channel channelMapA[MA_MAX_CHANNELS], const ma_channel channelMapB[MA_MAX_CHANNELS]); + +/* +Helper for determining if a channel map is blank (all channels set to MA_CHANNEL_NONE). +*/ +MA_API ma_bool32 ma_channel_map_blank(ma_uint32 channels, const ma_channel channelMap[MA_MAX_CHANNELS]); + +/* +Helper for determining whether or not a channel is present in the given channel map. 
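+
+For example (editor's illustrative sketch, not part of the upstream documentation; it assumes the
+standard stereo map retrieved below and the MA_CHANNEL_FRONT_LEFT position defined earlier in this
+header):
+
+```c
+ma_channel channelMap[MA_MAX_CHANNELS];
+ma_get_standard_channel_map(ma_standard_channel_map_default, 2, channelMap);
+
+if (ma_channel_map_contains_channel_position(2, channelMap, MA_CHANNEL_FRONT_LEFT)) {
+    // The standard stereo map includes a front-left channel.
+}
+```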
+*/ +MA_API ma_bool32 ma_channel_map_contains_channel_position(ma_uint32 channels, const ma_channel channelMap[MA_MAX_CHANNELS], ma_channel channelPosition); + + +/************************************************************************************************************************************************************ + +Conversion Helpers + +************************************************************************************************************************************************************/ + +/* +High-level helper for doing a full format conversion in one go. Returns the number of output frames. Call this with pOut set to NULL to +determine the required size of the output buffer. frameCountOut should be set to the capacity of pOut. If pOut is NULL, frameCountOut is +ignored. + +A return value of 0 indicates an error. + +This function is useful for one-off bulk conversions, but if you're streaming data you should use the ma_data_converter APIs instead. +*/ +MA_API ma_uint64 ma_convert_frames(void* pOut, ma_uint64 frameCountOut, ma_format formatOut, ma_uint32 channelsOut, ma_uint32 sampleRateOut, const void* pIn, ma_uint64 frameCountIn, ma_format formatIn, ma_uint32 channelsIn, ma_uint32 sampleRateIn); +MA_API ma_uint64 ma_convert_frames_ex(void* pOut, ma_uint64 frameCountOut, const void* pIn, ma_uint64 frameCountIn, const ma_data_converter_config* pConfig); + + +/************************************************************************************************************************************************************ + +Ring Buffer + +************************************************************************************************************************************************************/ +typedef struct +{ + void* pBuffer; + ma_uint32 subbufferSizeInBytes; + ma_uint32 subbufferCount; + ma_uint32 subbufferStrideInBytes; + volatile ma_uint32 encodedReadOffset; /* Most significant bit is the loop flag. Lower 31 bits contains the actual offset in bytes. */ + volatile ma_uint32 encodedWriteOffset; /* Most significant bit is the loop flag. Lower 31 bits contains the actual offset in bytes. */ + ma_bool32 ownsBuffer : 1; /* Used to know whether or not miniaudio is responsible for free()-ing the buffer. */ + ma_bool32 clearOnWriteAcquire : 1; /* When set, clears the acquired write buffer before returning from ma_rb_acquire_write(). */ + ma_allocation_callbacks allocationCallbacks; +} ma_rb; + +MA_API ma_result ma_rb_init_ex(size_t subbufferSizeInBytes, size_t subbufferCount, size_t subbufferStrideInBytes, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_rb* pRB); +MA_API ma_result ma_rb_init(size_t bufferSizeInBytes, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_rb* pRB); +MA_API void ma_rb_uninit(ma_rb* pRB); +MA_API void ma_rb_reset(ma_rb* pRB); +MA_API ma_result ma_rb_acquire_read(ma_rb* pRB, size_t* pSizeInBytes, void** ppBufferOut); +MA_API ma_result ma_rb_commit_read(ma_rb* pRB, size_t sizeInBytes, void* pBufferOut); +MA_API ma_result ma_rb_acquire_write(ma_rb* pRB, size_t* pSizeInBytes, void** ppBufferOut); +MA_API ma_result ma_rb_commit_write(ma_rb* pRB, size_t sizeInBytes, void* pBufferOut); +MA_API ma_result ma_rb_seek_read(ma_rb* pRB, size_t offsetInBytes); +MA_API ma_result ma_rb_seek_write(ma_rb* pRB, size_t offsetInBytes); +MA_API ma_int32 ma_rb_pointer_distance(ma_rb* pRB); /* Returns the distance between the write pointer and the read pointer. 
Should never be negative for a correct program. Will return the number of bytes that can be read before the read pointer hits the write pointer. */ +MA_API ma_uint32 ma_rb_available_read(ma_rb* pRB); +MA_API ma_uint32 ma_rb_available_write(ma_rb* pRB); +MA_API size_t ma_rb_get_subbuffer_size(ma_rb* pRB); +MA_API size_t ma_rb_get_subbuffer_stride(ma_rb* pRB); +MA_API size_t ma_rb_get_subbuffer_offset(ma_rb* pRB, size_t subbufferIndex); +MA_API void* ma_rb_get_subbuffer_ptr(ma_rb* pRB, size_t subbufferIndex, void* pBuffer); + + +typedef struct +{ + ma_rb rb; + ma_format format; + ma_uint32 channels; +} ma_pcm_rb; + +MA_API ma_result ma_pcm_rb_init_ex(ma_format format, ma_uint32 channels, ma_uint32 subbufferSizeInFrames, ma_uint32 subbufferCount, ma_uint32 subbufferStrideInFrames, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_pcm_rb* pRB); +MA_API ma_result ma_pcm_rb_init(ma_format format, ma_uint32 channels, ma_uint32 bufferSizeInFrames, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_pcm_rb* pRB); +MA_API void ma_pcm_rb_uninit(ma_pcm_rb* pRB); +MA_API void ma_pcm_rb_reset(ma_pcm_rb* pRB); +MA_API ma_result ma_pcm_rb_acquire_read(ma_pcm_rb* pRB, ma_uint32* pSizeInFrames, void** ppBufferOut); +MA_API ma_result ma_pcm_rb_commit_read(ma_pcm_rb* pRB, ma_uint32 sizeInFrames, void* pBufferOut); +MA_API ma_result ma_pcm_rb_acquire_write(ma_pcm_rb* pRB, ma_uint32* pSizeInFrames, void** ppBufferOut); +MA_API ma_result ma_pcm_rb_commit_write(ma_pcm_rb* pRB, ma_uint32 sizeInFrames, void* pBufferOut); +MA_API ma_result ma_pcm_rb_seek_read(ma_pcm_rb* pRB, ma_uint32 offsetInFrames); +MA_API ma_result ma_pcm_rb_seek_write(ma_pcm_rb* pRB, ma_uint32 offsetInFrames); +MA_API ma_int32 ma_pcm_rb_pointer_distance(ma_pcm_rb* pRB); /* Return value is in frames. */ +MA_API ma_uint32 ma_pcm_rb_available_read(ma_pcm_rb* pRB); +MA_API ma_uint32 ma_pcm_rb_available_write(ma_pcm_rb* pRB); +MA_API ma_uint32 ma_pcm_rb_get_subbuffer_size(ma_pcm_rb* pRB); +MA_API ma_uint32 ma_pcm_rb_get_subbuffer_stride(ma_pcm_rb* pRB); +MA_API ma_uint32 ma_pcm_rb_get_subbuffer_offset(ma_pcm_rb* pRB, ma_uint32 subbufferIndex); +MA_API void* ma_pcm_rb_get_subbuffer_ptr(ma_pcm_rb* pRB, ma_uint32 subbufferIndex, void* pBuffer); + + +/************************************************************************************************************************************************************ + +Miscellaneous Helpers + +************************************************************************************************************************************************************/ +/* +Retrieves a human readable description of the given result code. +*/ +MA_API const char* ma_result_description(ma_result result); + +/* +malloc(). Calls MA_MALLOC(). +*/ +MA_API void* ma_malloc(size_t sz, const ma_allocation_callbacks* pAllocationCallbacks); + +/* +realloc(). Calls MA_REALLOC(). +*/ +MA_API void* ma_realloc(void* p, size_t sz, const ma_allocation_callbacks* pAllocationCallbacks); + +/* +free(). Calls MA_FREE(). +*/ +MA_API void ma_free(void* p, const ma_allocation_callbacks* pAllocationCallbacks); + +/* +Performs an aligned malloc, with the assumption that the alignment is a power of 2. +*/ +MA_API void* ma_aligned_malloc(size_t sz, size_t alignment, const ma_allocation_callbacks* pAllocationCallbacks); + +/* +Free's an aligned malloc'd buffer. 
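+
+A minimal usage sketch (editor's illustration, not upstream documentation; the size and alignment
+below are arbitrary, and passing NULL selects the default allocation callbacks):
+
+```c
+void* p = ma_aligned_malloc(1024, 64, NULL);    // 64-byte aligned allocation.
+if (p != NULL) {
+    // ... use the buffer ...
+    ma_aligned_free(p, NULL);                   // Pair with ma_aligned_free(), not ma_free().
+}
+```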
+*/ +MA_API void ma_aligned_free(void* p, const ma_allocation_callbacks* pAllocationCallbacks); + +/* +Retrieves a friendly name for a format. +*/ +MA_API const char* ma_get_format_name(ma_format format); + +/* +Blends two frames in floating point format. +*/ +MA_API void ma_blend_f32(float* pOut, float* pInA, float* pInB, float factor, ma_uint32 channels); + +/* +Retrieves the size of a sample in bytes for the given format. + +This API is efficient and is implemented using a lookup table. + +Thread Safety: SAFE + This API is pure. +*/ +MA_API ma_uint32 ma_get_bytes_per_sample(ma_format format); +static MA_INLINE ma_uint32 ma_get_bytes_per_frame(ma_format format, ma_uint32 channels) { return ma_get_bytes_per_sample(format) * channels; } + +/* +Converts a log level to a string. +*/ +MA_API const char* ma_log_level_to_string(ma_uint32 logLevel); + + + +/************************************************************************************************************************************************************ +************************************************************************************************************************************************************* + +DEVICE I/O +========== + +This section contains the APIs for device playback and capture. Here is where you'll find ma_device_init(), etc. + +************************************************************************************************************************************************************* +************************************************************************************************************************************************************/ +#ifndef MA_NO_DEVICE_IO +/* Some backends are only supported on certain platforms. */ +#if defined(MA_WIN32) + #define MA_SUPPORT_WASAPI + #if defined(MA_WIN32_DESKTOP) /* DirectSound and WinMM backends are only supported on desktops. */ + #define MA_SUPPORT_DSOUND + #define MA_SUPPORT_WINMM + #define MA_SUPPORT_JACK /* JACK is technically supported on Windows, but I don't know how many people use it in practice... */ + #endif +#endif +#if defined(MA_UNIX) + #if defined(MA_LINUX) + #if !defined(MA_ANDROID) /* ALSA is not supported on Android. */ + #define MA_SUPPORT_ALSA + #endif + #endif + #if !defined(MA_BSD) && !defined(MA_ANDROID) && !defined(MA_EMSCRIPTEN) + #define MA_SUPPORT_PULSEAUDIO + #define MA_SUPPORT_JACK + #endif + #if defined(MA_ANDROID) + #define MA_SUPPORT_AAUDIO + #define MA_SUPPORT_OPENSL + #endif + #if defined(__OpenBSD__) /* <-- Change this to "#if defined(MA_BSD)" to enable sndio on all BSD flavors. */ + #define MA_SUPPORT_SNDIO /* sndio is only supported on OpenBSD for now. May be expanded later if there's demand. */ + #endif + #if defined(__NetBSD__) || defined(__OpenBSD__) + #define MA_SUPPORT_AUDIO4 /* Only support audio(4) on platforms with known support. */ + #endif + #if defined(__FreeBSD__) || defined(__DragonFly__) + #define MA_SUPPORT_OSS /* Only support OSS on specific platforms with known support. */ + #endif +#endif +#if defined(MA_APPLE) + #define MA_SUPPORT_COREAUDIO +#endif +#if defined(MA_EMSCRIPTEN) + #define MA_SUPPORT_WEBAUDIO +#endif + +/* Explicitly disable the Null backend for Emscripten because it uses a background thread which is not properly supported right now. 
*/ +#if !defined(MA_EMSCRIPTEN) +#define MA_SUPPORT_NULL +#endif + + +#if !defined(MA_NO_WASAPI) && defined(MA_SUPPORT_WASAPI) + #define MA_ENABLE_WASAPI +#endif +#if !defined(MA_NO_DSOUND) && defined(MA_SUPPORT_DSOUND) + #define MA_ENABLE_DSOUND +#endif +#if !defined(MA_NO_WINMM) && defined(MA_SUPPORT_WINMM) + #define MA_ENABLE_WINMM +#endif +#if !defined(MA_NO_ALSA) && defined(MA_SUPPORT_ALSA) + #define MA_ENABLE_ALSA +#endif +#if !defined(MA_NO_PULSEAUDIO) && defined(MA_SUPPORT_PULSEAUDIO) + #define MA_ENABLE_PULSEAUDIO +#endif +#if !defined(MA_NO_JACK) && defined(MA_SUPPORT_JACK) + #define MA_ENABLE_JACK +#endif +#if !defined(MA_NO_COREAUDIO) && defined(MA_SUPPORT_COREAUDIO) + #define MA_ENABLE_COREAUDIO +#endif +#if !defined(MA_NO_SNDIO) && defined(MA_SUPPORT_SNDIO) + #define MA_ENABLE_SNDIO +#endif +#if !defined(MA_NO_AUDIO4) && defined(MA_SUPPORT_AUDIO4) + #define MA_ENABLE_AUDIO4 +#endif +#if !defined(MA_NO_OSS) && defined(MA_SUPPORT_OSS) + #define MA_ENABLE_OSS +#endif +#if !defined(MA_NO_AAUDIO) && defined(MA_SUPPORT_AAUDIO) + #define MA_ENABLE_AAUDIO +#endif +#if !defined(MA_NO_OPENSL) && defined(MA_SUPPORT_OPENSL) + #define MA_ENABLE_OPENSL +#endif +#if !defined(MA_NO_WEBAUDIO) && defined(MA_SUPPORT_WEBAUDIO) + #define MA_ENABLE_WEBAUDIO +#endif +#if !defined(MA_NO_NULL) && defined(MA_SUPPORT_NULL) + #define MA_ENABLE_NULL +#endif + +#ifdef MA_SUPPORT_WASAPI +/* We need a IMMNotificationClient object for WASAPI. */ +typedef struct +{ + void* lpVtbl; + ma_uint32 counter; + ma_device* pDevice; +} ma_IMMNotificationClient; +#endif + +/* Backend enums must be in priority order. */ +typedef enum +{ + ma_backend_wasapi, + ma_backend_dsound, + ma_backend_winmm, + ma_backend_coreaudio, + ma_backend_sndio, + ma_backend_audio4, + ma_backend_oss, + ma_backend_pulseaudio, + ma_backend_alsa, + ma_backend_jack, + ma_backend_aaudio, + ma_backend_opensl, + ma_backend_webaudio, + ma_backend_null /* <-- Must always be the last item. Lowest priority, and used as the terminator for backend enumeration. */ +} ma_backend; + +/* Thread priorties should be ordered such that the default priority of the worker thread is 0. */ +typedef enum +{ + ma_thread_priority_idle = -5, + ma_thread_priority_lowest = -4, + ma_thread_priority_low = -3, + ma_thread_priority_normal = -2, + ma_thread_priority_high = -1, + ma_thread_priority_highest = 0, + ma_thread_priority_realtime = 1, + ma_thread_priority_default = 0 +} ma_thread_priority; + +typedef struct +{ + ma_context* pContext; + + union + { +#ifdef MA_WIN32 + struct + { + /*HANDLE*/ ma_handle hThread; + } win32; +#endif +#ifdef MA_POSIX + struct + { + pthread_t thread; + } posix; +#endif + int _unused; + }; +} ma_thread; + +typedef struct +{ + ma_context* pContext; + + union + { +#ifdef MA_WIN32 + struct + { + /*HANDLE*/ ma_handle hMutex; + } win32; +#endif +#ifdef MA_POSIX + struct + { + pthread_mutex_t mutex; + } posix; +#endif + int _unused; + }; +} ma_mutex; + +typedef struct +{ + ma_context* pContext; + + union + { +#ifdef MA_WIN32 + struct + { + /*HANDLE*/ ma_handle hEvent; + } win32; +#endif +#ifdef MA_POSIX + struct + { + pthread_mutex_t mutex; + pthread_cond_t condition; + ma_uint32 value; + } posix; +#endif + int _unused; + }; +} ma_event; + +typedef struct +{ + ma_context* pContext; + + union + { +#ifdef MA_WIN32 + struct + { + /*HANDLE*/ ma_handle hSemaphore; + } win32; +#endif +#ifdef MA_POSIX + struct + { + sem_t semaphore; + } posix; +#endif + int _unused; + }; +} ma_semaphore; + + +/* +The callback for processing audio data from the device. 
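+
+A minimal callback skeleton is sketched below (editor's illustration, not upstream documentation;
+the function name is arbitrary and the body simply writes silence for a playback-only device,
+using memset() from <string.h>):
+
+```c
+void my_data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
+{
+    // Write frameCount frames of audio into pOutput. For this sketch, just output silence.
+    memset(pOutput, 0, frameCount * ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels));
+
+    (void)pInput;   // Not used for a playback-only device.
+}
+```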
+ +The data callback is fired by miniaudio whenever the device needs to have more data delivered to a playback device, or when a capture device has some data +available. This is called as soon as the backend asks for more data which means it may be called with inconsistent frame counts. You cannot assume the +callback will be fired with a consistent frame count. + + +Parameters +---------- +pDevice (in) + A pointer to the relevant device. + +pOutput (out) + A pointer to the output buffer that will receive audio data that will later be played back through the speakers. This will be non-null for a playback or + full-duplex device and null for a capture and loopback device. + +pInput (in) + A pointer to the buffer containing input data from a recording device. This will be non-null for a capture, full-duplex or loopback device and null for a + playback device. + +frameCount (in) + The number of PCM frames to process. Note that this will not necessarily be equal to what you requested when you initialized the device. The + `periodSizeInFrames` and `periodSizeInMilliseconds` members of the device config are just hints, and are not necessarily exactly what you'll get. You must + not assume this will always be the same value each time the callback is fired. + + +Remarks +------- +You cannot stop and start the device from inside the callback or else you'll get a deadlock. You must also not uninitialize the device from inside the +callback. The following APIs cannot be called from inside the callback: + + ma_device_init() + ma_device_init_ex() + ma_device_uninit() + ma_device_start() + ma_device_stop() + +The proper way to stop the device is to call `ma_device_stop()` from a different thread, normally the main application thread. +*/ +typedef void (* ma_device_callback_proc)(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount); + +/* +The callback for when the device has been stopped. + +This will be called when the device is stopped explicitly with `ma_device_stop()` and also called implicitly when the device is stopped through external forces +such as being unplugged or an internal error occuring. + + +Parameters +---------- +pDevice (in) + A pointer to the device that has just stopped. + + +Remarks +------- +Do not restart or uninitialize the device from the callback. +*/ +typedef void (* ma_stop_proc)(ma_device* pDevice); + +/* +The callback for handling log messages. + + +Parameters +---------- +pContext (in) + A pointer to the context the log message originated from. + +pDevice (in) + A pointer to the device the log message originate from, if any. This can be null, in which case the message came from the context. + +logLevel (in) + The log level. This can be one of the following: + + |----------------------| + | Log Level | + |----------------------| + | MA_LOG_LEVEL_VERBOSE | + | MA_LOG_LEVEL_INFO | + | MA_LOG_LEVEL_WARNING | + | MA_LOG_LEVEL_ERROR | + |----------------------| + +message (in) + The log message. + + +Remarks +------- +Do not modify the state of the device from inside the callback. +*/ +typedef void (* ma_log_proc)(ma_context* pContext, ma_device* pDevice, ma_uint32 logLevel, const char* message); + +typedef enum +{ + ma_device_type_playback = 1, + ma_device_type_capture = 2, + ma_device_type_duplex = ma_device_type_playback | ma_device_type_capture, /* 3 */ + ma_device_type_loopback = 4 +} ma_device_type; + +typedef enum +{ + ma_share_mode_shared = 0, + ma_share_mode_exclusive +} ma_share_mode; + +/* iOS/tvOS/watchOS session categories. 
*/ +typedef enum +{ + ma_ios_session_category_default = 0, /* AVAudioSessionCategoryPlayAndRecord with AVAudioSessionCategoryOptionDefaultToSpeaker. */ + ma_ios_session_category_none, /* Leave the session category unchanged. */ + ma_ios_session_category_ambient, /* AVAudioSessionCategoryAmbient */ + ma_ios_session_category_solo_ambient, /* AVAudioSessionCategorySoloAmbient */ + ma_ios_session_category_playback, /* AVAudioSessionCategoryPlayback */ + ma_ios_session_category_record, /* AVAudioSessionCategoryRecord */ + ma_ios_session_category_play_and_record, /* AVAudioSessionCategoryPlayAndRecord */ + ma_ios_session_category_multi_route /* AVAudioSessionCategoryMultiRoute */ +} ma_ios_session_category; + +/* iOS/tvOS/watchOS session category options */ +typedef enum +{ + ma_ios_session_category_option_mix_with_others = 0x01, /* AVAudioSessionCategoryOptionMixWithOthers */ + ma_ios_session_category_option_duck_others = 0x02, /* AVAudioSessionCategoryOptionDuckOthers */ + ma_ios_session_category_option_allow_bluetooth = 0x04, /* AVAudioSessionCategoryOptionAllowBluetooth */ + ma_ios_session_category_option_default_to_speaker = 0x08, /* AVAudioSessionCategoryOptionDefaultToSpeaker */ + ma_ios_session_category_option_interrupt_spoken_audio_and_mix_with_others = 0x11, /* AVAudioSessionCategoryOptionInterruptSpokenAudioAndMixWithOthers */ + ma_ios_session_category_option_allow_bluetooth_a2dp = 0x20, /* AVAudioSessionCategoryOptionAllowBluetoothA2DP */ + ma_ios_session_category_option_allow_air_play = 0x40, /* AVAudioSessionCategoryOptionAllowAirPlay */ +} ma_ios_session_category_option; + +typedef union +{ + ma_int64 counter; + double counterD; +} ma_timer; + +typedef union +{ + wchar_t wasapi[64]; /* WASAPI uses a wchar_t string for identification. */ + ma_uint8 dsound[16]; /* DirectSound uses a GUID for identification. */ + /*UINT_PTR*/ ma_uint32 winmm; /* When creating a device, WinMM expects a Win32 UINT_PTR for device identification. In practice it's actually just a UINT. */ + char alsa[256]; /* ALSA uses a name string for identification. */ + char pulse[256]; /* PulseAudio uses a name string for identification. */ + int jack; /* JACK always uses default devices. */ + char coreaudio[256]; /* Core Audio uses a string for identification. */ + char sndio[256]; /* "snd/0", etc. */ + char audio4[256]; /* "/dev/audio", etc. */ + char oss[64]; /* "dev/dsp0", etc. "dev/dsp" for the default device. */ + ma_int32 aaudio; /* AAudio uses a 32-bit integer for identification. */ + ma_uint32 opensl; /* OpenSL|ES uses a 32-bit unsigned integer for identification. */ + char webaudio[32]; /* Web Audio always uses default devices for now, but if this changes it'll be a GUID. */ + int nullbackend; /* The null backend uses an integer for device IDs. */ +} ma_device_id; + +typedef struct +{ + /* Basic info. This is the only information guaranteed to be filled in during device enumeration. */ + ma_device_id id; + char name[256]; + + /* + Detailed info. As much of this is filled as possible with ma_context_get_device_info(). Note that you are allowed to initialize + a device with settings outside of this range, but it just means the data will be converted using miniaudio's data conversion + pipeline before sending the data to/from the device. Most programs will need to not worry about these values, but it's provided + here mainly for informational purposes or in the rare case that someone might find it useful. + + These will be set to 0 when returned by ma_context_enumerate_devices() or ma_context_get_devices(). 
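+
+    As an editor's sketch (not upstream documentation), one possible flow is to list devices with
+    ma_context_get_devices() and then query one of them for the detailed ranges, assuming an
+    already-initialized `context`:
+
+    ```c
+    ma_device_info* pPlaybackInfos;
+    ma_uint32 playbackCount;
+
+    if (ma_context_get_devices(&context, &pPlaybackInfos, &playbackCount, NULL, NULL) == MA_SUCCESS && playbackCount > 0) {
+        ma_device_info detail;  // Detailed ranges are only filled in by ma_context_get_device_info().
+        if (ma_context_get_device_info(&context, ma_device_type_playback, &pPlaybackInfos[0].id, ma_share_mode_shared, &detail) == MA_SUCCESS) {
+            printf("%s: %u-%u channels, %u-%u Hz\n", detail.name, detail.minChannels, detail.maxChannels, detail.minSampleRate, detail.maxSampleRate);
+        }
+    }
+    ```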
+ */ + ma_uint32 formatCount; + ma_format formats[ma_format_count]; + ma_uint32 minChannels; + ma_uint32 maxChannels; + ma_uint32 minSampleRate; + ma_uint32 maxSampleRate; + + struct + { + ma_bool32 isDefault; + } _private; +} ma_device_info; + +typedef struct +{ + ma_device_type deviceType; + ma_uint32 sampleRate; + ma_uint32 periodSizeInFrames; + ma_uint32 periodSizeInMilliseconds; + ma_uint32 periods; + ma_performance_profile performanceProfile; + ma_bool32 noPreZeroedOutputBuffer; /* When set to true, the contents of the output buffer passed into the data callback will be left undefined rather than initialized to zero. */ + ma_bool32 noClip; /* When set to true, the contents of the output buffer passed into the data callback will be clipped after returning. Only applies when the playback sample format is f32. */ + ma_device_callback_proc dataCallback; + ma_stop_proc stopCallback; + void* pUserData; + struct + { + ma_resample_algorithm algorithm; + struct + { + ma_uint32 lpfOrder; + } linear; + struct + { + int quality; + } speex; + } resampling; + struct + { + ma_device_id* pDeviceID; + ma_format format; + ma_uint32 channels; + ma_channel channelMap[MA_MAX_CHANNELS]; + ma_share_mode shareMode; + } playback; + struct + { + ma_device_id* pDeviceID; + ma_format format; + ma_uint32 channels; + ma_channel channelMap[MA_MAX_CHANNELS]; + ma_share_mode shareMode; + } capture; + + struct + { + ma_bool32 noAutoConvertSRC; /* When set to true, disables the use of AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM. */ + ma_bool32 noDefaultQualitySRC; /* When set to true, disables the use of AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY. */ + ma_bool32 noAutoStreamRouting; /* Disables automatic stream routing. */ + ma_bool32 noHardwareOffloading; /* Disables WASAPI's hardware offloading feature. */ + } wasapi; + struct + { + ma_bool32 noMMap; /* Disables MMap mode. */ + ma_bool32 noAutoFormat; /* Opens the ALSA device with SND_PCM_NO_AUTO_FORMAT. */ + ma_bool32 noAutoChannels; /* Opens the ALSA device with SND_PCM_NO_AUTO_CHANNELS. */ + ma_bool32 noAutoResample; /* Opens the ALSA device with SND_PCM_NO_AUTO_RESAMPLE. */ + } alsa; + struct + { + const char* pStreamNamePlayback; + const char* pStreamNameCapture; + } pulse; +} ma_device_config; + +typedef struct +{ + ma_log_proc logCallback; + ma_thread_priority threadPriority; + void* pUserData; + ma_allocation_callbacks allocationCallbacks; + struct + { + ma_bool32 useVerboseDeviceEnumeration; + } alsa; + struct + { + const char* pApplicationName; + const char* pServerName; + ma_bool32 tryAutoSpawn; /* Enables autospawning of the PulseAudio daemon if necessary. */ + } pulse; + struct + { + ma_ios_session_category sessionCategory; + ma_uint32 sessionCategoryOptions; + } coreaudio; + struct + { + const char* pClientName; + ma_bool32 tryStartServer; + } jack; +} ma_context_config; + +/* +The callback for handling device enumeration. This is fired from `ma_context_enumerated_devices()`. + + +Parameters +---------- +pContext (in) + A pointer to the context performing the enumeration. + +deviceType (in) + The type of the device being enumerated. This will always be either `ma_device_type_playback` or `ma_device_type_capture`. + +pInfo (in) + A pointer to a `ma_device_info` containing the ID and name of the enumerated device. Note that this will not include detailed information about the device, + only basic information (ID and name). The reason for this is that it would otherwise require opening the backend device to probe for the information which + is too inefficient. 
+ +pUserData (in) + The user data pointer passed into `ma_context_enumerate_devices()`. +*/ +typedef ma_bool32 (* ma_enum_devices_callback_proc)(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pInfo, void* pUserData); + +struct ma_context +{ + ma_backend backend; /* DirectSound, ALSA, etc. */ + ma_log_proc logCallback; + ma_thread_priority threadPriority; + void* pUserData; + ma_allocation_callbacks allocationCallbacks; + ma_mutex deviceEnumLock; /* Used to make ma_context_get_devices() thread safe. */ + ma_mutex deviceInfoLock; /* Used to make ma_context_get_device_info() thread safe. */ + ma_uint32 deviceInfoCapacity; /* Total capacity of pDeviceInfos. */ + ma_uint32 playbackDeviceInfoCount; + ma_uint32 captureDeviceInfoCount; + ma_device_info* pDeviceInfos; /* Playback devices first, then capture. */ + ma_bool32 isBackendAsynchronous : 1; /* Set when the context is initialized. Set to 1 for asynchronous backends such as Core Audio and JACK. Do not modify. */ + + ma_result (* onUninit )(ma_context* pContext); + ma_bool32 (* onDeviceIDEqual )(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1); + ma_result (* onEnumDevices )(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData); /* Return false from the callback to stop enumeration. */ + ma_result (* onGetDeviceInfo )(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo); + ma_result (* onDeviceInit )(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice); + void (* onDeviceUninit )(ma_device* pDevice); + ma_result (* onDeviceStart )(ma_device* pDevice); + ma_result (* onDeviceStop )(ma_device* pDevice); + ma_result (* onDeviceMainLoop)(ma_device* pDevice); + + union + { +#ifdef MA_SUPPORT_WASAPI + struct + { + int _unused; + } wasapi; +#endif +#ifdef MA_SUPPORT_DSOUND + struct + { + ma_handle hDSoundDLL; + ma_proc DirectSoundCreate; + ma_proc DirectSoundEnumerateA; + ma_proc DirectSoundCaptureCreate; + ma_proc DirectSoundCaptureEnumerateA; + } dsound; +#endif +#ifdef MA_SUPPORT_WINMM + struct + { + ma_handle hWinMM; + ma_proc waveOutGetNumDevs; + ma_proc waveOutGetDevCapsA; + ma_proc waveOutOpen; + ma_proc waveOutClose; + ma_proc waveOutPrepareHeader; + ma_proc waveOutUnprepareHeader; + ma_proc waveOutWrite; + ma_proc waveOutReset; + ma_proc waveInGetNumDevs; + ma_proc waveInGetDevCapsA; + ma_proc waveInOpen; + ma_proc waveInClose; + ma_proc waveInPrepareHeader; + ma_proc waveInUnprepareHeader; + ma_proc waveInAddBuffer; + ma_proc waveInStart; + ma_proc waveInReset; + } winmm; +#endif +#ifdef MA_SUPPORT_ALSA + struct + { + ma_handle asoundSO; + ma_proc snd_pcm_open; + ma_proc snd_pcm_close; + ma_proc snd_pcm_hw_params_sizeof; + ma_proc snd_pcm_hw_params_any; + ma_proc snd_pcm_hw_params_set_format; + ma_proc snd_pcm_hw_params_set_format_first; + ma_proc snd_pcm_hw_params_get_format_mask; + ma_proc snd_pcm_hw_params_set_channels_near; + ma_proc snd_pcm_hw_params_set_rate_resample; + ma_proc snd_pcm_hw_params_set_rate_near; + ma_proc snd_pcm_hw_params_set_buffer_size_near; + ma_proc snd_pcm_hw_params_set_periods_near; + ma_proc snd_pcm_hw_params_set_access; + ma_proc snd_pcm_hw_params_get_format; + ma_proc snd_pcm_hw_params_get_channels; + ma_proc snd_pcm_hw_params_get_channels_min; + ma_proc snd_pcm_hw_params_get_channels_max; + ma_proc snd_pcm_hw_params_get_rate; + ma_proc snd_pcm_hw_params_get_rate_min; + ma_proc snd_pcm_hw_params_get_rate_max; + ma_proc 
snd_pcm_hw_params_get_buffer_size; + ma_proc snd_pcm_hw_params_get_periods; + ma_proc snd_pcm_hw_params_get_access; + ma_proc snd_pcm_hw_params; + ma_proc snd_pcm_sw_params_sizeof; + ma_proc snd_pcm_sw_params_current; + ma_proc snd_pcm_sw_params_get_boundary; + ma_proc snd_pcm_sw_params_set_avail_min; + ma_proc snd_pcm_sw_params_set_start_threshold; + ma_proc snd_pcm_sw_params_set_stop_threshold; + ma_proc snd_pcm_sw_params; + ma_proc snd_pcm_format_mask_sizeof; + ma_proc snd_pcm_format_mask_test; + ma_proc snd_pcm_get_chmap; + ma_proc snd_pcm_state; + ma_proc snd_pcm_prepare; + ma_proc snd_pcm_start; + ma_proc snd_pcm_drop; + ma_proc snd_pcm_drain; + ma_proc snd_device_name_hint; + ma_proc snd_device_name_get_hint; + ma_proc snd_card_get_index; + ma_proc snd_device_name_free_hint; + ma_proc snd_pcm_mmap_begin; + ma_proc snd_pcm_mmap_commit; + ma_proc snd_pcm_recover; + ma_proc snd_pcm_readi; + ma_proc snd_pcm_writei; + ma_proc snd_pcm_avail; + ma_proc snd_pcm_avail_update; + ma_proc snd_pcm_wait; + ma_proc snd_pcm_info; + ma_proc snd_pcm_info_sizeof; + ma_proc snd_pcm_info_get_name; + ma_proc snd_config_update_free_global; + + ma_mutex internalDeviceEnumLock; + ma_bool32 useVerboseDeviceEnumeration; + } alsa; +#endif +#ifdef MA_SUPPORT_PULSEAUDIO + struct + { + ma_handle pulseSO; + ma_proc pa_mainloop_new; + ma_proc pa_mainloop_free; + ma_proc pa_mainloop_get_api; + ma_proc pa_mainloop_iterate; + ma_proc pa_mainloop_wakeup; + ma_proc pa_context_new; + ma_proc pa_context_unref; + ma_proc pa_context_connect; + ma_proc pa_context_disconnect; + ma_proc pa_context_set_state_callback; + ma_proc pa_context_get_state; + ma_proc pa_context_get_sink_info_list; + ma_proc pa_context_get_source_info_list; + ma_proc pa_context_get_sink_info_by_name; + ma_proc pa_context_get_source_info_by_name; + ma_proc pa_operation_unref; + ma_proc pa_operation_get_state; + ma_proc pa_channel_map_init_extend; + ma_proc pa_channel_map_valid; + ma_proc pa_channel_map_compatible; + ma_proc pa_stream_new; + ma_proc pa_stream_unref; + ma_proc pa_stream_connect_playback; + ma_proc pa_stream_connect_record; + ma_proc pa_stream_disconnect; + ma_proc pa_stream_get_state; + ma_proc pa_stream_get_sample_spec; + ma_proc pa_stream_get_channel_map; + ma_proc pa_stream_get_buffer_attr; + ma_proc pa_stream_set_buffer_attr; + ma_proc pa_stream_get_device_name; + ma_proc pa_stream_set_write_callback; + ma_proc pa_stream_set_read_callback; + ma_proc pa_stream_flush; + ma_proc pa_stream_drain; + ma_proc pa_stream_is_corked; + ma_proc pa_stream_cork; + ma_proc pa_stream_trigger; + ma_proc pa_stream_begin_write; + ma_proc pa_stream_write; + ma_proc pa_stream_peek; + ma_proc pa_stream_drop; + ma_proc pa_stream_writable_size; + ma_proc pa_stream_readable_size; + + char* pApplicationName; + char* pServerName; + ma_bool32 tryAutoSpawn; + } pulse; +#endif +#ifdef MA_SUPPORT_JACK + struct + { + ma_handle jackSO; + ma_proc jack_client_open; + ma_proc jack_client_close; + ma_proc jack_client_name_size; + ma_proc jack_set_process_callback; + ma_proc jack_set_buffer_size_callback; + ma_proc jack_on_shutdown; + ma_proc jack_get_sample_rate; + ma_proc jack_get_buffer_size; + ma_proc jack_get_ports; + ma_proc jack_activate; + ma_proc jack_deactivate; + ma_proc jack_connect; + ma_proc jack_port_register; + ma_proc jack_port_name; + ma_proc jack_port_get_buffer; + ma_proc jack_free; + + char* pClientName; + ma_bool32 tryStartServer; + } jack; +#endif +#ifdef MA_SUPPORT_COREAUDIO + struct + { + ma_handle hCoreFoundation; + ma_proc CFStringGetCString; + 
ma_proc CFRelease; + + ma_handle hCoreAudio; + ma_proc AudioObjectGetPropertyData; + ma_proc AudioObjectGetPropertyDataSize; + ma_proc AudioObjectSetPropertyData; + ma_proc AudioObjectAddPropertyListener; + ma_proc AudioObjectRemovePropertyListener; + + ma_handle hAudioUnit; /* Could possibly be set to AudioToolbox on later versions of macOS. */ + ma_proc AudioComponentFindNext; + ma_proc AudioComponentInstanceDispose; + ma_proc AudioComponentInstanceNew; + ma_proc AudioOutputUnitStart; + ma_proc AudioOutputUnitStop; + ma_proc AudioUnitAddPropertyListener; + ma_proc AudioUnitGetPropertyInfo; + ma_proc AudioUnitGetProperty; + ma_proc AudioUnitSetProperty; + ma_proc AudioUnitInitialize; + ma_proc AudioUnitRender; + + /*AudioComponent*/ ma_ptr component; + } coreaudio; +#endif +#ifdef MA_SUPPORT_SNDIO + struct + { + ma_handle sndioSO; + ma_proc sio_open; + ma_proc sio_close; + ma_proc sio_setpar; + ma_proc sio_getpar; + ma_proc sio_getcap; + ma_proc sio_start; + ma_proc sio_stop; + ma_proc sio_read; + ma_proc sio_write; + ma_proc sio_onmove; + ma_proc sio_nfds; + ma_proc sio_pollfd; + ma_proc sio_revents; + ma_proc sio_eof; + ma_proc sio_setvol; + ma_proc sio_onvol; + ma_proc sio_initpar; + } sndio; +#endif +#ifdef MA_SUPPORT_AUDIO4 + struct + { + int _unused; + } audio4; +#endif +#ifdef MA_SUPPORT_OSS + struct + { + int versionMajor; + int versionMinor; + } oss; +#endif +#ifdef MA_SUPPORT_AAUDIO + struct + { + ma_handle hAAudio; /* libaaudio.so */ + ma_proc AAudio_createStreamBuilder; + ma_proc AAudioStreamBuilder_delete; + ma_proc AAudioStreamBuilder_setDeviceId; + ma_proc AAudioStreamBuilder_setDirection; + ma_proc AAudioStreamBuilder_setSharingMode; + ma_proc AAudioStreamBuilder_setFormat; + ma_proc AAudioStreamBuilder_setChannelCount; + ma_proc AAudioStreamBuilder_setSampleRate; + ma_proc AAudioStreamBuilder_setBufferCapacityInFrames; + ma_proc AAudioStreamBuilder_setFramesPerDataCallback; + ma_proc AAudioStreamBuilder_setDataCallback; + ma_proc AAudioStreamBuilder_setErrorCallback; + ma_proc AAudioStreamBuilder_setPerformanceMode; + ma_proc AAudioStreamBuilder_openStream; + ma_proc AAudioStream_close; + ma_proc AAudioStream_getState; + ma_proc AAudioStream_waitForStateChange; + ma_proc AAudioStream_getFormat; + ma_proc AAudioStream_getChannelCount; + ma_proc AAudioStream_getSampleRate; + ma_proc AAudioStream_getBufferCapacityInFrames; + ma_proc AAudioStream_getFramesPerDataCallback; + ma_proc AAudioStream_getFramesPerBurst; + ma_proc AAudioStream_requestStart; + ma_proc AAudioStream_requestStop; + } aaudio; +#endif +#ifdef MA_SUPPORT_OPENSL + struct + { + int _unused; + } opensl; +#endif +#ifdef MA_SUPPORT_WEBAUDIO + struct + { + int _unused; + } webaudio; +#endif +#ifdef MA_SUPPORT_NULL + struct + { + int _unused; + } null_backend; +#endif + }; + + union + { +#ifdef MA_WIN32 + struct + { + /*HMODULE*/ ma_handle hOle32DLL; + ma_proc CoInitializeEx; + ma_proc CoUninitialize; + ma_proc CoCreateInstance; + ma_proc CoTaskMemFree; + ma_proc PropVariantClear; + ma_proc StringFromGUID2; + + /*HMODULE*/ ma_handle hUser32DLL; + ma_proc GetForegroundWindow; + ma_proc GetDesktopWindow; + + /*HMODULE*/ ma_handle hAdvapi32DLL; + ma_proc RegOpenKeyExA; + ma_proc RegCloseKey; + ma_proc RegQueryValueExA; + } win32; +#endif +#ifdef MA_POSIX + struct + { + ma_handle pthreadSO; + ma_proc pthread_create; + ma_proc pthread_join; + ma_proc pthread_mutex_init; + ma_proc pthread_mutex_destroy; + ma_proc pthread_mutex_lock; + ma_proc pthread_mutex_unlock; + ma_proc pthread_cond_init; + ma_proc 
pthread_cond_destroy; + ma_proc pthread_cond_wait; + ma_proc pthread_cond_signal; + ma_proc pthread_attr_init; + ma_proc pthread_attr_destroy; + ma_proc pthread_attr_setschedpolicy; + ma_proc pthread_attr_getschedparam; + ma_proc pthread_attr_setschedparam; + } posix; +#endif + int _unused; + }; +}; + +struct ma_device +{ + ma_context* pContext; + ma_device_type type; + ma_uint32 sampleRate; + volatile ma_uint32 state; /* The state of the device is variable and can change at any time on any thread, so tell the compiler as such with `volatile`. */ + ma_device_callback_proc onData; /* Set once at initialization time and should not be changed after. */ + ma_stop_proc onStop; /* Set once at initialization time and should not be changed after. */ + void* pUserData; /* Application defined data. */ + ma_mutex lock; + ma_event wakeupEvent; + ma_event startEvent; + ma_event stopEvent; + ma_thread thread; + ma_result workResult; /* This is set by the worker thread after it's finished doing a job. */ + ma_bool32 usingDefaultSampleRate : 1; + ma_bool32 usingDefaultBufferSize : 1; + ma_bool32 usingDefaultPeriods : 1; + ma_bool32 isOwnerOfContext : 1; /* When set to true, uninitializing the device will also uninitialize the context. Set to true when NULL is passed into ma_device_init(). */ + ma_bool32 noPreZeroedOutputBuffer : 1; + ma_bool32 noClip : 1; + volatile float masterVolumeFactor; /* Volatile so we can use some thread safety when applying volume to periods. */ + struct + { + ma_resample_algorithm algorithm; + struct + { + ma_uint32 lpfOrder; + } linear; + struct + { + int quality; + } speex; + } resampling; + struct + { + char name[256]; /* Maybe temporary. Likely to be replaced with a query API. */ + ma_share_mode shareMode; /* Set to whatever was passed in when the device was initialized. */ + ma_bool32 usingDefaultFormat : 1; + ma_bool32 usingDefaultChannels : 1; + ma_bool32 usingDefaultChannelMap : 1; + ma_format format; + ma_uint32 channels; + ma_channel channelMap[MA_MAX_CHANNELS]; + ma_format internalFormat; + ma_uint32 internalChannels; + ma_uint32 internalSampleRate; + ma_channel internalChannelMap[MA_MAX_CHANNELS]; + ma_uint32 internalPeriodSizeInFrames; + ma_uint32 internalPeriods; + ma_data_converter converter; + } playback; + struct + { + char name[256]; /* Maybe temporary. Likely to be replaced with a query API. */ + ma_share_mode shareMode; /* Set to whatever was passed in when the device was initialized. */ + ma_bool32 usingDefaultFormat : 1; + ma_bool32 usingDefaultChannels : 1; + ma_bool32 usingDefaultChannelMap : 1; + ma_format format; + ma_uint32 channels; + ma_channel channelMap[MA_MAX_CHANNELS]; + ma_format internalFormat; + ma_uint32 internalChannels; + ma_uint32 internalSampleRate; + ma_channel internalChannelMap[MA_MAX_CHANNELS]; + ma_uint32 internalPeriodSizeInFrames; + ma_uint32 internalPeriods; + ma_data_converter converter; + } capture; + + union + { +#ifdef MA_SUPPORT_WASAPI + struct + { + /*IAudioClient**/ ma_ptr pAudioClientPlayback; + /*IAudioClient**/ ma_ptr pAudioClientCapture; + /*IAudioRenderClient**/ ma_ptr pRenderClient; + /*IAudioCaptureClient**/ ma_ptr pCaptureClient; + /*IMMDeviceEnumerator**/ ma_ptr pDeviceEnumerator; /* Used for IMMNotificationClient notifications. Required for detecting default device changes. */ + ma_IMMNotificationClient notificationClient; + /*HANDLE*/ ma_handle hEventPlayback; /* Auto reset. Initialized to signaled. */ + /*HANDLE*/ ma_handle hEventCapture; /* Auto reset. Initialized to unsignaled. 
*/ + ma_uint32 actualPeriodSizeInFramesPlayback; /* Value from GetBufferSize(). internalPeriodSizeInFrames is not set to the _actual_ buffer size when low-latency shared mode is being used due to the way the IAudioClient3 API works. */ + ma_uint32 actualPeriodSizeInFramesCapture; + ma_uint32 originalPeriodSizeInFrames; + ma_uint32 originalPeriodSizeInMilliseconds; + ma_uint32 originalPeriods; + ma_bool32 hasDefaultPlaybackDeviceChanged; /* <-- Make sure this is always a whole 32-bits because we use atomic assignments. */ + ma_bool32 hasDefaultCaptureDeviceChanged; /* <-- Make sure this is always a whole 32-bits because we use atomic assignments. */ + ma_uint32 periodSizeInFramesPlayback; + ma_uint32 periodSizeInFramesCapture; + ma_bool32 isStartedCapture; /* <-- Make sure this is always a whole 32-bits because we use atomic assignments. */ + ma_bool32 isStartedPlayback; /* <-- Make sure this is always a whole 32-bits because we use atomic assignments. */ + ma_bool32 noAutoConvertSRC : 1; /* When set to true, disables the use of AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM. */ + ma_bool32 noDefaultQualitySRC : 1; /* When set to true, disables the use of AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY. */ + ma_bool32 noHardwareOffloading : 1; + ma_bool32 allowCaptureAutoStreamRouting : 1; + ma_bool32 allowPlaybackAutoStreamRouting : 1; + } wasapi; +#endif +#ifdef MA_SUPPORT_DSOUND + struct + { + /*LPDIRECTSOUND*/ ma_ptr pPlayback; + /*LPDIRECTSOUNDBUFFER*/ ma_ptr pPlaybackPrimaryBuffer; + /*LPDIRECTSOUNDBUFFER*/ ma_ptr pPlaybackBuffer; + /*LPDIRECTSOUNDCAPTURE*/ ma_ptr pCapture; + /*LPDIRECTSOUNDCAPTUREBUFFER*/ ma_ptr pCaptureBuffer; + } dsound; +#endif +#ifdef MA_SUPPORT_WINMM + struct + { + /*HWAVEOUT*/ ma_handle hDevicePlayback; + /*HWAVEIN*/ ma_handle hDeviceCapture; + /*HANDLE*/ ma_handle hEventPlayback; + /*HANDLE*/ ma_handle hEventCapture; + ma_uint32 fragmentSizeInFrames; + ma_uint32 iNextHeaderPlayback; /* [0,periods). Used as an index into pWAVEHDRPlayback. */ + ma_uint32 iNextHeaderCapture; /* [0,periods). Used as an index into pWAVEHDRCapture. */ + ma_uint32 headerFramesConsumedPlayback; /* The number of PCM frames consumed in the buffer in pWAVEHEADER[iNextHeader]. */ + ma_uint32 headerFramesConsumedCapture; /* ^^^ */ + /*WAVEHDR**/ ma_uint8* pWAVEHDRPlayback; /* One instantiation for each period. */ + /*WAVEHDR**/ ma_uint8* pWAVEHDRCapture; /* One instantiation for each period. */ + ma_uint8* pIntermediaryBufferPlayback; + ma_uint8* pIntermediaryBufferCapture; + ma_uint8* _pHeapData; /* Used internally and is used for the heap allocated data for the intermediary buffer and the WAVEHDR structures. 
*/ + } winmm; +#endif +#ifdef MA_SUPPORT_ALSA + struct + { + /*snd_pcm_t**/ ma_ptr pPCMPlayback; + /*snd_pcm_t**/ ma_ptr pPCMCapture; + ma_bool32 isUsingMMapPlayback : 1; + ma_bool32 isUsingMMapCapture : 1; + } alsa; +#endif +#ifdef MA_SUPPORT_PULSEAUDIO + struct + { + /*pa_mainloop**/ ma_ptr pMainLoop; + /*pa_mainloop_api**/ ma_ptr pAPI; + /*pa_context**/ ma_ptr pPulseContext; + /*pa_stream**/ ma_ptr pStreamPlayback; + /*pa_stream**/ ma_ptr pStreamCapture; + /*pa_context_state*/ ma_uint32 pulseContextState; + void* pMappedBufferPlayback; + const void* pMappedBufferCapture; + ma_uint32 mappedBufferFramesRemainingPlayback; + ma_uint32 mappedBufferFramesRemainingCapture; + ma_uint32 mappedBufferFramesCapacityPlayback; + ma_uint32 mappedBufferFramesCapacityCapture; + ma_bool32 breakFromMainLoop : 1; + } pulse; +#endif +#ifdef MA_SUPPORT_JACK + struct + { + /*jack_client_t**/ ma_ptr pClient; + /*jack_port_t**/ ma_ptr pPortsPlayback[MA_MAX_CHANNELS]; + /*jack_port_t**/ ma_ptr pPortsCapture[MA_MAX_CHANNELS]; + float* pIntermediaryBufferPlayback; /* Typed as a float because JACK is always floating point. */ + float* pIntermediaryBufferCapture; + ma_pcm_rb duplexRB; + } jack; +#endif +#ifdef MA_SUPPORT_COREAUDIO + struct + { + ma_uint32 deviceObjectIDPlayback; + ma_uint32 deviceObjectIDCapture; + /*AudioUnit*/ ma_ptr audioUnitPlayback; + /*AudioUnit*/ ma_ptr audioUnitCapture; + /*AudioBufferList**/ ma_ptr pAudioBufferList; /* Only used for input devices. */ + ma_event stopEvent; + ma_uint32 originalPeriodSizeInFrames; + ma_uint32 originalPeriodSizeInMilliseconds; + ma_uint32 originalPeriods; + ma_bool32 isDefaultPlaybackDevice; + ma_bool32 isDefaultCaptureDevice; + ma_bool32 isSwitchingPlaybackDevice; /* <-- Set to true when the default device has changed and miniaudio is in the process of switching. */ + ma_bool32 isSwitchingCaptureDevice; /* <-- Set to true when the default device has changed and miniaudio is in the process of switching. */ + ma_pcm_rb duplexRB; + void* pRouteChangeHandler; /* Only used on mobile platforms. Obj-C object for handling route changes. */ + } coreaudio; +#endif +#ifdef MA_SUPPORT_SNDIO + struct + { + ma_ptr handlePlayback; + ma_ptr handleCapture; + ma_bool32 isStartedPlayback; + ma_bool32 isStartedCapture; + } sndio; +#endif +#ifdef MA_SUPPORT_AUDIO4 + struct + { + int fdPlayback; + int fdCapture; + } audio4; +#endif +#ifdef MA_SUPPORT_OSS + struct + { + int fdPlayback; + int fdCapture; + } oss; +#endif +#ifdef MA_SUPPORT_AAUDIO + struct + { + /*AAudioStream**/ ma_ptr pStreamPlayback; + /*AAudioStream**/ ma_ptr pStreamCapture; + ma_pcm_rb duplexRB; + } aaudio; +#endif +#ifdef MA_SUPPORT_OPENSL + struct + { + /*SLObjectItf*/ ma_ptr pOutputMixObj; + /*SLOutputMixItf*/ ma_ptr pOutputMix; + /*SLObjectItf*/ ma_ptr pAudioPlayerObj; + /*SLPlayItf*/ ma_ptr pAudioPlayer; + /*SLObjectItf*/ ma_ptr pAudioRecorderObj; + /*SLRecordItf*/ ma_ptr pAudioRecorder; + /*SLAndroidSimpleBufferQueueItf*/ ma_ptr pBufferQueuePlayback; + /*SLAndroidSimpleBufferQueueItf*/ ma_ptr pBufferQueueCapture; + ma_bool32 isDrainingCapture; + ma_bool32 isDrainingPlayback; + ma_uint32 currentBufferIndexPlayback; + ma_uint32 currentBufferIndexCapture; + ma_uint8* pBufferPlayback; /* This is malloc()'d and is used for storing audio data. Typed as ma_uint8 for easy offsetting. */ + ma_uint8* pBufferCapture; + ma_pcm_rb duplexRB; + } opensl; +#endif +#ifdef MA_SUPPORT_WEBAUDIO + struct + { + int indexPlayback; /* We use a factory on the JavaScript side to manage devices and use an index for JS/C interop. 
*/ + int indexCapture; + ma_pcm_rb duplexRB; /* In external capture format. */ + } webaudio; +#endif +#ifdef MA_SUPPORT_NULL + struct + { + ma_thread deviceThread; + ma_event operationEvent; + ma_event operationCompletionEvent; + ma_uint32 operation; + ma_result operationResult; + ma_timer timer; + double priorRunTime; + ma_uint32 currentPeriodFramesRemainingPlayback; + ma_uint32 currentPeriodFramesRemainingCapture; + ma_uint64 lastProcessedFramePlayback; + ma_uint32 lastProcessedFrameCapture; + ma_bool32 isStarted; + } null_device; +#endif + }; +}; +#if defined(_MSC_VER) && !defined(__clang__) + #pragma warning(pop) +#else + #pragma GCC diagnostic pop /* For ISO C99 doesn't support unnamed structs/unions [-Wpedantic] */ +#endif + +/* +Initializes a `ma_context_config` object. + + +Return Value +------------ +A `ma_context_config` initialized to defaults. + + +Remarks +------- +You must always use this to initialize the default state of the `ma_context_config` object. Not using this will result in your program breaking when miniaudio +is updated and new members are added to `ma_context_config`. It also sets logical defaults. + +You can override members of the returned object by changing it's members directly. + + +See Also +-------- +ma_context_init() +*/ +MA_API ma_context_config ma_context_config_init(void); + +/* +Initializes a context. + +The context is used for selecting and initializing an appropriate backend and to represent the backend at a more global level than that of an individual +device. There is one context to many devices, and a device is created from a context. A context is required to enumerate devices. + + +Parameters +---------- +backends (in, optional) + A list of backends to try initializing, in priority order. Can be NULL, in which case it uses default priority order. + +backendCount (in, optional) + The number of items in `backend`. Ignored if `backend` is NULL. + +pConfig (in, optional) + The context configuration. + +pContext (in) + A pointer to the context object being initialized. + + +Return Value +------------ +MA_SUCCESS if successful; any other error code otherwise. + + +Thread Safety +------------- +Unsafe. Do not call this function across multiple threads as some backends read and write to global state. + + +Remarks +------- +When `backends` is NULL, the default priority order will be used. 
Below is a list of backends in priority order: + + |-------------|-----------------------|--------------------------------------------------------| + | Name | Enum Name | Supported Operating Systems | + |-------------|-----------------------|--------------------------------------------------------| + | WASAPI | ma_backend_wasapi | Windows Vista+ | + | DirectSound | ma_backend_dsound | Windows XP+ | + | WinMM | ma_backend_winmm | Windows XP+ (may work on older versions, but untested) | + | Core Audio | ma_backend_coreaudio | macOS, iOS | + | ALSA | ma_backend_alsa | Linux | + | PulseAudio | ma_backend_pulseaudio | Cross Platform (disabled on Windows, BSD and Android) | + | JACK | ma_backend_jack | Cross Platform (disabled on BSD and Android) | + | sndio | ma_backend_sndio | OpenBSD | + | audio(4) | ma_backend_audio4 | NetBSD, OpenBSD | + | OSS | ma_backend_oss | FreeBSD | + | AAudio | ma_backend_aaudio | Android 8+ | + | OpenSL|ES | ma_backend_opensl | Android (API level 16+) | + | Web Audio | ma_backend_webaudio | Web (via Emscripten) | + | Null | ma_backend_null | Cross Platform (not used on Web) | + |-------------|-----------------------|--------------------------------------------------------| + +The context can be configured via the `pConfig` argument. The config object is initialized with `ma_context_config_init()`. Individual configuration settings +can then be set directly on the structure. Below are the members of the `ma_context_config` object. + + logCallback + Callback for handling log messages from miniaudio. + + threadPriority + The desired priority to use for the audio thread. Allowable values include the following: + + |--------------------------------------| + | Thread Priority | + |--------------------------------------| + | ma_thread_priority_idle | + | ma_thread_priority_lowest | + | ma_thread_priority_low | + | ma_thread_priority_normal | + | ma_thread_priority_high | + | ma_thread_priority_highest (default) | + | ma_thread_priority_realtime | + | ma_thread_priority_default | + |--------------------------------------| + + pUserData + A pointer to application-defined data. This can be accessed from the context object directly such as `context.pUserData`. + + allocationCallbacks + Structure containing custom allocation callbacks. Leaving this at defaults will cause it to use MA_MALLOC, MA_REALLOC and MA_FREE. These allocation + callbacks will be used for anything tied to the context, including devices. + + alsa.useVerboseDeviceEnumeration + ALSA will typically enumerate many different devices which can be intrusive and not user-friendly. To combat this, miniaudio will enumerate only unique + card/device pairs by default. The problem with this is that you lose a bit of flexibility and control. Setting alsa.useVerboseDeviceEnumeration makes + it so the ALSA backend includes all devices. Defaults to false. + + pulse.pApplicationName + PulseAudio only. The application name to use when initializing the PulseAudio context with `pa_context_new()`. + + pulse.pServerName + PulseAudio only. The name of the server to connect to with `pa_context_connect()`. + + pulse.tryAutoSpawn + PulseAudio only. Whether or not to try automatically starting the PulseAudio daemon. Defaults to false. If you set this to true, keep in mind that + miniaudio uses a trial and error method to find the most appropriate backend, and this will result in the PulseAudio daemon starting which may be + intrusive for the end user. + + coreaudio.sessionCategory + iOS only. 
The session category to use for the shared AudioSession instance. Below is a list of allowable values and their Core Audio equivalents. + + |-----------------------------------------|-------------------------------------| + | miniaudio Token | Core Audio Token | + |-----------------------------------------|-------------------------------------| + | ma_ios_session_category_ambient | AVAudioSessionCategoryAmbient | + | ma_ios_session_category_solo_ambient | AVAudioSessionCategorySoloAmbient | + | ma_ios_session_category_playback | AVAudioSessionCategoryPlayback | + | ma_ios_session_category_record | AVAudioSessionCategoryRecord | + | ma_ios_session_category_play_and_record | AVAudioSessionCategoryPlayAndRecord | + | ma_ios_session_category_multi_route | AVAudioSessionCategoryMultiRoute | + | ma_ios_session_category_none | AVAudioSessionCategoryAmbient | + | ma_ios_session_category_default | AVAudioSessionCategoryAmbient | + |-----------------------------------------|-------------------------------------| + + coreaudio.sessionCategoryOptions + iOS only. Session category options to use with the shared AudioSession instance. Below is a list of allowable values and their Core Audio equivalents. + + |---------------------------------------------------------------------------|------------------------------------------------------------------| + | miniaudio Token | Core Audio Token | + |---------------------------------------------------------------------------|------------------------------------------------------------------| + | ma_ios_session_category_option_mix_with_others | AVAudioSessionCategoryOptionMixWithOthers | + | ma_ios_session_category_option_duck_others | AVAudioSessionCategoryOptionDuckOthers | + | ma_ios_session_category_option_allow_bluetooth | AVAudioSessionCategoryOptionAllowBluetooth | + | ma_ios_session_category_option_default_to_speaker | AVAudioSessionCategoryOptionDefaultToSpeaker | + | ma_ios_session_category_option_interrupt_spoken_audio_and_mix_with_others | AVAudioSessionCategoryOptionInterruptSpokenAudioAndMixWithOthers | + | ma_ios_session_category_option_allow_bluetooth_a2dp | AVAudioSessionCategoryOptionAllowBluetoothA2DP | + | ma_ios_session_category_option_allow_air_play | AVAudioSessionCategoryOptionAllowAirPlay | + |---------------------------------------------------------------------------|------------------------------------------------------------------| + + jack.pClientName + The name of the client to pass to `jack_client_open()`. + + jack.tryStartServer + Whether or not to try auto-starting the JACK server. Defaults to false. + + +It is recommended that only a single context is active at any given time because it's a bulky data structure which performs run-time linking for the +relevant backends every time it's initialized. + +The location of the context cannot change throughout it's lifetime. Consider allocating the `ma_context` object with `malloc()` if this is an issue. The +reason for this is that a pointer to the context is stored in the `ma_device` structure. + + +Example 1 - Default Initialization +---------------------------------- +The example below shows how to initialize the context using the default configuration. + +```c +ma_context context; +ma_result result = ma_context_init(NULL, 0, NULL, &context); +if (result != MA_SUCCESS) { + // Error. +} +``` + + +Example 2 - Custom Configuration +-------------------------------- +The example below shows how to initialize the context using custom backend priorities and a custom configuration. 
In this hypothetical example, the program +wants to prioritize ALSA over PulseAudio on Linux. They also want to avoid using the WinMM backend on Windows because it's latency is too high. They also +want an error to be returned if no valid backend is available which they achieve by excluding the Null backend. + +For the configuration, the program wants to capture any log messages so they can, for example, route it to a log file and user interface. + +```c +ma_backend backends[] = { + ma_backend_alsa, + ma_backend_pulseaudio, + ma_backend_wasapi, + ma_backend_dsound +}; + +ma_context_config config = ma_context_config_init(); +config.logCallback = my_log_callback; +config.pUserData = pMyUserData; + +ma_context context; +ma_result result = ma_context_init(backends, sizeof(backends)/sizeof(backends[0]), &config, &context); +if (result != MA_SUCCESS) { + // Error. + if (result == MA_NO_BACKEND) { + // Couldn't find an appropriate backend. + } +} +``` + + +See Also +-------- +ma_context_config_init() +ma_context_uninit() +*/ +MA_API ma_result ma_context_init(const ma_backend backends[], ma_uint32 backendCount, const ma_context_config* pConfig, ma_context* pContext); + +/* +Uninitializes a context. + + +Return Value +------------ +MA_SUCCESS if successful; any other error code otherwise. + + +Thread Safety +------------- +Unsafe. Do not call this function across multiple threads as some backends read and write to global state. + + +Remarks +------- +Results are undefined if you call this while any device created by this context is still active. + + +See Also +-------- +ma_context_init() +*/ +MA_API ma_result ma_context_uninit(ma_context* pContext); + +/* +Retrieves the size of the ma_context object. + +This is mainly for the purpose of bindings to know how much memory to allocate. +*/ +MA_API size_t ma_context_sizeof(); + +/* +Enumerates over every device (both playback and capture). + +This is a lower-level enumeration function to the easier to use `ma_context_get_devices()`. Use `ma_context_enumerate_devices()` if you would rather not incur +an internal heap allocation, or it simply suits your code better. + +Note that this only retrieves the ID and name/description of the device. The reason for only retrieving basic information is that it would otherwise require +opening the backend device in order to probe it for more detailed information which can be inefficient. Consider using `ma_context_get_device_info()` for this, +but don't call it from within the enumeration callback. + +Returning false from the callback will stop enumeration. Returning true will continue enumeration. + + +Parameters +---------- +pContext (in) + A pointer to the context performing the enumeration. + +callback (in) + The callback to fire for each enumerated device. + +pUserData (in) + A pointer to application-defined data passed to the callback. + + +Return Value +------------ +MA_SUCCESS if successful; any other error code otherwise. + + +Thread Safety +------------- +Safe. This is guarded using a simple mutex lock. + + +Remarks +------- +Do _not_ assume the first enumerated device of a given type is the default device. + +Some backends and platforms may only support default playback and capture devices. + +In general, you should not do anything complicated from within the callback. In particular, do not try initializing a device from within the callback. Also, +do not try to call `ma_context_get_device_info()` from within the callback. 
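+
+As a sketch of one safe pattern (the helper names below are purely illustrative and not part of miniaudio), you can record device IDs during enumeration and
+then query the details with `ma_context_get_device_info()` only after `ma_context_enumerate_devices()` has returned:
+
+```c
+typedef struct { ma_device_id ids[64]; ma_uint32 count; } my_id_list;   // Hypothetical helper type.
+
+ma_bool32 my_collect_ids_callback(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pInfo, void* pUserData)
+{
+    my_id_list* pList = (my_id_list*)pUserData;
+    if (deviceType == ma_device_type_playback && pList->count < 64) {
+        pList->ids[pList->count++] = pInfo->id;  // Only copy the ID here; query details after enumeration has finished.
+    }
+    (void)pContext;
+    return MA_TRUE;  // Continue enumeration.
+}
+```
+
+Once enumeration has returned, each recorded ID can then be passed to `ma_context_get_device_info()` safely, outside of the callback.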
+ +Consider using `ma_context_get_devices()` for a simpler and safer API, albeit at the expense of an internal heap allocation. + + +Example 1 - Simple Enumeration +------------------------------ +ma_bool32 ma_device_enum_callback(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pInfo, void* pUserData) +{ + printf("Device Name: %s\n", pInfo->name); + return MA_TRUE; +} + +ma_result result = ma_context_enumerate_devices(&context, my_device_enum_callback, pMyUserData); +if (result != MA_SUCCESS) { + // Error. +} + + +See Also +-------- +ma_context_get_devices() +*/ +MA_API ma_result ma_context_enumerate_devices(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData); + +/* +Retrieves basic information about every active playback and/or capture device. + +This function will allocate memory internally for the device lists and return a pointer to them through the `ppPlaybackDeviceInfos` and `ppCaptureDeviceInfos` +parameters. If you do not want to incur the overhead of these allocations consider using `ma_context_enumerate_devices()` which will instead use a callback. + + +Parameters +---------- +pContext (in) + A pointer to the context performing the enumeration. + +ppPlaybackDeviceInfos (out) + A pointer to a pointer that will receive the address of a buffer containing the list of `ma_device_info` structures for playback devices. + +pPlaybackDeviceCount (out) + A pointer to an unsigned integer that will receive the number of playback devices. + +ppCaptureDeviceInfos (out) + A pointer to a pointer that will receive the address of a buffer containing the list of `ma_device_info` structures for capture devices. + +pCaptureDeviceCount (out) + A pointer to an unsigned integer that will receive the number of capture devices. + + +Return Value +------------ +MA_SUCCESS if successful; any other error code otherwise. + + +Thread Safety +------------- +Unsafe. Since each call to this function invalidates the pointers from the previous call, you should not be calling this simultaneously across multiple +threads. Instead, you need to make a copy of the returned data with your own higher level synchronization. + + +Remarks +------- +It is _not_ safe to assume the first device in the list is the default device. + +You can pass in NULL for the playback or capture lists in which case they'll be ignored. + +The returned pointers will become invalid upon the next call this this function, or when the context is uninitialized. Do not free the returned pointers. + + +See Also +-------- +ma_context_get_devices() +*/ +MA_API ma_result ma_context_get_devices(ma_context* pContext, ma_device_info** ppPlaybackDeviceInfos, ma_uint32* pPlaybackDeviceCount, ma_device_info** ppCaptureDeviceInfos, ma_uint32* pCaptureDeviceCount); + +/* +Retrieves information about a device of the given type, with the specified ID and share mode. + + +Parameters +---------- +pContext (in) + A pointer to the context performing the query. + +deviceType (in) + The type of the device being queried. Must be either `ma_device_type_playback` or `ma_device_type_capture`. + +pDeviceID (in) + The ID of the device being queried. + +shareMode (in) + The share mode to query for device capabilities. This should be set to whatever you're intending on using when initializing the device. If you're unsure, + set this to `ma_share_mode_shared`. + +pDeviceInfo (out) + A pointer to the `ma_device_info` structure that will receive the device information. 
+ + +Return Value +------------ +MA_SUCCESS if successful; any other error code otherwise. + + +Thread Safety +------------- +Safe. This is guarded using a simple mutex lock. + + +Remarks +------- +Do _not_ call this from within the `ma_context_enumerate_devices()` callback. + +It's possible for a device to have different information and capabilities depending on whether or not it's opened in shared or exclusive mode. For example, in +shared mode, WASAPI always uses floating point samples for mixing, but in exclusive mode it can be anything. Therefore, this function allows you to specify +which share mode you want information for. Note that not all backends and devices support shared or exclusive mode, in which case this function will fail if +the requested share mode is unsupported. + +This leaves pDeviceInfo unmodified in the result of an error. +*/ +MA_API ma_result ma_context_get_device_info(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo); + +/* +Determines if the given context supports loopback mode. + + +Parameters +---------- +pContext (in) + A pointer to the context getting queried. + + +Return Value +------------ +MA_TRUE if the context supports loopback mode; MA_FALSE otherwise. +*/ +MA_API ma_bool32 ma_context_is_loopback_supported(ma_context* pContext); + + + +/* +Initializes a device config with default settings. + + +Parameters +---------- +deviceType (in) + The type of the device this config is being initialized for. This must set to one of the following: + + |-------------------------| + | Device Type | + |-------------------------| + | ma_device_type_playback | + | ma_device_type_capture | + | ma_device_type_duplex | + | ma_device_type_loopback | + |-------------------------| + + +Return Value +------------ +A new device config object with default settings. You will typically want to adjust the config after this function returns. See remarks. + + +Thread Safety +------------- +Safe. + + +Callback Safety +--------------- +Safe, but don't try initializing a device in a callback. + + +Remarks +------- +The returned config will be initialized to defaults. You will normally want to customize a few variables before initializing the device. See Example 1 for a +typical configuration which sets the sample format, channel count, sample rate, data callback and user data. These are usually things you will want to change +before initializing the device. + +See `ma_device_init()` for details on specific configuration options. + + +Example 1 - Simple Configuration +-------------------------------- +The example below is what a program will typically want to configure for each device at a minimum. Notice how `ma_device_config_init()` is called first, and +then the returned object is modified directly. This is important because it ensures that your program continues to work as new configuration options are added +to the `ma_device_config` structure. + +```c +ma_device_config config = ma_device_config_init(ma_device_type_playback); +config.playback.format = ma_format_f32; +config.playback.channels = 2; +config.sampleRate = 48000; +config.dataCallback = ma_data_callback; +config.pUserData = pMyUserData; +``` + + +See Also +-------- +ma_device_init() +ma_device_init_ex() +*/ +MA_API ma_device_config ma_device_config_init(ma_device_type deviceType); + + +/* +Initializes a device. + +A device represents a physical audio device. 
The idea is you send or receive audio data from the device to either play it back through a speaker, or capture it +from a microphone. Whether or not you should send or receive data from the device (or both) depends on the type of device you are initializing which can be +playback, capture, full-duplex or loopback. (Note that loopback mode is only supported on select backends.) Sending and receiving audio data to and from the +device is done via a callback which is fired by miniaudio at periodic time intervals. + +The frequency at which data is delivered to and from a device depends on the size of it's period. The size of the period can be defined in terms of PCM frames +or milliseconds, whichever is more convenient. Generally speaking, the smaller the period, the lower the latency at the expense of higher CPU usage and +increased risk of glitching due to the more frequent and granular data deliver intervals. The size of a period will depend on your requirements, but +miniaudio's defaults should work fine for most scenarios. If you're building a game you should leave this fairly small, whereas if you're building a simple +media player you can make it larger. Note that the period size you request is actually just a hint - miniaudio will tell the backend what you want, but the +backend is ultimately responsible for what it gives you. You cannot assume you will get exactly what you ask for. + +When delivering data to and from a device you need to make sure it's in the correct format which you can set through the device configuration. You just set the +format that you want to use and miniaudio will perform all of the necessary conversion for you internally. When delivering data to and from the callback you +can assume the format is the same as what you requested when you initialized the device. See Remarks for more details on miniaudio's data conversion pipeline. + + +Parameters +---------- +pContext (in, optional) + A pointer to the context that owns the device. This can be null, in which case it creates a default context internally. + +pConfig (in) + A pointer to the device configuration. Cannot be null. See remarks for details. + +pDevice (out) + A pointer to the device object being initialized. + + +Return Value +------------ +MA_SUCCESS if successful; any other error code otherwise. + + +Thread Safety +------------- +Unsafe. It is not safe to call this function simultaneously for different devices because some backends depend on and mutate global state. The same applies to +calling this at the same time as `ma_device_uninit()`. + + +Callback Safety +--------------- +Unsafe. It is not safe to call this inside any callback. + + +Remarks +------- +Setting `pContext` to NULL will result in miniaudio creating a default context internally and is equivalent to passing in a context initialized like so: + + ```c + ma_context_init(NULL, 0, NULL, &context); + ``` + +Do not set `pContext` to NULL if you are needing to open multiple devices. You can, however, use NULL when initializing the first device, and then use +device.pContext for the initialization of other devices. + +The device can be configured via the `pConfig` argument. The config object is initialized with `ma_device_config_init()`. Individual configuration settings can +then be set directly on the structure. Below are the members of the `ma_device_config` object. + + deviceType + Must be `ma_device_type_playback`, `ma_device_type_capture`, `ma_device_type_duplex` of `ma_device_type_loopback`. + + sampleRate + The sample rate, in hertz. 
The most common sample rates are 48000 and 44100. Setting this to 0 will use the device's native sample rate.
+
+    periodSizeInFrames
+        The desired size of a period in PCM frames. If this is 0, `periodSizeInMilliseconds` will be used instead. If both are 0 the default buffer size will
+        be used depending on the selected performance profile. This value affects latency. See below for details.
+
+    periodSizeInMilliseconds
+        The desired size of a period in milliseconds. If this is 0, `periodSizeInFrames` will be used instead. If both are 0 the default buffer size will be
+        used depending on the selected performance profile. This value affects latency. See below for details.
+
+    periods
+        The number of periods making up the device's entire buffer. The total buffer size is `periodSizeInFrames` or `periodSizeInMilliseconds` multiplied by
+        this value. This is just a hint as backends will be the ones who ultimately decide how your periods will be configured.
+
+    performanceProfile
+        A hint to miniaudio as to the performance requirements of your program. Can be either `ma_performance_profile_low_latency` (default) or
+        `ma_performance_profile_conservative`. This mainly affects the size of default buffers and can usually be left at its default value.
+
+    noPreZeroedOutputBuffer
+        When set to true, the contents of the output buffer passed into the data callback will be left undefined. When set to false (default), the contents of
+        the output buffer will be cleared to zero. You can use this to avoid the overhead of zeroing out the buffer if you can guarantee that your data
+        callback will write to every sample in the output buffer, or if you are doing your own clearing.
+
+    noClip
+        When set to true, the contents of the output buffer passed into the data callback will be clipped after returning. When set to false (default), the
+        contents of the output buffer are left alone after returning and it will be left up to the backend itself to decide whether or not to clip. This only
+        applies when the playback sample format is f32.
+
+    dataCallback
+        The callback to fire whenever data is ready to be delivered to or from the device.
+
+    stopCallback
+        The callback to fire whenever the device has stopped, either explicitly via `ma_device_stop()`, or implicitly due to things like the device being
+        disconnected.
+
+    pUserData
+        The user data pointer to use with the device. You can access this directly from the device object like `device.pUserData`.
+
+    resampling.algorithm
+        The resampling algorithm to use when miniaudio needs to perform resampling between the rate specified by `sampleRate` and the device's native rate. The
+        default value is `ma_resample_algorithm_linear`, and the quality can be configured with `resampling.linear.lpfOrder`.
+
+    resampling.linear.lpfOrder
+        The linear resampler applies a low-pass filter as part of its processing for anti-aliasing. This setting controls the order of the filter. The higher
+        the value, the better the quality, in general. Setting this to 0 will disable low-pass filtering altogether. The maximum value is
+        `MA_MAX_FILTER_ORDER`. The default value is `min(4, MA_MAX_FILTER_ORDER)`.
+
+    playback.pDeviceID
+        A pointer to a `ma_device_id` structure containing the ID of the playback device to initialize. Setting this NULL (default) will use the system's
+        default playback device. Retrieve the device ID from the `ma_device_info` structure, which can be retrieved using device enumeration.
+
+    playback.format
+        The sample format to use for playback.
When set to `ma_format_unknown` the device's native format will be used. This can be retrieved after + initialization from the device object directly with `device.playback.format`. + + playback.channels + The number of channels to use for playback. When set to 0 the device's native channel count will be used. This can be retrieved after initialization + from the device object directly with `device.playback.channels`. + + playback.channelMap + The channel map to use for playback. When left empty, the device's native channel map will be used. This can be retrieved after initialization from the + device object direct with `device.playback.channelMap`. + + playback.shareMode + The preferred share mode to use for playback. Can be either `ma_share_mode_shared` (default) or `ma_share_mode_exclusive`. Note that if you specify + exclusive mode, but it's not supported by the backend, initialization will fail. You can then fall back to shared mode if desired by changing this to + ma_share_mode_shared and reinitializing. + + capture.pDeviceID + A pointer to a `ma_device_id` structure containing the ID of the capture device to initialize. Setting this NULL (default) will use the system's + default capture device. Retrieve the device ID from the `ma_device_info` structure, which can be retrieved using device enumeration. + + capture.format + The sample format to use for capture. When set to `ma_format_unknown` the device's native format will be used. This can be retrieved after + initialization from the device object directly with `device.capture.format`. + + capture.channels + The number of channels to use for capture. When set to 0 the device's native channel count will be used. This can be retrieved after initialization + from the device object directly with `device.capture.channels`. + + capture.channelMap + The channel map to use for capture. When left empty, the device's native channel map will be used. This can be retrieved after initialization from the + device object direct with `device.capture.channelMap`. + + capture.shareMode + The preferred share mode to use for capture. Can be either `ma_share_mode_shared` (default) or `ma_share_mode_exclusive`. Note that if you specify + exclusive mode, but it's not supported by the backend, initialization will fail. You can then fall back to shared mode if desired by changing this to + ma_share_mode_shared and reinitializing. + + wasapi.noAutoConvertSRC + WASAPI only. When set to true, disables WASAPI's automatic resampling and forces the use of miniaudio's resampler. Defaults to false. + + wasapi.noDefaultQualitySRC + WASAPI only. Only used when `wasapi.noAutoConvertSRC` is set to false. When set to true, disables the use of `AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY`. + You should usually leave this set to false, which is the default. + + wasapi.noAutoStreamRouting + WASAPI only. When set to true, disables automatic stream routing on the WASAPI backend. Defaults to false. + + wasapi.noHardwareOffloading + WASAPI only. When set to true, disables the use of WASAPI's hardware offloading feature. Defaults to false. + + alsa.noMMap + ALSA only. When set to true, disables MMap mode. Defaults to false. + + alsa.noAutoFormat + ALSA only. When set to true, disables ALSA's automatic format conversion by including the SND_PCM_NO_AUTO_FORMAT flag. Defaults to false. + + alsa.noAutoChannels + ALSA only. When set to true, disables ALSA's automatic channel conversion by including the SND_PCM_NO_AUTO_CHANNELS flag. Defaults to false. + + alsa.noAutoResample + ALSA only. 
When set to true, disables ALSA's automatic resampling by including the SND_PCM_NO_AUTO_RESAMPLE flag. Defaults to false.
+
+    pulse.pStreamNamePlayback
+        PulseAudio only. Sets the stream name for playback.
+
+    pulse.pStreamNameCapture
+        PulseAudio only. Sets the stream name for capture.
+
+
+Once initialized, the device's config is immutable. If you need to change the config you will need to initialize a new device.
+
+After initializing the device it will be in a stopped state. To start it, use `ma_device_start()`.
+
+If both `periodSizeInFrames` and `periodSizeInMilliseconds` are set to zero, it will default to `MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_LOW_LATENCY` or
+`MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_CONSERVATIVE`, depending on whether or not `performanceProfile` is set to `ma_performance_profile_low_latency` or
+`ma_performance_profile_conservative`.
+
+If you request exclusive mode and the backend does not support it, an error will be returned. For robustness, you may want to first try initializing the device
+in exclusive mode, and then fall back to shared mode if required. Alternatively you can just request shared mode (the default if you leave it unset in the
+config) which is the most reliable option. Some backends do not have a practical way of choosing whether or not the device should be exclusive (ALSA,
+for example) in which case it just acts as a hint. Unless you have special requirements you should try avoiding exclusive mode as it's intrusive to the user.
+Starting with Windows 10, miniaudio will use low-latency shared mode where possible which may make exclusive mode unnecessary.
+
+When sending or receiving data to/from a device, miniaudio will internally perform a format conversion to convert between the format specified by the config
+and the format used internally by the backend. If you pass in 0 for the sample format, channel count, sample rate _and_ channel map, data transmission will run
+on an optimized pass-through fast path. You can retrieve the format, channel count and sample rate by inspecting the `playback/capture.format`,
+`playback/capture.channels` and `sampleRate` members of the device object.
+
+When compiling for UWP you must ensure you call this function on the main UI thread because the operating system may need to present the user with a message
+asking for permissions. Please refer to the official documentation for ActivateAudioInterfaceAsync() for more information.
+
+ALSA Specific: When initializing the default device, requesting shared mode will try using the "dmix" device for playback and the "dsnoop" device for capture.
+If these fail it will try falling back to the "hw" device.
+
+
+Example 1 - Simple Initialization
+---------------------------------
+This example shows how to initialize a simple playback device using a standard configuration. If you just need to do simple playback from the default
+playback device, this is usually all you need.
+
+```c
+ma_device_config config = ma_device_config_init(ma_device_type_playback);
+config.playback.format = ma_format_f32;
+config.playback.channels = 2;
+config.sampleRate = 48000;
+config.dataCallback = ma_data_callback;
+config.pUserData = pMyUserData;
+
+ma_device device;
+ma_result result = ma_device_init(NULL, &config, &device);
+if (result != MA_SUCCESS) {
+    // Error
+}
+```
+
+
+Example 2 - Advanced Initialization
+-----------------------------------
+This example shows how you might do some more advanced initialization.
In this hypothetical example we want to control the latency by setting the buffer size +and period count. We also want to allow the user to be able to choose which device to output from which means we need a context so we can perform device +enumeration. + +```c +ma_context context; +ma_result result = ma_context_init(NULL, 0, NULL, &context); +if (result != MA_SUCCESS) { + // Error +} + +ma_device_info* pPlaybackDeviceInfos; +ma_uint32 playbackDeviceCount; +result = ma_context_get_devices(&context, &pPlaybackDeviceInfos, &playbackDeviceCount, NULL, NULL); +if (result != MA_SUCCESS) { + // Error +} + +// ... choose a device from pPlaybackDeviceInfos ... + +ma_device_config config = ma_device_config_init(ma_device_type_playback); +config.playback.pDeviceID = pMyChosenDeviceID; // <-- Get this from the `id` member of one of the `ma_device_info` objects returned by ma_context_get_devices(). +config.playback.format = ma_format_f32; +config.playback.channels = 2; +config.sampleRate = 48000; +config.dataCallback = ma_data_callback; +config.pUserData = pMyUserData; +config.periodSizeInMilliseconds = 10; +config.periods = 3; + +ma_device device; +result = ma_device_init(&context, &config, &device); +if (result != MA_SUCCESS) { + // Error +} +``` + + +See Also +-------- +ma_device_config_init() +ma_device_uninit() +ma_device_start() +ma_context_init() +ma_context_get_devices() +ma_context_enumerate_devices() +*/ +MA_API ma_result ma_device_init(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice); + +/* +Initializes a device without a context, with extra parameters for controlling the configuration of the internal self-managed context. + +This is the same as `ma_device_init()`, only instead of a context being passed in, the parameters from `ma_context_init()` are passed in instead. This function +allows you to configure the internally created context. + + +Parameters +---------- +backends (in, optional) + A list of backends to try initializing, in priority order. Can be NULL, in which case it uses default priority order. + +backendCount (in, optional) + The number of items in `backend`. Ignored if `backend` is NULL. + +pContextConfig (in, optional) + The context configuration. + +pConfig (in) + A pointer to the device configuration. Cannot be null. See remarks for details. + +pDevice (out) + A pointer to the device object being initialized. + + +Return Value +------------ +MA_SUCCESS if successful; any other error code otherwise. + + +Thread Safety +------------- +Unsafe. It is not safe to call this function simultaneously for different devices because some backends depend on and mutate global state. The same applies to +calling this at the same time as `ma_device_uninit()`. + + +Callback Safety +--------------- +Unsafe. It is not safe to call this inside any callback. + + +Remarks +------- +You only need to use this function if you want to configure the context differently to it's defaults. You should never use this function if you want to manage +your own context. + +See the documentation for `ma_context_init()` for information on the different context configuration options. + + +See Also +-------- +ma_device_init() +ma_device_uninit() +ma_device_config_init() +ma_context_init() +*/ +MA_API ma_result ma_device_init_ex(const ma_backend backends[], ma_uint32 backendCount, const ma_context_config* pContextConfig, const ma_device_config* pConfig, ma_device* pDevice); + +/* +Uninitializes a device. + +This will explicitly stop the device. 
You do not need to call `ma_device_stop()` beforehand, but it's harmless if you do. + + +Parameters +---------- +pDevice (in) + A pointer to the device to stop. + + +Return Value +------------ +MA_SUCCESS if successful; any other error code otherwise. + + +Thread Safety +------------- +Unsafe. As soon as this API is called the device should be considered undefined. + + +Callback Safety +--------------- +Unsafe. It is not safe to call this inside any callback. Doing this will result in a deadlock. + + +See Also +-------- +ma_device_init() +ma_device_stop() +*/ +MA_API void ma_device_uninit(ma_device* pDevice); + +/* +Starts the device. For playback devices this begins playback. For capture devices it begins recording. + +Use `ma_device_stop()` to stop the device. + + +Parameters +---------- +pDevice (in) + A pointer to the device to start. + + +Return Value +------------ +MA_SUCCESS if successful; any other error code otherwise. + + +Thread Safety +------------- +Safe. It's safe to call this from any thread with the exception of the callback thread. + + +Callback Safety +--------------- +Unsafe. It is not safe to call this inside any callback. + + +Remarks +------- +For a playback device, this will retrieve an initial chunk of audio data from the client before returning. The reason for this is to ensure there is valid +audio data in the buffer, which needs to be done before the device begins playback. + +This API waits until the backend device has been started for real by the worker thread. It also waits on a mutex for thread-safety. + +Do not call this in any callback. + + +See Also +-------- +ma_device_stop() +*/ +MA_API ma_result ma_device_start(ma_device* pDevice); + +/* +Stops the device. For playback devices this stops playback. For capture devices it stops recording. + +Use `ma_device_start()` to start the device again. + + +Parameters +---------- +pDevice (in) + A pointer to the device to stop. + + +Return Value +------------ +MA_SUCCESS if successful; any other error code otherwise. + + +Thread Safety +------------- +Safe. It's safe to call this from any thread with the exception of the callback thread. + + +Callback Safety +--------------- +Unsafe. It is not safe to call this inside any callback. Doing this will result in a deadlock. + + +Remarks +------- +This API needs to wait on the worker thread to stop the backend device properly before returning. It also waits on a mutex for thread-safety. In addition, some +backends need to wait for the device to finish playback/recording of the current fragment which can take some time (usually proportionate to the buffer size +that was specified at initialization time). + +Backends are required to either pause the stream in-place or drain the buffer if pausing is not possible. The reason for this is that stopping the device and +the resuming it with ma_device_start() (which you might do when your program loses focus) may result in a situation where those samples are never output to the +speakers or received from the microphone which can in turn result in de-syncs. + +Do not call this in any callback. + +This will be called implicitly by `ma_device_uninit()`. + + +See Also +-------- +ma_device_start() +*/ +MA_API ma_result ma_device_stop(ma_device* pDevice); + +/* +Determines whether or not the device is started. + + +Parameters +---------- +pDevice (in) + A pointer to the device whose start state is being retrieved. + + +Return Value +------------ +True if the device is started, false otherwise. + + +Thread Safety +------------- +Safe. 
If another thread calls `ma_device_start()` or `ma_device_stop()` at this same time as this function is called, there's a very small chance the return +value will be out of sync. + + +Callback Safety +--------------- +Safe. This is implemented as a simple accessor. + + +See Also +-------- +ma_device_start() +ma_device_stop() +*/ +MA_API ma_bool32 ma_device_is_started(ma_device* pDevice); + +/* +Sets the master volume factor for the device. + +The volume factor must be between 0 (silence) and 1 (full volume). Use `ma_device_set_master_gain_db()` to use decibel notation, where 0 is full volume and +values less than 0 decreases the volume. + + +Parameters +---------- +pDevice (in) + A pointer to the device whose volume is being set. + +volume (in) + The new volume factor. Must be within the range of [0, 1]. + + +Return Value +------------ +MA_SUCCESS if the volume was set successfully. +MA_INVALID_ARGS if pDevice is NULL. +MA_INVALID_ARGS if the volume factor is not within the range of [0, 1]. + + +Thread Safety +------------- +Safe. This just sets a local member of the device object. + + +Callback Safety +--------------- +Safe. If you set the volume in the data callback, that data written to the output buffer will have the new volume applied. + + +Remarks +------- +This applies the volume factor across all channels. + +This does not change the operating system's volume. It only affects the volume for the given `ma_device` object's audio stream. + + +See Also +-------- +ma_device_get_master_volume() +ma_device_set_master_volume_gain_db() +ma_device_get_master_volume_gain_db() +*/ +MA_API ma_result ma_device_set_master_volume(ma_device* pDevice, float volume); + +/* +Retrieves the master volume factor for the device. + + +Parameters +---------- +pDevice (in) + A pointer to the device whose volume factor is being retrieved. + +pVolume (in) + A pointer to the variable that will receive the volume factor. The returned value will be in the range of [0, 1]. + + +Return Value +------------ +MA_SUCCESS if successful. +MA_INVALID_ARGS if pDevice is NULL. +MA_INVALID_ARGS if pVolume is NULL. + + +Thread Safety +------------- +Safe. This just a simple member retrieval. + + +Callback Safety +--------------- +Safe. + + +Remarks +------- +If an error occurs, `*pVolume` will be set to 0. + + +See Also +-------- +ma_device_set_master_volume() +ma_device_set_master_volume_gain_db() +ma_device_get_master_volume_gain_db() +*/ +MA_API ma_result ma_device_get_master_volume(ma_device* pDevice, float* pVolume); + +/* +Sets the master volume for the device as gain in decibels. + +A gain of 0 is full volume, whereas a gain of < 0 will decrease the volume. + + +Parameters +---------- +pDevice (in) + A pointer to the device whose gain is being set. + +gainDB (in) + The new volume as gain in decibels. Must be less than or equal to 0, where 0 is full volume and anything less than 0 decreases the volume. + + +Return Value +------------ +MA_SUCCESS if the volume was set successfully. +MA_INVALID_ARGS if pDevice is NULL. +MA_INVALID_ARGS if the gain is > 0. + + +Thread Safety +------------- +Safe. This just sets a local member of the device object. + + +Callback Safety +--------------- +Safe. If you set the volume in the data callback, that data written to the output buffer will have the new volume applied. + + +Remarks +------- +This applies the gain across all channels. + +This does not change the operating system's volume. It only affects the volume for the given `ma_device` object's audio stream. 
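+
+As a minimal sketch of typical usage (assuming an already-initialized `ma_device` named `device`; the values are only illustrative):
+
+```c
+ma_device_set_master_volume(&device, 0.5f);    // Half volume on the linear scale.
+ma_device_set_master_gain_db(&device, -6.0f);  // Roughly the same level, expressed in decibels.
+
+float gainDB;
+if (ma_device_get_master_gain_db(&device, &gainDB) == MA_SUCCESS) {
+    // gainDB will be <= 0, with 0 meaning full volume.
+}
+```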
+ + +See Also +-------- +ma_device_get_master_volume_gain_db() +ma_device_set_master_volume() +ma_device_get_master_volume() +*/ +MA_API ma_result ma_device_set_master_gain_db(ma_device* pDevice, float gainDB); + +/* +Retrieves the master gain in decibels. + + +Parameters +---------- +pDevice (in) + A pointer to the device whose gain is being retrieved. + +pGainDB (in) + A pointer to the variable that will receive the gain in decibels. The returned value will be <= 0. + + +Return Value +------------ +MA_SUCCESS if successful. +MA_INVALID_ARGS if pDevice is NULL. +MA_INVALID_ARGS if pGainDB is NULL. + + +Thread Safety +------------- +Safe. This just a simple member retrieval. + + +Callback Safety +--------------- +Safe. + + +Remarks +------- +If an error occurs, `*pGainDB` will be set to 0. + + +See Also +-------- +ma_device_set_master_volume_gain_db() +ma_device_set_master_volume() +ma_device_get_master_volume() +*/ +MA_API ma_result ma_device_get_master_gain_db(ma_device* pDevice, float* pGainDB); + + + +/************************************************************************************************************************************************************ + +Utiltities + +************************************************************************************************************************************************************/ + +/* +Creates a mutex. + +A mutex must be created from a valid context. A mutex is initially unlocked. +*/ +MA_API ma_result ma_mutex_init(ma_context* pContext, ma_mutex* pMutex); + +/* +Deletes a mutex. +*/ +MA_API void ma_mutex_uninit(ma_mutex* pMutex); + +/* +Locks a mutex with an infinite timeout. +*/ +MA_API void ma_mutex_lock(ma_mutex* pMutex); + +/* +Unlocks a mutex. +*/ +MA_API void ma_mutex_unlock(ma_mutex* pMutex); + + +/* +Retrieves a friendly name for a backend. +*/ +MA_API const char* ma_get_backend_name(ma_backend backend); + +/* +Determines whether or not loopback mode is support by a backend. +*/ +MA_API ma_bool32 ma_is_loopback_supported(ma_backend backend); + + +/* +Adjust buffer size based on a scaling factor. + +This just multiplies the base size by the scaling factor, making sure it's a size of at least 1. +*/ +MA_API ma_uint32 ma_scale_buffer_size(ma_uint32 baseBufferSize, float scale); + +/* +Calculates a buffer size in milliseconds from the specified number of frames and sample rate. +*/ +MA_API ma_uint32 ma_calculate_buffer_size_in_milliseconds_from_frames(ma_uint32 bufferSizeInFrames, ma_uint32 sampleRate); + +/* +Calculates a buffer size in frames from the specified number of milliseconds and sample rate. +*/ +MA_API ma_uint32 ma_calculate_buffer_size_in_frames_from_milliseconds(ma_uint32 bufferSizeInMilliseconds, ma_uint32 sampleRate); + +/* +Copies silent frames into the given buffer. +*/ +MA_API void ma_zero_pcm_frames(void* p, ma_uint32 frameCount, ma_format format, ma_uint32 channels); + +/* +Clips f32 samples. +*/ +MA_API void ma_clip_samples_f32(float* p, ma_uint32 sampleCount); +static MA_INLINE void ma_clip_pcm_frames_f32(float* p, ma_uint32 frameCount, ma_uint32 channels) { ma_clip_samples_f32(p, frameCount*channels); } + +/* +Helper for applying a volume factor to samples. + +Note that the source and destination buffers can be the same, in which case it'll perform the operation in-place. 
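+
+As a minimal sketch (assuming an f32 playback device, the standard data callback signature and an application-defined `g_volume`, all of which are
+illustrative), the per-frame variants can be applied in-place at the end of a data callback:
+
+```c
+static float g_volume = 0.8f;   // Hypothetical application-defined volume.
+
+void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
+{
+    // ... write frameCount frames of f32 audio into pOutput ...
+    ma_apply_volume_factor_pcm_frames_f32((float*)pOutput, frameCount, pDevice->playback.channels, g_volume);
+    (void)pInput;
+}
+```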
+*/ +MA_API void ma_copy_and_apply_volume_factor_u8(ma_uint8* pSamplesOut, const ma_uint8* pSamplesIn, ma_uint32 sampleCount, float factor); +MA_API void ma_copy_and_apply_volume_factor_s16(ma_int16* pSamplesOut, const ma_int16* pSamplesIn, ma_uint32 sampleCount, float factor); +MA_API void ma_copy_and_apply_volume_factor_s24(void* pSamplesOut, const void* pSamplesIn, ma_uint32 sampleCount, float factor); +MA_API void ma_copy_and_apply_volume_factor_s32(ma_int32* pSamplesOut, const ma_int32* pSamplesIn, ma_uint32 sampleCount, float factor); +MA_API void ma_copy_and_apply_volume_factor_f32(float* pSamplesOut, const float* pSamplesIn, ma_uint32 sampleCount, float factor); + +MA_API void ma_apply_volume_factor_u8(ma_uint8* pSamples, ma_uint32 sampleCount, float factor); +MA_API void ma_apply_volume_factor_s16(ma_int16* pSamples, ma_uint32 sampleCount, float factor); +MA_API void ma_apply_volume_factor_s24(void* pSamples, ma_uint32 sampleCount, float factor); +MA_API void ma_apply_volume_factor_s32(ma_int32* pSamples, ma_uint32 sampleCount, float factor); +MA_API void ma_apply_volume_factor_f32(float* pSamples, ma_uint32 sampleCount, float factor); + +MA_API void ma_copy_and_apply_volume_factor_pcm_frames_u8(ma_uint8* pPCMFramesOut, const ma_uint8* pPCMFramesIn, ma_uint32 frameCount, ma_uint32 channels, float factor); +MA_API void ma_copy_and_apply_volume_factor_pcm_frames_s16(ma_int16* pPCMFramesOut, const ma_int16* pPCMFramesIn, ma_uint32 frameCount, ma_uint32 channels, float factor); +MA_API void ma_copy_and_apply_volume_factor_pcm_frames_s24(void* pPCMFramesOut, const void* pPCMFramesIn, ma_uint32 frameCount, ma_uint32 channels, float factor); +MA_API void ma_copy_and_apply_volume_factor_pcm_frames_s32(ma_int32* pPCMFramesOut, const ma_int32* pPCMFramesIn, ma_uint32 frameCount, ma_uint32 channels, float factor); +MA_API void ma_copy_and_apply_volume_factor_pcm_frames_f32(float* pPCMFramesOut, const float* pPCMFramesIn, ma_uint32 frameCount, ma_uint32 channels, float factor); +MA_API void ma_copy_and_apply_volume_factor_pcm_frames(void* pFramesOut, const void* pFramesIn, ma_uint32 frameCount, ma_format format, ma_uint32 channels, float factor); + +MA_API void ma_apply_volume_factor_pcm_frames_u8(ma_uint8* pFrames, ma_uint32 frameCount, ma_uint32 channels, float factor); +MA_API void ma_apply_volume_factor_pcm_frames_s16(ma_int16* pFrames, ma_uint32 frameCount, ma_uint32 channels, float factor); +MA_API void ma_apply_volume_factor_pcm_frames_s24(void* pFrames, ma_uint32 frameCount, ma_uint32 channels, float factor); +MA_API void ma_apply_volume_factor_pcm_frames_s32(ma_int32* pFrames, ma_uint32 frameCount, ma_uint32 channels, float factor); +MA_API void ma_apply_volume_factor_pcm_frames_f32(float* pFrames, ma_uint32 frameCount, ma_uint32 channels, float factor); +MA_API void ma_apply_volume_factor_pcm_frames(void* pFrames, ma_uint32 frameCount, ma_format format, ma_uint32 channels, float factor); + + +/* +Helper for converting a linear factor to gain in decibels. +*/ +MA_API float ma_factor_to_gain_db(float factor); + +/* +Helper for converting gain in decibels to a linear factor. 
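+
+This is presumably the inverse of `ma_factor_to_gain_db()`, i.e. the usual amplitude relationship factor = 10^(gainDB/20). For example:
+
+```c
+float factor = ma_gain_db_to_factor(-6.0f);  // ~0.5
+float gainDB = ma_factor_to_gain_db(0.5f);   // ~-6.0
+```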
+*/ +MA_API float ma_gain_db_to_factor(float gain); + +#endif /* MA_NO_DEVICE_IO */ + + +#if !defined(MA_NO_DECODING) || !defined(MA_NO_ENCODING) +typedef enum +{ + ma_seek_origin_start, + ma_seek_origin_current +} ma_seek_origin; + +typedef enum +{ + ma_resource_format_wav +} ma_resource_format; +#endif + +/************************************************************************************************************************************************************ + +Decoding +======== + +Decoders are independent of the main device API. Decoding APIs can be called freely inside the device's data callback, but they are not thread safe unless +you do your own synchronization. + +************************************************************************************************************************************************************/ +#ifndef MA_NO_DECODING +typedef struct ma_decoder ma_decoder; + +typedef size_t (* ma_decoder_read_proc) (ma_decoder* pDecoder, void* pBufferOut, size_t bytesToRead); /* Returns the number of bytes read. */ +typedef ma_bool32 (* ma_decoder_seek_proc) (ma_decoder* pDecoder, int byteOffset, ma_seek_origin origin); +typedef ma_uint64 (* ma_decoder_read_pcm_frames_proc) (ma_decoder* pDecoder, void* pFramesOut, ma_uint64 frameCount); /* Returns the number of frames read. Output data is in internal format. */ +typedef ma_result (* ma_decoder_seek_to_pcm_frame_proc) (ma_decoder* pDecoder, ma_uint64 frameIndex); +typedef ma_result (* ma_decoder_uninit_proc) (ma_decoder* pDecoder); +typedef ma_uint64 (* ma_decoder_get_length_in_pcm_frames_proc)(ma_decoder* pDecoder); + +typedef struct +{ + ma_format format; /* Set to 0 or ma_format_unknown to use the stream's internal format. */ + ma_uint32 channels; /* Set to 0 to use the stream's internal channels. */ + ma_uint32 sampleRate; /* Set to 0 to use the stream's internal sample rate. */ + ma_channel channelMap[MA_MAX_CHANNELS]; + ma_channel_mix_mode channelMixMode; + ma_dither_mode ditherMode; + struct + { + ma_resample_algorithm algorithm; + struct + { + ma_uint32 lpfOrder; + } linear; + struct + { + int quality; + } speex; + } resampling; + ma_allocation_callbacks allocationCallbacks; +} ma_decoder_config; + +struct ma_decoder +{ + ma_decoder_read_proc onRead; + ma_decoder_seek_proc onSeek; + void* pUserData; + ma_uint64 readPointer; /* Used for returning back to a previous position after analysing the stream or whatnot. */ + ma_format internalFormat; + ma_uint32 internalChannels; + ma_uint32 internalSampleRate; + ma_channel internalChannelMap[MA_MAX_CHANNELS]; + ma_format outputFormat; + ma_uint32 outputChannels; + ma_uint32 outputSampleRate; + ma_channel outputChannelMap[MA_MAX_CHANNELS]; + ma_data_converter converter; /* <-- Data conversion is achieved by running frames through this. */ + ma_allocation_callbacks allocationCallbacks; + ma_decoder_read_pcm_frames_proc onReadPCMFrames; + ma_decoder_seek_to_pcm_frame_proc onSeekToPCMFrame; + ma_decoder_uninit_proc onUninit; + ma_decoder_get_length_in_pcm_frames_proc onGetLengthInPCMFrames; + void* pInternalDecoder; /* <-- The drwav/drflac/stb_vorbis/etc. objects. */ + struct + { + const ma_uint8* pData; + size_t dataSize; + size_t currentReadPos; + } memory; /* Only used for decoders that were opened against a block of memory. 
*/ +}; + +MA_API ma_decoder_config ma_decoder_config_init(ma_format outputFormat, ma_uint32 outputChannels, ma_uint32 outputSampleRate); + +MA_API ma_result ma_decoder_init(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder); +MA_API ma_result ma_decoder_init_wav(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder); +MA_API ma_result ma_decoder_init_flac(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder); +MA_API ma_result ma_decoder_init_vorbis(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder); +MA_API ma_result ma_decoder_init_mp3(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder); +MA_API ma_result ma_decoder_init_raw(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfigIn, const ma_decoder_config* pConfigOut, ma_decoder* pDecoder); + +MA_API ma_result ma_decoder_init_memory(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder); +MA_API ma_result ma_decoder_init_memory_wav(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder); +MA_API ma_result ma_decoder_init_memory_flac(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder); +MA_API ma_result ma_decoder_init_memory_vorbis(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder); +MA_API ma_result ma_decoder_init_memory_mp3(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder); +MA_API ma_result ma_decoder_init_memory_raw(const void* pData, size_t dataSize, const ma_decoder_config* pConfigIn, const ma_decoder_config* pConfigOut, ma_decoder* pDecoder); + +MA_API ma_result ma_decoder_init_file(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder); +MA_API ma_result ma_decoder_init_file_wav(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder); +MA_API ma_result ma_decoder_init_file_flac(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder); +MA_API ma_result ma_decoder_init_file_vorbis(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder); +MA_API ma_result ma_decoder_init_file_mp3(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder); + +MA_API ma_result ma_decoder_init_file_w(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder); +MA_API ma_result ma_decoder_init_file_wav_w(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder); +MA_API ma_result ma_decoder_init_file_flac_w(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder); +MA_API ma_result ma_decoder_init_file_vorbis_w(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder); +MA_API ma_result ma_decoder_init_file_mp3_w(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder); + +MA_API ma_result ma_decoder_uninit(ma_decoder* pDecoder); + +/* +Retrieves the length of the decoder in PCM frames. + +Do not call this on streams of an undefined length, such as internet radio. 
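+
+As a minimal sketch of typical usage against a finite source (the file name is illustrative, and the NULL config is assumed to mean default output settings):
+
+```c
+ma_decoder decoder;
+if (ma_decoder_init_file("my_sound.wav", NULL, &decoder) == MA_SUCCESS) {
+    ma_uint64 lengthInFrames = ma_decoder_get_length_in_pcm_frames(&decoder);
+    // ... read up to lengthInFrames frames with ma_decoder_read_pcm_frames() ...
+    ma_decoder_uninit(&decoder);
+}
+```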
+ +If the length is unknown or an error occurs, 0 will be returned. + +This will always return 0 for Vorbis decoders. This is due to a limitation with stb_vorbis in push mode which is what miniaudio +uses internally. + +For MP3's, this will decode the entire file. Do not call this in time critical scenarios. + +This function is not thread safe without your own synchronization. +*/ +MA_API ma_uint64 ma_decoder_get_length_in_pcm_frames(ma_decoder* pDecoder); + +/* +Reads PCM frames from the given decoder. + +This is not thread safe without your own synchronization. +*/ +MA_API ma_uint64 ma_decoder_read_pcm_frames(ma_decoder* pDecoder, void* pFramesOut, ma_uint64 frameCount); + +/* +Seeks to a PCM frame based on it's absolute index. + +This is not thread safe without your own synchronization. +*/ +MA_API ma_result ma_decoder_seek_to_pcm_frame(ma_decoder* pDecoder, ma_uint64 frameIndex); + +/* +Helper for opening and decoding a file into a heap allocated block of memory. Free the returned pointer with ma_free(). On input, +pConfig should be set to what you want. On output it will be set to what you got. +*/ +MA_API ma_result ma_decode_file(const char* pFilePath, ma_decoder_config* pConfig, ma_uint64* pFrameCountOut, void** ppDataOut); +MA_API ma_result ma_decode_memory(const void* pData, size_t dataSize, ma_decoder_config* pConfig, ma_uint64* pFrameCountOut, void** ppDataOut); + +#endif /* MA_NO_DECODING */ + + +/************************************************************************************************************************************************************ + +Encoding +======== + +Encoders do not perform any format conversion for you. If your target format does not support the format, and error will be returned. + +************************************************************************************************************************************************************/ +#ifndef MA_NO_ENCODING +typedef struct ma_encoder ma_encoder; + +typedef size_t (* ma_encoder_write_proc) (ma_encoder* pEncoder, const void* pBufferIn, size_t bytesToWrite); /* Returns the number of bytes written. */ +typedef ma_bool32 (* ma_encoder_seek_proc) (ma_encoder* pEncoder, int byteOffset, ma_seek_origin origin); +typedef ma_result (* ma_encoder_init_proc) (ma_encoder* pEncoder); +typedef void (* ma_encoder_uninit_proc) (ma_encoder* pEncoder); +typedef ma_uint64 (* ma_encoder_write_pcm_frames_proc)(ma_encoder* pEncoder, const void* pFramesIn, ma_uint64 frameCount); + +typedef struct +{ + ma_resource_format resourceFormat; + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + ma_allocation_callbacks allocationCallbacks; +} ma_encoder_config; + +MA_API ma_encoder_config ma_encoder_config_init(ma_resource_format resourceFormat, ma_format format, ma_uint32 channels, ma_uint32 sampleRate); + +struct ma_encoder +{ + ma_encoder_config config; + ma_encoder_write_proc onWrite; + ma_encoder_seek_proc onSeek; + ma_encoder_init_proc onInit; + ma_encoder_uninit_proc onUninit; + ma_encoder_write_pcm_frames_proc onWritePCMFrames; + void* pUserData; + void* pInternalEncoder; /* <-- The drwav/drflac/stb_vorbis/etc. objects. */ + void* pFile; /* FILE*. Only used when initialized with ma_encoder_init_file(). 
*/ +}; + +MA_API ma_result ma_encoder_init(ma_encoder_write_proc onWrite, ma_encoder_seek_proc onSeek, void* pUserData, const ma_encoder_config* pConfig, ma_encoder* pEncoder); +MA_API ma_result ma_encoder_init_file(const char* pFilePath, const ma_encoder_config* pConfig, ma_encoder* pEncoder); +MA_API ma_result ma_encoder_init_file_w(const wchar_t* pFilePath, const ma_encoder_config* pConfig, ma_encoder* pEncoder); +MA_API void ma_encoder_uninit(ma_encoder* pEncoder); +MA_API ma_uint64 ma_encoder_write_pcm_frames(ma_encoder* pEncoder, const void* pFramesIn, ma_uint64 frameCount); + +#endif /* MA_NO_ENCODING */ + + +/************************************************************************************************************************************************************ + +Generation + +************************************************************************************************************************************************************/ +typedef enum +{ + ma_waveform_type_sine, + ma_waveform_type_square, + ma_waveform_type_triangle, + ma_waveform_type_sawtooth +} ma_waveform_type; + +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + ma_waveform_type type; + double amplitude; + double frequency; +} ma_waveform_config; + +MA_API ma_waveform_config ma_waveform_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, ma_waveform_type type, double amplitude, double frequency); + +typedef struct +{ + ma_waveform_config config; + double advance; + double time; +} ma_waveform; + +MA_API ma_result ma_waveform_init(const ma_waveform_config* pConfig, ma_waveform* pWaveform); +MA_API ma_uint64 ma_waveform_read_pcm_frames(ma_waveform* pWaveform, void* pFramesOut, ma_uint64 frameCount); +MA_API ma_result ma_waveform_set_amplitude(ma_waveform* pWaveform, double amplitude); +MA_API ma_result ma_waveform_set_frequency(ma_waveform* pWaveform, double frequency); +MA_API ma_result ma_waveform_set_sample_rate(ma_waveform* pWaveform, ma_uint32 sampleRate); + + + +typedef struct +{ + ma_int32 state; +} ma_lcg; + +typedef enum +{ + ma_noise_type_white, + ma_noise_type_pink, + ma_noise_type_brownian +} ma_noise_type; + +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_noise_type type; + ma_int32 seed; + double amplitude; + ma_bool32 duplicateChannels; +} ma_noise_config; + +MA_API ma_noise_config ma_noise_config_init(ma_format format, ma_uint32 channels, ma_noise_type type, ma_int32 seed, double amplitude); + +typedef struct +{ + ma_noise_config config; + ma_lcg lcg; + union + { + struct + { + double bin[MA_MAX_CHANNELS][16]; + double accumulation[MA_MAX_CHANNELS]; + ma_uint32 counter[MA_MAX_CHANNELS]; + } pink; + struct + { + double accumulation[MA_MAX_CHANNELS]; + } brownian; + } state; +} ma_noise; + +MA_API ma_result ma_noise_init(const ma_noise_config* pConfig, ma_noise* pNoise); +MA_API ma_uint64 ma_noise_read_pcm_frames(ma_noise* pNoise, void* pFramesOut, ma_uint64 frameCount); + + +#ifdef __cplusplus +} +#endif +#endif /* miniaudio_h */ + + + +/************************************************************************************************************************************************************ +************************************************************************************************************************************************************* + +IMPLEMENTATION + +************************************************************************************************************************************************************* 
+************************************************************************************************************************************************************/ +#if defined(MINIAUDIO_IMPLEMENTATION) || defined(MA_IMPLEMENTATION) +#include +#include /* For INT_MAX */ +#include /* sin(), etc. */ + +#include +#include +#if !defined(_MSC_VER) && !defined(__DMC__) + #include /* For strcasecmp(). */ + #include /* For wcslen(), wcsrtombs() */ +#endif + +#ifdef MA_WIN32 +#include +#else +#include /* For malloc(), free(), wcstombs(). */ +#include /* For memset() */ +#endif + +#ifdef MA_EMSCRIPTEN +#include +#endif + +#if !defined(MA_64BIT) && !defined(MA_32BIT) +#ifdef _WIN32 +#ifdef _WIN64 +#define MA_64BIT +#else +#define MA_32BIT +#endif +#endif +#endif + +#if !defined(MA_64BIT) && !defined(MA_32BIT) +#ifdef __GNUC__ +#ifdef __LP64__ +#define MA_64BIT +#else +#define MA_32BIT +#endif +#endif +#endif + +#if !defined(MA_64BIT) && !defined(MA_32BIT) +#include +#if INTPTR_MAX == INT64_MAX +#define MA_64BIT +#else +#define MA_32BIT +#endif +#endif + +/* Architecture Detection */ +#if defined(__x86_64__) || defined(_M_X64) +#define MA_X64 +#elif defined(__i386) || defined(_M_IX86) +#define MA_X86 +#elif defined(__arm__) || defined(_M_ARM) +#define MA_ARM +#endif + +/* Cannot currently support AVX-512 if AVX is disabled. */ +#if !defined(MA_NO_AVX512) && defined(MA_NO_AVX2) +#define MA_NO_AVX512 +#endif + +/* Intrinsics Support */ +#if defined(MA_X64) || defined(MA_X86) + #if defined(_MSC_VER) && !defined(__clang__) + /* MSVC. */ + #if _MSC_VER >= 1400 && !defined(MA_NO_SSE2) /* 2005 */ + #define MA_SUPPORT_SSE2 + #endif + /*#if _MSC_VER >= 1600 && !defined(MA_NO_AVX)*/ /* 2010 */ + /* #define MA_SUPPORT_AVX*/ + /*#endif*/ + #if _MSC_VER >= 1700 && !defined(MA_NO_AVX2) /* 2012 */ + #define MA_SUPPORT_AVX2 + #endif + #if _MSC_VER >= 1910 && !defined(MA_NO_AVX512) /* 2017 */ + #define MA_SUPPORT_AVX512 + #endif + #else + /* Assume GNUC-style. */ + #if defined(__SSE2__) && !defined(MA_NO_SSE2) + #define MA_SUPPORT_SSE2 + #endif + /*#if defined(__AVX__) && !defined(MA_NO_AVX)*/ + /* #define MA_SUPPORT_AVX*/ + /*#endif*/ + #if defined(__AVX2__) && !defined(MA_NO_AVX2) + #define MA_SUPPORT_AVX2 + #endif + #if defined(__AVX512F__) && !defined(MA_NO_AVX512) + #define MA_SUPPORT_AVX512 + #endif + #endif + + /* If at this point we still haven't determined compiler support for the intrinsics just fall back to __has_include. */ + #if !defined(__GNUC__) && !defined(__clang__) && defined(__has_include) + #if !defined(MA_SUPPORT_SSE2) && !defined(MA_NO_SSE2) && __has_include() + #define MA_SUPPORT_SSE2 + #endif + /*#if !defined(MA_SUPPORT_AVX) && !defined(MA_NO_AVX) && __has_include()*/ + /* #define MA_SUPPORT_AVX*/ + /*#endif*/ + #if !defined(MA_SUPPORT_AVX2) && !defined(MA_NO_AVX2) && __has_include() + #define MA_SUPPORT_AVX2 + #endif + #if !defined(MA_SUPPORT_AVX512) && !defined(MA_NO_AVX512) && __has_include() + #define MA_SUPPORT_AVX512 + #endif + #endif + + #if defined(MA_SUPPORT_AVX512) + #include /* Not a mistake. Intentionally including instead of because otherwise the compiler will complain. */ + #elif defined(MA_SUPPORT_AVX2) || defined(MA_SUPPORT_AVX) + #include + #elif defined(MA_SUPPORT_SSE2) + #include + #endif +#endif + +#if defined(MA_ARM) + #if !defined(MA_NO_NEON) && (defined(__ARM_NEON) || defined(__aarch64__) || defined(_M_ARM64)) + #define MA_SUPPORT_NEON + #endif + + /* Fall back to looking for the #include file. 
*/ + #if !defined(__GNUC__) && !defined(__clang__) && defined(__has_include) + #if !defined(MA_SUPPORT_NEON) && !defined(MA_NO_NEON) && __has_include() + #define MA_SUPPORT_NEON + #endif + #endif + + #if defined(MA_SUPPORT_NEON) + #include + #endif +#endif + +/* Begin globally disabled warnings. */ +#if defined(_MSC_VER) + #pragma warning(push) + #pragma warning(disable:4752) /* found Intel(R) Advanced Vector Extensions; consider using /arch:AVX */ +#endif + +#if defined(MA_X64) || defined(MA_X86) + #if defined(_MSC_VER) && !defined(__clang__) + #if _MSC_VER >= 1400 + #include + static MA_INLINE void ma_cpuid(int info[4], int fid) + { + __cpuid(info, fid); + } + #else + #define MA_NO_CPUID + #endif + + #if _MSC_VER >= 1600 && (defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 160040219) + static MA_INLINE unsigned __int64 ma_xgetbv(int reg) + { + return _xgetbv(reg); + } + #else + #define MA_NO_XGETBV + #endif + #elif (defined(__GNUC__) || defined(__clang__)) && !defined(MA_ANDROID) + static MA_INLINE void ma_cpuid(int info[4], int fid) + { + /* + It looks like the -fPIC option uses the ebx register which GCC complains about. We can work around this by just using a different register, the + specific register of which I'm letting the compiler decide on. The "k" prefix is used to specify a 32-bit register. The {...} syntax is for + supporting different assembly dialects. + + What's basically happening is that we're saving and restoring the ebx register manually. + */ + #if defined(DRFLAC_X86) && defined(__PIC__) + __asm__ __volatile__ ( + "xchg{l} {%%}ebx, %k1;" + "cpuid;" + "xchg{l} {%%}ebx, %k1;" + : "=a"(info[0]), "=&r"(info[1]), "=c"(info[2]), "=d"(info[3]) : "a"(fid), "c"(0) + ); + #else + __asm__ __volatile__ ( + "cpuid" : "=a"(info[0]), "=b"(info[1]), "=c"(info[2]), "=d"(info[3]) : "a"(fid), "c"(0) + ); + #endif + } + + static MA_INLINE ma_uint64 ma_xgetbv(int reg) + { + unsigned int hi; + unsigned int lo; + + __asm__ __volatile__ ( + "xgetbv" : "=a"(lo), "=d"(hi) : "c"(reg) + ); + + return ((ma_uint64)hi << 32) | (ma_uint64)lo; + } + #else + #define MA_NO_CPUID + #define MA_NO_XGETBV + #endif +#else + #define MA_NO_CPUID + #define MA_NO_XGETBV +#endif + +static MA_INLINE ma_bool32 ma_has_sse2() +{ +#if defined(MA_SUPPORT_SSE2) + #if (defined(MA_X64) || defined(MA_X86)) && !defined(MA_NO_SSE2) + #if defined(MA_X64) + return MA_TRUE; /* 64-bit targets always support SSE2. */ + #elif (defined(_M_IX86_FP) && _M_IX86_FP == 2) || defined(__SSE2__) + return MA_TRUE; /* If the compiler is allowed to freely generate SSE2 code we can assume support. */ + #else + #if defined(MA_NO_CPUID) + return MA_FALSE; + #else + int info[4]; + ma_cpuid(info, 1); + return (info[3] & (1 << 26)) != 0; + #endif + #endif + #else + return MA_FALSE; /* SSE2 is only supported on x86 and x64 architectures. */ + #endif +#else + return MA_FALSE; /* No compiler support. */ +#endif +} + +#if 0 +static MA_INLINE ma_bool32 ma_has_avx() +{ +#if defined(MA_SUPPORT_AVX) + #if (defined(MA_X64) || defined(MA_X86)) && !defined(MA_NO_AVX) + #if defined(_AVX_) || defined(__AVX__) + return MA_TRUE; /* If the compiler is allowed to freely generate AVX code we can assume support. */ + #else + /* AVX requires both CPU and OS support. 
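+ To spell the test out: CPUID leaf 1 reports AVX in ECX bit 28 and OSXSAVE in ECX bit 27; only when both are set is XGETBV(0) consulted, and the 0x06 mask confirms the OS saves both SSE state (XCR0 bit 1) and AVX state (XCR0 bit 2). The AVX-512 check further below widens the mask to 0xE6 so that the opmask and upper-ZMM state bits are required as well.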
*/ + #if defined(MA_NO_CPUID) || defined(MA_NO_XGETBV) + return MA_FALSE; + #else + int info[4]; + ma_cpuid(info, 1); + if (((info[2] & (1 << 27)) != 0) && ((info[2] & (1 << 28)) != 0)) { + ma_uint64 xrc = ma_xgetbv(0); + if ((xrc & 0x06) == 0x06) { + return MA_TRUE; + } else { + return MA_FALSE; + } + } else { + return MA_FALSE; + } + #endif + #endif + #else + return MA_FALSE; /* AVX is only supported on x86 and x64 architectures. */ + #endif +#else + return MA_FALSE; /* No compiler support. */ +#endif +} +#endif + +static MA_INLINE ma_bool32 ma_has_avx2() +{ +#if defined(MA_SUPPORT_AVX2) + #if (defined(MA_X64) || defined(MA_X86)) && !defined(MA_NO_AVX2) + #if defined(_AVX2_) || defined(__AVX2__) + return MA_TRUE; /* If the compiler is allowed to freely generate AVX2 code we can assume support. */ + #else + /* AVX2 requires both CPU and OS support. */ + #if defined(MA_NO_CPUID) || defined(MA_NO_XGETBV) + return MA_FALSE; + #else + int info1[4]; + int info7[4]; + ma_cpuid(info1, 1); + ma_cpuid(info7, 7); + if (((info1[2] & (1 << 27)) != 0) && ((info7[1] & (1 << 5)) != 0)) { + ma_uint64 xrc = ma_xgetbv(0); + if ((xrc & 0x06) == 0x06) { + return MA_TRUE; + } else { + return MA_FALSE; + } + } else { + return MA_FALSE; + } + #endif + #endif + #else + return MA_FALSE; /* AVX2 is only supported on x86 and x64 architectures. */ + #endif +#else + return MA_FALSE; /* No compiler support. */ +#endif +} + +static MA_INLINE ma_bool32 ma_has_avx512f() +{ +#if defined(MA_SUPPORT_AVX512) + #if (defined(MA_X64) || defined(MA_X86)) && !defined(MA_NO_AVX512) + #if defined(__AVX512F__) + return MA_TRUE; /* If the compiler is allowed to freely generate AVX-512F code we can assume support. */ + #else + /* AVX-512 requires both CPU and OS support. */ + #if defined(MA_NO_CPUID) || defined(MA_NO_XGETBV) + return MA_FALSE; + #else + int info1[4]; + int info7[4]; + ma_cpuid(info1, 1); + ma_cpuid(info7, 7); + if (((info1[2] & (1 << 27)) != 0) && ((info7[1] & (1 << 16)) != 0)) { + ma_uint64 xrc = ma_xgetbv(0); + if ((xrc & 0xE6) == 0xE6) { + return MA_TRUE; + } else { + return MA_FALSE; + } + } else { + return MA_FALSE; + } + #endif + #endif + #else + return MA_FALSE; /* AVX-512F is only supported on x86 and x64 architectures. */ + #endif +#else + return MA_FALSE; /* No compiler support. */ +#endif +} + +static MA_INLINE ma_bool32 ma_has_neon() +{ +#if defined(MA_SUPPORT_NEON) + #if defined(MA_ARM) && !defined(MA_NO_NEON) + #if (defined(__ARM_NEON) || defined(__aarch64__) || defined(_M_ARM64)) + return MA_TRUE; /* If the compiler is allowed to freely generate NEON code we can assume support. */ + #else + /* TODO: Runtime check. */ + return MA_FALSE; + #endif + #else + return MA_FALSE; /* NEON is only supported on ARM architectures. */ + #endif +#else + return MA_FALSE; /* No compiler support. 
*/ +#endif +} + +#define MA_SIMD_NONE 0 +#define MA_SIMD_SSE2 1 +#define MA_SIMD_AVX2 2 +#define MA_SIMD_NEON 3 + +#ifndef MA_PREFERRED_SIMD + # if defined(MA_SUPPORT_SSE2) && defined(MA_PREFER_SSE2) + #define MA_PREFERRED_SIMD MA_SIMD_SSE2 + #elif defined(MA_SUPPORT_AVX2) && defined(MA_PREFER_AVX2) + #define MA_PREFERRED_SIMD MA_SIMD_AVX2 + #elif defined(MA_SUPPORT_NEON) && defined(MA_PREFER_NEON) + #define MA_PREFERRED_SIMD MA_SIMD_NEON + #else + #define MA_PREFERRED_SIMD MA_SIMD_NONE + #endif +#endif + + +static MA_INLINE ma_bool32 ma_is_little_endian() +{ +#if defined(MA_X86) || defined(MA_X64) + return MA_TRUE; +#else + int n = 1; + return (*(char*)&n) == 1; +#endif +} + +static MA_INLINE ma_bool32 ma_is_big_endian() +{ + return !ma_is_little_endian(); +} + + +#ifndef MA_COINIT_VALUE +#define MA_COINIT_VALUE 0 /* 0 = COINIT_MULTITHREADED */ +#endif + + + +#ifndef MA_PI +#define MA_PI 3.14159265358979323846264f +#endif +#ifndef MA_PI_D +#define MA_PI_D 3.14159265358979323846264 +#endif +#ifndef MA_TAU +#define MA_TAU 6.28318530717958647693f +#endif +#ifndef MA_TAU_D +#define MA_TAU_D 6.28318530717958647693 +#endif + + +/* The default format when ma_format_unknown (0) is requested when initializing a device. */ +#ifndef MA_DEFAULT_FORMAT +#define MA_DEFAULT_FORMAT ma_format_f32 +#endif + +/* The default channel count to use when 0 is used when initializing a device. */ +#ifndef MA_DEFAULT_CHANNELS +#define MA_DEFAULT_CHANNELS 2 +#endif + +/* The default sample rate to use when 0 is used when initializing a device. */ +#ifndef MA_DEFAULT_SAMPLE_RATE +#define MA_DEFAULT_SAMPLE_RATE 48000 +#endif + +/* Default periods when none is specified in ma_device_init(). More periods means more work on the CPU. */ +#ifndef MA_DEFAULT_PERIODS +#define MA_DEFAULT_PERIODS 3 +#endif + +/* The default period size in milliseconds for low latency mode. */ +#ifndef MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_LOW_LATENCY +#define MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_LOW_LATENCY 10 +#endif + +/* The default buffer size in milliseconds for conservative mode. */ +#ifndef MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_CONSERVATIVE +#define MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_CONSERVATIVE 100 +#endif + +/* The default LPF filter order for linear resampling. Note that this is clamped to MA_MAX_FILTER_ORDER. */ +#ifndef MA_DEFAULT_RESAMPLER_LPF_ORDER + #if MA_MAX_FILTER_ORDER >= 4 + #define MA_DEFAULT_RESAMPLER_LPF_ORDER 4 + #else + #define MA_DEFAULT_RESAMPLER_LPF_ORDER MA_MAX_FILTER_ORDER + #endif +#endif + + +#if defined(__GNUC__) + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wunused-variable" +#endif + +#ifndef MA_LINUX +/* Standard sample rates, in order of priority. 
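+
+(Aside on the MA_DEFAULT_* values above: each is wrapped in #ifndef, so a build can override them before compiling the implementation. A minimal sketch with arbitrary values:
+
+    #define MINIAUDIO_IMPLEMENTATION
+    #define MA_DEFAULT_SAMPLE_RATE 44100
+    #define MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_LOW_LATENCY 5
+    #include "miniaudio.h"
+
+These values only act as fallbacks for settings left at 0, or at ma_format_unknown, in a device config.)
+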
*/ +static ma_uint32 g_maStandardSampleRatePriorities[] = { + MA_SAMPLE_RATE_48000, /* Most common */ + MA_SAMPLE_RATE_44100, + + MA_SAMPLE_RATE_32000, /* Lows */ + MA_SAMPLE_RATE_24000, + MA_SAMPLE_RATE_22050, + + MA_SAMPLE_RATE_88200, /* Highs */ + MA_SAMPLE_RATE_96000, + MA_SAMPLE_RATE_176400, + MA_SAMPLE_RATE_192000, + + MA_SAMPLE_RATE_16000, /* Extreme lows */ + MA_SAMPLE_RATE_11025, + MA_SAMPLE_RATE_8000, + + MA_SAMPLE_RATE_352800, /* Extreme highs */ + MA_SAMPLE_RATE_384000 +}; +#endif + +static ma_format g_maFormatPriorities[] = { + ma_format_s16, /* Most common */ + ma_format_f32, + + /*ma_format_s24_32,*/ /* Clean alignment */ + ma_format_s32, + + ma_format_s24, /* Unclean alignment */ + + ma_format_u8 /* Low quality */ +}; +#if defined(__GNUC__) + #pragma GCC diagnostic pop +#endif + + +/****************************************************************************** + +Standard Library Stuff + +******************************************************************************/ +#ifndef MA_MALLOC +#ifdef MA_WIN32 +#define MA_MALLOC(sz) HeapAlloc(GetProcessHeap(), 0, (sz)) +#else +#define MA_MALLOC(sz) malloc((sz)) +#endif +#endif + +#ifndef MA_REALLOC +#ifdef MA_WIN32 +#define MA_REALLOC(p, sz) (((sz) > 0) ? ((p) ? HeapReAlloc(GetProcessHeap(), 0, (p), (sz)) : HeapAlloc(GetProcessHeap(), 0, (sz))) : ((VOID*)(size_t)(HeapFree(GetProcessHeap(), 0, (p)) & 0))) +#else +#define MA_REALLOC(p, sz) realloc((p), (sz)) +#endif +#endif + +#ifndef MA_FREE +#ifdef MA_WIN32 +#define MA_FREE(p) HeapFree(GetProcessHeap(), 0, (p)) +#else +#define MA_FREE(p) free((p)) +#endif +#endif + +#ifndef MA_ZERO_MEMORY +#ifdef MA_WIN32 +#define MA_ZERO_MEMORY(p, sz) ZeroMemory((p), (sz)) +#else +#define MA_ZERO_MEMORY(p, sz) memset((p), 0, (sz)) +#endif +#endif + +#ifndef MA_COPY_MEMORY +#ifdef MA_WIN32 +#define MA_COPY_MEMORY(dst, src, sz) CopyMemory((dst), (src), (sz)) +#else +#define MA_COPY_MEMORY(dst, src, sz) memcpy((dst), (src), (sz)) +#endif +#endif + +#ifndef MA_ASSERT +#ifdef MA_WIN32 +#define MA_ASSERT(condition) assert(condition) +#else +#define MA_ASSERT(condition) assert(condition) +#endif +#endif + +#define MA_ZERO_OBJECT(p) MA_ZERO_MEMORY((p), sizeof(*(p))) + +#define ma_countof(x) (sizeof(x) / sizeof(x[0])) +#define ma_max(x, y) (((x) > (y)) ? (x) : (y)) +#define ma_min(x, y) (((x) < (y)) ? (x) : (y)) +#define ma_abs(x) (((x) > 0) ? (x) : -(x)) +#define ma_clamp(x, lo, hi) (ma_max(lo, ma_min(x, hi))) +#define ma_offset_ptr(p, offset) (((ma_uint8*)(p)) + (offset)) + +#define ma_buffer_frame_capacity(buffer, channels, format) (sizeof(buffer) / ma_get_bytes_per_sample(format) / (channels)) + +static MA_INLINE double ma_sin(double x) +{ + /* TODO: Implement custom sin(x). */ + return sin(x); +} + +static MA_INLINE double ma_exp(double x) +{ + /* TODO: Implement custom exp(x). */ + return exp(x); +} + +static MA_INLINE double ma_log(double x) +{ + /* TODO: Implement custom log(x). */ + return log(x); +} + +static MA_INLINE double ma_pow(double x, double y) +{ + /* TODO: Implement custom pow(x, y). */ + return pow(x, y); +} + +static MA_INLINE double ma_sqrt(double x) +{ + /* TODO: Implement custom sqrt(x). 
*/ + return sqrt(x); +} + + +static MA_INLINE double ma_cos(double x) +{ + return ma_sin((MA_PI_D*0.5) - x); +} + +static MA_INLINE double ma_log10(double x) +{ + return ma_log(x) * 0.43429448190325182765; +} + +static MA_INLINE float ma_powf(float x, float y) +{ + return (float)ma_pow((double)x, (double)y); +} + +static MA_INLINE float ma_log10f(float x) +{ + return (float)ma_log10((double)x); +} + + +/* +Return Values: + 0: Success + 22: EINVAL + 34: ERANGE + +Not using symbolic constants for errors because I want to avoid #including errno.h +*/ +MA_API int ma_strcpy_s(char* dst, size_t dstSizeInBytes, const char* src) +{ + size_t i; + + if (dst == 0) { + return 22; + } + if (dstSizeInBytes == 0) { + return 34; + } + if (src == 0) { + dst[0] = '\0'; + return 22; + } + + for (i = 0; i < dstSizeInBytes && src[i] != '\0'; ++i) { + dst[i] = src[i]; + } + + if (i < dstSizeInBytes) { + dst[i] = '\0'; + return 0; + } + + dst[0] = '\0'; + return 34; +} + +MA_API int ma_strncpy_s(char* dst, size_t dstSizeInBytes, const char* src, size_t count) +{ + size_t maxcount; + size_t i; + + if (dst == 0) { + return 22; + } + if (dstSizeInBytes == 0) { + return 34; + } + if (src == 0) { + dst[0] = '\0'; + return 22; + } + + maxcount = count; + if (count == ((size_t)-1) || count >= dstSizeInBytes) { /* -1 = _TRUNCATE */ + maxcount = dstSizeInBytes - 1; + } + + for (i = 0; i < maxcount && src[i] != '\0'; ++i) { + dst[i] = src[i]; + } + + if (src[i] == '\0' || i == count || count == ((size_t)-1)) { + dst[i] = '\0'; + return 0; + } + + dst[0] = '\0'; + return 34; +} + +MA_API int ma_strcat_s(char* dst, size_t dstSizeInBytes, const char* src) +{ + char* dstorig; + + if (dst == 0) { + return 22; + } + if (dstSizeInBytes == 0) { + return 34; + } + if (src == 0) { + dst[0] = '\0'; + return 22; + } + + dstorig = dst; + + while (dstSizeInBytes > 0 && dst[0] != '\0') { + dst += 1; + dstSizeInBytes -= 1; + } + + if (dstSizeInBytes == 0) { + return 22; /* Unterminated. */ + } + + + while (dstSizeInBytes > 0 && src[0] != '\0') { + *dst++ = *src++; + dstSizeInBytes -= 1; + } + + if (dstSizeInBytes > 0) { + dst[0] = '\0'; + } else { + dstorig[0] = '\0'; + return 34; + } + + return 0; +} + +MA_API int ma_strncat_s(char* dst, size_t dstSizeInBytes, const char* src, size_t count) +{ + char* dstorig; + + if (dst == 0) { + return 22; + } + if (dstSizeInBytes == 0) { + return 34; + } + if (src == 0) { + return 22; + } + + dstorig = dst; + + while (dstSizeInBytes > 0 && dst[0] != '\0') { + dst += 1; + dstSizeInBytes -= 1; + } + + if (dstSizeInBytes == 0) { + return 22; /* Unterminated. */ + } + + + if (count == ((size_t)-1)) { /* _TRUNCATE */ + count = dstSizeInBytes - 1; + } + + while (dstSizeInBytes > 0 && src[0] != '\0' && count > 0) { + *dst++ = *src++; + dstSizeInBytes -= 1; + count -= 1; + } + + if (dstSizeInBytes > 0) { + dst[0] = '\0'; + } else { + dstorig[0] = '\0'; + return 34; + } + + return 0; +} + +MA_API int ma_itoa_s(int value, char* dst, size_t dstSizeInBytes, int radix) +{ + int sign; + unsigned int valueU; + char* dstEnd; + + if (dst == NULL || dstSizeInBytes == 0) { + return 22; + } + if (radix < 2 || radix > 36) { + dst[0] = '\0'; + return 22; + } + + sign = (value < 0 && radix == 10) ? -1 : 1; /* The negative sign is only used when the base is 10. 
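+
+ (As with the string helpers above, the convention is 0 on success, 22 for EINVAL-style misuse and 34 when the destination cannot hold the result, in which case the destination is reset to an empty string. A small illustration, with an arbitrary 8-byte buffer:
+
+     char name[8];
+     int rc = ma_strcpy_s(name, sizeof(name), "PulseAudio");
+
+ rc is 34 here and name[0] is '\0', because the 10-character source does not fit.)
+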
*/ + + if (value < 0) { + valueU = -value; + } else { + valueU = value; + } + + dstEnd = dst; + do + { + int remainder = valueU % radix; + if (remainder > 9) { + *dstEnd = (char)((remainder - 10) + 'a'); + } else { + *dstEnd = (char)(remainder + '0'); + } + + dstEnd += 1; + dstSizeInBytes -= 1; + valueU /= radix; + } while (dstSizeInBytes > 0 && valueU > 0); + + if (dstSizeInBytes == 0) { + dst[0] = '\0'; + return 22; /* Ran out of room in the output buffer. */ + } + + if (sign < 0) { + *dstEnd++ = '-'; + dstSizeInBytes -= 1; + } + + if (dstSizeInBytes == 0) { + dst[0] = '\0'; + return 22; /* Ran out of room in the output buffer. */ + } + + *dstEnd = '\0'; + + + /* At this point the string will be reversed. */ + dstEnd -= 1; + while (dst < dstEnd) { + char temp = *dst; + *dst = *dstEnd; + *dstEnd = temp; + + dst += 1; + dstEnd -= 1; + } + + return 0; +} + +MA_API int ma_strcmp(const char* str1, const char* str2) +{ + if (str1 == str2) return 0; + + /* These checks differ from the standard implementation. It's not important, but I prefer it just for sanity. */ + if (str1 == NULL) return -1; + if (str2 == NULL) return 1; + + for (;;) { + if (str1[0] == '\0') { + break; + } + if (str1[0] != str2[0]) { + break; + } + + str1 += 1; + str2 += 1; + } + + return ((unsigned char*)str1)[0] - ((unsigned char*)str2)[0]; +} + +MA_API int ma_strappend(char* dst, size_t dstSize, const char* srcA, const char* srcB) +{ + int result; + + result = ma_strncpy_s(dst, dstSize, srcA, (size_t)-1); + if (result != 0) { + return result; + } + + result = ma_strncat_s(dst, dstSize, srcB, (size_t)-1); + if (result != 0) { + return result; + } + + return result; +} + +MA_API char* ma_copy_string(const char* src, const ma_allocation_callbacks* pAllocationCallbacks) +{ + size_t sz = strlen(src)+1; + char* dst = (char*)ma_malloc(sz, pAllocationCallbacks); + if (dst == NULL) { + return NULL; + } + + ma_strcpy_s(dst, sz, src); + + return dst; +} + + +#include +static ma_result ma_result_from_errno(int e) +{ + switch (e) + { + case 0: return MA_SUCCESS; + #ifdef EPERM + case EPERM: return MA_INVALID_OPERATION; + #endif + #ifdef ENOENT + case ENOENT: return MA_DOES_NOT_EXIST; + #endif + #ifdef ESRCH + case ESRCH: return MA_DOES_NOT_EXIST; + #endif + #ifdef EINTR + case EINTR: return MA_INTERRUPT; + #endif + #ifdef EIO + case EIO: return MA_IO_ERROR; + #endif + #ifdef ENXIO + case ENXIO: return MA_DOES_NOT_EXIST; + #endif + #ifdef E2BIG + case E2BIG: return MA_INVALID_ARGS; + #endif + #ifdef ENOEXEC + case ENOEXEC: return MA_INVALID_FILE; + #endif + #ifdef EBADF + case EBADF: return MA_INVALID_FILE; + #endif + #ifdef ECHILD + case ECHILD: return MA_ERROR; + #endif + #ifdef EAGAIN + case EAGAIN: return MA_UNAVAILABLE; + #endif + #ifdef ENOMEM + case ENOMEM: return MA_OUT_OF_MEMORY; + #endif + #ifdef EACCES + case EACCES: return MA_ACCESS_DENIED; + #endif + #ifdef EFAULT + case EFAULT: return MA_BAD_ADDRESS; + #endif + #ifdef ENOTBLK + case ENOTBLK: return MA_ERROR; + #endif + #ifdef EBUSY + case EBUSY: return MA_BUSY; + #endif + #ifdef EEXIST + case EEXIST: return MA_ALREADY_EXISTS; + #endif + #ifdef EXDEV + case EXDEV: return MA_ERROR; + #endif + #ifdef ENODEV + case ENODEV: return MA_DOES_NOT_EXIST; + #endif + #ifdef ENOTDIR + case ENOTDIR: return MA_NOT_DIRECTORY; + #endif + #ifdef EISDIR + case EISDIR: return MA_IS_DIRECTORY; + #endif + #ifdef EINVAL + case EINVAL: return MA_INVALID_ARGS; + #endif + #ifdef ENFILE + case ENFILE: return MA_TOO_MANY_OPEN_FILES; + #endif + #ifdef EMFILE + case EMFILE: return 
MA_TOO_MANY_OPEN_FILES; + #endif + #ifdef ENOTTY + case ENOTTY: return MA_INVALID_OPERATION; + #endif + #ifdef ETXTBSY + case ETXTBSY: return MA_BUSY; + #endif + #ifdef EFBIG + case EFBIG: return MA_TOO_BIG; + #endif + #ifdef ENOSPC + case ENOSPC: return MA_NO_SPACE; + #endif + #ifdef ESPIPE + case ESPIPE: return MA_BAD_SEEK; + #endif + #ifdef EROFS + case EROFS: return MA_ACCESS_DENIED; + #endif + #ifdef EMLINK + case EMLINK: return MA_TOO_MANY_LINKS; + #endif + #ifdef EPIPE + case EPIPE: return MA_BAD_PIPE; + #endif + #ifdef EDOM + case EDOM: return MA_OUT_OF_RANGE; + #endif + #ifdef ERANGE + case ERANGE: return MA_OUT_OF_RANGE; + #endif + #ifdef EDEADLK + case EDEADLK: return MA_DEADLOCK; + #endif + #ifdef ENAMETOOLONG + case ENAMETOOLONG: return MA_PATH_TOO_LONG; + #endif + #ifdef ENOLCK + case ENOLCK: return MA_ERROR; + #endif + #ifdef ENOSYS + case ENOSYS: return MA_NOT_IMPLEMENTED; + #endif + #ifdef ENOTEMPTY + case ENOTEMPTY: return MA_DIRECTORY_NOT_EMPTY; + #endif + #ifdef ELOOP + case ELOOP: return MA_TOO_MANY_LINKS; + #endif + #ifdef ENOMSG + case ENOMSG: return MA_NO_MESSAGE; + #endif + #ifdef EIDRM + case EIDRM: return MA_ERROR; + #endif + #ifdef ECHRNG + case ECHRNG: return MA_ERROR; + #endif + #ifdef EL2NSYNC + case EL2NSYNC: return MA_ERROR; + #endif + #ifdef EL3HLT + case EL3HLT: return MA_ERROR; + #endif + #ifdef EL3RST + case EL3RST: return MA_ERROR; + #endif + #ifdef ELNRNG + case ELNRNG: return MA_OUT_OF_RANGE; + #endif + #ifdef EUNATCH + case EUNATCH: return MA_ERROR; + #endif + #ifdef ENOCSI + case ENOCSI: return MA_ERROR; + #endif + #ifdef EL2HLT + case EL2HLT: return MA_ERROR; + #endif + #ifdef EBADE + case EBADE: return MA_ERROR; + #endif + #ifdef EBADR + case EBADR: return MA_ERROR; + #endif + #ifdef EXFULL + case EXFULL: return MA_ERROR; + #endif + #ifdef ENOANO + case ENOANO: return MA_ERROR; + #endif + #ifdef EBADRQC + case EBADRQC: return MA_ERROR; + #endif + #ifdef EBADSLT + case EBADSLT: return MA_ERROR; + #endif + #ifdef EBFONT + case EBFONT: return MA_INVALID_FILE; + #endif + #ifdef ENOSTR + case ENOSTR: return MA_ERROR; + #endif + #ifdef ENODATA + case ENODATA: return MA_NO_DATA_AVAILABLE; + #endif + #ifdef ETIME + case ETIME: return MA_TIMEOUT; + #endif + #ifdef ENOSR + case ENOSR: return MA_NO_DATA_AVAILABLE; + #endif + #ifdef ENONET + case ENONET: return MA_NO_NETWORK; + #endif + #ifdef ENOPKG + case ENOPKG: return MA_ERROR; + #endif + #ifdef EREMOTE + case EREMOTE: return MA_ERROR; + #endif + #ifdef ENOLINK + case ENOLINK: return MA_ERROR; + #endif + #ifdef EADV + case EADV: return MA_ERROR; + #endif + #ifdef ESRMNT + case ESRMNT: return MA_ERROR; + #endif + #ifdef ECOMM + case ECOMM: return MA_ERROR; + #endif + #ifdef EPROTO + case EPROTO: return MA_ERROR; + #endif + #ifdef EMULTIHOP + case EMULTIHOP: return MA_ERROR; + #endif + #ifdef EDOTDOT + case EDOTDOT: return MA_ERROR; + #endif + #ifdef EBADMSG + case EBADMSG: return MA_BAD_MESSAGE; + #endif + #ifdef EOVERFLOW + case EOVERFLOW: return MA_TOO_BIG; + #endif + #ifdef ENOTUNIQ + case ENOTUNIQ: return MA_NOT_UNIQUE; + #endif + #ifdef EBADFD + case EBADFD: return MA_ERROR; + #endif + #ifdef EREMCHG + case EREMCHG: return MA_ERROR; + #endif + #ifdef ELIBACC + case ELIBACC: return MA_ACCESS_DENIED; + #endif + #ifdef ELIBBAD + case ELIBBAD: return MA_INVALID_FILE; + #endif + #ifdef ELIBSCN + case ELIBSCN: return MA_INVALID_FILE; + #endif + #ifdef ELIBMAX + case ELIBMAX: return MA_ERROR; + #endif + #ifdef ELIBEXEC + case ELIBEXEC: return MA_ERROR; + #endif + #ifdef EILSEQ + case EILSEQ: return 
MA_INVALID_DATA; + #endif + #ifdef ERESTART + case ERESTART: return MA_ERROR; + #endif + #ifdef ESTRPIPE + case ESTRPIPE: return MA_ERROR; + #endif + #ifdef EUSERS + case EUSERS: return MA_ERROR; + #endif + #ifdef ENOTSOCK + case ENOTSOCK: return MA_NOT_SOCKET; + #endif + #ifdef EDESTADDRREQ + case EDESTADDRREQ: return MA_NO_ADDRESS; + #endif + #ifdef EMSGSIZE + case EMSGSIZE: return MA_TOO_BIG; + #endif + #ifdef EPROTOTYPE + case EPROTOTYPE: return MA_BAD_PROTOCOL; + #endif + #ifdef ENOPROTOOPT + case ENOPROTOOPT: return MA_PROTOCOL_UNAVAILABLE; + #endif + #ifdef EPROTONOSUPPORT + case EPROTONOSUPPORT: return MA_PROTOCOL_NOT_SUPPORTED; + #endif + #ifdef ESOCKTNOSUPPORT + case ESOCKTNOSUPPORT: return MA_SOCKET_NOT_SUPPORTED; + #endif + #ifdef EOPNOTSUPP + case EOPNOTSUPP: return MA_INVALID_OPERATION; + #endif + #ifdef EPFNOSUPPORT + case EPFNOSUPPORT: return MA_PROTOCOL_FAMILY_NOT_SUPPORTED; + #endif + #ifdef EAFNOSUPPORT + case EAFNOSUPPORT: return MA_ADDRESS_FAMILY_NOT_SUPPORTED; + #endif + #ifdef EADDRINUSE + case EADDRINUSE: return MA_ALREADY_IN_USE; + #endif + #ifdef EADDRNOTAVAIL + case EADDRNOTAVAIL: return MA_ERROR; + #endif + #ifdef ENETDOWN + case ENETDOWN: return MA_NO_NETWORK; + #endif + #ifdef ENETUNREACH + case ENETUNREACH: return MA_NO_NETWORK; + #endif + #ifdef ENETRESET + case ENETRESET: return MA_NO_NETWORK; + #endif + #ifdef ECONNABORTED + case ECONNABORTED: return MA_NO_NETWORK; + #endif + #ifdef ECONNRESET + case ECONNRESET: return MA_CONNECTION_RESET; + #endif + #ifdef ENOBUFS + case ENOBUFS: return MA_NO_SPACE; + #endif + #ifdef EISCONN + case EISCONN: return MA_ALREADY_CONNECTED; + #endif + #ifdef ENOTCONN + case ENOTCONN: return MA_NOT_CONNECTED; + #endif + #ifdef ESHUTDOWN + case ESHUTDOWN: return MA_ERROR; + #endif + #ifdef ETOOMANYREFS + case ETOOMANYREFS: return MA_ERROR; + #endif + #ifdef ETIMEDOUT + case ETIMEDOUT: return MA_TIMEOUT; + #endif + #ifdef ECONNREFUSED + case ECONNREFUSED: return MA_CONNECTION_REFUSED; + #endif + #ifdef EHOSTDOWN + case EHOSTDOWN: return MA_NO_HOST; + #endif + #ifdef EHOSTUNREACH + case EHOSTUNREACH: return MA_NO_HOST; + #endif + #ifdef EALREADY + case EALREADY: return MA_IN_PROGRESS; + #endif + #ifdef EINPROGRESS + case EINPROGRESS: return MA_IN_PROGRESS; + #endif + #ifdef ESTALE + case ESTALE: return MA_INVALID_FILE; + #endif + #ifdef EUCLEAN + case EUCLEAN: return MA_ERROR; + #endif + #ifdef ENOTNAM + case ENOTNAM: return MA_ERROR; + #endif + #ifdef ENAVAIL + case ENAVAIL: return MA_ERROR; + #endif + #ifdef EISNAM + case EISNAM: return MA_ERROR; + #endif + #ifdef EREMOTEIO + case EREMOTEIO: return MA_IO_ERROR; + #endif + #ifdef EDQUOT + case EDQUOT: return MA_NO_SPACE; + #endif + #ifdef ENOMEDIUM + case ENOMEDIUM: return MA_DOES_NOT_EXIST; + #endif + #ifdef EMEDIUMTYPE + case EMEDIUMTYPE: return MA_ERROR; + #endif + #ifdef ECANCELED + case ECANCELED: return MA_CANCELLED; + #endif + #ifdef ENOKEY + case ENOKEY: return MA_ERROR; + #endif + #ifdef EKEYEXPIRED + case EKEYEXPIRED: return MA_ERROR; + #endif + #ifdef EKEYREVOKED + case EKEYREVOKED: return MA_ERROR; + #endif + #ifdef EKEYREJECTED + case EKEYREJECTED: return MA_ERROR; + #endif + #ifdef EOWNERDEAD + case EOWNERDEAD: return MA_ERROR; + #endif + #ifdef ENOTRECOVERABLE + case ENOTRECOVERABLE: return MA_ERROR; + #endif + #ifdef ERFKILL + case ERFKILL: return MA_ERROR; + #endif + #ifdef EHWPOISON + case EHWPOISON: return MA_ERROR; + #endif + default: return MA_ERROR; + } +} + +MA_API ma_result ma_fopen(FILE** ppFile, const char* pFilePath, const char* pOpenMode) +{ +#if 
_MSC_VER && _MSC_VER >= 1400 + errno_t err; +#endif + + if (ppFile != NULL) { + *ppFile = NULL; /* Safety. */ + } + + if (pFilePath == NULL || pOpenMode == NULL || ppFile == NULL) { + return MA_INVALID_ARGS; + } + +#if _MSC_VER && _MSC_VER >= 1400 + err = fopen_s(ppFile, pFilePath, pOpenMode); + if (err != 0) { + return ma_result_from_errno(err); + } +#else +#if defined(_WIN32) || defined(__APPLE__) + *ppFile = fopen(pFilePath, pOpenMode); +#else + #if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS == 64 && defined(_LARGEFILE64_SOURCE) + *ppFile = fopen64(pFilePath, pOpenMode); + #else + *ppFile = fopen(pFilePath, pOpenMode); + #endif +#endif + if (*ppFile == NULL) { + ma_result result = ma_result_from_errno(errno); + if (result == MA_SUCCESS) { + result = MA_ERROR; /* Just a safety check to make sure we never ever return success when pFile == NULL. */ + } + + return result; + } +#endif + + return MA_SUCCESS; +} + + + +/* +_wfopen() isn't always available in all compilation environments. + + * Windows only. + * MSVC seems to support it universally as far back as VC6 from what I can tell (haven't checked further back). + * MinGW-64 (both 32- and 64-bit) seems to support it. + * MinGW wraps it in !defined(__STRICT_ANSI__). + +This can be reviewed as compatibility issues arise. The preference is to use _wfopen_s() and _wfopen() as opposed to the wcsrtombs() +fallback, so if you notice your compiler not detecting this properly I'm happy to look at adding support. +*/ +#if defined(_WIN32) + #if defined(_MSC_VER) || defined(__MINGW64__) || !defined(__STRICT_ANSI__) + #define MA_HAS_WFOPEN + #endif +#endif + +MA_API ma_result ma_wfopen(FILE** ppFile, const wchar_t* pFilePath, const wchar_t* pOpenMode, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (ppFile != NULL) { + *ppFile = NULL; /* Safety. */ + } + + if (pFilePath == NULL || pOpenMode == NULL || ppFile == NULL) { + return MA_INVALID_ARGS; + } + +#if defined(MA_HAS_WFOPEN) + { + /* Use _wfopen() on Windows. */ + #if defined(_MSC_VER) && _MSC_VER >= 1400 + errno_t err = _wfopen_s(ppFile, pFilePath, pOpenMode); + if (err != 0) { + return ma_result_from_errno(err); + } + #else + *ppFile = _wfopen(pFilePath, pOpenMode); + if (*ppFile == NULL) { + return ma_result_from_errno(errno); + } + #endif + (void)pAllocationCallbacks; + } +#else + /* + Use fopen() on anything other than Windows. Requires a conversion. This is annoying because fopen() is locale specific. The only real way I can + think of to do this is with wcsrtombs(). Note that wcstombs() is apparently not thread-safe because it uses a static global mbstate_t object for + maintaining state. I've checked this with -std=c89 and it works, but if somebody get's a compiler error I'll look into improving compatibility. + */ + { + mbstate_t mbs; + size_t lenMB; + const wchar_t* pFilePathTemp = pFilePath; + char* pFilePathMB = NULL; + char pOpenModeMB[32] = {0}; + + /* Get the length first. */ + MA_ZERO_OBJECT(&mbs); + lenMB = wcsrtombs(NULL, &pFilePathTemp, 0, &mbs); + if (lenMB == (size_t)-1) { + return ma_result_from_errno(errno); + } + + pFilePathMB = (char*)ma_malloc(lenMB + 1, pAllocationCallbacks); + if (pFilePathMB == NULL) { + return MA_OUT_OF_MEMORY; + } + + pFilePathTemp = pFilePath; + MA_ZERO_OBJECT(&mbs); + wcsrtombs(pFilePathMB, &pFilePathTemp, lenMB + 1, &mbs); + + /* The open mode should always consist of ASCII characters so we should be able to do a trivial conversion. 
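+
+    (For the common narrow-character case, a usage sketch of ma_fopen() with an illustrative file name:
+
+        FILE* pFile;
+        if (ma_fopen(&pFile, "clip.ogg", "rb") != MA_SUCCESS) {
+            return;
+        }
+
+    On failure the C runtime's errno has already been translated to a ma_result by ma_result_from_errno() above.)
+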
*/ + { + size_t i = 0; + for (;;) { + if (pOpenMode[i] == 0) { + pOpenModeMB[i] = '\0'; + break; + } + + pOpenModeMB[i] = (char)pOpenMode[i]; + i += 1; + } + } + + *ppFile = fopen(pFilePathMB, pOpenModeMB); + + ma_free(pFilePathMB, pAllocationCallbacks); + } + + if (*ppFile == NULL) { + return MA_ERROR; + } +#endif + + return MA_SUCCESS; +} + + + +static MA_INLINE void ma_copy_memory_64(void* dst, const void* src, ma_uint64 sizeInBytes) +{ +#if 0xFFFFFFFFFFFFFFFF <= MA_SIZE_MAX + MA_COPY_MEMORY(dst, src, (size_t)sizeInBytes); +#else + while (sizeInBytes > 0) { + ma_uint64 bytesToCopyNow = sizeInBytes; + if (bytesToCopyNow > MA_SIZE_MAX) { + bytesToCopyNow = MA_SIZE_MAX; + } + + MA_COPY_MEMORY(dst, src, (size_t)bytesToCopyNow); /* Safe cast to size_t. */ + + sizeInBytes -= bytesToCopyNow; + dst = ( void*)(( ma_uint8*)dst + bytesToCopyNow); + src = (const void*)((const ma_uint8*)src + bytesToCopyNow); + } +#endif +} + +static MA_INLINE void ma_zero_memory_64(void* dst, ma_uint64 sizeInBytes) +{ +#if 0xFFFFFFFFFFFFFFFF <= MA_SIZE_MAX + MA_ZERO_MEMORY(dst, (size_t)sizeInBytes); +#else + while (sizeInBytes > 0) { + ma_uint64 bytesToZeroNow = sizeInBytes; + if (bytesToZeroNow > MA_SIZE_MAX) { + bytesToZeroNow = MA_SIZE_MAX; + } + + MA_ZERO_MEMORY(dst, (size_t)bytesToZeroNow); /* Safe cast to size_t. */ + + sizeInBytes -= bytesToZeroNow; + dst = (void*)((ma_uint8*)dst + bytesToZeroNow); + } +#endif +} + + +/* Thanks to good old Bit Twiddling Hacks for this one: http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2 */ +static MA_INLINE unsigned int ma_next_power_of_2(unsigned int x) +{ + x--; + x |= x >> 1; + x |= x >> 2; + x |= x >> 4; + x |= x >> 8; + x |= x >> 16; + x++; + + return x; +} + +static MA_INLINE unsigned int ma_prev_power_of_2(unsigned int x) +{ + return ma_next_power_of_2(x) >> 1; +} + +static MA_INLINE unsigned int ma_round_to_power_of_2(unsigned int x) +{ + unsigned int prev = ma_prev_power_of_2(x); + unsigned int next = ma_next_power_of_2(x); + if ((next - x) > (x - prev)) { + return prev; + } else { + return next; + } +} + +static MA_INLINE unsigned int ma_count_set_bits(unsigned int x) +{ + unsigned int count = 0; + while (x != 0) { + if (x & 1) { + count += 1; + } + + x = x >> 1; + } + + return count; +} + + + +/* Clamps an f32 sample to -1..1 */ +static MA_INLINE float ma_clip_f32(float x) +{ + if (x < -1) return -1; + if (x > +1) return +1; + return x; +} + +static MA_INLINE float ma_mix_f32(float x, float y, float a) +{ + return x*(1-a) + y*a; +} +static MA_INLINE float ma_mix_f32_fast(float x, float y, float a) +{ + float r0 = (y - x); + float r1 = r0*a; + return x + r1; + /*return x + (y - x)*a;*/ +} + + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE __m128 ma_mix_f32_fast__sse2(__m128 x, __m128 y, __m128 a) +{ + return _mm_add_ps(x, _mm_mul_ps(_mm_sub_ps(y, x), a)); +} +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE __m256 ma_mix_f32_fast__avx2(__m256 x, __m256 y, __m256 a) +{ + return _mm256_add_ps(x, _mm256_mul_ps(_mm256_sub_ps(y, x), a)); +} +#endif +#if defined(MA_SUPPORT_AVX512) +static MA_INLINE __m512 ma_mix_f32_fast__avx512(__m512 x, __m512 y, __m512 a) +{ + return _mm512_add_ps(x, _mm512_mul_ps(_mm512_sub_ps(y, x), a)); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE float32x4_t ma_mix_f32_fast__neon(float32x4_t x, float32x4_t y, float32x4_t a) +{ + return vaddq_f32(x, vmulq_f32(vsubq_f32(y, x), a)); +} +#endif + + +static MA_INLINE double ma_mix_f64(double x, double y, double a) +{ + return x*(1-a) + y*a; +} +static MA_INLINE 
double ma_mix_f64_fast(double x, double y, double a) +{ + return x + (y - x)*a; +} + +static MA_INLINE float ma_scale_to_range_f32(float x, float lo, float hi) +{ + return lo + x*(hi-lo); +} + + +/* +Greatest common factor using Euclid's algorithm iteratively. +*/ +static MA_INLINE ma_uint32 ma_gcf_u32(ma_uint32 a, ma_uint32 b) +{ + for (;;) { + if (b == 0) { + break; + } else { + ma_uint32 t = a; + a = b; + b = t % a; + } + } + + return a; +} + + +/* +Random Number Generation + +miniaudio uses the LCG random number generation algorithm. This is good enough for audio. + +Note that miniaudio's global LCG implementation uses global state which is _not_ thread-local. When this is called across +multiple threads, results will be unpredictable. However, it won't crash and results will still be random enough for +miniaudio's purposes. +*/ +#ifndef MA_DEFAULT_LCG_SEED +#define MA_DEFAULT_LCG_SEED 4321 +#endif + +#define MA_LCG_M 2147483647 +#define MA_LCG_A 48271 +#define MA_LCG_C 0 + +static ma_lcg g_maLCG = {MA_DEFAULT_LCG_SEED}; /* Non-zero initial seed. Use ma_seed() to use an explicit seed. */ + +static MA_INLINE void ma_lcg_seed(ma_lcg* pLCG, ma_int32 seed) +{ + MA_ASSERT(pLCG != NULL); + pLCG->state = seed; +} + +static MA_INLINE ma_int32 ma_lcg_rand_s32(ma_lcg* pLCG) +{ + pLCG->state = (MA_LCG_A * pLCG->state + MA_LCG_C) % MA_LCG_M; + return pLCG->state; +} + +static MA_INLINE ma_uint32 ma_lcg_rand_u32(ma_lcg* pLCG) +{ + return (ma_uint32)ma_lcg_rand_s32(pLCG); +} + +static MA_INLINE ma_int16 ma_lcg_rand_s16(ma_lcg* pLCG) +{ + return (ma_int16)(ma_lcg_rand_s32(pLCG) & 0xFFFF); +} + +static MA_INLINE double ma_lcg_rand_f64(ma_lcg* pLCG) +{ + return ma_lcg_rand_s32(pLCG) / (double)0x7FFFFFFF; +} + +static MA_INLINE float ma_lcg_rand_f32(ma_lcg* pLCG) +{ + return (float)ma_lcg_rand_f64(pLCG); +} + +static MA_INLINE float ma_lcg_rand_range_f32(ma_lcg* pLCG, float lo, float hi) +{ + return ma_scale_to_range_f32(ma_lcg_rand_f32(pLCG), lo, hi); +} + +static MA_INLINE ma_int32 ma_lcg_rand_range_s32(ma_lcg* pLCG, ma_int32 lo, ma_int32 hi) +{ + if (lo == hi) { + return lo; + } + + return lo + ma_lcg_rand_u32(pLCG) / (0xFFFFFFFF / (hi - lo + 1) + 1); +} + + + +static MA_INLINE void ma_seed(ma_int32 seed) +{ + ma_lcg_seed(&g_maLCG, seed); +} + +static MA_INLINE ma_int32 ma_rand_s32() +{ + return ma_lcg_rand_s32(&g_maLCG); +} + +static MA_INLINE ma_uint32 ma_rand_u32() +{ + return ma_lcg_rand_u32(&g_maLCG); +} + +static MA_INLINE double ma_rand_f64() +{ + return ma_lcg_rand_f64(&g_maLCG); +} + +static MA_INLINE float ma_rand_f32() +{ + return ma_lcg_rand_f32(&g_maLCG); +} + +static MA_INLINE float ma_rand_range_f32(float lo, float hi) +{ + return ma_lcg_rand_range_f32(&g_maLCG, lo, hi); +} + +static MA_INLINE ma_int32 ma_rand_range_s32(ma_int32 lo, ma_int32 hi) +{ + return ma_lcg_rand_range_s32(&g_maLCG, lo, hi); +} + + +static MA_INLINE float ma_dither_f32_rectangle(float ditherMin, float ditherMax) +{ + return ma_rand_range_f32(ditherMin, ditherMax); +} + +static MA_INLINE float ma_dither_f32_triangle(float ditherMin, float ditherMax) +{ + float a = ma_rand_range_f32(ditherMin, 0); + float b = ma_rand_range_f32(0, ditherMax); + return a + b; +} + +static MA_INLINE float ma_dither_f32(ma_dither_mode ditherMode, float ditherMin, float ditherMax) +{ + if (ditherMode == ma_dither_mode_rectangle) { + return ma_dither_f32_rectangle(ditherMin, ditherMax); + } + if (ditherMode == ma_dither_mode_triangle) { + return ma_dither_f32_triangle(ditherMin, ditherMax); + } + + return 0; +} + +static MA_INLINE 
ma_int32 ma_dither_s32(ma_dither_mode ditherMode, ma_int32 ditherMin, ma_int32 ditherMax) +{ + if (ditherMode == ma_dither_mode_rectangle) { + ma_int32 a = ma_rand_range_s32(ditherMin, ditherMax); + return a; + } + if (ditherMode == ma_dither_mode_triangle) { + ma_int32 a = ma_rand_range_s32(ditherMin, 0); + ma_int32 b = ma_rand_range_s32(0, ditherMax); + return a + b; + } + + return 0; +} + + +/****************************************************************************** + +Atomics + +******************************************************************************/ +#if defined(__clang__) + #if defined(__has_builtin) + #if __has_builtin(__sync_swap) + #define MA_HAS_SYNC_SWAP + #endif + #endif +#elif defined(__GNUC__) + #if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC__ >= 7) + #define MA_HAS_GNUC_ATOMICS + #endif +#endif + +#if defined(_WIN32) && !defined(__GNUC__) && !defined(__clang__) +#define ma_memory_barrier() MemoryBarrier() +#define ma_atomic_exchange_32(a, b) InterlockedExchange((LONG*)a, (LONG)b) +#define ma_atomic_exchange_64(a, b) InterlockedExchange64((LONGLONG*)a, (LONGLONG)b) +#define ma_atomic_increment_32(a) InterlockedIncrement((LONG*)a) +#define ma_atomic_decrement_32(a) InterlockedDecrement((LONG*)a) +#else +#define ma_memory_barrier() __sync_synchronize() +#if defined(MA_HAS_SYNC_SWAP) + #define ma_atomic_exchange_32(a, b) __sync_swap(a, b) + #define ma_atomic_exchange_64(a, b) __sync_swap(a, b) +#elif defined(MA_HAS_GNUC_ATOMICS) + #define ma_atomic_exchange_32(a, b) (void)__atomic_exchange_n(a, b, __ATOMIC_ACQ_REL) + #define ma_atomic_exchange_64(a, b) (void)__atomic_exchange_n(a, b, __ATOMIC_ACQ_REL) +#else + #define ma_atomic_exchange_32(a, b) __sync_synchronize(); (void)__sync_lock_test_and_set(a, b) + #define ma_atomic_exchange_64(a, b) __sync_synchronize(); (void)__sync_lock_test_and_set(a, b) +#endif +#define ma_atomic_increment_32(a) __sync_add_and_fetch(a, 1) +#define ma_atomic_decrement_32(a) __sync_sub_and_fetch(a, 1) +#endif + +#ifdef MA_64BIT +#define ma_atomic_exchange_ptr ma_atomic_exchange_64 +#endif +#ifdef MA_32BIT +#define ma_atomic_exchange_ptr ma_atomic_exchange_32 +#endif + + +static void* ma__malloc_default(size_t sz, void* pUserData) +{ + (void)pUserData; + return MA_MALLOC(sz); +} + +static void* ma__realloc_default(void* p, size_t sz, void* pUserData) +{ + (void)pUserData; + return MA_REALLOC(p, sz); +} + +static void ma__free_default(void* p, void* pUserData) +{ + (void)pUserData; + MA_FREE(p); +} + + +static void* ma__malloc_from_callbacks(size_t sz, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks == NULL) { + return NULL; + } + + if (pAllocationCallbacks->onMalloc != NULL) { + return pAllocationCallbacks->onMalloc(sz, pAllocationCallbacks->pUserData); + } + + /* Try using realloc(). */ + if (pAllocationCallbacks->onRealloc != NULL) { + return pAllocationCallbacks->onRealloc(NULL, sz, pAllocationCallbacks->pUserData); + } + + return NULL; +} + +static void* ma__realloc_from_callbacks(void* p, size_t szNew, size_t szOld, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks == NULL) { + return NULL; + } + + if (pAllocationCallbacks->onRealloc != NULL) { + return pAllocationCallbacks->onRealloc(p, szNew, pAllocationCallbacks->pUserData); + } + + /* Try emulating realloc() in terms of malloc()/free(). 
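+
+    (At the public level these helpers are driven by a caller-supplied ma_allocation_callbacks. A minimal sketch of wiring in a custom allocator, where my_malloc, my_realloc and my_free are user-defined stand-ins:
+
+        static void* my_malloc(size_t sz, void* pUserData)           { (void)pUserData; return malloc(sz);     }
+        static void* my_realloc(void* p, size_t sz, void* pUserData) { (void)pUserData; return realloc(p, sz); }
+        static void  my_free(void* p, void* pUserData)               { (void)pUserData; free(p);               }
+
+        ma_allocation_callbacks cb;
+        cb.pUserData = NULL;
+        cb.onMalloc  = my_malloc;
+        cb.onRealloc = my_realloc;
+        cb.onFree    = my_free;
+
+    Note that ma_allocation_callbacks_init_copy() below rejects a set that is missing onFree, or that provides neither onMalloc nor onRealloc.)
+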
*/ + if (pAllocationCallbacks->onMalloc != NULL && pAllocationCallbacks->onFree != NULL) { + void* p2; + + p2 = pAllocationCallbacks->onMalloc(szNew, pAllocationCallbacks->pUserData); + if (p2 == NULL) { + return NULL; + } + + if (p != NULL) { + MA_COPY_MEMORY(p2, p, szOld); + pAllocationCallbacks->onFree(p, pAllocationCallbacks->pUserData); + } + + return p2; + } + + return NULL; +} + +static MA_INLINE void* ma__calloc_from_callbacks(size_t sz, const ma_allocation_callbacks* pAllocationCallbacks) +{ + void* p = ma__malloc_from_callbacks(sz, pAllocationCallbacks); + if (p != NULL) { + MA_ZERO_MEMORY(p, sz); + } + + return p; +} + +static void ma__free_from_callbacks(void* p, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (p == NULL || pAllocationCallbacks == NULL) { + return; + } + + if (pAllocationCallbacks->onFree != NULL) { + pAllocationCallbacks->onFree(p, pAllocationCallbacks->pUserData); + } +} + +static ma_allocation_callbacks ma_allocation_callbacks_init_default() +{ + ma_allocation_callbacks callbacks; + callbacks.pUserData = NULL; + callbacks.onMalloc = ma__malloc_default; + callbacks.onRealloc = ma__realloc_default; + callbacks.onFree = ma__free_default; + + return callbacks; +} + +static ma_result ma_allocation_callbacks_init_copy(ma_allocation_callbacks* pDst, const ma_allocation_callbacks* pSrc) +{ + if (pDst == NULL) { + return MA_INVALID_ARGS; + } + + if (pSrc == NULL) { + *pDst = ma_allocation_callbacks_init_default(); + } else { + if (pSrc->pUserData == NULL && pSrc->onFree == NULL && pSrc->onMalloc == NULL && pSrc->onRealloc == NULL) { + *pDst = ma_allocation_callbacks_init_default(); + } else { + if (pSrc->onFree == NULL || (pSrc->onMalloc == NULL && pSrc->onRealloc == NULL)) { + return MA_INVALID_ARGS; /* Invalid allocation callbacks. */ + } else { + *pDst = *pSrc; + } + } + } + + return MA_SUCCESS; +} + + +MA_API ma_uint64 ma_calculate_frame_count_after_resampling(ma_uint32 sampleRateOut, ma_uint32 sampleRateIn, ma_uint64 frameCountIn) +{ + /* For robustness we're going to use a resampler object to calculate this since that already has a way of calculating this. 
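+
+    (The result is essentially frameCountIn * sampleRateOut / sampleRateIn, subject to the linear resampler's own rounding. For example, with illustrative values:
+
+        ma_uint64 outFrames = ma_calculate_frame_count_after_resampling(48000, 44100, 44100);
+
+    outFrames comes back as approximately 48000.)
+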
*/ + ma_result result; + ma_uint64 frameCountOut; + ma_resampler_config config; + ma_resampler resampler; + + config = ma_resampler_config_init(ma_format_s16, 1, sampleRateIn, sampleRateOut, ma_resample_algorithm_linear); + result = ma_resampler_init(&config, &resampler); + if (result != MA_SUCCESS) { + return 0; + } + + frameCountOut = ma_resampler_get_expected_output_frame_count(&resampler, frameCountIn); + + ma_resampler_uninit(&resampler); + return frameCountOut; +} + +#ifndef MA_DATA_CONVERTER_STACK_BUFFER_SIZE +#define MA_DATA_CONVERTER_STACK_BUFFER_SIZE 4096 +#endif + +/************************************************************************************************************************************************************ +************************************************************************************************************************************************************* + +DEVICE I/O +========== + +************************************************************************************************************************************************************* +************************************************************************************************************************************************************/ +#ifndef MA_NO_DEVICE_IO +#ifdef MA_WIN32 + #include + #include + #include +#endif + +#if defined(MA_APPLE) && (__MAC_OS_X_VERSION_MIN_REQUIRED < 101200) + #include /* For mach_absolute_time() */ +#endif + +#ifdef MA_POSIX + #include + #include + #include + #include +#endif + +/* +Unfortunately using runtime linking for pthreads causes problems. This has occurred for me when testing on FreeBSD. When +using runtime linking, deadlocks can occur (for me it happens when loading data from fread()). It turns out that doing +compile-time linking fixes this. I'm not sure why this happens, but the safest way I can think of to fix this is to simply +disable runtime linking by default. To enable runtime linking, #define this before the implementation of this file. I am +not officially supporting this, but I'm leaving it here in case it's useful for somebody, somewhere. +*/ +/*#define MA_USE_RUNTIME_LINKING_FOR_PTHREAD*/ + +/* Disable run-time linking on certain backends. */ +#ifndef MA_NO_RUNTIME_LINKING + #if defined(MA_ANDROID) || defined(MA_EMSCRIPTEN) + #define MA_NO_RUNTIME_LINKING + #endif +#endif + +/* +Check if we have the necessary development packages for each backend at the top so we can use this to determine whether or not +certain unused functions and variables can be excluded from the build to avoid warnings. +*/ +#ifdef MA_ENABLE_WASAPI + #define MA_HAS_WASAPI /* Every compiler should support WASAPI */ +#endif +#ifdef MA_ENABLE_DSOUND + #define MA_HAS_DSOUND /* Every compiler should support DirectSound. */ +#endif +#ifdef MA_ENABLE_WINMM + #define MA_HAS_WINMM /* Every compiler I'm aware of supports WinMM. 
*/ +#endif +#ifdef MA_ENABLE_ALSA + #define MA_HAS_ALSA + #ifdef MA_NO_RUNTIME_LINKING + #ifdef __has_include + #if !__has_include() + #undef MA_HAS_ALSA + #endif + #endif + #endif +#endif +#ifdef MA_ENABLE_PULSEAUDIO + #define MA_HAS_PULSEAUDIO + #ifdef MA_NO_RUNTIME_LINKING + #ifdef __has_include + #if !__has_include() + #undef MA_HAS_PULSEAUDIO + #endif + #endif + #endif +#endif +#ifdef MA_ENABLE_JACK + #define MA_HAS_JACK + #ifdef MA_NO_RUNTIME_LINKING + #ifdef __has_include + #if !__has_include() + #undef MA_HAS_JACK + #endif + #endif + #endif +#endif +#ifdef MA_ENABLE_COREAUDIO + #define MA_HAS_COREAUDIO +#endif +#ifdef MA_ENABLE_SNDIO + #define MA_HAS_SNDIO +#endif +#ifdef MA_ENABLE_AUDIO4 + #define MA_HAS_AUDIO4 +#endif +#ifdef MA_ENABLE_OSS + #define MA_HAS_OSS +#endif +#ifdef MA_ENABLE_AAUDIO + #define MA_HAS_AAUDIO +#endif +#ifdef MA_ENABLE_OPENSL + #define MA_HAS_OPENSL +#endif +#ifdef MA_ENABLE_WEBAUDIO + #define MA_HAS_WEBAUDIO +#endif +#ifdef MA_ENABLE_NULL + #define MA_HAS_NULL /* Everything supports the null backend. */ +#endif + +MA_API const char* ma_get_backend_name(ma_backend backend) +{ + switch (backend) + { + case ma_backend_wasapi: return "WASAPI"; + case ma_backend_dsound: return "DirectSound"; + case ma_backend_winmm: return "WinMM"; + case ma_backend_coreaudio: return "Core Audio"; + case ma_backend_sndio: return "sndio"; + case ma_backend_audio4: return "audio(4)"; + case ma_backend_oss: return "OSS"; + case ma_backend_pulseaudio: return "PulseAudio"; + case ma_backend_alsa: return "ALSA"; + case ma_backend_jack: return "JACK"; + case ma_backend_aaudio: return "AAudio"; + case ma_backend_opensl: return "OpenSL|ES"; + case ma_backend_webaudio: return "Web Audio"; + case ma_backend_null: return "Null"; + default: return "Unknown"; + } +} + +MA_API ma_bool32 ma_is_loopback_supported(ma_backend backend) +{ + switch (backend) + { + case ma_backend_wasapi: return MA_TRUE; + case ma_backend_dsound: return MA_FALSE; + case ma_backend_winmm: return MA_FALSE; + case ma_backend_coreaudio: return MA_FALSE; + case ma_backend_sndio: return MA_FALSE; + case ma_backend_audio4: return MA_FALSE; + case ma_backend_oss: return MA_FALSE; + case ma_backend_pulseaudio: return MA_FALSE; + case ma_backend_alsa: return MA_FALSE; + case ma_backend_jack: return MA_FALSE; + case ma_backend_aaudio: return MA_FALSE; + case ma_backend_opensl: return MA_FALSE; + case ma_backend_webaudio: return MA_FALSE; + case ma_backend_null: return MA_FALSE; + default: return MA_FALSE; + } +} + + + +#ifdef MA_WIN32 + #define MA_THREADCALL WINAPI + typedef unsigned long ma_thread_result; +#else + #define MA_THREADCALL + typedef void* ma_thread_result; +#endif +typedef ma_thread_result (MA_THREADCALL * ma_thread_entry_proc)(void* pData); + +#ifdef MA_WIN32 +static ma_result ma_result_from_GetLastError(DWORD error) +{ + switch (error) + { + case ERROR_SUCCESS: return MA_SUCCESS; + case ERROR_PATH_NOT_FOUND: return MA_DOES_NOT_EXIST; + case ERROR_TOO_MANY_OPEN_FILES: return MA_TOO_MANY_OPEN_FILES; + case ERROR_NOT_ENOUGH_MEMORY: return MA_OUT_OF_MEMORY; + case ERROR_DISK_FULL: return MA_NO_SPACE; + case ERROR_HANDLE_EOF: return MA_END_OF_FILE; + case ERROR_NEGATIVE_SEEK: return MA_BAD_SEEK; + case ERROR_INVALID_PARAMETER: return MA_INVALID_ARGS; + case ERROR_ACCESS_DENIED: return MA_ACCESS_DENIED; + case ERROR_SEM_TIMEOUT: return MA_TIMEOUT; + case ERROR_FILE_NOT_FOUND: return MA_DOES_NOT_EXIST; + default: break; + } + + return MA_ERROR; +} + +/* WASAPI error codes. 
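+
+A small usage sketch of the two helpers above:
+
+    const char* backendName = ma_get_backend_name(ma_backend_wasapi);
+    ma_bool32   canLoopback = ma_is_loopback_supported(ma_backend_wasapi);
+
+backendName is "WASAPI" and canLoopback is MA_TRUE; per the table above, WASAPI is currently the only backend reporting loopback support.
+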
*/ +#define MA_AUDCLNT_E_NOT_INITIALIZED ((HRESULT)0x88890001) +#define MA_AUDCLNT_E_ALREADY_INITIALIZED ((HRESULT)0x88890002) +#define MA_AUDCLNT_E_WRONG_ENDPOINT_TYPE ((HRESULT)0x88890003) +#define MA_AUDCLNT_E_DEVICE_INVALIDATED ((HRESULT)0x88890004) +#define MA_AUDCLNT_E_NOT_STOPPED ((HRESULT)0x88890005) +#define MA_AUDCLNT_E_BUFFER_TOO_LARGE ((HRESULT)0x88890006) +#define MA_AUDCLNT_E_OUT_OF_ORDER ((HRESULT)0x88890007) +#define MA_AUDCLNT_E_UNSUPPORTED_FORMAT ((HRESULT)0x88890008) +#define MA_AUDCLNT_E_INVALID_SIZE ((HRESULT)0x88890009) +#define MA_AUDCLNT_E_DEVICE_IN_USE ((HRESULT)0x8889000A) +#define MA_AUDCLNT_E_BUFFER_OPERATION_PENDING ((HRESULT)0x8889000B) +#define MA_AUDCLNT_E_THREAD_NOT_REGISTERED ((HRESULT)0x8889000C) +#define MA_AUDCLNT_E_NO_SINGLE_PROCESS ((HRESULT)0x8889000D) +#define MA_AUDCLNT_E_EXCLUSIVE_MODE_NOT_ALLOWED ((HRESULT)0x8889000E) +#define MA_AUDCLNT_E_ENDPOINT_CREATE_FAILED ((HRESULT)0x8889000F) +#define MA_AUDCLNT_E_SERVICE_NOT_RUNNING ((HRESULT)0x88890010) +#define MA_AUDCLNT_E_EVENTHANDLE_NOT_EXPECTED ((HRESULT)0x88890011) +#define MA_AUDCLNT_E_EXCLUSIVE_MODE_ONLY ((HRESULT)0x88890012) +#define MA_AUDCLNT_E_BUFDURATION_PERIOD_NOT_EQUAL ((HRESULT)0x88890013) +#define MA_AUDCLNT_E_EVENTHANDLE_NOT_SET ((HRESULT)0x88890014) +#define MA_AUDCLNT_E_INCORRECT_BUFFER_SIZE ((HRESULT)0x88890015) +#define MA_AUDCLNT_E_BUFFER_SIZE_ERROR ((HRESULT)0x88890016) +#define MA_AUDCLNT_E_CPUUSAGE_EXCEEDED ((HRESULT)0x88890017) +#define MA_AUDCLNT_E_BUFFER_ERROR ((HRESULT)0x88890018) +#define MA_AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED ((HRESULT)0x88890019) +#define MA_AUDCLNT_E_INVALID_DEVICE_PERIOD ((HRESULT)0x88890020) +#define MA_AUDCLNT_E_INVALID_STREAM_FLAG ((HRESULT)0x88890021) +#define MA_AUDCLNT_E_ENDPOINT_OFFLOAD_NOT_CAPABLE ((HRESULT)0x88890022) +#define MA_AUDCLNT_E_OUT_OF_OFFLOAD_RESOURCES ((HRESULT)0x88890023) +#define MA_AUDCLNT_E_OFFLOAD_MODE_ONLY ((HRESULT)0x88890024) +#define MA_AUDCLNT_E_NONOFFLOAD_MODE_ONLY ((HRESULT)0x88890025) +#define MA_AUDCLNT_E_RESOURCES_INVALIDATED ((HRESULT)0x88890026) +#define MA_AUDCLNT_E_RAW_MODE_UNSUPPORTED ((HRESULT)0x88890027) +#define MA_AUDCLNT_E_ENGINE_PERIODICITY_LOCKED ((HRESULT)0x88890028) +#define MA_AUDCLNT_E_ENGINE_FORMAT_LOCKED ((HRESULT)0x88890029) +#define MA_AUDCLNT_E_HEADTRACKING_ENABLED ((HRESULT)0x88890030) +#define MA_AUDCLNT_E_HEADTRACKING_UNSUPPORTED ((HRESULT)0x88890040) +#define MA_AUDCLNT_S_BUFFER_EMPTY ((HRESULT)0x08890001) +#define MA_AUDCLNT_S_THREAD_ALREADY_REGISTERED ((HRESULT)0x08890002) +#define MA_AUDCLNT_S_POSITION_STALLED ((HRESULT)0x08890003) + +#define MA_DS_OK ((HRESULT)0) +#define MA_DS_NO_VIRTUALIZATION ((HRESULT)0x0878000A) +#define MA_DSERR_ALLOCATED ((HRESULT)0x8878000A) +#define MA_DSERR_CONTROLUNAVAIL ((HRESULT)0x8878001E) +#define MA_DSERR_INVALIDPARAM ((HRESULT)0x80070057) /*E_INVALIDARG*/ +#define MA_DSERR_INVALIDCALL ((HRESULT)0x88780032) +#define MA_DSERR_GENERIC ((HRESULT)0x80004005) /*E_FAIL*/ +#define MA_DSERR_PRIOLEVELNEEDED ((HRESULT)0x88780046) +#define MA_DSERR_OUTOFMEMORY ((HRESULT)0x8007000E) /*E_OUTOFMEMORY*/ +#define MA_DSERR_BADFORMAT ((HRESULT)0x88780064) +#define MA_DSERR_UNSUPPORTED ((HRESULT)0x80004001) /*E_NOTIMPL*/ +#define MA_DSERR_NODRIVER ((HRESULT)0x88780078) +#define MA_DSERR_ALREADYINITIALIZED ((HRESULT)0x88780082) +#define MA_DSERR_NOAGGREGATION ((HRESULT)0x80040110) /*CLASS_E_NOAGGREGATION*/ +#define MA_DSERR_BUFFERLOST ((HRESULT)0x88780096) +#define MA_DSERR_OTHERAPPHASPRIO ((HRESULT)0x887800A0) +#define MA_DSERR_UNINITIALIZED ((HRESULT)0x887800AA) +#define 
MA_DSERR_NOINTERFACE ((HRESULT)0x80004002) /*E_NOINTERFACE*/ +#define MA_DSERR_ACCESSDENIED ((HRESULT)0x80070005) /*E_ACCESSDENIED*/ +#define MA_DSERR_BUFFERTOOSMALL ((HRESULT)0x887800B4) +#define MA_DSERR_DS8_REQUIRED ((HRESULT)0x887800BE) +#define MA_DSERR_SENDLOOP ((HRESULT)0x887800C8) +#define MA_DSERR_BADSENDBUFFERGUID ((HRESULT)0x887800D2) +#define MA_DSERR_OBJECTNOTFOUND ((HRESULT)0x88781161) +#define MA_DSERR_FXUNAVAILABLE ((HRESULT)0x887800DC) + +static ma_result ma_result_from_HRESULT(HRESULT hr) +{ + switch (hr) + { + case NOERROR: return MA_SUCCESS; + /*case S_OK: return MA_SUCCESS;*/ + + case E_POINTER: return MA_INVALID_ARGS; + case E_UNEXPECTED: return MA_ERROR; + case E_NOTIMPL: return MA_NOT_IMPLEMENTED; + case E_OUTOFMEMORY: return MA_OUT_OF_MEMORY; + case E_INVALIDARG: return MA_INVALID_ARGS; + case E_NOINTERFACE: return MA_API_NOT_FOUND; + case E_HANDLE: return MA_INVALID_ARGS; + case E_ABORT: return MA_ERROR; + case E_FAIL: return MA_ERROR; + case E_ACCESSDENIED: return MA_ACCESS_DENIED; + + /* WASAPI */ + case MA_AUDCLNT_E_NOT_INITIALIZED: return MA_DEVICE_NOT_INITIALIZED; + case MA_AUDCLNT_E_ALREADY_INITIALIZED: return MA_DEVICE_ALREADY_INITIALIZED; + case MA_AUDCLNT_E_WRONG_ENDPOINT_TYPE: return MA_INVALID_ARGS; + case MA_AUDCLNT_E_DEVICE_INVALIDATED: return MA_UNAVAILABLE; + case MA_AUDCLNT_E_NOT_STOPPED: return MA_DEVICE_NOT_STOPPED; + case MA_AUDCLNT_E_BUFFER_TOO_LARGE: return MA_TOO_BIG; + case MA_AUDCLNT_E_OUT_OF_ORDER: return MA_INVALID_OPERATION; + case MA_AUDCLNT_E_UNSUPPORTED_FORMAT: return MA_FORMAT_NOT_SUPPORTED; + case MA_AUDCLNT_E_INVALID_SIZE: return MA_INVALID_ARGS; + case MA_AUDCLNT_E_DEVICE_IN_USE: return MA_BUSY; + case MA_AUDCLNT_E_BUFFER_OPERATION_PENDING: return MA_INVALID_OPERATION; + case MA_AUDCLNT_E_THREAD_NOT_REGISTERED: return MA_DOES_NOT_EXIST; + case MA_AUDCLNT_E_NO_SINGLE_PROCESS: return MA_INVALID_OPERATION; + case MA_AUDCLNT_E_EXCLUSIVE_MODE_NOT_ALLOWED: return MA_SHARE_MODE_NOT_SUPPORTED; + case MA_AUDCLNT_E_ENDPOINT_CREATE_FAILED: return MA_FAILED_TO_OPEN_BACKEND_DEVICE; + case MA_AUDCLNT_E_SERVICE_NOT_RUNNING: return MA_NOT_CONNECTED; + case MA_AUDCLNT_E_EVENTHANDLE_NOT_EXPECTED: return MA_INVALID_ARGS; + case MA_AUDCLNT_E_EXCLUSIVE_MODE_ONLY: return MA_SHARE_MODE_NOT_SUPPORTED; + case MA_AUDCLNT_E_BUFDURATION_PERIOD_NOT_EQUAL: return MA_INVALID_ARGS; + case MA_AUDCLNT_E_EVENTHANDLE_NOT_SET: return MA_INVALID_ARGS; + case MA_AUDCLNT_E_INCORRECT_BUFFER_SIZE: return MA_INVALID_ARGS; + case MA_AUDCLNT_E_BUFFER_SIZE_ERROR: return MA_INVALID_ARGS; + case MA_AUDCLNT_E_CPUUSAGE_EXCEEDED: return MA_ERROR; + case MA_AUDCLNT_E_BUFFER_ERROR: return MA_ERROR; + case MA_AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED: return MA_INVALID_ARGS; + case MA_AUDCLNT_E_INVALID_DEVICE_PERIOD: return MA_INVALID_ARGS; + case MA_AUDCLNT_E_INVALID_STREAM_FLAG: return MA_INVALID_ARGS; + case MA_AUDCLNT_E_ENDPOINT_OFFLOAD_NOT_CAPABLE: return MA_INVALID_OPERATION; + case MA_AUDCLNT_E_OUT_OF_OFFLOAD_RESOURCES: return MA_OUT_OF_MEMORY; + case MA_AUDCLNT_E_OFFLOAD_MODE_ONLY: return MA_INVALID_OPERATION; + case MA_AUDCLNT_E_NONOFFLOAD_MODE_ONLY: return MA_INVALID_OPERATION; + case MA_AUDCLNT_E_RESOURCES_INVALIDATED: return MA_INVALID_DATA; + case MA_AUDCLNT_E_RAW_MODE_UNSUPPORTED: return MA_INVALID_OPERATION; + case MA_AUDCLNT_E_ENGINE_PERIODICITY_LOCKED: return MA_INVALID_OPERATION; + case MA_AUDCLNT_E_ENGINE_FORMAT_LOCKED: return MA_INVALID_OPERATION; + case MA_AUDCLNT_E_HEADTRACKING_ENABLED: return MA_INVALID_OPERATION; + case MA_AUDCLNT_E_HEADTRACKING_UNSUPPORTED: return 
MA_INVALID_OPERATION; + case MA_AUDCLNT_S_BUFFER_EMPTY: return MA_NO_SPACE; + case MA_AUDCLNT_S_THREAD_ALREADY_REGISTERED: return MA_ALREADY_EXISTS; + case MA_AUDCLNT_S_POSITION_STALLED: return MA_ERROR; + + /* DirectSound */ + /*case MA_DS_OK: return MA_SUCCESS;*/ /* S_OK */ + case MA_DS_NO_VIRTUALIZATION: return MA_SUCCESS; + case MA_DSERR_ALLOCATED: return MA_ALREADY_IN_USE; + case MA_DSERR_CONTROLUNAVAIL: return MA_INVALID_OPERATION; + /*case MA_DSERR_INVALIDPARAM: return MA_INVALID_ARGS;*/ /* E_INVALIDARG */ + case MA_DSERR_INVALIDCALL: return MA_INVALID_OPERATION; + /*case MA_DSERR_GENERIC: return MA_ERROR;*/ /* E_FAIL */ + case MA_DSERR_PRIOLEVELNEEDED: return MA_INVALID_OPERATION; + /*case MA_DSERR_OUTOFMEMORY: return MA_OUT_OF_MEMORY;*/ /* E_OUTOFMEMORY */ + case MA_DSERR_BADFORMAT: return MA_FORMAT_NOT_SUPPORTED; + /*case MA_DSERR_UNSUPPORTED: return MA_NOT_IMPLEMENTED;*/ /* E_NOTIMPL */ + case MA_DSERR_NODRIVER: return MA_FAILED_TO_INIT_BACKEND; + case MA_DSERR_ALREADYINITIALIZED: return MA_DEVICE_ALREADY_INITIALIZED; + case MA_DSERR_NOAGGREGATION: return MA_ERROR; + case MA_DSERR_BUFFERLOST: return MA_UNAVAILABLE; + case MA_DSERR_OTHERAPPHASPRIO: return MA_ACCESS_DENIED; + case MA_DSERR_UNINITIALIZED: return MA_DEVICE_NOT_INITIALIZED; + /*case MA_DSERR_NOINTERFACE: return MA_API_NOT_FOUND;*/ /* E_NOINTERFACE */ + /*case MA_DSERR_ACCESSDENIED: return MA_ACCESS_DENIED;*/ /* E_ACCESSDENIED */ + case MA_DSERR_BUFFERTOOSMALL: return MA_NO_SPACE; + case MA_DSERR_DS8_REQUIRED: return MA_INVALID_OPERATION; + case MA_DSERR_SENDLOOP: return MA_DEADLOCK; + case MA_DSERR_BADSENDBUFFERGUID: return MA_INVALID_ARGS; + case MA_DSERR_OBJECTNOTFOUND: return MA_NO_DEVICE; + case MA_DSERR_FXUNAVAILABLE: return MA_UNAVAILABLE; + + default: return MA_ERROR; + } +} + +typedef HRESULT (WINAPI * MA_PFN_CoInitializeEx)(LPVOID pvReserved, DWORD dwCoInit); +typedef void (WINAPI * MA_PFN_CoUninitialize)(void); +typedef HRESULT (WINAPI * MA_PFN_CoCreateInstance)(REFCLSID rclsid, LPUNKNOWN pUnkOuter, DWORD dwClsContext, REFIID riid, LPVOID *ppv); +typedef void (WINAPI * MA_PFN_CoTaskMemFree)(LPVOID pv); +typedef HRESULT (WINAPI * MA_PFN_PropVariantClear)(PROPVARIANT *pvar); +typedef int (WINAPI * MA_PFN_StringFromGUID2)(const GUID* const rguid, LPOLESTR lpsz, int cchMax); + +typedef HWND (WINAPI * MA_PFN_GetForegroundWindow)(void); +typedef HWND (WINAPI * MA_PFN_GetDesktopWindow)(void); + +/* Microsoft documents these APIs as returning LSTATUS, but the Win32 API shipping with some compilers do not define it. It's just a LONG. */ +typedef LONG (WINAPI * MA_PFN_RegOpenKeyExA)(HKEY hKey, LPCSTR lpSubKey, DWORD ulOptions, REGSAM samDesired, PHKEY phkResult); +typedef LONG (WINAPI * MA_PFN_RegCloseKey)(HKEY hKey); +typedef LONG (WINAPI * MA_PFN_RegQueryValueExA)(HKEY hKey, LPCSTR lpValueName, LPDWORD lpReserved, LPDWORD lpType, LPBYTE lpData, LPDWORD lpcbData); +#endif + + +#define MA_STATE_UNINITIALIZED 0 +#define MA_STATE_STOPPED 1 /* The device's default state after initialization. */ +#define MA_STATE_STARTED 2 /* The worker thread is in it's main loop waiting for the driver to request or deliver audio data. */ +#define MA_STATE_STARTING 3 /* Transitioning from a stopped state to started. */ +#define MA_STATE_STOPPING 4 /* Transitioning from a started state to stopped. 
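+
+These states back the public query API; a minimal check, assuming a ma_device named device that has been initialized elsewhere:
+
+    ma_bool32 isRunning = ma_device_is_started(&device);
+
+isRunning is MA_TRUE only while the internal state is MA_STATE_STARTED.
+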
*/ + +#define MA_DEFAULT_PLAYBACK_DEVICE_NAME "Default Playback Device" +#define MA_DEFAULT_CAPTURE_DEVICE_NAME "Default Capture Device" + + +MA_API const char* ma_log_level_to_string(ma_uint32 logLevel) +{ + switch (logLevel) + { + case MA_LOG_LEVEL_VERBOSE: return ""; + case MA_LOG_LEVEL_INFO: return "INFO"; + case MA_LOG_LEVEL_WARNING: return "WARNING"; + case MA_LOG_LEVEL_ERROR: return "ERROR"; + default: return "ERROR"; + } +} + +/* Posts a log message. */ +static void ma_post_log_message(ma_context* pContext, ma_device* pDevice, ma_uint32 logLevel, const char* message) +{ + if (pContext == NULL) { + if (pDevice != NULL) { + pContext = pDevice->pContext; + } + } + + if (pContext == NULL) { + return; + } + +#if defined(MA_LOG_LEVEL) + if (logLevel <= MA_LOG_LEVEL) { + ma_log_proc onLog; + + #if defined(MA_DEBUG_OUTPUT) + if (logLevel <= MA_LOG_LEVEL) { + printf("%s: %s\n", ma_log_level_to_string(logLevel), message); + } + #endif + + onLog = pContext->logCallback; + if (onLog) { + onLog(pContext, pDevice, logLevel, message); + } + } +#endif +} + +/* Posts a formatted log message. */ +static void ma_post_log_messagev(ma_context* pContext, ma_device* pDevice, ma_uint32 logLevel, const char* pFormat, va_list args) +{ +#if (!defined(_MSC_VER) || _MSC_VER >= 1900) && !defined(__STRICT_ANSI__) + { + char pFormattedMessage[1024]; + vsnprintf(pFormattedMessage, sizeof(pFormattedMessage), pFormat, args); + ma_post_log_message(pContext, pDevice, logLevel, pFormattedMessage); + } +#else + { + /* + Without snprintf() we need to first measure the string and then heap allocate it. I'm only aware of Visual Studio having support for this without snprintf(), so we'll + need to restrict this branch to Visual Studio. For other compilers we need to just not support formatted logging because I don't want the security risk of overflowing + a fixed sized stack allocated buffer. + */ +#if defined (_MSC_VER) + int formattedLen; + va_list args2; + + #if _MSC_VER >= 1800 + va_copy(args2, args); + #else + args2 = args; + #endif + formattedLen = _vscprintf(pFormat, args2); + va_end(args2); + + if (formattedLen > 0) { + char* pFormattedMessage = NULL; + ma_allocation_callbacks* pAllocationCallbacks = NULL; + + /* Make sure we have a context so we can allocate memory. */ + if (pContext == NULL) { + if (pDevice != NULL) { + pContext = pDevice->pContext; + } + } + + if (pContext != NULL) { + pAllocationCallbacks = &pContext->allocationCallbacks; + } + + pFormattedMessage = (char*)ma_malloc(formattedLen + 1, pAllocationCallbacks); + if (pFormattedMessage != NULL) { + vsprintf_s(pFormattedMessage, formattedLen + 1, pFormat, args); + ma_post_log_message(pContext, pDevice, logLevel, pFormattedMessage); + ma_free(pFormattedMessage, pAllocationCallbacks); + } + } +#else + /* Can't do anything because we don't have a safe way of to emulate vsnprintf() without a manual solution. */ + (void)pContext; + (void)pDevice; + (void)logLevel; + (void)pFormat; + (void)args; +#endif + } +#endif +} + +static MA_INLINE void ma_post_log_messagef(ma_context* pContext, ma_device* pDevice, ma_uint32 logLevel, const char* pFormat, ...) +{ + va_list args; + va_start(args, pFormat); + { + ma_post_log_messagev(pContext, pDevice, logLevel, pFormat, args); + } + va_end(args); +} + +/* Posts an log message. Throw a breakpoint in here if you're needing to debug. The return value is always "resultCode". 
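+
+    A typical call site follows the same pattern used throughout the backends in this file, roughly like the
+    sketch below (the failing call and the message text are placeholders):
+
+        HRESULT hr = (some COM or backend call);
+        if (FAILED(hr)) {
+            return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Something failed.", ma_result_from_HRESULT(hr));
+        }
+
+    When only a device object is at hand, the ma_post_error() wrapper defined just after this function does the
+    same thing with a NULL context.
+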
*/ +static ma_result ma_context_post_error(ma_context* pContext, ma_device* pDevice, ma_uint32 logLevel, const char* message, ma_result resultCode) +{ + ma_post_log_message(pContext, pDevice, logLevel, message); + return resultCode; +} + +static ma_result ma_post_error(ma_device* pDevice, ma_uint32 logLevel, const char* message, ma_result resultCode) +{ + return ma_context_post_error(NULL, pDevice, logLevel, message, resultCode); +} + + +/******************************************************************************* + +Timing + +*******************************************************************************/ +#ifdef MA_WIN32 + static LARGE_INTEGER g_ma_TimerFrequency = {{0}}; + static void ma_timer_init(ma_timer* pTimer) + { + LARGE_INTEGER counter; + + if (g_ma_TimerFrequency.QuadPart == 0) { + QueryPerformanceFrequency(&g_ma_TimerFrequency); + } + + QueryPerformanceCounter(&counter); + pTimer->counter = counter.QuadPart; + } + + static double ma_timer_get_time_in_seconds(ma_timer* pTimer) + { + LARGE_INTEGER counter; + if (!QueryPerformanceCounter(&counter)) { + return 0; + } + + return (double)(counter.QuadPart - pTimer->counter) / g_ma_TimerFrequency.QuadPart; + } +#elif defined(MA_APPLE) && (__MAC_OS_X_VERSION_MIN_REQUIRED < 101200) + static ma_uint64 g_ma_TimerFrequency = 0; + static void ma_timer_init(ma_timer* pTimer) + { + mach_timebase_info_data_t baseTime; + mach_timebase_info(&baseTime); + g_ma_TimerFrequency = (baseTime.denom * 1e9) / baseTime.numer; + + pTimer->counter = mach_absolute_time(); + } + + static double ma_timer_get_time_in_seconds(ma_timer* pTimer) + { + ma_uint64 newTimeCounter = mach_absolute_time(); + ma_uint64 oldTimeCounter = pTimer->counter; + + return (newTimeCounter - oldTimeCounter) / g_ma_TimerFrequency; + } +#elif defined(MA_EMSCRIPTEN) + static MA_INLINE void ma_timer_init(ma_timer* pTimer) + { + pTimer->counterD = emscripten_get_now(); + } + + static MA_INLINE double ma_timer_get_time_in_seconds(ma_timer* pTimer) + { + return (emscripten_get_now() - pTimer->counterD) / 1000; /* Emscripten is in milliseconds. 
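+
+       Usage is the same across every backend variant of this timer; a minimal sketch (do_work() is a placeholder):
+
+           ma_timer timer;
+           double elapsedSeconds;
+           ma_timer_init(&timer);
+           do_work();
+           elapsedSeconds = ma_timer_get_time_in_seconds(&timer);
+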
*/ + } +#else + #if _POSIX_C_SOURCE >= 199309L + #if defined(CLOCK_MONOTONIC) + #define MA_CLOCK_ID CLOCK_MONOTONIC + #else + #define MA_CLOCK_ID CLOCK_REALTIME + #endif + + static void ma_timer_init(ma_timer* pTimer) + { + struct timespec newTime; + clock_gettime(MA_CLOCK_ID, &newTime); + + pTimer->counter = (newTime.tv_sec * 1000000000) + newTime.tv_nsec; + } + + static double ma_timer_get_time_in_seconds(ma_timer* pTimer) + { + ma_uint64 newTimeCounter; + ma_uint64 oldTimeCounter; + + struct timespec newTime; + clock_gettime(MA_CLOCK_ID, &newTime); + + newTimeCounter = (newTime.tv_sec * 1000000000) + newTime.tv_nsec; + oldTimeCounter = pTimer->counter; + + return (newTimeCounter - oldTimeCounter) / 1000000000.0; + } + #else + static void ma_timer_init(ma_timer* pTimer) + { + struct timeval newTime; + gettimeofday(&newTime, NULL); + + pTimer->counter = (newTime.tv_sec * 1000000) + newTime.tv_usec; + } + + static double ma_timer_get_time_in_seconds(ma_timer* pTimer) + { + ma_uint64 newTimeCounter; + ma_uint64 oldTimeCounter; + + struct timeval newTime; + gettimeofday(&newTime, NULL); + + newTimeCounter = (newTime.tv_sec * 1000000) + newTime.tv_usec; + oldTimeCounter = pTimer->counter; + + return (newTimeCounter - oldTimeCounter) / 1000000.0; + } + #endif +#endif + + +/******************************************************************************* + +Dynamic Linking + +*******************************************************************************/ +MA_API ma_handle ma_dlopen(ma_context* pContext, const char* filename) +{ + ma_handle handle; + +#if MA_LOG_LEVEL >= MA_LOG_LEVEL_VERBOSE + if (pContext != NULL) { + char message[256]; + ma_strappend(message, sizeof(message), "Loading library: ", filename); + ma_post_log_message(pContext, NULL, MA_LOG_LEVEL_VERBOSE, message); + } +#endif + +#ifdef _WIN32 +#ifdef MA_WIN32_DESKTOP + handle = (ma_handle)LoadLibraryA(filename); +#else + /* *sigh* It appears there is no ANSI version of LoadPackagedLibrary()... */ + WCHAR filenameW[4096]; + if (MultiByteToWideChar(CP_UTF8, 0, filename, -1, filenameW, sizeof(filenameW)) == 0) { + handle = NULL; + } else { + handle = (ma_handle)LoadPackagedLibrary(filenameW, 0); + } +#endif +#else + handle = (ma_handle)dlopen(filename, RTLD_NOW); +#endif + + /* + I'm not considering failure to load a library an error nor a warning because seamlessly falling through to a lower-priority + backend is a deliberate design choice. Instead I'm logging it as an informational message. + */ +#if MA_LOG_LEVEL >= MA_LOG_LEVEL_INFO + if (handle == NULL) { + char message[256]; + ma_strappend(message, sizeof(message), "Failed to load library: ", filename); + ma_post_log_message(pContext, NULL, MA_LOG_LEVEL_INFO, message); + } +#endif + + (void)pContext; /* It's possible for pContext to be unused. 
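+
+    For reference, the backends in this file use these wrappers in a plain open/resolve/close sequence, along
+    these lines (the library and symbol names below are made up):
+
+        ma_handle hLib = ma_dlopen(pContext, "libexample.so.2");
+        if (hLib != NULL) {
+            ma_proc pfn = ma_dlsym(pContext, hLib, "example_init");
+            ...
+            ma_dlclose(pContext, hLib);
+        }
+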
*/ + return handle; +} + +MA_API void ma_dlclose(ma_context* pContext, ma_handle handle) +{ +#ifdef _WIN32 + FreeLibrary((HMODULE)handle); +#else + dlclose((void*)handle); +#endif + + (void)pContext; +} + +MA_API ma_proc ma_dlsym(ma_context* pContext, ma_handle handle, const char* symbol) +{ + ma_proc proc; + +#if MA_LOG_LEVEL >= MA_LOG_LEVEL_VERBOSE + if (pContext != NULL) { + char message[256]; + ma_strappend(message, sizeof(message), "Loading symbol: ", symbol); + ma_post_log_message(pContext, NULL, MA_LOG_LEVEL_VERBOSE, message); + } +#endif + +#ifdef _WIN32 + proc = (ma_proc)GetProcAddress((HMODULE)handle, symbol); +#else +#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wpedantic" +#endif + proc = (ma_proc)dlsym((void*)handle, symbol); +#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) + #pragma GCC diagnostic pop +#endif +#endif + +#if MA_LOG_LEVEL >= MA_LOG_LEVEL_WARNING + if (handle == NULL) { + char message[256]; + ma_strappend(message, sizeof(message), "Failed to load symbol: ", symbol); + ma_post_log_message(pContext, NULL, MA_LOG_LEVEL_WARNING, message); + } +#endif + + (void)pContext; /* It's possible for pContext to be unused. */ + return proc; +} + + +/******************************************************************************* + +Threading + +*******************************************************************************/ +#ifdef MA_WIN32 +static int ma_thread_priority_to_win32(ma_thread_priority priority) +{ + switch (priority) { + case ma_thread_priority_idle: return THREAD_PRIORITY_IDLE; + case ma_thread_priority_lowest: return THREAD_PRIORITY_LOWEST; + case ma_thread_priority_low: return THREAD_PRIORITY_BELOW_NORMAL; + case ma_thread_priority_normal: return THREAD_PRIORITY_NORMAL; + case ma_thread_priority_high: return THREAD_PRIORITY_ABOVE_NORMAL; + case ma_thread_priority_highest: return THREAD_PRIORITY_HIGHEST; + case ma_thread_priority_realtime: return THREAD_PRIORITY_TIME_CRITICAL; + default: return THREAD_PRIORITY_NORMAL; + } +} + +static ma_result ma_thread_create__win32(ma_context* pContext, ma_thread* pThread, ma_thread_entry_proc entryProc, void* pData) +{ + pThread->win32.hThread = CreateThread(NULL, 0, entryProc, pData, 0, NULL); + if (pThread->win32.hThread == NULL) { + return ma_result_from_GetLastError(GetLastError()); + } + + SetThreadPriority((HANDLE)pThread->win32.hThread, ma_thread_priority_to_win32(pContext->threadPriority)); + + return MA_SUCCESS; +} + +static void ma_thread_wait__win32(ma_thread* pThread) +{ + WaitForSingleObject(pThread->win32.hThread, INFINITE); +} + +static void ma_sleep__win32(ma_uint32 milliseconds) +{ + Sleep((DWORD)milliseconds); +} + + +static ma_result ma_mutex_init__win32(ma_context* pContext, ma_mutex* pMutex) +{ + (void)pContext; + + pMutex->win32.hMutex = CreateEventW(NULL, FALSE, TRUE, NULL); + if (pMutex->win32.hMutex == NULL) { + return ma_result_from_GetLastError(GetLastError()); + } + + return MA_SUCCESS; +} + +static void ma_mutex_uninit__win32(ma_mutex* pMutex) +{ + CloseHandle(pMutex->win32.hMutex); +} + +static void ma_mutex_lock__win32(ma_mutex* pMutex) +{ + WaitForSingleObject(pMutex->win32.hMutex, INFINITE); +} + +static void ma_mutex_unlock__win32(ma_mutex* pMutex) +{ + SetEvent(pMutex->win32.hMutex); +} + + +static ma_result ma_event_init__win32(ma_context* pContext, ma_event* pEvent) +{ + (void)pContext; + + pEvent->win32.hEvent = CreateEventW(NULL, FALSE, FALSE, NULL); + 
if (pEvent->win32.hEvent == NULL) { + return ma_result_from_GetLastError(GetLastError()); + } + + return MA_SUCCESS; +} + +static void ma_event_uninit__win32(ma_event* pEvent) +{ + CloseHandle(pEvent->win32.hEvent); +} + +static ma_bool32 ma_event_wait__win32(ma_event* pEvent) +{ + return WaitForSingleObject(pEvent->win32.hEvent, INFINITE) == WAIT_OBJECT_0; +} + +static ma_bool32 ma_event_signal__win32(ma_event* pEvent) +{ + return SetEvent(pEvent->win32.hEvent); +} + + +static ma_result ma_semaphore_init__win32(ma_context* pContext, int initialValue, ma_semaphore* pSemaphore) +{ + (void)pContext; + + pSemaphore->win32.hSemaphore = CreateSemaphoreW(NULL, (LONG)initialValue, LONG_MAX, NULL); + if (pSemaphore->win32.hSemaphore == NULL) { + return ma_result_from_GetLastError(GetLastError()); + } + + return MA_SUCCESS; +} + +static void ma_semaphore_uninit__win32(ma_semaphore* pSemaphore) +{ + CloseHandle((HANDLE)pSemaphore->win32.hSemaphore); +} + +static ma_bool32 ma_semaphore_wait__win32(ma_semaphore* pSemaphore) +{ + return WaitForSingleObject((HANDLE)pSemaphore->win32.hSemaphore, INFINITE) == WAIT_OBJECT_0; +} + +static ma_bool32 ma_semaphore_release__win32(ma_semaphore* pSemaphore) +{ + return ReleaseSemaphore((HANDLE)pSemaphore->win32.hSemaphore, 1, NULL) != 0; +} +#endif + + +#ifdef MA_POSIX +#include + +typedef int (* ma_pthread_create_proc)(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine) (void *), void *arg); +typedef int (* ma_pthread_join_proc)(pthread_t thread, void **retval); +typedef int (* ma_pthread_mutex_init_proc)(pthread_mutex_t *__mutex, const pthread_mutexattr_t *__mutexattr); +typedef int (* ma_pthread_mutex_destroy_proc)(pthread_mutex_t *__mutex); +typedef int (* ma_pthread_mutex_lock_proc)(pthread_mutex_t *__mutex); +typedef int (* ma_pthread_mutex_unlock_proc)(pthread_mutex_t *__mutex); +typedef int (* ma_pthread_cond_init_proc)(pthread_cond_t *__restrict __cond, const pthread_condattr_t *__restrict __cond_attr); +typedef int (* ma_pthread_cond_destroy_proc)(pthread_cond_t *__cond); +typedef int (* ma_pthread_cond_signal_proc)(pthread_cond_t *__cond); +typedef int (* ma_pthread_cond_wait_proc)(pthread_cond_t *__restrict __cond, pthread_mutex_t *__restrict __mutex); +typedef int (* ma_pthread_attr_init_proc)(pthread_attr_t *attr); +typedef int (* ma_pthread_attr_destroy_proc)(pthread_attr_t *attr); +typedef int (* ma_pthread_attr_setschedpolicy_proc)(pthread_attr_t *attr, int policy); +typedef int (* ma_pthread_attr_getschedparam_proc)(const pthread_attr_t *attr, struct sched_param *param); +typedef int (* ma_pthread_attr_setschedparam_proc)(pthread_attr_t *attr, const struct sched_param *param); + +static ma_result ma_thread_create__posix(ma_context* pContext, ma_thread* pThread, ma_thread_entry_proc entryProc, void* pData) +{ + int result; + pthread_attr_t* pAttr = NULL; + +#if !defined(__EMSCRIPTEN__) + /* Try setting the thread priority. It's not critical if anything fails here. 
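+
+    To make the arithmetic below concrete: with a scheduler reporting sched_get_priority_min() = 1 and
+    sched_get_priority_max() = 99 (typical for SCHED_FIFO on Linux), priorityStep becomes (99 - 1) / 7 = 14,
+    so each step up in ma_thread_priority nudges the requested native priority by 14, with the final value
+    clamped back into [1, 99].
+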
*/ + pthread_attr_t attr; + if (((ma_pthread_attr_init_proc)pContext->posix.pthread_attr_init)(&attr) == 0) { + int scheduler = -1; + if (pContext->threadPriority == ma_thread_priority_idle) { +#ifdef SCHED_IDLE + if (((ma_pthread_attr_setschedpolicy_proc)pContext->posix.pthread_attr_setschedpolicy)(&attr, SCHED_IDLE) == 0) { + scheduler = SCHED_IDLE; + } +#endif + } else if (pContext->threadPriority == ma_thread_priority_realtime) { +#ifdef SCHED_FIFO + if (((ma_pthread_attr_setschedpolicy_proc)pContext->posix.pthread_attr_setschedpolicy)(&attr, SCHED_FIFO) == 0) { + scheduler = SCHED_FIFO; + } +#endif +#ifdef MA_LINUX + } else { + scheduler = sched_getscheduler(0); +#endif + } + + if (scheduler != -1) { + int priorityMin = sched_get_priority_min(scheduler); + int priorityMax = sched_get_priority_max(scheduler); + int priorityStep = (priorityMax - priorityMin) / 7; /* 7 = number of priorities supported by miniaudio. */ + + struct sched_param sched; + if (((ma_pthread_attr_getschedparam_proc)pContext->posix.pthread_attr_getschedparam)(&attr, &sched) == 0) { + if (pContext->threadPriority == ma_thread_priority_idle) { + sched.sched_priority = priorityMin; + } else if (pContext->threadPriority == ma_thread_priority_realtime) { + sched.sched_priority = priorityMax; + } else { + sched.sched_priority += ((int)pContext->threadPriority + 5) * priorityStep; /* +5 because the lowest priority is -5. */ + if (sched.sched_priority < priorityMin) { + sched.sched_priority = priorityMin; + } + if (sched.sched_priority > priorityMax) { + sched.sched_priority = priorityMax; + } + } + + if (((ma_pthread_attr_setschedparam_proc)pContext->posix.pthread_attr_setschedparam)(&attr, &sched) == 0) { + pAttr = &attr; + } + } + } + + ((ma_pthread_attr_destroy_proc)pContext->posix.pthread_attr_destroy)(&attr); + } +#endif + + result = ((ma_pthread_create_proc)pContext->posix.pthread_create)(&pThread->posix.thread, pAttr, entryProc, pData); + if (result != 0) { + return ma_result_from_errno(result); + } + + return MA_SUCCESS; +} + +static void ma_thread_wait__posix(ma_thread* pThread) +{ + ((ma_pthread_join_proc)pThread->pContext->posix.pthread_join)(pThread->posix.thread, NULL); +} + +#if !defined(MA_EMSCRIPTEN) +static void ma_sleep__posix(ma_uint32 milliseconds) +{ +#ifdef MA_EMSCRIPTEN + (void)milliseconds; + MA_ASSERT(MA_FALSE); /* The Emscripten build should never sleep. 
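+
+    A note on the non-Emscripten conversion below: it only produces a valid timespec for sleeps shorter than one
+    second, because tv_nsec must stay below one billion. That is fine for the way ma_sleep() is used in this file
+    (small 10ms-style waits), but a general-purpose conversion would be
+
+        ts.tv_sec  = milliseconds / 1000;
+        ts.tv_nsec = (milliseconds % 1000) * 1000000;
+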
*/ +#else + #if _POSIX_C_SOURCE >= 199309L + struct timespec ts; + ts.tv_sec = milliseconds / 1000000; + ts.tv_nsec = milliseconds % 1000000 * 1000000; + nanosleep(&ts, NULL); + #else + struct timeval tv; + tv.tv_sec = milliseconds / 1000; + tv.tv_usec = milliseconds % 1000 * 1000; + select(0, NULL, NULL, NULL, &tv); + #endif +#endif +} +#endif /* MA_EMSCRIPTEN */ + + +static ma_result ma_mutex_init__posix(ma_context* pContext, ma_mutex* pMutex) +{ + int result = ((ma_pthread_mutex_init_proc)pContext->posix.pthread_mutex_init)(&pMutex->posix.mutex, NULL); + if (result != 0) { + return ma_result_from_errno(result); + } + + return MA_SUCCESS; +} + +static void ma_mutex_uninit__posix(ma_mutex* pMutex) +{ + ((ma_pthread_mutex_destroy_proc)pMutex->pContext->posix.pthread_mutex_destroy)(&pMutex->posix.mutex); +} + +static void ma_mutex_lock__posix(ma_mutex* pMutex) +{ + ((ma_pthread_mutex_lock_proc)pMutex->pContext->posix.pthread_mutex_lock)(&pMutex->posix.mutex); +} + +static void ma_mutex_unlock__posix(ma_mutex* pMutex) +{ + ((ma_pthread_mutex_unlock_proc)pMutex->pContext->posix.pthread_mutex_unlock)(&pMutex->posix.mutex); +} + + +static ma_result ma_event_init__posix(ma_context* pContext, ma_event* pEvent) +{ + int result; + + result = ((ma_pthread_mutex_init_proc)pContext->posix.pthread_mutex_init)(&pEvent->posix.mutex, NULL); + if (result != 0) { + return ma_result_from_errno(result); + } + + result = ((ma_pthread_cond_init_proc)pContext->posix.pthread_cond_init)(&pEvent->posix.condition, NULL); + if (result != 0) { + ((ma_pthread_mutex_destroy_proc)pEvent->pContext->posix.pthread_mutex_destroy)(&pEvent->posix.mutex); + return ma_result_from_errno(result); + } + + pEvent->posix.value = 0; + return MA_SUCCESS; +} + +static void ma_event_uninit__posix(ma_event* pEvent) +{ + ((ma_pthread_cond_destroy_proc)pEvent->pContext->posix.pthread_cond_destroy)(&pEvent->posix.condition); + ((ma_pthread_mutex_destroy_proc)pEvent->pContext->posix.pthread_mutex_destroy)(&pEvent->posix.mutex); +} + +static ma_bool32 ma_event_wait__posix(ma_event* pEvent) +{ + ((ma_pthread_mutex_lock_proc)pEvent->pContext->posix.pthread_mutex_lock)(&pEvent->posix.mutex); + { + while (pEvent->posix.value == 0) { + ((ma_pthread_cond_wait_proc)pEvent->pContext->posix.pthread_cond_wait)(&pEvent->posix.condition, &pEvent->posix.mutex); + } + pEvent->posix.value = 0; /* Auto-reset. */ + } + ((ma_pthread_mutex_unlock_proc)pEvent->pContext->posix.pthread_mutex_unlock)(&pEvent->posix.mutex); + + return MA_TRUE; +} + +static ma_bool32 ma_event_signal__posix(ma_event* pEvent) +{ + ((ma_pthread_mutex_lock_proc)pEvent->pContext->posix.pthread_mutex_lock)(&pEvent->posix.mutex); + { + pEvent->posix.value = 1; + ((ma_pthread_cond_signal_proc)pEvent->pContext->posix.pthread_cond_signal)(&pEvent->posix.condition); + } + ((ma_pthread_mutex_unlock_proc)pEvent->pContext->posix.pthread_mutex_unlock)(&pEvent->posix.mutex); + + return MA_TRUE; +} + + +static ma_result ma_semaphore_init__posix(ma_context* pContext, int initialValue, ma_semaphore* pSemaphore) +{ + (void)pContext; + +#if defined(MA_APPLE) + /* Not yet implemented for Apple platforms since sem_init() is deprecated. Need to use a named semaphore via sem_open() instead. 
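+
+    A rough sketch of the named-semaphore approach (untested; the name format is made up and O_CREAT/O_EXCL come
+    from fcntl.h):
+
+        char name[64];
+        snprintf(name, sizeof(name), "/miniaudio_sem_%p", (void*)pSemaphore);
+        sem_t* pSem = sem_open(name, O_CREAT | O_EXCL, 0600, (unsigned int)initialValue);
+        sem_unlink(name);    <-- unlink immediately so the name does not outlive the process
+
+    Separately, note that the sem_init() check in the non-Apple branch below reads inverted: sem_init() returns 0
+    on success and -1 on failure, so the error path should be taken when it returns -1.
+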
*/ + (void)initialValue; + (void)pSemaphore; + return MA_INVALID_OPERATION; +#else + if (sem_init(&pSemaphore->posix.semaphore, 0, (unsigned int)initialValue) == 0) { + return ma_result_from_errno(errno); + } +#endif + + return MA_SUCCESS; +} + +static void ma_semaphore_uninit__posix(ma_semaphore* pSemaphore) +{ + sem_close(&pSemaphore->posix.semaphore); +} + +static ma_bool32 ma_semaphore_wait__posix(ma_semaphore* pSemaphore) +{ + return sem_wait(&pSemaphore->posix.semaphore) != -1; +} + +static ma_bool32 ma_semaphore_release__posix(ma_semaphore* pSemaphore) +{ + return sem_post(&pSemaphore->posix.semaphore) != -1; +} +#endif + +static ma_result ma_thread_create(ma_context* pContext, ma_thread* pThread, ma_thread_entry_proc entryProc, void* pData) +{ + if (pContext == NULL || pThread == NULL || entryProc == NULL) { + return MA_FALSE; + } + + pThread->pContext = pContext; + +#ifdef MA_WIN32 + return ma_thread_create__win32(pContext, pThread, entryProc, pData); +#endif +#ifdef MA_POSIX + return ma_thread_create__posix(pContext, pThread, entryProc, pData); +#endif +} + +static void ma_thread_wait(ma_thread* pThread) +{ + if (pThread == NULL) { + return; + } + +#ifdef MA_WIN32 + ma_thread_wait__win32(pThread); +#endif +#ifdef MA_POSIX + ma_thread_wait__posix(pThread); +#endif +} + +#if !defined(MA_EMSCRIPTEN) +static void ma_sleep(ma_uint32 milliseconds) +{ +#ifdef MA_WIN32 + ma_sleep__win32(milliseconds); +#endif +#ifdef MA_POSIX + ma_sleep__posix(milliseconds); +#endif +} +#endif + + +MA_API ma_result ma_mutex_init(ma_context* pContext, ma_mutex* pMutex) +{ + if (pContext == NULL || pMutex == NULL) { + return MA_INVALID_ARGS; + } + + pMutex->pContext = pContext; + +#ifdef MA_WIN32 + return ma_mutex_init__win32(pContext, pMutex); +#endif +#ifdef MA_POSIX + return ma_mutex_init__posix(pContext, pMutex); +#endif +} + +MA_API void ma_mutex_uninit(ma_mutex* pMutex) +{ + if (pMutex == NULL || pMutex->pContext == NULL) { + return; + } + +#ifdef MA_WIN32 + ma_mutex_uninit__win32(pMutex); +#endif +#ifdef MA_POSIX + ma_mutex_uninit__posix(pMutex); +#endif +} + +MA_API void ma_mutex_lock(ma_mutex* pMutex) +{ + if (pMutex == NULL || pMutex->pContext == NULL) { + return; + } + +#ifdef MA_WIN32 + ma_mutex_lock__win32(pMutex); +#endif +#ifdef MA_POSIX + ma_mutex_lock__posix(pMutex); +#endif +} + +MA_API void ma_mutex_unlock(ma_mutex* pMutex) +{ + if (pMutex == NULL || pMutex->pContext == NULL) { + return; +} + +#ifdef MA_WIN32 + ma_mutex_unlock__win32(pMutex); +#endif +#ifdef MA_POSIX + ma_mutex_unlock__posix(pMutex); +#endif +} + + +MA_API ma_result ma_event_init(ma_context* pContext, ma_event* pEvent) +{ + if (pContext == NULL || pEvent == NULL) { + return MA_FALSE; + } + + pEvent->pContext = pContext; + +#ifdef MA_WIN32 + return ma_event_init__win32(pContext, pEvent); +#endif +#ifdef MA_POSIX + return ma_event_init__posix(pContext, pEvent); +#endif +} + +MA_API void ma_event_uninit(ma_event* pEvent) +{ + if (pEvent == NULL || pEvent->pContext == NULL) { + return; + } + +#ifdef MA_WIN32 + ma_event_uninit__win32(pEvent); +#endif +#ifdef MA_POSIX + ma_event_uninit__posix(pEvent); +#endif +} + +MA_API ma_bool32 ma_event_wait(ma_event* pEvent) +{ + if (pEvent == NULL || pEvent->pContext == NULL) { + return MA_FALSE; + } + +#ifdef MA_WIN32 + return ma_event_wait__win32(pEvent); +#endif +#ifdef MA_POSIX + return ma_event_wait__posix(pEvent); +#endif +} + +MA_API ma_bool32 ma_event_signal(ma_event* pEvent) +{ + if (pEvent == NULL || pEvent->pContext == NULL) { + return MA_FALSE; + } + +#ifdef MA_WIN32 + return 
ma_event_signal__win32(pEvent); +#endif +#ifdef MA_POSIX + return ma_event_signal__posix(pEvent); +#endif +} + + +MA_API ma_result ma_semaphore_init(ma_context* pContext, int initialValue, ma_semaphore* pSemaphore) +{ + if (pContext == NULL || pSemaphore == NULL) { + return MA_INVALID_ARGS; + } + +#ifdef MA_WIN32 + return ma_semaphore_init__win32(pContext, initialValue, pSemaphore); +#endif +#ifdef MA_POSIX + return ma_semaphore_init__posix(pContext, initialValue, pSemaphore); +#endif +} + +MA_API void ma_semaphore_uninit(ma_semaphore* pSemaphore) +{ + if (pSemaphore == NULL) { + return; + } + +#ifdef MA_WIN32 + ma_semaphore_uninit__win32(pSemaphore); +#endif +#ifdef MA_POSIX + ma_semaphore_uninit__posix(pSemaphore); +#endif +} + +MA_API ma_bool32 ma_semaphore_wait(ma_semaphore* pSemaphore) +{ + if (pSemaphore == NULL) { + return MA_FALSE; + } + +#ifdef MA_WIN32 + return ma_semaphore_wait__win32(pSemaphore); +#endif +#ifdef MA_POSIX + return ma_semaphore_wait__posix(pSemaphore); +#endif +} + +MA_API ma_bool32 ma_semaphore_release(ma_semaphore* pSemaphore) +{ + if (pSemaphore == NULL) { + return MA_FALSE; + } + +#ifdef MA_WIN32 + return ma_semaphore_release__win32(pSemaphore); +#endif +#ifdef MA_POSIX + return ma_semaphore_release__posix(pSemaphore); +#endif +} + + +#if 0 +static ma_uint32 ma_get_closest_standard_sample_rate(ma_uint32 sampleRateIn) +{ + ma_uint32 closestRate = 0; + ma_uint32 closestDiff = 0xFFFFFFFF; + size_t iStandardRate; + + for (iStandardRate = 0; iStandardRate < ma_countof(g_maStandardSampleRatePriorities); ++iStandardRate) { + ma_uint32 standardRate = g_maStandardSampleRatePriorities[iStandardRate]; + ma_uint32 diff; + + if (sampleRateIn > standardRate) { + diff = sampleRateIn - standardRate; + } else { + diff = standardRate - sampleRateIn; + } + + if (diff == 0) { + return standardRate; /* The input sample rate is a standard rate. */ + } + + if (closestDiff > diff) { + closestDiff = diff; + closestRate = standardRate; + } + } + + return closestRate; +} +#endif + +MA_API ma_uint32 ma_scale_buffer_size(ma_uint32 baseBufferSize, float scale) +{ + return ma_max(1, (ma_uint32)(baseBufferSize*scale)); +} + +MA_API ma_uint32 ma_calculate_buffer_size_in_milliseconds_from_frames(ma_uint32 bufferSizeInFrames, ma_uint32 sampleRate) +{ + return bufferSizeInFrames / (sampleRate/1000); +} + +MA_API ma_uint32 ma_calculate_buffer_size_in_frames_from_milliseconds(ma_uint32 bufferSizeInMilliseconds, ma_uint32 sampleRate) +{ + return bufferSizeInMilliseconds * (sampleRate/1000); +} + +MA_API void ma_zero_pcm_frames(void* p, ma_uint32 frameCount, ma_format format, ma_uint32 channels) +{ + MA_ZERO_MEMORY(p, frameCount * ma_get_bytes_per_frame(format, channels)); +} + +MA_API void ma_clip_samples_f32(float* p, ma_uint32 sampleCount) +{ + ma_uint32 iSample; + + /* TODO: Research a branchless SSE implementation. 
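+
+    For reference, a branchless SSE version would look something like the following (a sketch only, assuming
+    xmmintrin.h, unaligned loads via _mm_loadu_ps and a scalar tail loop for any remaining sampleCount % 4 samples):
+
+        __m128 lo = _mm_set1_ps(-1.0f);
+        __m128 hi = _mm_set1_ps( 1.0f);
+        for (iSample = 0; iSample + 3 < sampleCount; iSample += 4) {
+            __m128 s = _mm_loadu_ps(&p[iSample]);
+            _mm_storeu_ps(&p[iSample], _mm_min_ps(_mm_max_ps(s, lo), hi));
+        }
+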
*/ + for (iSample = 0; iSample < sampleCount; iSample += 1) { + p[iSample] = ma_clip_f32(p[iSample]); + } +} + + +MA_API void ma_copy_and_apply_volume_factor_u8(ma_uint8* pSamplesOut, const ma_uint8* pSamplesIn, ma_uint32 sampleCount, float factor) +{ + ma_uint32 iSample; + + if (pSamplesOut == NULL || pSamplesIn == NULL) { + return; + } + + for (iSample = 0; iSample < sampleCount; iSample += 1) { + pSamplesOut[iSample] = (ma_uint8)(pSamplesIn[iSample] * factor); + } +} + +MA_API void ma_copy_and_apply_volume_factor_s16(ma_int16* pSamplesOut, const ma_int16* pSamplesIn, ma_uint32 sampleCount, float factor) +{ + ma_uint32 iSample; + + if (pSamplesOut == NULL || pSamplesIn == NULL) { + return; + } + + for (iSample = 0; iSample < sampleCount; iSample += 1) { + pSamplesOut[iSample] = (ma_int16)(pSamplesIn[iSample] * factor); + } +} + +MA_API void ma_copy_and_apply_volume_factor_s24(void* pSamplesOut, const void* pSamplesIn, ma_uint32 sampleCount, float factor) +{ + ma_uint32 iSample; + ma_uint8* pSamplesOut8; + ma_uint8* pSamplesIn8; + + if (pSamplesOut == NULL || pSamplesIn == NULL) { + return; + } + + pSamplesOut8 = (ma_uint8*)pSamplesOut; + pSamplesIn8 = (ma_uint8*)pSamplesIn; + + for (iSample = 0; iSample < sampleCount; iSample += 1) { + ma_int32 sampleS32; + + sampleS32 = (ma_int32)(((ma_uint32)(pSamplesIn8[iSample*3+0]) << 8) | ((ma_uint32)(pSamplesIn8[iSample*3+1]) << 16) | ((ma_uint32)(pSamplesIn8[iSample*3+2])) << 24); + sampleS32 = (ma_int32)(sampleS32 * factor); + + pSamplesOut8[iSample*3+0] = (ma_uint8)(((ma_uint32)sampleS32 & 0x0000FF00) >> 8); + pSamplesOut8[iSample*3+1] = (ma_uint8)(((ma_uint32)sampleS32 & 0x00FF0000) >> 16); + pSamplesOut8[iSample*3+2] = (ma_uint8)(((ma_uint32)sampleS32 & 0xFF000000) >> 24); + } +} + +MA_API void ma_copy_and_apply_volume_factor_s32(ma_int32* pSamplesOut, const ma_int32* pSamplesIn, ma_uint32 sampleCount, float factor) +{ + ma_uint32 iSample; + + if (pSamplesOut == NULL || pSamplesIn == NULL) { + return; + } + + for (iSample = 0; iSample < sampleCount; iSample += 1) { + pSamplesOut[iSample] = (ma_int32)(pSamplesIn[iSample] * factor); + } +} + +MA_API void ma_copy_and_apply_volume_factor_f32(float* pSamplesOut, const float* pSamplesIn, ma_uint32 sampleCount, float factor) +{ + ma_uint32 iSample; + + if (pSamplesOut == NULL || pSamplesIn == NULL) { + return; + } + + for (iSample = 0; iSample < sampleCount; iSample += 1) { + pSamplesOut[iSample] = pSamplesIn[iSample] * factor; + } +} + +MA_API void ma_apply_volume_factor_u8(ma_uint8* pSamples, ma_uint32 sampleCount, float factor) +{ + ma_copy_and_apply_volume_factor_u8(pSamples, pSamples, sampleCount, factor); +} + +MA_API void ma_apply_volume_factor_s16(ma_int16* pSamples, ma_uint32 sampleCount, float factor) +{ + ma_copy_and_apply_volume_factor_s16(pSamples, pSamples, sampleCount, factor); +} + +MA_API void ma_apply_volume_factor_s24(void* pSamples, ma_uint32 sampleCount, float factor) +{ + ma_copy_and_apply_volume_factor_s24(pSamples, pSamples, sampleCount, factor); +} + +MA_API void ma_apply_volume_factor_s32(ma_int32* pSamples, ma_uint32 sampleCount, float factor) +{ + ma_copy_and_apply_volume_factor_s32(pSamples, pSamples, sampleCount, factor); +} + +MA_API void ma_apply_volume_factor_f32(float* pSamples, ma_uint32 sampleCount, float factor) +{ + ma_copy_and_apply_volume_factor_f32(pSamples, pSamples, sampleCount, factor); +} + +MA_API void ma_copy_and_apply_volume_factor_pcm_frames_u8(ma_uint8* pPCMFramesOut, const ma_uint8* pPCMFramesIn, ma_uint32 frameCount, ma_uint32 channels, float 
factor) +{ + ma_copy_and_apply_volume_factor_u8(pPCMFramesOut, pPCMFramesIn, frameCount*channels, factor); +} + +MA_API void ma_copy_and_apply_volume_factor_pcm_frames_s16(ma_int16* pPCMFramesOut, const ma_int16* pPCMFramesIn, ma_uint32 frameCount, ma_uint32 channels, float factor) +{ + ma_copy_and_apply_volume_factor_s16(pPCMFramesOut, pPCMFramesIn, frameCount*channels, factor); +} + +MA_API void ma_copy_and_apply_volume_factor_pcm_frames_s24(void* pPCMFramesOut, const void* pPCMFramesIn, ma_uint32 frameCount, ma_uint32 channels, float factor) +{ + ma_copy_and_apply_volume_factor_s24(pPCMFramesOut, pPCMFramesIn, frameCount*channels, factor); +} + +MA_API void ma_copy_and_apply_volume_factor_pcm_frames_s32(ma_int32* pPCMFramesOut, const ma_int32* pPCMFramesIn, ma_uint32 frameCount, ma_uint32 channels, float factor) +{ + ma_copy_and_apply_volume_factor_s32(pPCMFramesOut, pPCMFramesIn, frameCount*channels, factor); +} + +MA_API void ma_copy_and_apply_volume_factor_pcm_frames_f32(float* pPCMFramesOut, const float* pPCMFramesIn, ma_uint32 frameCount, ma_uint32 channels, float factor) +{ + ma_copy_and_apply_volume_factor_f32(pPCMFramesOut, pPCMFramesIn, frameCount*channels, factor); +} + +MA_API void ma_copy_and_apply_volume_factor_pcm_frames(void* pPCMFramesOut, const void* pPCMFramesIn, ma_uint32 frameCount, ma_format format, ma_uint32 channels, float factor) +{ + switch (format) + { + case ma_format_u8: ma_copy_and_apply_volume_factor_pcm_frames_u8 ((ma_uint8*)pPCMFramesOut, (const ma_uint8*)pPCMFramesIn, frameCount, channels, factor); return; + case ma_format_s16: ma_copy_and_apply_volume_factor_pcm_frames_s16((ma_int16*)pPCMFramesOut, (const ma_int16*)pPCMFramesIn, frameCount, channels, factor); return; + case ma_format_s24: ma_copy_and_apply_volume_factor_pcm_frames_s24( pPCMFramesOut, pPCMFramesIn, frameCount, channels, factor); return; + case ma_format_s32: ma_copy_and_apply_volume_factor_pcm_frames_s32((ma_int32*)pPCMFramesOut, (const ma_int32*)pPCMFramesIn, frameCount, channels, factor); return; + case ma_format_f32: ma_copy_and_apply_volume_factor_pcm_frames_f32( (float*)pPCMFramesOut, (const float*)pPCMFramesIn, frameCount, channels, factor); return; + default: return; /* Do nothing. 
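+
+    Typical usage of this family of helpers, as a sketch (the dB helpers are defined just below; pFrames,
+    frameCount and channels are whatever buffer you have at hand):
+
+        float factor = ma_gain_db_to_factor(-6.0f);    <-- roughly 0.5
+        ma_apply_volume_factor_pcm_frames(pFrames, frameCount, ma_format_f32, channels, factor);
+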
*/ + } +} + +MA_API void ma_apply_volume_factor_pcm_frames_u8(ma_uint8* pPCMFrames, ma_uint32 frameCount, ma_uint32 channels, float factor) +{ + ma_copy_and_apply_volume_factor_pcm_frames_u8(pPCMFrames, pPCMFrames, frameCount, channels, factor); +} + +MA_API void ma_apply_volume_factor_pcm_frames_s16(ma_int16* pPCMFrames, ma_uint32 frameCount, ma_uint32 channels, float factor) +{ + ma_copy_and_apply_volume_factor_pcm_frames_s16(pPCMFrames, pPCMFrames, frameCount, channels, factor); +} + +MA_API void ma_apply_volume_factor_pcm_frames_s24(void* pPCMFrames, ma_uint32 frameCount, ma_uint32 channels, float factor) +{ + ma_copy_and_apply_volume_factor_pcm_frames_s24(pPCMFrames, pPCMFrames, frameCount, channels, factor); +} + +MA_API void ma_apply_volume_factor_pcm_frames_s32(ma_int32* pPCMFrames, ma_uint32 frameCount, ma_uint32 channels, float factor) +{ + ma_copy_and_apply_volume_factor_pcm_frames_s32(pPCMFrames, pPCMFrames, frameCount, channels, factor); +} + +MA_API void ma_apply_volume_factor_pcm_frames_f32(float* pPCMFrames, ma_uint32 frameCount, ma_uint32 channels, float factor) +{ + ma_copy_and_apply_volume_factor_pcm_frames_f32(pPCMFrames, pPCMFrames, frameCount, channels, factor); +} + +MA_API void ma_apply_volume_factor_pcm_frames(void* pPCMFrames, ma_uint32 frameCount, ma_format format, ma_uint32 channels, float factor) +{ + ma_copy_and_apply_volume_factor_pcm_frames(pPCMFrames, pPCMFrames, frameCount, format, channels, factor); +} + + +MA_API float ma_factor_to_gain_db(float factor) +{ + return (float)(20*ma_log10f(factor)); +} + +MA_API float ma_gain_db_to_factor(float gain) +{ + return (float)ma_powf(10, gain/20.0f); +} + + +static void ma_device__on_data(ma_device* pDevice, void* pFramesOut, const void* pFramesIn, ma_uint32 frameCount) +{ + float masterVolumeFactor; + + masterVolumeFactor = pDevice->masterVolumeFactor; + + if (pDevice->onData) { + if (!pDevice->noPreZeroedOutputBuffer && pFramesOut != NULL) { + ma_zero_pcm_frames(pFramesOut, frameCount, pDevice->playback.format, pDevice->playback.channels); + } + + /* Volume control of input makes things a bit awkward because the input buffer is read-only. We'll need to use a temp buffer and loop in this case. */ + if (pFramesIn != NULL && masterVolumeFactor < 1) { + ma_uint8 tempFramesIn[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 bpfCapture = ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); + ma_uint32 bpfPlayback = ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); + ma_uint32 totalFramesProcessed = 0; + while (totalFramesProcessed < frameCount) { + ma_uint32 framesToProcessThisIteration = frameCount - totalFramesProcessed; + if (framesToProcessThisIteration > sizeof(tempFramesIn)/bpfCapture) { + framesToProcessThisIteration = sizeof(tempFramesIn)/bpfCapture; + } + + ma_copy_and_apply_volume_factor_pcm_frames(tempFramesIn, ma_offset_ptr(pFramesIn, totalFramesProcessed*bpfCapture), framesToProcessThisIteration, pDevice->capture.format, pDevice->capture.channels, masterVolumeFactor); + + pDevice->onData(pDevice, ma_offset_ptr(pFramesOut, totalFramesProcessed*bpfPlayback), tempFramesIn, framesToProcessThisIteration); + + totalFramesProcessed += framesToProcessThisIteration; + } + } else { + pDevice->onData(pDevice, pFramesOut, pFramesIn, frameCount); + } + + /* Volume control and clipping for playback devices. 
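+
+    For context, the onData being wrapped here is the application's data callback supplied at device config time.
+    A minimal playback callback, assuming a decoder has been stored in pUserData, looks roughly like:
+
+        void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
+        {
+            ma_decoder* pDecoder = (ma_decoder*)pDevice->pUserData;
+            ma_decoder_read_pcm_frames(pDecoder, pOutput, frameCount);
+            (void)pInput;
+        }
+
+    Everything in this function (pre-zeroing, master volume, clipping) happens around that call.
+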
*/ + if (pFramesOut != NULL) { + if (masterVolumeFactor < 1) { + if (pFramesIn == NULL) { /* <-- In full-duplex situations, the volume will have been applied to the input samples before the data callback. Applying it again post-callback will incorrectly compound it. */ + ma_apply_volume_factor_pcm_frames(pFramesOut, frameCount, pDevice->playback.format, pDevice->playback.channels, masterVolumeFactor); + } + } + + if (!pDevice->noClip && pDevice->playback.format == ma_format_f32) { + ma_clip_pcm_frames_f32((float*)pFramesOut, frameCount, pDevice->playback.channels); + } + } + } +} + + + +/* A helper function for reading sample data from the client. */ +static void ma_device__read_frames_from_client(ma_device* pDevice, ma_uint32 frameCount, void* pFramesOut) +{ + MA_ASSERT(pDevice != NULL); + MA_ASSERT(frameCount > 0); + MA_ASSERT(pFramesOut != NULL); + + if (pDevice->playback.converter.isPassthrough) { + ma_device__on_data(pDevice, pFramesOut, NULL, frameCount); + } else { + ma_result result; + ma_uint64 totalFramesReadOut; + ma_uint64 totalFramesReadIn; + void* pRunningFramesOut; + + totalFramesReadOut = 0; + totalFramesReadIn = 0; + pRunningFramesOut = pFramesOut; + + while (totalFramesReadOut < frameCount) { + ma_uint8 pIntermediaryBuffer[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In client format. */ + ma_uint64 intermediaryBufferCap = sizeof(pIntermediaryBuffer) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); + ma_uint64 framesToReadThisIterationIn; + ma_uint64 framesReadThisIterationIn; + ma_uint64 framesToReadThisIterationOut; + ma_uint64 framesReadThisIterationOut; + ma_uint64 requiredInputFrameCount; + + framesToReadThisIterationOut = (frameCount - totalFramesReadOut); + framesToReadThisIterationIn = framesToReadThisIterationOut; + if (framesToReadThisIterationIn > intermediaryBufferCap) { + framesToReadThisIterationIn = intermediaryBufferCap; + } + + requiredInputFrameCount = ma_data_converter_get_required_input_frame_count(&pDevice->playback.converter, framesToReadThisIterationOut); + if (framesToReadThisIterationIn > requiredInputFrameCount) { + framesToReadThisIterationIn = requiredInputFrameCount; + } + + if (framesToReadThisIterationIn > 0) { + ma_device__on_data(pDevice, pIntermediaryBuffer, NULL, (ma_uint32)framesToReadThisIterationIn); + totalFramesReadIn += framesToReadThisIterationIn; + } + + /* + At this point we have our decoded data in input format and now we need to convert to output format. Note that even if we didn't read any + input frames, we still want to try processing frames because there may some output frames generated from cached input data. + */ + framesReadThisIterationIn = framesToReadThisIterationIn; + framesReadThisIterationOut = framesToReadThisIterationOut; + result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, pIntermediaryBuffer, &framesReadThisIterationIn, pRunningFramesOut, &framesReadThisIterationOut); + if (result != MA_SUCCESS) { + break; + } + + totalFramesReadOut += framesReadThisIterationOut; + pRunningFramesOut = ma_offset_ptr(pRunningFramesOut, framesReadThisIterationOut * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels)); + + if (framesReadThisIterationIn == 0 && framesReadThisIterationOut == 0) { + break; /* We're done. */ + } + } + } +} + +/* A helper for sending sample data to the client. 
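+
+   Both this helper and ma_device__read_frames_from_client() above use the same ma_data_converter calling
+   convention: frame counts go in as capacities and come back as the amounts actually consumed and produced.
+   In sketch form (the variable names here are placeholders):
+
+       ma_uint64 frameCountIn  = inputCapacityInFrames;     <-- how much input is available
+       ma_uint64 frameCountOut = outputCapacityInFrames;    <-- how much output space there is
+       ma_data_converter_process_pcm_frames(&converter, pIn, &frameCountIn, pOut, &frameCountOut);
+       (on return, frameCountIn and frameCountOut hold what was actually consumed and produced)
+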
*/ +static void ma_device__send_frames_to_client(ma_device* pDevice, ma_uint32 frameCountInDeviceFormat, const void* pFramesInDeviceFormat) +{ + MA_ASSERT(pDevice != NULL); + MA_ASSERT(frameCountInDeviceFormat > 0); + MA_ASSERT(pFramesInDeviceFormat != NULL); + + if (pDevice->capture.converter.isPassthrough) { + ma_device__on_data(pDevice, NULL, pFramesInDeviceFormat, frameCountInDeviceFormat); + } else { + ma_result result; + ma_uint8 pFramesInClientFormat[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint64 framesInClientFormatCap = sizeof(pFramesInClientFormat) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); + ma_uint64 totalDeviceFramesProcessed = 0; + ma_uint64 totalClientFramesProcessed = 0; + const void* pRunningFramesInDeviceFormat = pFramesInDeviceFormat; + + /* We just keep going until we've exhaused all of our input frames and cannot generate any more output frames. */ + for (;;) { + ma_uint64 deviceFramesProcessedThisIteration; + ma_uint64 clientFramesProcessedThisIteration; + + deviceFramesProcessedThisIteration = (frameCountInDeviceFormat - totalDeviceFramesProcessed); + clientFramesProcessedThisIteration = framesInClientFormatCap; + + result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningFramesInDeviceFormat, &deviceFramesProcessedThisIteration, pFramesInClientFormat, &clientFramesProcessedThisIteration); + if (result != MA_SUCCESS) { + break; + } + + if (clientFramesProcessedThisIteration > 0) { + ma_device__on_data(pDevice, NULL, pFramesInClientFormat, (ma_uint32)clientFramesProcessedThisIteration); /* Safe cast. */ + } + + pRunningFramesInDeviceFormat = ma_offset_ptr(pRunningFramesInDeviceFormat, deviceFramesProcessedThisIteration * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels)); + totalDeviceFramesProcessed += deviceFramesProcessedThisIteration; + totalClientFramesProcessed += clientFramesProcessedThisIteration; + + if (deviceFramesProcessedThisIteration == 0 && clientFramesProcessedThisIteration == 0) { + break; /* We're done. */ + } + } + } +} + + +/* We only want to expose ma_device__handle_duplex_callback_capture() and ma_device__handle_duplex_callback_playback() if we have an asynchronous backend enabled. */ +#if defined(MA_HAS_JACK) || \ + defined(MA_HAS_COREAUDIO) || \ + defined(MA_HAS_AAUDIO) || \ + defined(MA_HAS_OPENSL) || \ + defined(MA_HAS_WEBAUDIO) +static ma_result ma_device__handle_duplex_callback_capture(ma_device* pDevice, ma_uint32 frameCountInDeviceFormat, const void* pFramesInDeviceFormat, ma_pcm_rb* pRB) +{ + ma_result result; + ma_uint32 totalDeviceFramesProcessed = 0; + const void* pRunningFramesInDeviceFormat = pFramesInDeviceFormat; + + MA_ASSERT(pDevice != NULL); + MA_ASSERT(frameCountInDeviceFormat > 0); + MA_ASSERT(pFramesInDeviceFormat != NULL); + MA_ASSERT(pRB != NULL); + + /* Write to the ring buffer. The ring buffer is in the client format which means we need to convert. 
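+
+    For orientation, the duplex data path as a whole looks like this (sketch):
+
+        capture side:  the backend delivers device-format frames -> this function converts them to the client
+                       format -> frames are written to the shared ma_pcm_rb
+        playback side: ma_device__handle_duplex_callback_playback() reads the ring buffer, fires the data callback
+                       with those frames as input, then converts the callback's output back to the playback
+                       device's internal format.
+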
*/ + for (;;) { + ma_uint32 framesToProcessInDeviceFormat = (frameCountInDeviceFormat - totalDeviceFramesProcessed); + ma_uint32 framesToProcessInClientFormat = MA_DATA_CONVERTER_STACK_BUFFER_SIZE / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); + ma_uint64 framesProcessedInDeviceFormat; + ma_uint64 framesProcessedInClientFormat; + void* pFramesInClientFormat; + + result = ma_pcm_rb_acquire_write(pRB, &framesToProcessInClientFormat, &pFramesInClientFormat); + if (result != MA_SUCCESS) { + ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "Failed to acquire capture PCM frames from ring buffer.", result); + break; + } + + if (framesToProcessInClientFormat == 0) { + if (ma_pcm_rb_pointer_distance(pRB) == (ma_int32)ma_pcm_rb_get_subbuffer_size(pRB)) { + break; /* Overrun. Not enough room in the ring buffer for input frame. Excess frames are dropped. */ + } + } + + /* Convert. */ + framesProcessedInDeviceFormat = framesToProcessInDeviceFormat; + framesProcessedInClientFormat = framesToProcessInClientFormat; + result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningFramesInDeviceFormat, &framesProcessedInDeviceFormat, pFramesInClientFormat, &framesProcessedInClientFormat); + if (result != MA_SUCCESS) { + break; + } + + result = ma_pcm_rb_commit_write(pRB, (ma_uint32)framesProcessedInDeviceFormat, pFramesInClientFormat); /* Safe cast. */ + if (result != MA_SUCCESS) { + ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "Failed to commit capture PCM frames to ring buffer.", result); + break; + } + + pRunningFramesInDeviceFormat = ma_offset_ptr(pRunningFramesInDeviceFormat, framesProcessedInDeviceFormat * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels)); + totalDeviceFramesProcessed += (ma_uint32)framesProcessedInDeviceFormat; /* Safe cast. */ + + /* We're done when we're unable to process any client nor device frames. */ + if (framesProcessedInClientFormat == 0 && framesProcessedInDeviceFormat == 0) { + break; /* Done. */ + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device__handle_duplex_callback_playback(ma_device* pDevice, ma_uint32 frameCount, void* pFramesInInternalFormat, ma_pcm_rb* pRB) +{ + ma_result result; + ma_uint8 playbackFramesInExternalFormat[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint8 silentInputFrames[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 totalFramesToReadFromClient; + ma_uint32 totalFramesReadFromClient; + ma_uint32 totalFramesReadOut = 0; + + MA_ASSERT(pDevice != NULL); + MA_ASSERT(frameCount > 0); + MA_ASSERT(pFramesInInternalFormat != NULL); + MA_ASSERT(pRB != NULL); + + /* + Sitting in the ring buffer should be captured data from the capture callback in external format. If there's not enough data in there for + the whole frameCount frames we just use silence instead for the input data. + */ + MA_ZERO_MEMORY(silentInputFrames, sizeof(silentInputFrames)); + + /* We need to calculate how many output frames are required to be read from the client to completely fill frameCount internal frames. 
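+    In other words, this is the number of client-format frames the playback converter needs as input in order to
+    produce frameCount device-format frames. For example, with a client sample rate of 44100 and an internal
+    device rate of 48000, producing 480 device frames requires roughly 441 client frames.
+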
*/ + totalFramesToReadFromClient = (ma_uint32)ma_data_converter_get_required_input_frame_count(&pDevice->playback.converter, frameCount); + totalFramesReadFromClient = 0; + while (totalFramesReadFromClient < totalFramesToReadFromClient && ma_device_is_started(pDevice)) { + ma_uint32 framesRemainingFromClient; + ma_uint32 framesToProcessFromClient; + ma_uint32 inputFrameCount; + void* pInputFrames; + + framesRemainingFromClient = (totalFramesToReadFromClient - totalFramesReadFromClient); + framesToProcessFromClient = sizeof(playbackFramesInExternalFormat) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); + if (framesToProcessFromClient > framesRemainingFromClient) { + framesToProcessFromClient = framesRemainingFromClient; + } + + /* We need to grab captured samples before firing the callback. If there's not enough input samples we just pass silence. */ + inputFrameCount = framesToProcessFromClient; + result = ma_pcm_rb_acquire_read(pRB, &inputFrameCount, &pInputFrames); + if (result == MA_SUCCESS) { + if (inputFrameCount > 0) { + /* Use actual input frames. */ + ma_device__on_data(pDevice, playbackFramesInExternalFormat, pInputFrames, inputFrameCount); + } else { + if (ma_pcm_rb_pointer_distance(pRB) == 0) { + break; /* Underrun. */ + } + } + + /* We're done with the captured samples. */ + result = ma_pcm_rb_commit_read(pRB, inputFrameCount, pInputFrames); + if (result != MA_SUCCESS) { + break; /* Don't know what to do here... Just abandon ship. */ + } + } else { + /* Use silent input frames. */ + inputFrameCount = ma_min( + sizeof(playbackFramesInExternalFormat) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels), + sizeof(silentInputFrames) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels) + ); + + ma_device__on_data(pDevice, playbackFramesInExternalFormat, silentInputFrames, inputFrameCount); + } + + /* We have samples in external format so now we need to convert to internal format and output to the device. */ + { + ma_uint64 framesConvertedIn = inputFrameCount; + ma_uint64 framesConvertedOut = (frameCount - totalFramesReadOut); + ma_data_converter_process_pcm_frames(&pDevice->playback.converter, playbackFramesInExternalFormat, &framesConvertedIn, pFramesInInternalFormat, &framesConvertedOut); + + totalFramesReadFromClient += (ma_uint32)framesConvertedIn; /* Safe cast. */ + totalFramesReadOut += (ma_uint32)framesConvertedOut; /* Safe cast. */ + pFramesInInternalFormat = ma_offset_ptr(pFramesInInternalFormat, framesConvertedOut * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels)); + } + } + + return MA_SUCCESS; +} +#endif /* Asynchronous backends. */ + +/* A helper for changing the state of the device. */ +static MA_INLINE void ma_device__set_state(ma_device* pDevice, ma_uint32 newState) +{ + ma_atomic_exchange_32(&pDevice->state, newState); +} + +/* A helper for getting the state of the device. 
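+
+   (The setter above publishes changes with an atomic exchange; this getter is a plain read, which is enough for
+   how it is used here, i.e. polling for an approximately-current value, with the actual start/stop handshakes
+   being synchronized through events elsewhere.)
+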
*/ +static MA_INLINE ma_uint32 ma_device__get_state(ma_device* pDevice) +{ + return pDevice->state; +} + + +#ifdef MA_WIN32 + GUID MA_GUID_KSDATAFORMAT_SUBTYPE_PCM = {0x00000001, 0x0000, 0x0010, {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}}; + GUID MA_GUID_KSDATAFORMAT_SUBTYPE_IEEE_FLOAT = {0x00000003, 0x0000, 0x0010, {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}}; + /*GUID MA_GUID_KSDATAFORMAT_SUBTYPE_ALAW = {0x00000006, 0x0000, 0x0010, {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}};*/ + /*GUID MA_GUID_KSDATAFORMAT_SUBTYPE_MULAW = {0x00000007, 0x0000, 0x0010, {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}};*/ +#endif + + +typedef struct +{ + ma_device_type deviceType; + const ma_device_id* pDeviceID; + char* pName; + size_t nameBufferSize; + ma_bool32 foundDevice; +} ma_context__try_get_device_name_by_id__enum_callback_data; + +static ma_bool32 ma_context__try_get_device_name_by_id__enum_callback(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pDeviceInfo, void* pUserData) +{ + ma_context__try_get_device_name_by_id__enum_callback_data* pData = (ma_context__try_get_device_name_by_id__enum_callback_data*)pUserData; + MA_ASSERT(pData != NULL); + + if (pData->deviceType == deviceType) { + if (pContext->onDeviceIDEqual(pContext, pData->pDeviceID, &pDeviceInfo->id)) { + ma_strncpy_s(pData->pName, pData->nameBufferSize, pDeviceInfo->name, (size_t)-1); + pData->foundDevice = MA_TRUE; + } + } + + return !pData->foundDevice; +} + +/* +Generic function for retrieving the name of a device by it's ID. + +This function simply enumerates every device and then retrieves the name of the first device that has the same ID. +*/ +static ma_result ma_context__try_get_device_name_by_id(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, char* pName, size_t nameBufferSize) +{ + ma_result result; + ma_context__try_get_device_name_by_id__enum_callback_data data; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pName != NULL); + + if (pDeviceID == NULL) { + return MA_NO_DEVICE; + } + + data.deviceType = deviceType; + data.pDeviceID = pDeviceID; + data.pName = pName; + data.nameBufferSize = nameBufferSize; + data.foundDevice = MA_FALSE; + result = ma_context_enumerate_devices(pContext, ma_context__try_get_device_name_by_id__enum_callback, &data); + if (result != MA_SUCCESS) { + return result; + } + + if (!data.foundDevice) { + return MA_NO_DEVICE; + } else { + return MA_SUCCESS; + } +} + + +MA_API ma_uint32 ma_get_format_priority_index(ma_format format) /* Lower = better. */ +{ + ma_uint32 i; + for (i = 0; i < ma_countof(g_maFormatPriorities); ++i) { + if (g_maFormatPriorities[i] == format) { + return i; + } + } + + /* Getting here means the format could not be found or is equal to ma_format_unknown. */ + return (ma_uint32)-1; +} + +static ma_result ma_device__post_init_setup(ma_device* pDevice, ma_device_type deviceType); + + +/******************************************************************************* + +Null Backend + +*******************************************************************************/ +#ifdef MA_HAS_NULL + +#define MA_DEVICE_OP_NONE__NULL 0 +#define MA_DEVICE_OP_START__NULL 1 +#define MA_DEVICE_OP_SUSPEND__NULL 2 +#define MA_DEVICE_OP_KILL__NULL 3 + +static ma_thread_result MA_THREADCALL ma_device_thread__null(void* pData) +{ + ma_device* pDevice = (ma_device*)pData; + MA_ASSERT(pDevice != NULL); + + for (;;) { /* Keep the thread alive until the device is uninitialized. */ + /* Wait for an operation to be requested. 
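+
+        The requesting side of this handshake is ma_device_do_operation__null() below, which in essence does:
+
+            ma_atomic_exchange_32(&pDevice->null_device.operation, operation);
+            ma_event_signal(&pDevice->null_device.operationEvent);
+            ma_event_wait(&pDevice->null_device.operationCompletionEvent);
+            return pDevice->null_device.operationResult;
+
+        so every branch below must set operationResult and signal operationCompletionEvent exactly once.
+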
*/ + ma_event_wait(&pDevice->null_device.operationEvent); + + /* At this point an event should have been triggered. */ + + /* Starting the device needs to put the thread into a loop. */ + if (pDevice->null_device.operation == MA_DEVICE_OP_START__NULL) { + ma_atomic_exchange_32(&pDevice->null_device.operation, MA_DEVICE_OP_NONE__NULL); + + /* Reset the timer just in case. */ + ma_timer_init(&pDevice->null_device.timer); + + /* Keep looping until an operation has been requested. */ + while (pDevice->null_device.operation != MA_DEVICE_OP_NONE__NULL && pDevice->null_device.operation != MA_DEVICE_OP_START__NULL) { + ma_sleep(10); /* Don't hog the CPU. */ + } + + /* Getting here means a suspend or kill operation has been requested. */ + ma_atomic_exchange_32(&pDevice->null_device.operationResult, MA_SUCCESS); + ma_event_signal(&pDevice->null_device.operationCompletionEvent); + continue; + } + + /* Suspending the device means we need to stop the timer and just continue the loop. */ + if (pDevice->null_device.operation == MA_DEVICE_OP_SUSPEND__NULL) { + ma_atomic_exchange_32(&pDevice->null_device.operation, MA_DEVICE_OP_NONE__NULL); + + /* We need to add the current run time to the prior run time, then reset the timer. */ + pDevice->null_device.priorRunTime += ma_timer_get_time_in_seconds(&pDevice->null_device.timer); + ma_timer_init(&pDevice->null_device.timer); + + /* We're done. */ + ma_atomic_exchange_32(&pDevice->null_device.operationResult, MA_SUCCESS); + ma_event_signal(&pDevice->null_device.operationCompletionEvent); + continue; + } + + /* Killing the device means we need to get out of this loop so that this thread can terminate. */ + if (pDevice->null_device.operation == MA_DEVICE_OP_KILL__NULL) { + ma_atomic_exchange_32(&pDevice->null_device.operation, MA_DEVICE_OP_NONE__NULL); + ma_atomic_exchange_32(&pDevice->null_device.operationResult, MA_SUCCESS); + ma_event_signal(&pDevice->null_device.operationCompletionEvent); + break; + } + + /* Getting a signal on a "none" operation probably means an error. Return invalid operation. */ + if (pDevice->null_device.operation == MA_DEVICE_OP_NONE__NULL) { + MA_ASSERT(MA_FALSE); /* <-- Trigger this in debug mode to ensure developers are aware they're doing something wrong (or there's a bug in a miniaudio). */ + ma_atomic_exchange_32(&pDevice->null_device.operationResult, MA_INVALID_OPERATION); + ma_event_signal(&pDevice->null_device.operationCompletionEvent); + continue; /* Continue the loop. Don't terminate. 
*/ + } + } + + return (ma_thread_result)0; +} + +static ma_result ma_device_do_operation__null(ma_device* pDevice, ma_uint32 operation) +{ + ma_atomic_exchange_32(&pDevice->null_device.operation, operation); + if (!ma_event_signal(&pDevice->null_device.operationEvent)) { + return MA_ERROR; + } + + if (!ma_event_wait(&pDevice->null_device.operationCompletionEvent)) { + return MA_ERROR; + } + + return pDevice->null_device.operationResult; +} + +static ma_uint64 ma_device_get_total_run_time_in_frames__null(ma_device* pDevice) +{ + ma_uint32 internalSampleRate; + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + internalSampleRate = pDevice->capture.internalSampleRate; + } else { + internalSampleRate = pDevice->playback.internalSampleRate; + } + + + return (ma_uint64)((pDevice->null_device.priorRunTime + ma_timer_get_time_in_seconds(&pDevice->null_device.timer)) * internalSampleRate); +} + +static ma_bool32 ma_context_is_device_id_equal__null(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pID0 != NULL); + MA_ASSERT(pID1 != NULL); + (void)pContext; + + return pID0->nullbackend == pID1->nullbackend; +} + +static ma_result ma_context_enumerate_devices__null(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + ma_bool32 cbResult = MA_TRUE; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + /* Playback. */ + if (cbResult) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), "NULL Playback Device", (size_t)-1); + cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + } + + /* Capture. */ + if (cbResult) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), "NULL Capture Device", (size_t)-1); + cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + } + + return MA_SUCCESS; +} + +static ma_result ma_context_get_device_info__null(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) +{ + ma_uint32 iFormat; + + MA_ASSERT(pContext != NULL); + + if (pDeviceID != NULL && pDeviceID->nullbackend != 0) { + return MA_NO_DEVICE; /* Don't know the device. */ + } + + /* Name / Description */ + if (deviceType == ma_device_type_playback) { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), "NULL Playback Device", (size_t)-1); + } else { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), "NULL Capture Device", (size_t)-1); + } + + /* Support everything on the null backend. */ + pDeviceInfo->formatCount = ma_format_count - 1; /* Minus one because we don't want to include ma_format_unknown. */ + for (iFormat = 0; iFormat < pDeviceInfo->formatCount; ++iFormat) { + pDeviceInfo->formats[iFormat] = (ma_format)(iFormat + 1); /* +1 to skip over ma_format_unknown. */ + } + + pDeviceInfo->minChannels = 1; + pDeviceInfo->maxChannels = MA_MAX_CHANNELS; + pDeviceInfo->minSampleRate = MA_SAMPLE_RATE_8000; + pDeviceInfo->maxSampleRate = MA_SAMPLE_RATE_384000; + + (void)pContext; + (void)shareMode; + return MA_SUCCESS; +} + + +static void ma_device_uninit__null(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + /* Keep it clean and wait for the device thread to finish before returning. 
*/ + ma_device_do_operation__null(pDevice, MA_DEVICE_OP_KILL__NULL); + + /* At this point the loop in the device thread is as good as terminated so we can uninitialize our events. */ + ma_event_uninit(&pDevice->null_device.operationCompletionEvent); + ma_event_uninit(&pDevice->null_device.operationEvent); +} + +static ma_result ma_device_init__null(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) +{ + ma_result result; + ma_uint32 periodSizeInFrames; + + MA_ASSERT(pDevice != NULL); + + MA_ZERO_OBJECT(&pDevice->null_device); + + if (pConfig->deviceType == ma_device_type_loopback) { + return MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + periodSizeInFrames = pConfig->periodSizeInFrames; + if (periodSizeInFrames == 0) { + periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->periodSizeInMilliseconds, pConfig->sampleRate); + } + + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + ma_strncpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), "NULL Capture Device", (size_t)-1); + pDevice->capture.internalFormat = pConfig->capture.format; + pDevice->capture.internalChannels = pConfig->capture.channels; + ma_channel_map_copy(pDevice->capture.internalChannelMap, pConfig->capture.channelMap, pConfig->capture.channels); + pDevice->capture.internalPeriodSizeInFrames = periodSizeInFrames; + pDevice->capture.internalPeriods = pConfig->periods; + } + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + ma_strncpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), "NULL Playback Device", (size_t)-1); + pDevice->playback.internalFormat = pConfig->playback.format; + pDevice->playback.internalChannels = pConfig->playback.channels; + ma_channel_map_copy(pDevice->playback.internalChannelMap, pConfig->playback.channelMap, pConfig->playback.channels); + pDevice->playback.internalPeriodSizeInFrames = periodSizeInFrames; + pDevice->playback.internalPeriods = pConfig->periods; + } + + /* + In order to get timing right, we need to create a thread that does nothing but keeps track of the timer. This timer is started when the + first period is "written" to it, and then stopped in ma_device_stop__null(). 
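+
+    As an illustrative note (the numbers below are examples only, not taken from this file): ma_device_get_total_run_time_in_frames__null()
+    converts the elapsed time into frames as (priorRunTime + elapsedSeconds) * internalSampleRate, so 0.5 seconds of run time at a 48000 Hz
+    internal sample rate corresponds to 24000 frames. ma_device_write__null() and ma_device_read__null() then sleep until this frame counter
+    passes the end of the current period, which is how the backend simulates a real-time device without any hardware.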
+    */
+    result = ma_event_init(pContext, &pDevice->null_device.operationEvent);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    result = ma_event_init(pContext, &pDevice->null_device.operationCompletionEvent);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    result = ma_thread_create(pContext, &pDevice->thread, ma_device_thread__null, pDevice);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_device_start__null(ma_device* pDevice)
+{
+    MA_ASSERT(pDevice != NULL);
+
+    ma_device_do_operation__null(pDevice, MA_DEVICE_OP_START__NULL);
+
+    ma_atomic_exchange_32(&pDevice->null_device.isStarted, MA_TRUE);
+    return MA_SUCCESS;
+}
+
+static ma_result ma_device_stop__null(ma_device* pDevice)
+{
+    MA_ASSERT(pDevice != NULL);
+
+    ma_device_do_operation__null(pDevice, MA_DEVICE_OP_SUSPEND__NULL);
+
+    ma_atomic_exchange_32(&pDevice->null_device.isStarted, MA_FALSE);
+    return MA_SUCCESS;
+}
+
+static ma_result ma_device_write__null(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten)
+{
+    ma_result result = MA_SUCCESS;
+    ma_uint32 totalPCMFramesProcessed;
+    ma_bool32 wasStartedOnEntry;
+
+    if (pFramesWritten != NULL) {
+        *pFramesWritten = 0;
+    }
+
+    wasStartedOnEntry = pDevice->null_device.isStarted;
+
+    /* Keep going until everything has been read. */
+    totalPCMFramesProcessed = 0;
+    while (totalPCMFramesProcessed < frameCount) {
+        ma_uint64 targetFrame;
+
+        /* If there are any frames remaining in the current period, consume those first. */
+        if (pDevice->null_device.currentPeriodFramesRemainingPlayback > 0) {
+            ma_uint32 framesRemaining = (frameCount - totalPCMFramesProcessed);
+            ma_uint32 framesToProcess = pDevice->null_device.currentPeriodFramesRemainingPlayback;
+            if (framesToProcess > framesRemaining) {
+                framesToProcess = framesRemaining;
+            }
+
+            /* We don't actually do anything with pPCMFrames, so just mark it as unused to prevent a warning. */
+            (void)pPCMFrames;
+
+            pDevice->null_device.currentPeriodFramesRemainingPlayback -= framesToProcess;
+            totalPCMFramesProcessed += framesToProcess;
+        }
+
+        /* If we've consumed the current period we'll need to mark it as such and ensure the device is started if it's not already. */
+        if (pDevice->null_device.currentPeriodFramesRemainingPlayback == 0) {
+            pDevice->null_device.currentPeriodFramesRemainingPlayback = 0;
+
+            if (!pDevice->null_device.isStarted && !wasStartedOnEntry) {
+                result = ma_device_start__null(pDevice);
+                if (result != MA_SUCCESS) {
+                    break;
+                }
+            }
+        }
+
+        /* If we've consumed the whole buffer we can return now. */
+        MA_ASSERT(totalPCMFramesProcessed <= frameCount);
+        if (totalPCMFramesProcessed == frameCount) {
+            break;
+        }
+
+        /* Getting here means we've still got more frames to consume, but we need to wait for them to become available. */
+        targetFrame = pDevice->null_device.lastProcessedFramePlayback;
+        for (;;) {
+            ma_uint64 currentFrame;
+
+            /* Stop waiting if the device has been stopped. */
+            if (!pDevice->null_device.isStarted) {
+                break;
+            }
+
+            currentFrame = ma_device_get_total_run_time_in_frames__null(pDevice);
+            if (currentFrame >= targetFrame) {
+                break;
+            }
+
+            /* Getting here means we haven't yet reached the target sample, so continue waiting. */
+            ma_sleep(10);
+        }
+
+        pDevice->null_device.lastProcessedFramePlayback += pDevice->playback.internalPeriodSizeInFrames;
+        pDevice->null_device.currentPeriodFramesRemainingPlayback = pDevice->playback.internalPeriodSizeInFrames;
+    }
+
+    if (pFramesWritten != NULL) {
+        *pFramesWritten = totalPCMFramesProcessed;
+    }
+
+    return result;
+}
+
+static ma_result ma_device_read__null(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead)
+{
+    ma_result result = MA_SUCCESS;
+    ma_uint32 totalPCMFramesProcessed;
+
+    if (pFramesRead != NULL) {
+        *pFramesRead = 0;
+    }
+
+    /* Keep going until everything has been read. */
+    totalPCMFramesProcessed = 0;
+    while (totalPCMFramesProcessed < frameCount) {
+        ma_uint64 targetFrame;
+
+        /* If there are any frames remaining in the current period, consume those first. */
+        if (pDevice->null_device.currentPeriodFramesRemainingCapture > 0) {
+            ma_uint32 bpf = ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+            ma_uint32 framesRemaining = (frameCount - totalPCMFramesProcessed);
+            ma_uint32 framesToProcess = pDevice->null_device.currentPeriodFramesRemainingCapture;
+            if (framesToProcess > framesRemaining) {
+                framesToProcess = framesRemaining;
+            }
+
+            /* We need to ensure the output buffer is zeroed. */
+            MA_ZERO_MEMORY(ma_offset_ptr(pPCMFrames, totalPCMFramesProcessed*bpf), framesToProcess*bpf);
+
+            pDevice->null_device.currentPeriodFramesRemainingCapture -= framesToProcess;
+            totalPCMFramesProcessed += framesToProcess;
+        }
+
+        /* If we've consumed the current period we'll need to mark it as such and ensure the device is started if it's not already. */
+        if (pDevice->null_device.currentPeriodFramesRemainingCapture == 0) {
+            pDevice->null_device.currentPeriodFramesRemainingCapture = 0;
+        }
+
+        /* If we've consumed the whole buffer we can return now. */
+        MA_ASSERT(totalPCMFramesProcessed <= frameCount);
+        if (totalPCMFramesProcessed == frameCount) {
+            break;
+        }
+
+        /* Getting here means we've still got more frames to consume, but we need to wait for them to become available. */
+        targetFrame = pDevice->null_device.lastProcessedFrameCapture + pDevice->capture.internalPeriodSizeInFrames;
+        for (;;) {
+            ma_uint64 currentFrame;
+
+            /* Stop waiting if the device has been stopped. */
+            if (!pDevice->null_device.isStarted) {
+                break;
+            }
+
+            currentFrame = ma_device_get_total_run_time_in_frames__null(pDevice);
+            if (currentFrame >= targetFrame) {
+                break;
+            }
+
+            /* Getting here means we haven't yet reached the target sample, so continue waiting. */
+            ma_sleep(10);
+        }
+
+        pDevice->null_device.lastProcessedFrameCapture += pDevice->capture.internalPeriodSizeInFrames;
+        pDevice->null_device.currentPeriodFramesRemainingCapture = pDevice->capture.internalPeriodSizeInFrames;
+    }
+
+    if (pFramesRead != NULL) {
+        *pFramesRead = totalPCMFramesProcessed;
+    }
+
+    return result;
+}
+
+static ma_result ma_device_main_loop__null(ma_device* pDevice)
+{
+    ma_result result = MA_SUCCESS;
+    ma_bool32 exitLoop = MA_FALSE;
+
+    MA_ASSERT(pDevice != NULL);
+
+    /* The capture device needs to be started immediately. */
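+    /* (Playback, by contrast, is started lazily from ma_device_write__null() once the first period of frames has been consumed.) */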
+    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+        result = ma_device_start__null(pDevice);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+    }
+
+    while (ma_device__get_state(pDevice) == MA_STATE_STARTED && !exitLoop) {
+        switch (pDevice->type)
+        {
+            case ma_device_type_duplex:
+            {
+                /* The process is: device_read -> convert -> callback -> convert -> device_write */
+                ma_uint32 totalCapturedDeviceFramesProcessed = 0;
+                ma_uint32 capturedDevicePeriodSizeInFrames = ma_min(pDevice->capture.internalPeriodSizeInFrames, pDevice->playback.internalPeriodSizeInFrames);
+
+                while (totalCapturedDeviceFramesProcessed < capturedDevicePeriodSizeInFrames) {
+                    ma_uint8 capturedDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+                    ma_uint8 playbackDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+                    ma_uint32 capturedDeviceDataCapInFrames = sizeof(capturedDeviceData) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+                    ma_uint32 playbackDeviceDataCapInFrames = sizeof(playbackDeviceData) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+                    ma_uint32 capturedDeviceFramesRemaining;
+                    ma_uint32 capturedDeviceFramesProcessed;
+                    ma_uint32 capturedDeviceFramesToProcess;
+                    ma_uint32 capturedDeviceFramesToTryProcessing = capturedDevicePeriodSizeInFrames - totalCapturedDeviceFramesProcessed;
+                    if (capturedDeviceFramesToTryProcessing > capturedDeviceDataCapInFrames) {
+                        capturedDeviceFramesToTryProcessing = capturedDeviceDataCapInFrames;
+                    }
+
+                    result = ma_device_read__null(pDevice, capturedDeviceData, capturedDeviceFramesToTryProcessing, &capturedDeviceFramesToProcess);
+                    if (result != MA_SUCCESS) {
+                        exitLoop = MA_TRUE;
+                        break;
+                    }
+
+                    capturedDeviceFramesRemaining = capturedDeviceFramesToProcess;
+                    capturedDeviceFramesProcessed = 0;
+
+                    /* At this point we have our captured data in device format and we now need to convert it to client format. */
+                    for (;;) {
+                        ma_uint8 capturedClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+                        ma_uint8 playbackClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+                        ma_uint32 capturedClientDataCapInFrames = sizeof(capturedClientData) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels);
+                        ma_uint32 playbackClientDataCapInFrames = sizeof(playbackClientData) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels);
+                        ma_uint64 capturedClientFramesToProcessThisIteration = ma_min(capturedClientDataCapInFrames, playbackClientDataCapInFrames);
+                        ma_uint64 capturedDeviceFramesToProcessThisIteration = capturedDeviceFramesRemaining;
+                        ma_uint8* pRunningCapturedDeviceFrames = ma_offset_ptr(capturedDeviceData, capturedDeviceFramesProcessed * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels));
+
+                        /* Convert capture data from device format to client format. */
+                        result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningCapturedDeviceFrames, &capturedDeviceFramesToProcessThisIteration, capturedClientData, &capturedClientFramesToProcessThisIteration);
+                        if (result != MA_SUCCESS) {
+                            break;
+                        }
+
+                        /*
+                        If we weren't able to generate any output frames it must mean we've exhausted all of our input. The only time this would not be the case is if capturedClientData was too small
+                        which should never be the case when it's of the size MA_DATA_CONVERTER_STACK_BUFFER_SIZE.
+ */ + if (capturedClientFramesToProcessThisIteration == 0) { + break; + } + + ma_device__on_data(pDevice, playbackClientData, capturedClientData, (ma_uint32)capturedClientFramesToProcessThisIteration); /* Safe cast .*/ + + capturedDeviceFramesProcessed += (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */ + capturedDeviceFramesRemaining -= (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */ + + /* At this point the playbackClientData buffer should be holding data that needs to be written to the device. */ + for (;;) { + ma_uint64 convertedClientFrameCount = capturedClientFramesToProcessThisIteration; + ma_uint64 convertedDeviceFrameCount = playbackDeviceDataCapInFrames; + result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, playbackClientData, &convertedClientFrameCount, playbackDeviceData, &convertedDeviceFrameCount); + if (result != MA_SUCCESS) { + break; + } + + result = ma_device_write__null(pDevice, playbackDeviceData, (ma_uint32)convertedDeviceFrameCount, NULL); /* Safe cast. */ + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + + capturedClientFramesToProcessThisIteration -= (ma_uint32)convertedClientFrameCount; /* Safe cast. */ + if (capturedClientFramesToProcessThisIteration == 0) { + break; + } + } + + /* In case an error happened from ma_device_write__null()... */ + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + } + + totalCapturedDeviceFramesProcessed += capturedDeviceFramesProcessed; + } + } break; + + case ma_device_type_capture: + { + /* We read in chunks of the period size, but use a stack allocated buffer for the intermediary. */ + ma_uint8 intermediaryBuffer[8192]; + ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + ma_uint32 periodSizeInFrames = pDevice->capture.internalPeriodSizeInFrames; + ma_uint32 framesReadThisPeriod = 0; + while (framesReadThisPeriod < periodSizeInFrames) { + ma_uint32 framesRemainingInPeriod = periodSizeInFrames - framesReadThisPeriod; + ma_uint32 framesProcessed; + ma_uint32 framesToReadThisIteration = framesRemainingInPeriod; + if (framesToReadThisIteration > intermediaryBufferSizeInFrames) { + framesToReadThisIteration = intermediaryBufferSizeInFrames; + } + + result = ma_device_read__null(pDevice, intermediaryBuffer, framesToReadThisIteration, &framesProcessed); + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + + ma_device__send_frames_to_client(pDevice, framesProcessed, intermediaryBuffer); + + framesReadThisPeriod += framesProcessed; + } + } break; + + case ma_device_type_playback: + { + /* We write in chunks of the period size, but use a stack allocated buffer for the intermediary. 
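+                   For example (illustrative numbers only, not specific to this project): with an internal format of ma_format_f32 and 2
+                   channels, one frame is 8 bytes, so the 8192 byte intermediary buffer below holds 1024 frames per iteration; a 480 frame
+                   period would therefore be written in a single pass.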
+                */
+                ma_uint8 intermediaryBuffer[8192];
+                ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+                ma_uint32 periodSizeInFrames = pDevice->playback.internalPeriodSizeInFrames;
+                ma_uint32 framesWrittenThisPeriod = 0;
+                while (framesWrittenThisPeriod < periodSizeInFrames) {
+                    ma_uint32 framesRemainingInPeriod = periodSizeInFrames - framesWrittenThisPeriod;
+                    ma_uint32 framesProcessed;
+                    ma_uint32 framesToWriteThisIteration = framesRemainingInPeriod;
+                    if (framesToWriteThisIteration > intermediaryBufferSizeInFrames) {
+                        framesToWriteThisIteration = intermediaryBufferSizeInFrames;
+                    }
+
+                    ma_device__read_frames_from_client(pDevice, framesToWriteThisIteration, intermediaryBuffer);
+
+                    result = ma_device_write__null(pDevice, intermediaryBuffer, framesToWriteThisIteration, &framesProcessed);
+                    if (result != MA_SUCCESS) {
+                        exitLoop = MA_TRUE;
+                        break;
+                    }
+
+                    framesWrittenThisPeriod += framesProcessed;
+                }
+            } break;
+
+            /* To silence a warning. Will never hit this. */
+            case ma_device_type_loopback:
+            default: break;
+        }
+    }
+
+
+    /* Here is where the device is stopped. */
+    ma_device_stop__null(pDevice);
+
+    return result;
+}
+
+static ma_result ma_context_uninit__null(ma_context* pContext)
+{
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pContext->backend == ma_backend_null);
+
+    (void)pContext;
+    return MA_SUCCESS;
+}
+
+static ma_result ma_context_init__null(const ma_context_config* pConfig, ma_context* pContext)
+{
+    MA_ASSERT(pContext != NULL);
+
+    (void)pConfig;
+
+    pContext->onUninit = ma_context_uninit__null;
+    pContext->onDeviceIDEqual = ma_context_is_device_id_equal__null;
+    pContext->onEnumDevices = ma_context_enumerate_devices__null;
+    pContext->onGetDeviceInfo = ma_context_get_device_info__null;
+    pContext->onDeviceInit = ma_device_init__null;
+    pContext->onDeviceUninit = ma_device_uninit__null;
+    pContext->onDeviceStart = NULL; /* Not required for synchronous backends. */
+    pContext->onDeviceStop = NULL; /* Not required for synchronous backends. */
+    pContext->onDeviceMainLoop = ma_device_main_loop__null;
+
+    /* The null backend always works.
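+
+       A minimal usage sketch for reference (illustrative only; this snippet is not part of miniaudio itself, though it only uses the
+       public miniaudio API):
+
+           ma_backend backends[] = { ma_backend_null };
+           ma_context context;
+           if (ma_context_init(backends, 1, NULL, &context) != MA_SUCCESS) {
+               // Handle the error.
+           }
+
+       Forcing the null backend like this is handy for running audio code on machines with no usable audio hardware, such as build servers.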
*/ + return MA_SUCCESS; +} +#endif + + +/******************************************************************************* + +WIN32 COMMON + +*******************************************************************************/ +#if defined(MA_WIN32) +#if defined(MA_WIN32_DESKTOP) + #define ma_CoInitializeEx(pContext, pvReserved, dwCoInit) ((MA_PFN_CoInitializeEx)pContext->win32.CoInitializeEx)(pvReserved, dwCoInit) + #define ma_CoUninitialize(pContext) ((MA_PFN_CoUninitialize)pContext->win32.CoUninitialize)() + #define ma_CoCreateInstance(pContext, rclsid, pUnkOuter, dwClsContext, riid, ppv) ((MA_PFN_CoCreateInstance)pContext->win32.CoCreateInstance)(rclsid, pUnkOuter, dwClsContext, riid, ppv) + #define ma_CoTaskMemFree(pContext, pv) ((MA_PFN_CoTaskMemFree)pContext->win32.CoTaskMemFree)(pv) + #define ma_PropVariantClear(pContext, pvar) ((MA_PFN_PropVariantClear)pContext->win32.PropVariantClear)(pvar) +#else + #define ma_CoInitializeEx(pContext, pvReserved, dwCoInit) CoInitializeEx(pvReserved, dwCoInit) + #define ma_CoUninitialize(pContext) CoUninitialize() + #define ma_CoCreateInstance(pContext, rclsid, pUnkOuter, dwClsContext, riid, ppv) CoCreateInstance(rclsid, pUnkOuter, dwClsContext, riid, ppv) + #define ma_CoTaskMemFree(pContext, pv) CoTaskMemFree(pv) + #define ma_PropVariantClear(pContext, pvar) PropVariantClear(pvar) +#endif + +#if !defined(MAXULONG_PTR) +typedef size_t DWORD_PTR; +#endif + +#if !defined(WAVE_FORMAT_44M08) +#define WAVE_FORMAT_44M08 0x00000100 +#define WAVE_FORMAT_44S08 0x00000200 +#define WAVE_FORMAT_44M16 0x00000400 +#define WAVE_FORMAT_44S16 0x00000800 +#define WAVE_FORMAT_48M08 0x00001000 +#define WAVE_FORMAT_48S08 0x00002000 +#define WAVE_FORMAT_48M16 0x00004000 +#define WAVE_FORMAT_48S16 0x00008000 +#define WAVE_FORMAT_96M08 0x00010000 +#define WAVE_FORMAT_96S08 0x00020000 +#define WAVE_FORMAT_96M16 0x00040000 +#define WAVE_FORMAT_96S16 0x00080000 +#endif + +#ifndef SPEAKER_FRONT_LEFT +#define SPEAKER_FRONT_LEFT 0x1 +#define SPEAKER_FRONT_RIGHT 0x2 +#define SPEAKER_FRONT_CENTER 0x4 +#define SPEAKER_LOW_FREQUENCY 0x8 +#define SPEAKER_BACK_LEFT 0x10 +#define SPEAKER_BACK_RIGHT 0x20 +#define SPEAKER_FRONT_LEFT_OF_CENTER 0x40 +#define SPEAKER_FRONT_RIGHT_OF_CENTER 0x80 +#define SPEAKER_BACK_CENTER 0x100 +#define SPEAKER_SIDE_LEFT 0x200 +#define SPEAKER_SIDE_RIGHT 0x400 +#define SPEAKER_TOP_CENTER 0x800 +#define SPEAKER_TOP_FRONT_LEFT 0x1000 +#define SPEAKER_TOP_FRONT_CENTER 0x2000 +#define SPEAKER_TOP_FRONT_RIGHT 0x4000 +#define SPEAKER_TOP_BACK_LEFT 0x8000 +#define SPEAKER_TOP_BACK_CENTER 0x10000 +#define SPEAKER_TOP_BACK_RIGHT 0x20000 +#endif + +/* +The SDK that comes with old versions of MSVC (VC6, for example) does not appear to define WAVEFORMATEXTENSIBLE. We +define our own implementation in this case. +*/ +#if (defined(_MSC_VER) && !defined(_WAVEFORMATEXTENSIBLE_)) || defined(__DMC__) +typedef struct +{ + WAVEFORMATEX Format; + union + { + WORD wValidBitsPerSample; + WORD wSamplesPerBlock; + WORD wReserved; + } Samples; + DWORD dwChannelMask; + GUID SubFormat; +} WAVEFORMATEXTENSIBLE; +#endif + +#ifndef WAVE_FORMAT_EXTENSIBLE +#define WAVE_FORMAT_EXTENSIBLE 0xFFFE +#endif + +#ifndef WAVE_FORMAT_IEEE_FLOAT +#define WAVE_FORMAT_IEEE_FLOAT 0x0003 +#endif + +static GUID MA_GUID_NULL = {0x00000000, 0x0000, 0x0000, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}; + +/* Converts an individual Win32-style channel identifier (SPEAKER_FRONT_LEFT, etc.) to miniaudio. 
*/ +static ma_uint8 ma_channel_id_to_ma__win32(DWORD id) +{ + switch (id) + { + case SPEAKER_FRONT_LEFT: return MA_CHANNEL_FRONT_LEFT; + case SPEAKER_FRONT_RIGHT: return MA_CHANNEL_FRONT_RIGHT; + case SPEAKER_FRONT_CENTER: return MA_CHANNEL_FRONT_CENTER; + case SPEAKER_LOW_FREQUENCY: return MA_CHANNEL_LFE; + case SPEAKER_BACK_LEFT: return MA_CHANNEL_BACK_LEFT; + case SPEAKER_BACK_RIGHT: return MA_CHANNEL_BACK_RIGHT; + case SPEAKER_FRONT_LEFT_OF_CENTER: return MA_CHANNEL_FRONT_LEFT_CENTER; + case SPEAKER_FRONT_RIGHT_OF_CENTER: return MA_CHANNEL_FRONT_RIGHT_CENTER; + case SPEAKER_BACK_CENTER: return MA_CHANNEL_BACK_CENTER; + case SPEAKER_SIDE_LEFT: return MA_CHANNEL_SIDE_LEFT; + case SPEAKER_SIDE_RIGHT: return MA_CHANNEL_SIDE_RIGHT; + case SPEAKER_TOP_CENTER: return MA_CHANNEL_TOP_CENTER; + case SPEAKER_TOP_FRONT_LEFT: return MA_CHANNEL_TOP_FRONT_LEFT; + case SPEAKER_TOP_FRONT_CENTER: return MA_CHANNEL_TOP_FRONT_CENTER; + case SPEAKER_TOP_FRONT_RIGHT: return MA_CHANNEL_TOP_FRONT_RIGHT; + case SPEAKER_TOP_BACK_LEFT: return MA_CHANNEL_TOP_BACK_LEFT; + case SPEAKER_TOP_BACK_CENTER: return MA_CHANNEL_TOP_BACK_CENTER; + case SPEAKER_TOP_BACK_RIGHT: return MA_CHANNEL_TOP_BACK_RIGHT; + default: return 0; + } +} + +/* Converts an individual miniaudio channel identifier (MA_CHANNEL_FRONT_LEFT, etc.) to Win32-style. */ +static DWORD ma_channel_id_to_win32(DWORD id) +{ + switch (id) + { + case MA_CHANNEL_MONO: return SPEAKER_FRONT_CENTER; + case MA_CHANNEL_FRONT_LEFT: return SPEAKER_FRONT_LEFT; + case MA_CHANNEL_FRONT_RIGHT: return SPEAKER_FRONT_RIGHT; + case MA_CHANNEL_FRONT_CENTER: return SPEAKER_FRONT_CENTER; + case MA_CHANNEL_LFE: return SPEAKER_LOW_FREQUENCY; + case MA_CHANNEL_BACK_LEFT: return SPEAKER_BACK_LEFT; + case MA_CHANNEL_BACK_RIGHT: return SPEAKER_BACK_RIGHT; + case MA_CHANNEL_FRONT_LEFT_CENTER: return SPEAKER_FRONT_LEFT_OF_CENTER; + case MA_CHANNEL_FRONT_RIGHT_CENTER: return SPEAKER_FRONT_RIGHT_OF_CENTER; + case MA_CHANNEL_BACK_CENTER: return SPEAKER_BACK_CENTER; + case MA_CHANNEL_SIDE_LEFT: return SPEAKER_SIDE_LEFT; + case MA_CHANNEL_SIDE_RIGHT: return SPEAKER_SIDE_RIGHT; + case MA_CHANNEL_TOP_CENTER: return SPEAKER_TOP_CENTER; + case MA_CHANNEL_TOP_FRONT_LEFT: return SPEAKER_TOP_FRONT_LEFT; + case MA_CHANNEL_TOP_FRONT_CENTER: return SPEAKER_TOP_FRONT_CENTER; + case MA_CHANNEL_TOP_FRONT_RIGHT: return SPEAKER_TOP_FRONT_RIGHT; + case MA_CHANNEL_TOP_BACK_LEFT: return SPEAKER_TOP_BACK_LEFT; + case MA_CHANNEL_TOP_BACK_CENTER: return SPEAKER_TOP_BACK_CENTER; + case MA_CHANNEL_TOP_BACK_RIGHT: return SPEAKER_TOP_BACK_RIGHT; + default: return 0; + } +} + +/* Converts a channel mapping to a Win32-style channel mask. */ +static DWORD ma_channel_map_to_channel_mask__win32(const ma_channel channelMap[MA_MAX_CHANNELS], ma_uint32 channels) +{ + DWORD dwChannelMask = 0; + ma_uint32 iChannel; + + for (iChannel = 0; iChannel < channels; ++iChannel) { + dwChannelMask |= ma_channel_id_to_win32(channelMap[iChannel]); + } + + return dwChannelMask; +} + +/* Converts a Win32-style channel mask to a miniaudio channel map. 
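+   For example, a standard stereo mask of SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT (0x3) produces { MA_CHANNEL_FRONT_LEFT, MA_CHANNEL_FRONT_RIGHT },
+   while an all-zero mask falls back to the default mono/stereo mappings handled at the top of the function.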
*/ +static void ma_channel_mask_to_channel_map__win32(DWORD dwChannelMask, ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS]) +{ + if (channels == 1 && dwChannelMask == 0) { + channelMap[0] = MA_CHANNEL_MONO; + } else if (channels == 2 && dwChannelMask == 0) { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + } else { + if (channels == 1 && (dwChannelMask & SPEAKER_FRONT_CENTER) != 0) { + channelMap[0] = MA_CHANNEL_MONO; + } else { + /* Just iterate over each bit. */ + ma_uint32 iChannel = 0; + ma_uint32 iBit; + + for (iBit = 0; iBit < 32; ++iBit) { + DWORD bitValue = (dwChannelMask & (1UL << iBit)); + if (bitValue != 0) { + /* The bit is set. */ + channelMap[iChannel] = ma_channel_id_to_ma__win32(bitValue); + iChannel += 1; + } + } + } + } +} + +#ifdef __cplusplus +static ma_bool32 ma_is_guid_equal(const void* a, const void* b) +{ + return IsEqualGUID(*(const GUID*)a, *(const GUID*)b); +} +#else +#define ma_is_guid_equal(a, b) IsEqualGUID((const GUID*)a, (const GUID*)b) +#endif + +static ma_format ma_format_from_WAVEFORMATEX(const WAVEFORMATEX* pWF) +{ + MA_ASSERT(pWF != NULL); + + if (pWF->wFormatTag == WAVE_FORMAT_EXTENSIBLE) { + const WAVEFORMATEXTENSIBLE* pWFEX = (const WAVEFORMATEXTENSIBLE*)pWF; + if (ma_is_guid_equal(&pWFEX->SubFormat, &MA_GUID_KSDATAFORMAT_SUBTYPE_PCM)) { + if (pWFEX->Samples.wValidBitsPerSample == 32) { + return ma_format_s32; + } + if (pWFEX->Samples.wValidBitsPerSample == 24) { + if (pWFEX->Format.wBitsPerSample == 32) { + /*return ma_format_s24_32;*/ + } + if (pWFEX->Format.wBitsPerSample == 24) { + return ma_format_s24; + } + } + if (pWFEX->Samples.wValidBitsPerSample == 16) { + return ma_format_s16; + } + if (pWFEX->Samples.wValidBitsPerSample == 8) { + return ma_format_u8; + } + } + if (ma_is_guid_equal(&pWFEX->SubFormat, &MA_GUID_KSDATAFORMAT_SUBTYPE_IEEE_FLOAT)) { + if (pWFEX->Samples.wValidBitsPerSample == 32) { + return ma_format_f32; + } + /* + if (pWFEX->Samples.wValidBitsPerSample == 64) { + return ma_format_f64; + } + */ + } + } else { + if (pWF->wFormatTag == WAVE_FORMAT_PCM) { + if (pWF->wBitsPerSample == 32) { + return ma_format_s32; + } + if (pWF->wBitsPerSample == 24) { + return ma_format_s24; + } + if (pWF->wBitsPerSample == 16) { + return ma_format_s16; + } + if (pWF->wBitsPerSample == 8) { + return ma_format_u8; + } + } + if (pWF->wFormatTag == WAVE_FORMAT_IEEE_FLOAT) { + if (pWF->wBitsPerSample == 32) { + return ma_format_f32; + } + if (pWF->wBitsPerSample == 64) { + /*return ma_format_f64;*/ + } + } + } + + return ma_format_unknown; +} +#endif + + +/******************************************************************************* + +WASAPI Backend + +*******************************************************************************/ +#ifdef MA_HAS_WASAPI +#if 0 +#if defined(_MSC_VER) + #pragma warning(push) + #pragma warning(disable:4091) /* 'typedef ': ignored on left of '' when no variable is declared */ +#endif +#include +#include +#if defined(_MSC_VER) + #pragma warning(pop) +#endif +#endif /* 0 */ + +/* Some compilers don't define VerifyVersionInfoW. Need to write this ourselves. 
*/ +#define MA_WIN32_WINNT_VISTA 0x0600 +#define MA_VER_MINORVERSION 0x01 +#define MA_VER_MAJORVERSION 0x02 +#define MA_VER_SERVICEPACKMAJOR 0x20 +#define MA_VER_GREATER_EQUAL 0x03 + +typedef struct { + DWORD dwOSVersionInfoSize; + DWORD dwMajorVersion; + DWORD dwMinorVersion; + DWORD dwBuildNumber; + DWORD dwPlatformId; + WCHAR szCSDVersion[128]; + WORD wServicePackMajor; + WORD wServicePackMinor; + WORD wSuiteMask; + BYTE wProductType; + BYTE wReserved; +} ma_OSVERSIONINFOEXW; + +typedef BOOL (WINAPI * ma_PFNVerifyVersionInfoW) (ma_OSVERSIONINFOEXW* lpVersionInfo, DWORD dwTypeMask, DWORDLONG dwlConditionMask); +typedef ULONGLONG (WINAPI * ma_PFNVerSetConditionMask)(ULONGLONG dwlConditionMask, DWORD dwTypeBitMask, BYTE dwConditionMask); + + +#ifndef PROPERTYKEY_DEFINED +#define PROPERTYKEY_DEFINED +typedef struct +{ + GUID fmtid; + DWORD pid; +} PROPERTYKEY; +#endif + +/* Some compilers don't define PropVariantInit(). We just do this ourselves since it's just a memset(). */ +static MA_INLINE void ma_PropVariantInit(PROPVARIANT* pProp) +{ + MA_ZERO_OBJECT(pProp); +} + + +static const PROPERTYKEY MA_PKEY_Device_FriendlyName = {{0xA45C254E, 0xDF1C, 0x4EFD, {0x80, 0x20, 0x67, 0xD1, 0x46, 0xA8, 0x50, 0xE0}}, 14}; +static const PROPERTYKEY MA_PKEY_AudioEngine_DeviceFormat = {{0xF19F064D, 0x82C, 0x4E27, {0xBC, 0x73, 0x68, 0x82, 0xA1, 0xBB, 0x8E, 0x4C}}, 0}; + +static const IID MA_IID_IUnknown = {0x00000000, 0x0000, 0x0000, {0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46}}; /* 00000000-0000-0000-C000-000000000046 */ +static const IID MA_IID_IAgileObject = {0x94EA2B94, 0xE9CC, 0x49E0, {0xC0, 0xFF, 0xEE, 0x64, 0xCA, 0x8F, 0x5B, 0x90}}; /* 94EA2B94-E9CC-49E0-C0FF-EE64CA8F5B90 */ + +static const IID MA_IID_IAudioClient = {0x1CB9AD4C, 0xDBFA, 0x4C32, {0xB1, 0x78, 0xC2, 0xF5, 0x68, 0xA7, 0x03, 0xB2}}; /* 1CB9AD4C-DBFA-4C32-B178-C2F568A703B2 = __uuidof(IAudioClient) */ +static const IID MA_IID_IAudioClient2 = {0x726778CD, 0xF60A, 0x4EDA, {0x82, 0xDE, 0xE4, 0x76, 0x10, 0xCD, 0x78, 0xAA}}; /* 726778CD-F60A-4EDA-82DE-E47610CD78AA = __uuidof(IAudioClient2) */ +static const IID MA_IID_IAudioClient3 = {0x7ED4EE07, 0x8E67, 0x4CD4, {0x8C, 0x1A, 0x2B, 0x7A, 0x59, 0x87, 0xAD, 0x42}}; /* 7ED4EE07-8E67-4CD4-8C1A-2B7A5987AD42 = __uuidof(IAudioClient3) */ +static const IID MA_IID_IAudioRenderClient = {0xF294ACFC, 0x3146, 0x4483, {0xA7, 0xBF, 0xAD, 0xDC, 0xA7, 0xC2, 0x60, 0xE2}}; /* F294ACFC-3146-4483-A7BF-ADDCA7C260E2 = __uuidof(IAudioRenderClient) */ +static const IID MA_IID_IAudioCaptureClient = {0xC8ADBD64, 0xE71E, 0x48A0, {0xA4, 0xDE, 0x18, 0x5C, 0x39, 0x5C, 0xD3, 0x17}}; /* C8ADBD64-E71E-48A0-A4DE-185C395CD317 = __uuidof(IAudioCaptureClient) */ +static const IID MA_IID_IMMNotificationClient = {0x7991EEC9, 0x7E89, 0x4D85, {0x83, 0x90, 0x6C, 0x70, 0x3C, 0xEC, 0x60, 0xC0}}; /* 7991EEC9-7E89-4D85-8390-6C703CEC60C0 = __uuidof(IMMNotificationClient) */ +#ifndef MA_WIN32_DESKTOP +static const IID MA_IID_DEVINTERFACE_AUDIO_RENDER = {0xE6327CAD, 0xDCEC, 0x4949, {0xAE, 0x8A, 0x99, 0x1E, 0x97, 0x6A, 0x79, 0xD2}}; /* E6327CAD-DCEC-4949-AE8A-991E976A79D2 */ +static const IID MA_IID_DEVINTERFACE_AUDIO_CAPTURE = {0x2EEF81BE, 0x33FA, 0x4800, {0x96, 0x70, 0x1C, 0xD4, 0x74, 0x97, 0x2C, 0x3F}}; /* 2EEF81BE-33FA-4800-9670-1CD474972C3F */ +static const IID MA_IID_IActivateAudioInterfaceCompletionHandler = {0x41D949AB, 0x9862, 0x444A, {0x80, 0xF6, 0xC2, 0x61, 0x33, 0x4D, 0xA5, 0xEB}}; /* 41D949AB-9862-444A-80F6-C261334DA5EB */ +#endif + +static const IID MA_CLSID_MMDeviceEnumerator_Instance = {0xBCDE0395, 0xE52F, 0x467C, {0x8E, 0x3D, 
0xC4, 0x57, 0x92, 0x91, 0x69, 0x2E}}; /* BCDE0395-E52F-467C-8E3D-C4579291692E = __uuidof(MMDeviceEnumerator) */ +static const IID MA_IID_IMMDeviceEnumerator_Instance = {0xA95664D2, 0x9614, 0x4F35, {0xA7, 0x46, 0xDE, 0x8D, 0xB6, 0x36, 0x17, 0xE6}}; /* A95664D2-9614-4F35-A746-DE8DB63617E6 = __uuidof(IMMDeviceEnumerator) */ +#ifdef __cplusplus +#define MA_CLSID_MMDeviceEnumerator MA_CLSID_MMDeviceEnumerator_Instance +#define MA_IID_IMMDeviceEnumerator MA_IID_IMMDeviceEnumerator_Instance +#else +#define MA_CLSID_MMDeviceEnumerator &MA_CLSID_MMDeviceEnumerator_Instance +#define MA_IID_IMMDeviceEnumerator &MA_IID_IMMDeviceEnumerator_Instance +#endif + +typedef struct ma_IUnknown ma_IUnknown; +#ifdef MA_WIN32_DESKTOP +#define MA_MM_DEVICE_STATE_ACTIVE 1 +#define MA_MM_DEVICE_STATE_DISABLED 2 +#define MA_MM_DEVICE_STATE_NOTPRESENT 4 +#define MA_MM_DEVICE_STATE_UNPLUGGED 8 + +typedef struct ma_IMMDeviceEnumerator ma_IMMDeviceEnumerator; +typedef struct ma_IMMDeviceCollection ma_IMMDeviceCollection; +typedef struct ma_IMMDevice ma_IMMDevice; +#else +typedef struct ma_IActivateAudioInterfaceCompletionHandler ma_IActivateAudioInterfaceCompletionHandler; +typedef struct ma_IActivateAudioInterfaceAsyncOperation ma_IActivateAudioInterfaceAsyncOperation; +#endif +typedef struct ma_IPropertyStore ma_IPropertyStore; +typedef struct ma_IAudioClient ma_IAudioClient; +typedef struct ma_IAudioClient2 ma_IAudioClient2; +typedef struct ma_IAudioClient3 ma_IAudioClient3; +typedef struct ma_IAudioRenderClient ma_IAudioRenderClient; +typedef struct ma_IAudioCaptureClient ma_IAudioCaptureClient; + +typedef ma_int64 MA_REFERENCE_TIME; + +#define MA_AUDCLNT_STREAMFLAGS_CROSSPROCESS 0x00010000 +#define MA_AUDCLNT_STREAMFLAGS_LOOPBACK 0x00020000 +#define MA_AUDCLNT_STREAMFLAGS_EVENTCALLBACK 0x00040000 +#define MA_AUDCLNT_STREAMFLAGS_NOPERSIST 0x00080000 +#define MA_AUDCLNT_STREAMFLAGS_RATEADJUST 0x00100000 +#define MA_AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY 0x08000000 +#define MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM 0x80000000 +#define MA_AUDCLNT_SESSIONFLAGS_EXPIREWHENUNOWNED 0x10000000 +#define MA_AUDCLNT_SESSIONFLAGS_DISPLAY_HIDE 0x20000000 +#define MA_AUDCLNT_SESSIONFLAGS_DISPLAY_HIDEWHENEXPIRED 0x40000000 + +/* Buffer flags. */ +#define MA_AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY 1 +#define MA_AUDCLNT_BUFFERFLAGS_SILENT 2 +#define MA_AUDCLNT_BUFFERFLAGS_TIMESTAMP_ERROR 4 + +typedef enum +{ + ma_eRender = 0, + ma_eCapture = 1, + ma_eAll = 2 +} ma_EDataFlow; + +typedef enum +{ + ma_eConsole = 0, + ma_eMultimedia = 1, + ma_eCommunications = 2 +} ma_ERole; + +typedef enum +{ + MA_AUDCLNT_SHAREMODE_SHARED, + MA_AUDCLNT_SHAREMODE_EXCLUSIVE +} MA_AUDCLNT_SHAREMODE; + +typedef enum +{ + MA_AudioCategory_Other = 0 /* <-- miniaudio is only caring about Other. 
*/ +} MA_AUDIO_STREAM_CATEGORY; + +typedef struct +{ + UINT32 cbSize; + BOOL bIsOffload; + MA_AUDIO_STREAM_CATEGORY eCategory; +} ma_AudioClientProperties; + +/* IUnknown */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IUnknown* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IUnknown* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IUnknown* pThis); +} ma_IUnknownVtbl; +struct ma_IUnknown +{ + ma_IUnknownVtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IUnknown_QueryInterface(ma_IUnknown* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IUnknown_AddRef(ma_IUnknown* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG ma_IUnknown_Release(ma_IUnknown* pThis) { return pThis->lpVtbl->Release(pThis); } + +#ifdef MA_WIN32_DESKTOP + /* IMMNotificationClient */ + typedef struct + { + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IMMNotificationClient* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IMMNotificationClient* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IMMNotificationClient* pThis); + + /* IMMNotificationClient */ + HRESULT (STDMETHODCALLTYPE * OnDeviceStateChanged) (ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID, DWORD dwNewState); + HRESULT (STDMETHODCALLTYPE * OnDeviceAdded) (ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID); + HRESULT (STDMETHODCALLTYPE * OnDeviceRemoved) (ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID); + HRESULT (STDMETHODCALLTYPE * OnDefaultDeviceChanged)(ma_IMMNotificationClient* pThis, ma_EDataFlow dataFlow, ma_ERole role, LPCWSTR pDefaultDeviceID); + HRESULT (STDMETHODCALLTYPE * OnPropertyValueChanged)(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID, const PROPERTYKEY key); + } ma_IMMNotificationClientVtbl; + + /* IMMDeviceEnumerator */ + typedef struct + { + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IMMDeviceEnumerator* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IMMDeviceEnumerator* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IMMDeviceEnumerator* pThis); + + /* IMMDeviceEnumerator */ + HRESULT (STDMETHODCALLTYPE * EnumAudioEndpoints) (ma_IMMDeviceEnumerator* pThis, ma_EDataFlow dataFlow, DWORD dwStateMask, ma_IMMDeviceCollection** ppDevices); + HRESULT (STDMETHODCALLTYPE * GetDefaultAudioEndpoint) (ma_IMMDeviceEnumerator* pThis, ma_EDataFlow dataFlow, ma_ERole role, ma_IMMDevice** ppEndpoint); + HRESULT (STDMETHODCALLTYPE * GetDevice) (ma_IMMDeviceEnumerator* pThis, LPCWSTR pID, ma_IMMDevice** ppDevice); + HRESULT (STDMETHODCALLTYPE * RegisterEndpointNotificationCallback) (ma_IMMDeviceEnumerator* pThis, ma_IMMNotificationClient* pClient); + HRESULT (STDMETHODCALLTYPE * UnregisterEndpointNotificationCallback)(ma_IMMDeviceEnumerator* pThis, ma_IMMNotificationClient* pClient); + } ma_IMMDeviceEnumeratorVtbl; + struct ma_IMMDeviceEnumerator + { + ma_IMMDeviceEnumeratorVtbl* lpVtbl; + }; + static MA_INLINE HRESULT ma_IMMDeviceEnumerator_QueryInterface(ma_IMMDeviceEnumerator* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } + static MA_INLINE ULONG ma_IMMDeviceEnumerator_AddRef(ma_IMMDeviceEnumerator* pThis) { return pThis->lpVtbl->AddRef(pThis); } + static MA_INLINE ULONG ma_IMMDeviceEnumerator_Release(ma_IMMDeviceEnumerator* pThis) { return 
pThis->lpVtbl->Release(pThis); } + static MA_INLINE HRESULT ma_IMMDeviceEnumerator_EnumAudioEndpoints(ma_IMMDeviceEnumerator* pThis, ma_EDataFlow dataFlow, DWORD dwStateMask, ma_IMMDeviceCollection** ppDevices) { return pThis->lpVtbl->EnumAudioEndpoints(pThis, dataFlow, dwStateMask, ppDevices); } + static MA_INLINE HRESULT ma_IMMDeviceEnumerator_GetDefaultAudioEndpoint(ma_IMMDeviceEnumerator* pThis, ma_EDataFlow dataFlow, ma_ERole role, ma_IMMDevice** ppEndpoint) { return pThis->lpVtbl->GetDefaultAudioEndpoint(pThis, dataFlow, role, ppEndpoint); } + static MA_INLINE HRESULT ma_IMMDeviceEnumerator_GetDevice(ma_IMMDeviceEnumerator* pThis, LPCWSTR pID, ma_IMMDevice** ppDevice) { return pThis->lpVtbl->GetDevice(pThis, pID, ppDevice); } + static MA_INLINE HRESULT ma_IMMDeviceEnumerator_RegisterEndpointNotificationCallback(ma_IMMDeviceEnumerator* pThis, ma_IMMNotificationClient* pClient) { return pThis->lpVtbl->RegisterEndpointNotificationCallback(pThis, pClient); } + static MA_INLINE HRESULT ma_IMMDeviceEnumerator_UnregisterEndpointNotificationCallback(ma_IMMDeviceEnumerator* pThis, ma_IMMNotificationClient* pClient) { return pThis->lpVtbl->UnregisterEndpointNotificationCallback(pThis, pClient); } + + + /* IMMDeviceCollection */ + typedef struct + { + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IMMDeviceCollection* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IMMDeviceCollection* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IMMDeviceCollection* pThis); + + /* IMMDeviceCollection */ + HRESULT (STDMETHODCALLTYPE * GetCount)(ma_IMMDeviceCollection* pThis, UINT* pDevices); + HRESULT (STDMETHODCALLTYPE * Item) (ma_IMMDeviceCollection* pThis, UINT nDevice, ma_IMMDevice** ppDevice); + } ma_IMMDeviceCollectionVtbl; + struct ma_IMMDeviceCollection + { + ma_IMMDeviceCollectionVtbl* lpVtbl; + }; + static MA_INLINE HRESULT ma_IMMDeviceCollection_QueryInterface(ma_IMMDeviceCollection* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } + static MA_INLINE ULONG ma_IMMDeviceCollection_AddRef(ma_IMMDeviceCollection* pThis) { return pThis->lpVtbl->AddRef(pThis); } + static MA_INLINE ULONG ma_IMMDeviceCollection_Release(ma_IMMDeviceCollection* pThis) { return pThis->lpVtbl->Release(pThis); } + static MA_INLINE HRESULT ma_IMMDeviceCollection_GetCount(ma_IMMDeviceCollection* pThis, UINT* pDevices) { return pThis->lpVtbl->GetCount(pThis, pDevices); } + static MA_INLINE HRESULT ma_IMMDeviceCollection_Item(ma_IMMDeviceCollection* pThis, UINT nDevice, ma_IMMDevice** ppDevice) { return pThis->lpVtbl->Item(pThis, nDevice, ppDevice); } + + + /* IMMDevice */ + typedef struct + { + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IMMDevice* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IMMDevice* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IMMDevice* pThis); + + /* IMMDevice */ + HRESULT (STDMETHODCALLTYPE * Activate) (ma_IMMDevice* pThis, const IID* const iid, DWORD dwClsCtx, PROPVARIANT* pActivationParams, void** ppInterface); + HRESULT (STDMETHODCALLTYPE * OpenPropertyStore)(ma_IMMDevice* pThis, DWORD stgmAccess, ma_IPropertyStore** ppProperties); + HRESULT (STDMETHODCALLTYPE * GetId) (ma_IMMDevice* pThis, LPWSTR *pID); + HRESULT (STDMETHODCALLTYPE * GetState) (ma_IMMDevice* pThis, DWORD *pState); + } ma_IMMDeviceVtbl; + struct ma_IMMDevice + { + ma_IMMDeviceVtbl* lpVtbl; + }; + static MA_INLINE HRESULT 
ma_IMMDevice_QueryInterface(ma_IMMDevice* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } + static MA_INLINE ULONG ma_IMMDevice_AddRef(ma_IMMDevice* pThis) { return pThis->lpVtbl->AddRef(pThis); } + static MA_INLINE ULONG ma_IMMDevice_Release(ma_IMMDevice* pThis) { return pThis->lpVtbl->Release(pThis); } + static MA_INLINE HRESULT ma_IMMDevice_Activate(ma_IMMDevice* pThis, const IID* const iid, DWORD dwClsCtx, PROPVARIANT* pActivationParams, void** ppInterface) { return pThis->lpVtbl->Activate(pThis, iid, dwClsCtx, pActivationParams, ppInterface); } + static MA_INLINE HRESULT ma_IMMDevice_OpenPropertyStore(ma_IMMDevice* pThis, DWORD stgmAccess, ma_IPropertyStore** ppProperties) { return pThis->lpVtbl->OpenPropertyStore(pThis, stgmAccess, ppProperties); } + static MA_INLINE HRESULT ma_IMMDevice_GetId(ma_IMMDevice* pThis, LPWSTR *pID) { return pThis->lpVtbl->GetId(pThis, pID); } + static MA_INLINE HRESULT ma_IMMDevice_GetState(ma_IMMDevice* pThis, DWORD *pState) { return pThis->lpVtbl->GetState(pThis, pState); } +#else + /* IActivateAudioInterfaceAsyncOperation */ + typedef struct + { + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IActivateAudioInterfaceAsyncOperation* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IActivateAudioInterfaceAsyncOperation* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IActivateAudioInterfaceAsyncOperation* pThis); + + /* IActivateAudioInterfaceAsyncOperation */ + HRESULT (STDMETHODCALLTYPE * GetActivateResult)(ma_IActivateAudioInterfaceAsyncOperation* pThis, HRESULT *pActivateResult, ma_IUnknown** ppActivatedInterface); + } ma_IActivateAudioInterfaceAsyncOperationVtbl; + struct ma_IActivateAudioInterfaceAsyncOperation + { + ma_IActivateAudioInterfaceAsyncOperationVtbl* lpVtbl; + }; + static MA_INLINE HRESULT ma_IActivateAudioInterfaceAsyncOperation_QueryInterface(ma_IActivateAudioInterfaceAsyncOperation* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } + static MA_INLINE ULONG ma_IActivateAudioInterfaceAsyncOperation_AddRef(ma_IActivateAudioInterfaceAsyncOperation* pThis) { return pThis->lpVtbl->AddRef(pThis); } + static MA_INLINE ULONG ma_IActivateAudioInterfaceAsyncOperation_Release(ma_IActivateAudioInterfaceAsyncOperation* pThis) { return pThis->lpVtbl->Release(pThis); } + static MA_INLINE HRESULT ma_IActivateAudioInterfaceAsyncOperation_GetActivateResult(ma_IActivateAudioInterfaceAsyncOperation* pThis, HRESULT *pActivateResult, ma_IUnknown** ppActivatedInterface) { return pThis->lpVtbl->GetActivateResult(pThis, pActivateResult, ppActivatedInterface); } +#endif + +/* IPropertyStore */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IPropertyStore* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IPropertyStore* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IPropertyStore* pThis); + + /* IPropertyStore */ + HRESULT (STDMETHODCALLTYPE * GetCount)(ma_IPropertyStore* pThis, DWORD* pPropCount); + HRESULT (STDMETHODCALLTYPE * GetAt) (ma_IPropertyStore* pThis, DWORD propIndex, PROPERTYKEY* pPropKey); + HRESULT (STDMETHODCALLTYPE * GetValue)(ma_IPropertyStore* pThis, const PROPERTYKEY* const pKey, PROPVARIANT* pPropVar); + HRESULT (STDMETHODCALLTYPE * SetValue)(ma_IPropertyStore* pThis, const PROPERTYKEY* const pKey, const PROPVARIANT* const pPropVar); + HRESULT (STDMETHODCALLTYPE * Commit) 
(ma_IPropertyStore* pThis); +} ma_IPropertyStoreVtbl; +struct ma_IPropertyStore +{ + ma_IPropertyStoreVtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IPropertyStore_QueryInterface(ma_IPropertyStore* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IPropertyStore_AddRef(ma_IPropertyStore* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG ma_IPropertyStore_Release(ma_IPropertyStore* pThis) { return pThis->lpVtbl->Release(pThis); } +static MA_INLINE HRESULT ma_IPropertyStore_GetCount(ma_IPropertyStore* pThis, DWORD* pPropCount) { return pThis->lpVtbl->GetCount(pThis, pPropCount); } +static MA_INLINE HRESULT ma_IPropertyStore_GetAt(ma_IPropertyStore* pThis, DWORD propIndex, PROPERTYKEY* pPropKey) { return pThis->lpVtbl->GetAt(pThis, propIndex, pPropKey); } +static MA_INLINE HRESULT ma_IPropertyStore_GetValue(ma_IPropertyStore* pThis, const PROPERTYKEY* const pKey, PROPVARIANT* pPropVar) { return pThis->lpVtbl->GetValue(pThis, pKey, pPropVar); } +static MA_INLINE HRESULT ma_IPropertyStore_SetValue(ma_IPropertyStore* pThis, const PROPERTYKEY* const pKey, const PROPVARIANT* const pPropVar) { return pThis->lpVtbl->SetValue(pThis, pKey, pPropVar); } +static MA_INLINE HRESULT ma_IPropertyStore_Commit(ma_IPropertyStore* pThis) { return pThis->lpVtbl->Commit(pThis); } + + +/* IAudioClient */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IAudioClient* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IAudioClient* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IAudioClient* pThis); + + /* IAudioClient */ + HRESULT (STDMETHODCALLTYPE * Initialize) (ma_IAudioClient* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid); + HRESULT (STDMETHODCALLTYPE * GetBufferSize) (ma_IAudioClient* pThis, ma_uint32* pNumBufferFrames); + HRESULT (STDMETHODCALLTYPE * GetStreamLatency) (ma_IAudioClient* pThis, MA_REFERENCE_TIME* pLatency); + HRESULT (STDMETHODCALLTYPE * GetCurrentPadding)(ma_IAudioClient* pThis, ma_uint32* pNumPaddingFrames); + HRESULT (STDMETHODCALLTYPE * IsFormatSupported)(ma_IAudioClient* pThis, MA_AUDCLNT_SHAREMODE shareMode, const WAVEFORMATEX* pFormat, WAVEFORMATEX** ppClosestMatch); + HRESULT (STDMETHODCALLTYPE * GetMixFormat) (ma_IAudioClient* pThis, WAVEFORMATEX** ppDeviceFormat); + HRESULT (STDMETHODCALLTYPE * GetDevicePeriod) (ma_IAudioClient* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod); + HRESULT (STDMETHODCALLTYPE * Start) (ma_IAudioClient* pThis); + HRESULT (STDMETHODCALLTYPE * Stop) (ma_IAudioClient* pThis); + HRESULT (STDMETHODCALLTYPE * Reset) (ma_IAudioClient* pThis); + HRESULT (STDMETHODCALLTYPE * SetEventHandle) (ma_IAudioClient* pThis, HANDLE eventHandle); + HRESULT (STDMETHODCALLTYPE * GetService) (ma_IAudioClient* pThis, const IID* const riid, void** pp); +} ma_IAudioClientVtbl; +struct ma_IAudioClient +{ + ma_IAudioClientVtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IAudioClient_QueryInterface(ma_IAudioClient* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IAudioClient_AddRef(ma_IAudioClient* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG ma_IAudioClient_Release(ma_IAudioClient* pThis) { return 
pThis->lpVtbl->Release(pThis); } +static MA_INLINE HRESULT ma_IAudioClient_Initialize(ma_IAudioClient* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid) { return pThis->lpVtbl->Initialize(pThis, shareMode, streamFlags, bufferDuration, periodicity, pFormat, pAudioSessionGuid); } +static MA_INLINE HRESULT ma_IAudioClient_GetBufferSize(ma_IAudioClient* pThis, ma_uint32* pNumBufferFrames) { return pThis->lpVtbl->GetBufferSize(pThis, pNumBufferFrames); } +static MA_INLINE HRESULT ma_IAudioClient_GetStreamLatency(ma_IAudioClient* pThis, MA_REFERENCE_TIME* pLatency) { return pThis->lpVtbl->GetStreamLatency(pThis, pLatency); } +static MA_INLINE HRESULT ma_IAudioClient_GetCurrentPadding(ma_IAudioClient* pThis, ma_uint32* pNumPaddingFrames) { return pThis->lpVtbl->GetCurrentPadding(pThis, pNumPaddingFrames); } +static MA_INLINE HRESULT ma_IAudioClient_IsFormatSupported(ma_IAudioClient* pThis, MA_AUDCLNT_SHAREMODE shareMode, const WAVEFORMATEX* pFormat, WAVEFORMATEX** ppClosestMatch) { return pThis->lpVtbl->IsFormatSupported(pThis, shareMode, pFormat, ppClosestMatch); } +static MA_INLINE HRESULT ma_IAudioClient_GetMixFormat(ma_IAudioClient* pThis, WAVEFORMATEX** ppDeviceFormat) { return pThis->lpVtbl->GetMixFormat(pThis, ppDeviceFormat); } +static MA_INLINE HRESULT ma_IAudioClient_GetDevicePeriod(ma_IAudioClient* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod) { return pThis->lpVtbl->GetDevicePeriod(pThis, pDefaultDevicePeriod, pMinimumDevicePeriod); } +static MA_INLINE HRESULT ma_IAudioClient_Start(ma_IAudioClient* pThis) { return pThis->lpVtbl->Start(pThis); } +static MA_INLINE HRESULT ma_IAudioClient_Stop(ma_IAudioClient* pThis) { return pThis->lpVtbl->Stop(pThis); } +static MA_INLINE HRESULT ma_IAudioClient_Reset(ma_IAudioClient* pThis) { return pThis->lpVtbl->Reset(pThis); } +static MA_INLINE HRESULT ma_IAudioClient_SetEventHandle(ma_IAudioClient* pThis, HANDLE eventHandle) { return pThis->lpVtbl->SetEventHandle(pThis, eventHandle); } +static MA_INLINE HRESULT ma_IAudioClient_GetService(ma_IAudioClient* pThis, const IID* const riid, void** pp) { return pThis->lpVtbl->GetService(pThis, riid, pp); } + +/* IAudioClient2 */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IAudioClient2* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IAudioClient2* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IAudioClient2* pThis); + + /* IAudioClient */ + HRESULT (STDMETHODCALLTYPE * Initialize) (ma_IAudioClient2* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid); + HRESULT (STDMETHODCALLTYPE * GetBufferSize) (ma_IAudioClient2* pThis, ma_uint32* pNumBufferFrames); + HRESULT (STDMETHODCALLTYPE * GetStreamLatency) (ma_IAudioClient2* pThis, MA_REFERENCE_TIME* pLatency); + HRESULT (STDMETHODCALLTYPE * GetCurrentPadding)(ma_IAudioClient2* pThis, ma_uint32* pNumPaddingFrames); + HRESULT (STDMETHODCALLTYPE * IsFormatSupported)(ma_IAudioClient2* pThis, MA_AUDCLNT_SHAREMODE shareMode, const WAVEFORMATEX* pFormat, WAVEFORMATEX** ppClosestMatch); + HRESULT (STDMETHODCALLTYPE * GetMixFormat) (ma_IAudioClient2* pThis, WAVEFORMATEX** ppDeviceFormat); + HRESULT (STDMETHODCALLTYPE * GetDevicePeriod) (ma_IAudioClient2* pThis, MA_REFERENCE_TIME* 
pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod); + HRESULT (STDMETHODCALLTYPE * Start) (ma_IAudioClient2* pThis); + HRESULT (STDMETHODCALLTYPE * Stop) (ma_IAudioClient2* pThis); + HRESULT (STDMETHODCALLTYPE * Reset) (ma_IAudioClient2* pThis); + HRESULT (STDMETHODCALLTYPE * SetEventHandle) (ma_IAudioClient2* pThis, HANDLE eventHandle); + HRESULT (STDMETHODCALLTYPE * GetService) (ma_IAudioClient2* pThis, const IID* const riid, void** pp); + + /* IAudioClient2 */ + HRESULT (STDMETHODCALLTYPE * IsOffloadCapable) (ma_IAudioClient2* pThis, MA_AUDIO_STREAM_CATEGORY category, BOOL* pOffloadCapable); + HRESULT (STDMETHODCALLTYPE * SetClientProperties)(ma_IAudioClient2* pThis, const ma_AudioClientProperties* pProperties); + HRESULT (STDMETHODCALLTYPE * GetBufferSizeLimits)(ma_IAudioClient2* pThis, const WAVEFORMATEX* pFormat, BOOL eventDriven, MA_REFERENCE_TIME* pMinBufferDuration, MA_REFERENCE_TIME* pMaxBufferDuration); +} ma_IAudioClient2Vtbl; +struct ma_IAudioClient2 +{ + ma_IAudioClient2Vtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IAudioClient2_QueryInterface(ma_IAudioClient2* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IAudioClient2_AddRef(ma_IAudioClient2* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG ma_IAudioClient2_Release(ma_IAudioClient2* pThis) { return pThis->lpVtbl->Release(pThis); } +static MA_INLINE HRESULT ma_IAudioClient2_Initialize(ma_IAudioClient2* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid) { return pThis->lpVtbl->Initialize(pThis, shareMode, streamFlags, bufferDuration, periodicity, pFormat, pAudioSessionGuid); } +static MA_INLINE HRESULT ma_IAudioClient2_GetBufferSize(ma_IAudioClient2* pThis, ma_uint32* pNumBufferFrames) { return pThis->lpVtbl->GetBufferSize(pThis, pNumBufferFrames); } +static MA_INLINE HRESULT ma_IAudioClient2_GetStreamLatency(ma_IAudioClient2* pThis, MA_REFERENCE_TIME* pLatency) { return pThis->lpVtbl->GetStreamLatency(pThis, pLatency); } +static MA_INLINE HRESULT ma_IAudioClient2_GetCurrentPadding(ma_IAudioClient2* pThis, ma_uint32* pNumPaddingFrames) { return pThis->lpVtbl->GetCurrentPadding(pThis, pNumPaddingFrames); } +static MA_INLINE HRESULT ma_IAudioClient2_IsFormatSupported(ma_IAudioClient2* pThis, MA_AUDCLNT_SHAREMODE shareMode, const WAVEFORMATEX* pFormat, WAVEFORMATEX** ppClosestMatch) { return pThis->lpVtbl->IsFormatSupported(pThis, shareMode, pFormat, ppClosestMatch); } +static MA_INLINE HRESULT ma_IAudioClient2_GetMixFormat(ma_IAudioClient2* pThis, WAVEFORMATEX** ppDeviceFormat) { return pThis->lpVtbl->GetMixFormat(pThis, ppDeviceFormat); } +static MA_INLINE HRESULT ma_IAudioClient2_GetDevicePeriod(ma_IAudioClient2* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod) { return pThis->lpVtbl->GetDevicePeriod(pThis, pDefaultDevicePeriod, pMinimumDevicePeriod); } +static MA_INLINE HRESULT ma_IAudioClient2_Start(ma_IAudioClient2* pThis) { return pThis->lpVtbl->Start(pThis); } +static MA_INLINE HRESULT ma_IAudioClient2_Stop(ma_IAudioClient2* pThis) { return pThis->lpVtbl->Stop(pThis); } +static MA_INLINE HRESULT ma_IAudioClient2_Reset(ma_IAudioClient2* pThis) { return pThis->lpVtbl->Reset(pThis); } +static MA_INLINE HRESULT ma_IAudioClient2_SetEventHandle(ma_IAudioClient2* pThis, HANDLE eventHandle) { return 
pThis->lpVtbl->SetEventHandle(pThis, eventHandle); } +static MA_INLINE HRESULT ma_IAudioClient2_GetService(ma_IAudioClient2* pThis, const IID* const riid, void** pp) { return pThis->lpVtbl->GetService(pThis, riid, pp); } +static MA_INLINE HRESULT ma_IAudioClient2_IsOffloadCapable(ma_IAudioClient2* pThis, MA_AUDIO_STREAM_CATEGORY category, BOOL* pOffloadCapable) { return pThis->lpVtbl->IsOffloadCapable(pThis, category, pOffloadCapable); } +static MA_INLINE HRESULT ma_IAudioClient2_SetClientProperties(ma_IAudioClient2* pThis, const ma_AudioClientProperties* pProperties) { return pThis->lpVtbl->SetClientProperties(pThis, pProperties); } +static MA_INLINE HRESULT ma_IAudioClient2_GetBufferSizeLimits(ma_IAudioClient2* pThis, const WAVEFORMATEX* pFormat, BOOL eventDriven, MA_REFERENCE_TIME* pMinBufferDuration, MA_REFERENCE_TIME* pMaxBufferDuration) { return pThis->lpVtbl->GetBufferSizeLimits(pThis, pFormat, eventDriven, pMinBufferDuration, pMaxBufferDuration); } + + +/* IAudioClient3 */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IAudioClient3* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IAudioClient3* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IAudioClient3* pThis); + + /* IAudioClient */ + HRESULT (STDMETHODCALLTYPE * Initialize) (ma_IAudioClient3* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid); + HRESULT (STDMETHODCALLTYPE * GetBufferSize) (ma_IAudioClient3* pThis, ma_uint32* pNumBufferFrames); + HRESULT (STDMETHODCALLTYPE * GetStreamLatency) (ma_IAudioClient3* pThis, MA_REFERENCE_TIME* pLatency); + HRESULT (STDMETHODCALLTYPE * GetCurrentPadding)(ma_IAudioClient3* pThis, ma_uint32* pNumPaddingFrames); + HRESULT (STDMETHODCALLTYPE * IsFormatSupported)(ma_IAudioClient3* pThis, MA_AUDCLNT_SHAREMODE shareMode, const WAVEFORMATEX* pFormat, WAVEFORMATEX** ppClosestMatch); + HRESULT (STDMETHODCALLTYPE * GetMixFormat) (ma_IAudioClient3* pThis, WAVEFORMATEX** ppDeviceFormat); + HRESULT (STDMETHODCALLTYPE * GetDevicePeriod) (ma_IAudioClient3* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod); + HRESULT (STDMETHODCALLTYPE * Start) (ma_IAudioClient3* pThis); + HRESULT (STDMETHODCALLTYPE * Stop) (ma_IAudioClient3* pThis); + HRESULT (STDMETHODCALLTYPE * Reset) (ma_IAudioClient3* pThis); + HRESULT (STDMETHODCALLTYPE * SetEventHandle) (ma_IAudioClient3* pThis, HANDLE eventHandle); + HRESULT (STDMETHODCALLTYPE * GetService) (ma_IAudioClient3* pThis, const IID* const riid, void** pp); + + /* IAudioClient2 */ + HRESULT (STDMETHODCALLTYPE * IsOffloadCapable) (ma_IAudioClient3* pThis, MA_AUDIO_STREAM_CATEGORY category, BOOL* pOffloadCapable); + HRESULT (STDMETHODCALLTYPE * SetClientProperties)(ma_IAudioClient3* pThis, const ma_AudioClientProperties* pProperties); + HRESULT (STDMETHODCALLTYPE * GetBufferSizeLimits)(ma_IAudioClient3* pThis, const WAVEFORMATEX* pFormat, BOOL eventDriven, MA_REFERENCE_TIME* pMinBufferDuration, MA_REFERENCE_TIME* pMaxBufferDuration); + + /* IAudioClient3 */ + HRESULT (STDMETHODCALLTYPE * GetSharedModeEnginePeriod) (ma_IAudioClient3* pThis, const WAVEFORMATEX* pFormat, UINT32* pDefaultPeriodInFrames, UINT32* pFundamentalPeriodInFrames, UINT32* pMinPeriodInFrames, UINT32* pMaxPeriodInFrames); + HRESULT (STDMETHODCALLTYPE * GetCurrentSharedModeEnginePeriod)(ma_IAudioClient3* pThis, WAVEFORMATEX** ppFormat, UINT32* 
pCurrentPeriodInFrames); + HRESULT (STDMETHODCALLTYPE * InitializeSharedAudioStream) (ma_IAudioClient3* pThis, DWORD streamFlags, UINT32 periodInFrames, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid); +} ma_IAudioClient3Vtbl; +struct ma_IAudioClient3 +{ + ma_IAudioClient3Vtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IAudioClient3_QueryInterface(ma_IAudioClient3* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IAudioClient3_AddRef(ma_IAudioClient3* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG ma_IAudioClient3_Release(ma_IAudioClient3* pThis) { return pThis->lpVtbl->Release(pThis); } +static MA_INLINE HRESULT ma_IAudioClient3_Initialize(ma_IAudioClient3* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid) { return pThis->lpVtbl->Initialize(pThis, shareMode, streamFlags, bufferDuration, periodicity, pFormat, pAudioSessionGuid); } +static MA_INLINE HRESULT ma_IAudioClient3_GetBufferSize(ma_IAudioClient3* pThis, ma_uint32* pNumBufferFrames) { return pThis->lpVtbl->GetBufferSize(pThis, pNumBufferFrames); } +static MA_INLINE HRESULT ma_IAudioClient3_GetStreamLatency(ma_IAudioClient3* pThis, MA_REFERENCE_TIME* pLatency) { return pThis->lpVtbl->GetStreamLatency(pThis, pLatency); } +static MA_INLINE HRESULT ma_IAudioClient3_GetCurrentPadding(ma_IAudioClient3* pThis, ma_uint32* pNumPaddingFrames) { return pThis->lpVtbl->GetCurrentPadding(pThis, pNumPaddingFrames); } +static MA_INLINE HRESULT ma_IAudioClient3_IsFormatSupported(ma_IAudioClient3* pThis, MA_AUDCLNT_SHAREMODE shareMode, const WAVEFORMATEX* pFormat, WAVEFORMATEX** ppClosestMatch) { return pThis->lpVtbl->IsFormatSupported(pThis, shareMode, pFormat, ppClosestMatch); } +static MA_INLINE HRESULT ma_IAudioClient3_GetMixFormat(ma_IAudioClient3* pThis, WAVEFORMATEX** ppDeviceFormat) { return pThis->lpVtbl->GetMixFormat(pThis, ppDeviceFormat); } +static MA_INLINE HRESULT ma_IAudioClient3_GetDevicePeriod(ma_IAudioClient3* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod) { return pThis->lpVtbl->GetDevicePeriod(pThis, pDefaultDevicePeriod, pMinimumDevicePeriod); } +static MA_INLINE HRESULT ma_IAudioClient3_Start(ma_IAudioClient3* pThis) { return pThis->lpVtbl->Start(pThis); } +static MA_INLINE HRESULT ma_IAudioClient3_Stop(ma_IAudioClient3* pThis) { return pThis->lpVtbl->Stop(pThis); } +static MA_INLINE HRESULT ma_IAudioClient3_Reset(ma_IAudioClient3* pThis) { return pThis->lpVtbl->Reset(pThis); } +static MA_INLINE HRESULT ma_IAudioClient3_SetEventHandle(ma_IAudioClient3* pThis, HANDLE eventHandle) { return pThis->lpVtbl->SetEventHandle(pThis, eventHandle); } +static MA_INLINE HRESULT ma_IAudioClient3_GetService(ma_IAudioClient3* pThis, const IID* const riid, void** pp) { return pThis->lpVtbl->GetService(pThis, riid, pp); } +static MA_INLINE HRESULT ma_IAudioClient3_IsOffloadCapable(ma_IAudioClient3* pThis, MA_AUDIO_STREAM_CATEGORY category, BOOL* pOffloadCapable) { return pThis->lpVtbl->IsOffloadCapable(pThis, category, pOffloadCapable); } +static MA_INLINE HRESULT ma_IAudioClient3_SetClientProperties(ma_IAudioClient3* pThis, const ma_AudioClientProperties* pProperties) { return pThis->lpVtbl->SetClientProperties(pThis, pProperties); } +static MA_INLINE HRESULT ma_IAudioClient3_GetBufferSizeLimits(ma_IAudioClient3* pThis, const WAVEFORMATEX* pFormat, 
BOOL eventDriven, MA_REFERENCE_TIME* pMinBufferDuration, MA_REFERENCE_TIME* pMaxBufferDuration) { return pThis->lpVtbl->GetBufferSizeLimits(pThis, pFormat, eventDriven, pMinBufferDuration, pMaxBufferDuration); } +static MA_INLINE HRESULT ma_IAudioClient3_GetSharedModeEnginePeriod(ma_IAudioClient3* pThis, const WAVEFORMATEX* pFormat, UINT32* pDefaultPeriodInFrames, UINT32* pFundamentalPeriodInFrames, UINT32* pMinPeriodInFrames, UINT32* pMaxPeriodInFrames) { return pThis->lpVtbl->GetSharedModeEnginePeriod(pThis, pFormat, pDefaultPeriodInFrames, pFundamentalPeriodInFrames, pMinPeriodInFrames, pMaxPeriodInFrames); } +static MA_INLINE HRESULT ma_IAudioClient3_GetCurrentSharedModeEnginePeriod(ma_IAudioClient3* pThis, WAVEFORMATEX** ppFormat, UINT32* pCurrentPeriodInFrames) { return pThis->lpVtbl->GetCurrentSharedModeEnginePeriod(pThis, ppFormat, pCurrentPeriodInFrames); } +static MA_INLINE HRESULT ma_IAudioClient3_InitializeSharedAudioStream(ma_IAudioClient3* pThis, DWORD streamFlags, UINT32 periodInFrames, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGUID) { return pThis->lpVtbl->InitializeSharedAudioStream(pThis, streamFlags, periodInFrames, pFormat, pAudioSessionGUID); } + + +/* IAudioRenderClient */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IAudioRenderClient* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IAudioRenderClient* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IAudioRenderClient* pThis); + + /* IAudioRenderClient */ + HRESULT (STDMETHODCALLTYPE * GetBuffer) (ma_IAudioRenderClient* pThis, ma_uint32 numFramesRequested, BYTE** ppData); + HRESULT (STDMETHODCALLTYPE * ReleaseBuffer)(ma_IAudioRenderClient* pThis, ma_uint32 numFramesWritten, DWORD dwFlags); +} ma_IAudioRenderClientVtbl; +struct ma_IAudioRenderClient +{ + ma_IAudioRenderClientVtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IAudioRenderClient_QueryInterface(ma_IAudioRenderClient* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IAudioRenderClient_AddRef(ma_IAudioRenderClient* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG ma_IAudioRenderClient_Release(ma_IAudioRenderClient* pThis) { return pThis->lpVtbl->Release(pThis); } +static MA_INLINE HRESULT ma_IAudioRenderClient_GetBuffer(ma_IAudioRenderClient* pThis, ma_uint32 numFramesRequested, BYTE** ppData) { return pThis->lpVtbl->GetBuffer(pThis, numFramesRequested, ppData); } +static MA_INLINE HRESULT ma_IAudioRenderClient_ReleaseBuffer(ma_IAudioRenderClient* pThis, ma_uint32 numFramesWritten, DWORD dwFlags) { return pThis->lpVtbl->ReleaseBuffer(pThis, numFramesWritten, dwFlags); } + + +/* IAudioCaptureClient */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IAudioCaptureClient* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IAudioCaptureClient* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IAudioCaptureClient* pThis); + + /* IAudioRenderClient */ + HRESULT (STDMETHODCALLTYPE * GetBuffer) (ma_IAudioCaptureClient* pThis, BYTE** ppData, ma_uint32* pNumFramesToRead, DWORD* pFlags, ma_uint64* pDevicePosition, ma_uint64* pQPCPosition); + HRESULT (STDMETHODCALLTYPE * ReleaseBuffer) (ma_IAudioCaptureClient* pThis, ma_uint32 numFramesRead); + HRESULT (STDMETHODCALLTYPE * GetNextPacketSize)(ma_IAudioCaptureClient* pThis, ma_uint32* pNumFramesInNextPacket); +} ma_IAudioCaptureClientVtbl; 
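+/*
+Usage sketch (illustrative only): the plain-C vtable structs and MA_INLINE wrappers declared above are called just
+like their COM counterparts, with the interface pointer passed explicitly as the first argument. Assuming a
+`pRenderClient` obtained elsewhere via IAudioClient_GetService(), a known `periodSizeInFrames`, and a hypothetical
+caller-supplied fill_with_audio() routine, a single render pass would look roughly like this:
+
+    BYTE* pBuffer;
+    HRESULT hr = ma_IAudioRenderClient_GetBuffer(pRenderClient, periodSizeInFrames, &pBuffer);
+    if (SUCCEEDED(hr)) {
+        fill_with_audio(pBuffer, periodSizeInFrames);
+        ma_IAudioRenderClient_ReleaseBuffer(pRenderClient, periodSizeInFrames, 0);
+    }
+*/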
+struct ma_IAudioCaptureClient +{ + ma_IAudioCaptureClientVtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IAudioCaptureClient_QueryInterface(ma_IAudioCaptureClient* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IAudioCaptureClient_AddRef(ma_IAudioCaptureClient* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG ma_IAudioCaptureClient_Release(ma_IAudioCaptureClient* pThis) { return pThis->lpVtbl->Release(pThis); } +static MA_INLINE HRESULT ma_IAudioCaptureClient_GetBuffer(ma_IAudioCaptureClient* pThis, BYTE** ppData, ma_uint32* pNumFramesToRead, DWORD* pFlags, ma_uint64* pDevicePosition, ma_uint64* pQPCPosition) { return pThis->lpVtbl->GetBuffer(pThis, ppData, pNumFramesToRead, pFlags, pDevicePosition, pQPCPosition); } +static MA_INLINE HRESULT ma_IAudioCaptureClient_ReleaseBuffer(ma_IAudioCaptureClient* pThis, ma_uint32 numFramesRead) { return pThis->lpVtbl->ReleaseBuffer(pThis, numFramesRead); } +static MA_INLINE HRESULT ma_IAudioCaptureClient_GetNextPacketSize(ma_IAudioCaptureClient* pThis, ma_uint32* pNumFramesInNextPacket) { return pThis->lpVtbl->GetNextPacketSize(pThis, pNumFramesInNextPacket); } + +#ifndef MA_WIN32_DESKTOP +#include +typedef struct ma_completion_handler_uwp ma_completion_handler_uwp; + +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_completion_handler_uwp* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_completion_handler_uwp* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_completion_handler_uwp* pThis); + + /* IActivateAudioInterfaceCompletionHandler */ + HRESULT (STDMETHODCALLTYPE * ActivateCompleted)(ma_completion_handler_uwp* pThis, ma_IActivateAudioInterfaceAsyncOperation* pActivateOperation); +} ma_completion_handler_uwp_vtbl; +struct ma_completion_handler_uwp +{ + ma_completion_handler_uwp_vtbl* lpVtbl; + ma_uint32 counter; + HANDLE hEvent; +}; + +static HRESULT STDMETHODCALLTYPE ma_completion_handler_uwp_QueryInterface(ma_completion_handler_uwp* pThis, const IID* const riid, void** ppObject) +{ + /* + We need to "implement" IAgileObject which is just an indicator that's used internally by WASAPI for some multithreading management. To + "implement" this, we just make sure we return pThis when the IAgileObject is requested. + */ + if (!ma_is_guid_equal(riid, &MA_IID_IUnknown) && !ma_is_guid_equal(riid, &MA_IID_IActivateAudioInterfaceCompletionHandler) && !ma_is_guid_equal(riid, &MA_IID_IAgileObject)) { + *ppObject = NULL; + return E_NOINTERFACE; + } + + /* Getting here means the IID is IUnknown or IMMNotificationClient. */ + *ppObject = (void*)pThis; + ((ma_completion_handler_uwp_vtbl*)pThis->lpVtbl)->AddRef(pThis); + return S_OK; +} + +static ULONG STDMETHODCALLTYPE ma_completion_handler_uwp_AddRef(ma_completion_handler_uwp* pThis) +{ + return (ULONG)ma_atomic_increment_32(&pThis->counter); +} + +static ULONG STDMETHODCALLTYPE ma_completion_handler_uwp_Release(ma_completion_handler_uwp* pThis) +{ + ma_uint32 newRefCount = ma_atomic_decrement_32(&pThis->counter); + if (newRefCount == 0) { + return 0; /* We don't free anything here because we never allocate the object on the heap. 
*/ + } + + return (ULONG)newRefCount; +} + +static HRESULT STDMETHODCALLTYPE ma_completion_handler_uwp_ActivateCompleted(ma_completion_handler_uwp* pThis, ma_IActivateAudioInterfaceAsyncOperation* pActivateOperation) +{ + (void)pActivateOperation; + SetEvent(pThis->hEvent); + return S_OK; +} + + +static ma_completion_handler_uwp_vtbl g_maCompletionHandlerVtblInstance = { + ma_completion_handler_uwp_QueryInterface, + ma_completion_handler_uwp_AddRef, + ma_completion_handler_uwp_Release, + ma_completion_handler_uwp_ActivateCompleted +}; + +static ma_result ma_completion_handler_uwp_init(ma_completion_handler_uwp* pHandler) +{ + MA_ASSERT(pHandler != NULL); + MA_ZERO_OBJECT(pHandler); + + pHandler->lpVtbl = &g_maCompletionHandlerVtblInstance; + pHandler->counter = 1; + pHandler->hEvent = CreateEventW(NULL, FALSE, FALSE, NULL); + if (pHandler->hEvent == NULL) { + return ma_result_from_GetLastError(GetLastError()); + } + + return MA_SUCCESS; +} + +static void ma_completion_handler_uwp_uninit(ma_completion_handler_uwp* pHandler) +{ + if (pHandler->hEvent != NULL) { + CloseHandle(pHandler->hEvent); + } +} + +static void ma_completion_handler_uwp_wait(ma_completion_handler_uwp* pHandler) +{ + WaitForSingleObject(pHandler->hEvent, INFINITE); +} +#endif /* !MA_WIN32_DESKTOP */ + +/* We need a virtual table for our notification client object that's used for detecting changes to the default device. */ +#ifdef MA_WIN32_DESKTOP +static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_QueryInterface(ma_IMMNotificationClient* pThis, const IID* const riid, void** ppObject) +{ + /* + We care about two interfaces - IUnknown and IMMNotificationClient. If the requested IID is something else + we just return E_NOINTERFACE. Otherwise we need to increment the reference counter and return S_OK. + */ + if (!ma_is_guid_equal(riid, &MA_IID_IUnknown) && !ma_is_guid_equal(riid, &MA_IID_IMMNotificationClient)) { + *ppObject = NULL; + return E_NOINTERFACE; + } + + /* Getting here means the IID is IUnknown or IMMNotificationClient. */ + *ppObject = (void*)pThis; + ((ma_IMMNotificationClientVtbl*)pThis->lpVtbl)->AddRef(pThis); + return S_OK; +} + +static ULONG STDMETHODCALLTYPE ma_IMMNotificationClient_AddRef(ma_IMMNotificationClient* pThis) +{ + return (ULONG)ma_atomic_increment_32(&pThis->counter); +} + +static ULONG STDMETHODCALLTYPE ma_IMMNotificationClient_Release(ma_IMMNotificationClient* pThis) +{ + ma_uint32 newRefCount = ma_atomic_decrement_32(&pThis->counter); + if (newRefCount == 0) { + return 0; /* We don't free anything here because we never allocate the object on the heap. */ + } + + return (ULONG)newRefCount; +} + + +static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDeviceStateChanged(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID, DWORD dwNewState) +{ +#ifdef MA_DEBUG_OUTPUT + printf("IMMNotificationClient_OnDeviceStateChanged(pDeviceID=%S, dwNewState=%u)\n", (pDeviceID != NULL) ? pDeviceID : L"(NULL)", (unsigned int)dwNewState); +#endif + + (void)pThis; + (void)pDeviceID; + (void)dwNewState; + return S_OK; +} + +static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDeviceAdded(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID) +{ +#ifdef MA_DEBUG_OUTPUT + printf("IMMNotificationClient_OnDeviceAdded(pDeviceID=%S)\n", (pDeviceID != NULL) ? pDeviceID : L"(NULL)"); +#endif + + /* We don't need to worry about this event for our purposes. 
*/ + (void)pThis; + (void)pDeviceID; + return S_OK; +} + +static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDeviceRemoved(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID) +{ +#ifdef MA_DEBUG_OUTPUT + printf("IMMNotificationClient_OnDeviceRemoved(pDeviceID=%S)\n", (pDeviceID != NULL) ? pDeviceID : L"(NULL)"); +#endif + + /* We don't need to worry about this event for our purposes. */ + (void)pThis; + (void)pDeviceID; + return S_OK; +} + +static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDefaultDeviceChanged(ma_IMMNotificationClient* pThis, ma_EDataFlow dataFlow, ma_ERole role, LPCWSTR pDefaultDeviceID) +{ +#ifdef MA_DEBUG_OUTPUT + printf("IMMNotificationClient_OnDefaultDeviceChanged(dataFlow=%d, role=%d, pDefaultDeviceID=%S)\n", dataFlow, role, (pDefaultDeviceID != NULL) ? pDefaultDeviceID : L"(NULL)"); +#endif + + /* We only ever use the eConsole role in miniaudio. */ + if (role != ma_eConsole) { + return S_OK; + } + + /* We only care about devices with the same data flow and role as the current device. */ + if ((pThis->pDevice->type == ma_device_type_playback && dataFlow != ma_eRender) || + (pThis->pDevice->type == ma_device_type_capture && dataFlow != ma_eCapture)) { + return S_OK; + } + + /* Don't do automatic stream routing if we're not allowed. */ + if ((dataFlow == ma_eRender && pThis->pDevice->wasapi.allowPlaybackAutoStreamRouting == MA_FALSE) || + (dataFlow == ma_eCapture && pThis->pDevice->wasapi.allowCaptureAutoStreamRouting == MA_FALSE)) { + return S_OK; + } + + /* + Not currently supporting automatic stream routing in exclusive mode. This is not working correctly on my machine due to + AUDCLNT_E_DEVICE_IN_USE errors when reinitializing the device. If this is a bug in miniaudio, we can try re-enabling this once + it's fixed. + */ + if ((dataFlow == ma_eRender && pThis->pDevice->playback.shareMode == ma_share_mode_exclusive) || + (dataFlow == ma_eCapture && pThis->pDevice->capture.shareMode == ma_share_mode_exclusive)) { + return S_OK; + } + + /* + We don't change the device here - we change it in the worker thread to keep synchronization simple. To do this I'm just setting a flag to + indicate that the default device has changed. Loopback devices are treated as capture devices so we need to do a bit of a dance to handle + that properly. + */ + if (dataFlow == ma_eRender && pThis->pDevice->type != ma_device_type_loopback) { + ma_atomic_exchange_32(&pThis->pDevice->wasapi.hasDefaultPlaybackDeviceChanged, MA_TRUE); + } + if (dataFlow == ma_eCapture || pThis->pDevice->type == ma_device_type_loopback) { + ma_atomic_exchange_32(&pThis->pDevice->wasapi.hasDefaultCaptureDeviceChanged, MA_TRUE); + } + + (void)pDefaultDeviceID; + return S_OK; +} + +static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnPropertyValueChanged(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID, const PROPERTYKEY key) +{ +#ifdef MA_DEBUG_OUTPUT + printf("IMMNotificationClient_OnPropertyValueChanged(pDeviceID=%S)\n", (pDeviceID != NULL) ? 
pDeviceID : L"(NULL)"); +#endif + + (void)pThis; + (void)pDeviceID; + (void)key; + return S_OK; +} + +static ma_IMMNotificationClientVtbl g_maNotificationCientVtbl = { + ma_IMMNotificationClient_QueryInterface, + ma_IMMNotificationClient_AddRef, + ma_IMMNotificationClient_Release, + ma_IMMNotificationClient_OnDeviceStateChanged, + ma_IMMNotificationClient_OnDeviceAdded, + ma_IMMNotificationClient_OnDeviceRemoved, + ma_IMMNotificationClient_OnDefaultDeviceChanged, + ma_IMMNotificationClient_OnPropertyValueChanged +}; +#endif /* MA_WIN32_DESKTOP */ + +#ifdef MA_WIN32_DESKTOP +typedef ma_IMMDevice ma_WASAPIDeviceInterface; +#else +typedef ma_IUnknown ma_WASAPIDeviceInterface; +#endif + + + +static ma_bool32 ma_context_is_device_id_equal__wasapi(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pID0 != NULL); + MA_ASSERT(pID1 != NULL); + (void)pContext; + + return memcmp(pID0->wasapi, pID1->wasapi, sizeof(pID0->wasapi)) == 0; +} + +static void ma_set_device_info_from_WAVEFORMATEX(const WAVEFORMATEX* pWF, ma_device_info* pInfo) +{ + MA_ASSERT(pWF != NULL); + MA_ASSERT(pInfo != NULL); + + pInfo->formatCount = 1; + pInfo->formats[0] = ma_format_from_WAVEFORMATEX(pWF); + pInfo->minChannels = pWF->nChannels; + pInfo->maxChannels = pWF->nChannels; + pInfo->minSampleRate = pWF->nSamplesPerSec; + pInfo->maxSampleRate = pWF->nSamplesPerSec; +} + +static ma_result ma_context_get_device_info_from_IAudioClient__wasapi(ma_context* pContext, /*ma_IMMDevice**/void* pMMDevice, ma_IAudioClient* pAudioClient, ma_share_mode shareMode, ma_device_info* pInfo) +{ + MA_ASSERT(pAudioClient != NULL); + MA_ASSERT(pInfo != NULL); + + /* We use a different technique to retrieve the device information depending on whether or not we are using shared or exclusive mode. */ + if (shareMode == ma_share_mode_shared) { + /* Shared Mode. We use GetMixFormat() here. */ + WAVEFORMATEX* pWF = NULL; + HRESULT hr = ma_IAudioClient_GetMixFormat((ma_IAudioClient*)pAudioClient, (WAVEFORMATEX**)&pWF); + if (SUCCEEDED(hr)) { + ma_set_device_info_from_WAVEFORMATEX(pWF, pInfo); + return MA_SUCCESS; + } else { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve mix format for device info retrieval.", ma_result_from_HRESULT(hr)); + } + } else { + /* Exlcusive Mode. We repeatedly call IsFormatSupported() here. This is not currently support on UWP. */ +#ifdef MA_WIN32_DESKTOP + /* + The first thing to do is get the format from PKEY_AudioEngine_DeviceFormat. This should give us a channel count we assume is + correct which will simplify our searching. + */ + ma_IPropertyStore *pProperties; + HRESULT hr = ma_IMMDevice_OpenPropertyStore((ma_IMMDevice*)pMMDevice, STGM_READ, &pProperties); + if (SUCCEEDED(hr)) { + PROPVARIANT var; + ma_PropVariantInit(&var); + + hr = ma_IPropertyStore_GetValue(pProperties, &MA_PKEY_AudioEngine_DeviceFormat, &var); + if (SUCCEEDED(hr)) { + WAVEFORMATEX* pWF = (WAVEFORMATEX*)var.blob.pBlobData; + ma_set_device_info_from_WAVEFORMATEX(pWF, pInfo); + + /* + In my testing, the format returned by PKEY_AudioEngine_DeviceFormat is suitable for exclusive mode so we check this format + first. If this fails, fall back to a search. + */ + hr = ma_IAudioClient_IsFormatSupported((ma_IAudioClient*)pAudioClient, MA_AUDCLNT_SHAREMODE_EXCLUSIVE, pWF, NULL); + ma_PropVariantClear(pContext, &var); + + if (FAILED(hr)) { + /* + The format returned by PKEY_AudioEngine_DeviceFormat is not supported, so fall back to a search. 
We assume the channel + count returned by MA_PKEY_AudioEngine_DeviceFormat is valid and correct. For simplicity we're only returning one format. + */ + ma_uint32 channels = pInfo->minChannels; + ma_format formatsToSearch[] = { + ma_format_s16, + ma_format_s24, + /*ma_format_s24_32,*/ + ma_format_f32, + ma_format_s32, + ma_format_u8 + }; + ma_channel defaultChannelMap[MA_MAX_CHANNELS]; + WAVEFORMATEXTENSIBLE wf; + ma_bool32 found; + ma_uint32 iFormat; + + ma_get_standard_channel_map(ma_standard_channel_map_microsoft, channels, defaultChannelMap); + + MA_ZERO_OBJECT(&wf); + wf.Format.cbSize = sizeof(wf); + wf.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE; + wf.Format.nChannels = (WORD)channels; + wf.dwChannelMask = ma_channel_map_to_channel_mask__win32(defaultChannelMap, channels); + + found = MA_FALSE; + for (iFormat = 0; iFormat < ma_countof(formatsToSearch); ++iFormat) { + ma_format format = formatsToSearch[iFormat]; + ma_uint32 iSampleRate; + + wf.Format.wBitsPerSample = (WORD)ma_get_bytes_per_sample(format)*8; + wf.Format.nBlockAlign = (wf.Format.nChannels * wf.Format.wBitsPerSample) / 8; + wf.Format.nAvgBytesPerSec = wf.Format.nBlockAlign * wf.Format.nSamplesPerSec; + wf.Samples.wValidBitsPerSample = /*(format == ma_format_s24_32) ? 24 :*/ wf.Format.wBitsPerSample; + if (format == ma_format_f32) { + wf.SubFormat = MA_GUID_KSDATAFORMAT_SUBTYPE_IEEE_FLOAT; + } else { + wf.SubFormat = MA_GUID_KSDATAFORMAT_SUBTYPE_PCM; + } + + for (iSampleRate = 0; iSampleRate < ma_countof(g_maStandardSampleRatePriorities); ++iSampleRate) { + wf.Format.nSamplesPerSec = g_maStandardSampleRatePriorities[iSampleRate]; + + hr = ma_IAudioClient_IsFormatSupported((ma_IAudioClient*)pAudioClient, MA_AUDCLNT_SHAREMODE_EXCLUSIVE, (WAVEFORMATEX*)&wf, NULL); + if (SUCCEEDED(hr)) { + ma_set_device_info_from_WAVEFORMATEX((WAVEFORMATEX*)&wf, pInfo); + found = MA_TRUE; + break; + } + } + + if (found) { + break; + } + } + + if (!found) { + ma_IPropertyStore_Release(pProperties); + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to find suitable device format for device info retrieval.", MA_FORMAT_NOT_SUPPORTED); + } + } + } else { + ma_IPropertyStore_Release(pProperties); + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve device format for device info retrieval.", ma_result_from_HRESULT(hr)); + } + + ma_IPropertyStore_Release(pProperties); + } else { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to open property store for device info retrieval.", ma_result_from_HRESULT(hr)); + } + + return MA_SUCCESS; +#else + /* Exclusive mode not fully supported in UWP right now. */ + return MA_ERROR; +#endif + } +} + +#ifdef MA_WIN32_DESKTOP +static ma_EDataFlow ma_device_type_to_EDataFlow(ma_device_type deviceType) +{ + if (deviceType == ma_device_type_playback) { + return ma_eRender; + } else if (deviceType == ma_device_type_capture) { + return ma_eCapture; + } else { + MA_ASSERT(MA_FALSE); + return ma_eRender; /* Should never hit this. 
*/ + } +} + +static ma_result ma_context_create_IMMDeviceEnumerator__wasapi(ma_context* pContext, ma_IMMDeviceEnumerator** ppDeviceEnumerator) +{ + HRESULT hr; + ma_IMMDeviceEnumerator* pDeviceEnumerator; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(ppDeviceEnumerator != NULL); + + hr = ma_CoCreateInstance(pContext, MA_CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, MA_IID_IMMDeviceEnumerator, (void**)&pDeviceEnumerator); + if (FAILED(hr)) { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create device enumerator.", ma_result_from_HRESULT(hr)); + } + + *ppDeviceEnumerator = pDeviceEnumerator; + + return MA_SUCCESS; +} + +static LPWSTR ma_context_get_default_device_id_from_IMMDeviceEnumerator__wasapi(ma_context* pContext, ma_IMMDeviceEnumerator* pDeviceEnumerator, ma_device_type deviceType) +{ + HRESULT hr; + ma_IMMDevice* pMMDefaultDevice = NULL; + LPWSTR pDefaultDeviceID = NULL; + ma_EDataFlow dataFlow; + ma_ERole role; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pDeviceEnumerator != NULL); + + /* Grab the EDataFlow type from the device type. */ + dataFlow = ma_device_type_to_EDataFlow(deviceType); + + /* The role is always eConsole, but we may make this configurable later. */ + role = ma_eConsole; + + hr = ma_IMMDeviceEnumerator_GetDefaultAudioEndpoint(pDeviceEnumerator, dataFlow, role, &pMMDefaultDevice); + if (FAILED(hr)) { + return NULL; + } + + hr = ma_IMMDevice_GetId(pMMDefaultDevice, &pDefaultDeviceID); + + ma_IMMDevice_Release(pMMDefaultDevice); + pMMDefaultDevice = NULL; + + if (FAILED(hr)) { + return NULL; + } + + return pDefaultDeviceID; +} + +static LPWSTR ma_context_get_default_device_id__wasapi(ma_context* pContext, ma_device_type deviceType) /* Free the returned pointer with ma_CoTaskMemFree() */ +{ + ma_result result; + ma_IMMDeviceEnumerator* pDeviceEnumerator = NULL; + LPWSTR pDefaultDeviceID = NULL; + + MA_ASSERT(pContext != NULL); + + result = ma_context_create_IMMDeviceEnumerator__wasapi(pContext, &pDeviceEnumerator); + if (result != MA_SUCCESS) { + return NULL; + } + + pDefaultDeviceID = ma_context_get_default_device_id_from_IMMDeviceEnumerator__wasapi(pContext, pDeviceEnumerator, deviceType); + + ma_IMMDeviceEnumerator_Release(pDeviceEnumerator); + return pDefaultDeviceID; +} + +static ma_result ma_context_get_MMDevice__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_IMMDevice** ppMMDevice) +{ + ma_IMMDeviceEnumerator* pDeviceEnumerator; + HRESULT hr; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(ppMMDevice != NULL); + + hr = ma_CoCreateInstance(pContext, MA_CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, MA_IID_IMMDeviceEnumerator, (void**)&pDeviceEnumerator); + if (FAILED(hr)) { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create IMMDeviceEnumerator.", ma_result_from_HRESULT(hr)); + } + + if (pDeviceID == NULL) { + hr = ma_IMMDeviceEnumerator_GetDefaultAudioEndpoint(pDeviceEnumerator, (deviceType == ma_device_type_capture) ? 
ma_eCapture : ma_eRender, ma_eConsole, ppMMDevice); + } else { + hr = ma_IMMDeviceEnumerator_GetDevice(pDeviceEnumerator, pDeviceID->wasapi, ppMMDevice); + } + + ma_IMMDeviceEnumerator_Release(pDeviceEnumerator); + if (FAILED(hr)) { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve IMMDevice.", ma_result_from_HRESULT(hr)); + } + + return MA_SUCCESS; +} + +static ma_result ma_context_get_device_info_from_MMDevice__wasapi(ma_context* pContext, ma_IMMDevice* pMMDevice, ma_share_mode shareMode, LPWSTR pDefaultDeviceID, ma_bool32 onlySimpleInfo, ma_device_info* pInfo) +{ + LPWSTR pDeviceID; + HRESULT hr; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pMMDevice != NULL); + MA_ASSERT(pInfo != NULL); + + /* ID. */ + hr = ma_IMMDevice_GetId(pMMDevice, &pDeviceID); + if (SUCCEEDED(hr)) { + size_t idlen = wcslen(pDeviceID); + if (idlen+1 > ma_countof(pInfo->id.wasapi)) { + ma_CoTaskMemFree(pContext, pDeviceID); + MA_ASSERT(MA_FALSE); /* NOTE: If this is triggered, please report it. It means the format of the ID must haved change and is too long to fit in our fixed sized buffer. */ + return MA_ERROR; + } + + MA_COPY_MEMORY(pInfo->id.wasapi, pDeviceID, idlen * sizeof(wchar_t)); + pInfo->id.wasapi[idlen] = '\0'; + + if (pDefaultDeviceID != NULL) { + if (wcscmp(pDeviceID, pDefaultDeviceID) == 0) { + /* It's a default device. */ + pInfo->_private.isDefault = MA_TRUE; + } + } + + ma_CoTaskMemFree(pContext, pDeviceID); + } + + { + ma_IPropertyStore *pProperties; + hr = ma_IMMDevice_OpenPropertyStore(pMMDevice, STGM_READ, &pProperties); + if (SUCCEEDED(hr)) { + PROPVARIANT var; + + /* Description / Friendly Name */ + ma_PropVariantInit(&var); + hr = ma_IPropertyStore_GetValue(pProperties, &MA_PKEY_Device_FriendlyName, &var); + if (SUCCEEDED(hr)) { + WideCharToMultiByte(CP_UTF8, 0, var.pwszVal, -1, pInfo->name, sizeof(pInfo->name), 0, FALSE); + ma_PropVariantClear(pContext, &var); + } + + ma_IPropertyStore_Release(pProperties); + } + } + + /* Format */ + if (!onlySimpleInfo) { + ma_IAudioClient* pAudioClient; + hr = ma_IMMDevice_Activate(pMMDevice, &MA_IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&pAudioClient); + if (SUCCEEDED(hr)) { + ma_result result = ma_context_get_device_info_from_IAudioClient__wasapi(pContext, pMMDevice, pAudioClient, shareMode, pInfo); + + ma_IAudioClient_Release(pAudioClient); + return result; + } else { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to activate audio client for device info retrieval.", ma_result_from_HRESULT(hr)); + } + } + + return MA_SUCCESS; +} + +static ma_result ma_context_enumerate_devices_by_type__wasapi(ma_context* pContext, ma_IMMDeviceEnumerator* pDeviceEnumerator, ma_device_type deviceType, ma_enum_devices_callback_proc callback, void* pUserData) +{ + ma_result result = MA_SUCCESS; + UINT deviceCount; + HRESULT hr; + ma_uint32 iDevice; + LPWSTR pDefaultDeviceID = NULL; + ma_IMMDeviceCollection* pDeviceCollection = NULL; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + /* Grab the default device. We use this to know whether or not flag the returned device info as being the default. */ + pDefaultDeviceID = ma_context_get_default_device_id_from_IMMDeviceEnumerator__wasapi(pContext, pDeviceEnumerator, deviceType); + + /* We need to enumerate the devices which returns a device collection. 
*/ + hr = ma_IMMDeviceEnumerator_EnumAudioEndpoints(pDeviceEnumerator, ma_device_type_to_EDataFlow(deviceType), MA_MM_DEVICE_STATE_ACTIVE, &pDeviceCollection); + if (SUCCEEDED(hr)) { + hr = ma_IMMDeviceCollection_GetCount(pDeviceCollection, &deviceCount); + if (FAILED(hr)) { + result = ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to get device count.", ma_result_from_HRESULT(hr)); + goto done; + } + + for (iDevice = 0; iDevice < deviceCount; ++iDevice) { + ma_device_info deviceInfo; + ma_IMMDevice* pMMDevice; + + MA_ZERO_OBJECT(&deviceInfo); + + hr = ma_IMMDeviceCollection_Item(pDeviceCollection, iDevice, &pMMDevice); + if (SUCCEEDED(hr)) { + result = ma_context_get_device_info_from_MMDevice__wasapi(pContext, pMMDevice, ma_share_mode_shared, pDefaultDeviceID, MA_TRUE, &deviceInfo); /* MA_TRUE = onlySimpleInfo. */ + + ma_IMMDevice_Release(pMMDevice); + if (result == MA_SUCCESS) { + ma_bool32 cbResult = callback(pContext, deviceType, &deviceInfo, pUserData); + if (cbResult == MA_FALSE) { + break; + } + } + } + } + } + +done: + if (pDefaultDeviceID != NULL) { + ma_CoTaskMemFree(pContext, pDefaultDeviceID); + pDefaultDeviceID = NULL; + } + + if (pDeviceCollection != NULL) { + ma_IMMDeviceCollection_Release(pDeviceCollection); + pDeviceCollection = NULL; + } + + return result; +} + +static ma_result ma_context_get_IAudioClient_Desktop__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_IAudioClient** ppAudioClient, ma_IMMDevice** ppMMDevice) +{ + ma_result result; + HRESULT hr; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(ppAudioClient != NULL); + MA_ASSERT(ppMMDevice != NULL); + + result = ma_context_get_MMDevice__wasapi(pContext, deviceType, pDeviceID, ppMMDevice); + if (result != MA_SUCCESS) { + return result; + } + + hr = ma_IMMDevice_Activate(*ppMMDevice, &MA_IID_IAudioClient, CLSCTX_ALL, NULL, (void**)ppAudioClient); + if (FAILED(hr)) { + return ma_result_from_HRESULT(hr); + } + + return MA_SUCCESS; +} +#else +static ma_result ma_context_get_IAudioClient_UWP__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_IAudioClient** ppAudioClient, ma_IUnknown** ppActivatedInterface) +{ + ma_IActivateAudioInterfaceAsyncOperation *pAsyncOp = NULL; + ma_completion_handler_uwp completionHandler; + IID iid; + LPOLESTR iidStr; + HRESULT hr; + ma_result result; + HRESULT activateResult; + ma_IUnknown* pActivatedInterface; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(ppAudioClient != NULL); + + if (pDeviceID != NULL) { + MA_COPY_MEMORY(&iid, pDeviceID->wasapi, sizeof(iid)); + } else { + if (deviceType == ma_device_type_playback) { + iid = MA_IID_DEVINTERFACE_AUDIO_RENDER; + } else { + iid = MA_IID_DEVINTERFACE_AUDIO_CAPTURE; + } + } + +#if defined(__cplusplus) + hr = StringFromIID(iid, &iidStr); +#else + hr = StringFromIID(&iid, &iidStr); +#endif + if (FAILED(hr)) { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to convert device IID to string for ActivateAudioInterfaceAsync(). 
Out of memory.", ma_result_from_HRESULT(hr)); + } + + result = ma_completion_handler_uwp_init(&completionHandler); + if (result != MA_SUCCESS) { + ma_CoTaskMemFree(pContext, iidStr); + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create event for waiting for ActivateAudioInterfaceAsync().", result); + } + +#if defined(__cplusplus) + hr = ActivateAudioInterfaceAsync(iidStr, MA_IID_IAudioClient, NULL, (IActivateAudioInterfaceCompletionHandler*)&completionHandler, (IActivateAudioInterfaceAsyncOperation**)&pAsyncOp); +#else + hr = ActivateAudioInterfaceAsync(iidStr, &MA_IID_IAudioClient, NULL, (IActivateAudioInterfaceCompletionHandler*)&completionHandler, (IActivateAudioInterfaceAsyncOperation**)&pAsyncOp); +#endif + if (FAILED(hr)) { + ma_completion_handler_uwp_uninit(&completionHandler); + ma_CoTaskMemFree(pContext, iidStr); + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] ActivateAudioInterfaceAsync() failed.", ma_result_from_HRESULT(hr)); + } + + ma_CoTaskMemFree(pContext, iidStr); + + /* Wait for the async operation for finish. */ + ma_completion_handler_uwp_wait(&completionHandler); + ma_completion_handler_uwp_uninit(&completionHandler); + + hr = ma_IActivateAudioInterfaceAsyncOperation_GetActivateResult(pAsyncOp, &activateResult, &pActivatedInterface); + ma_IActivateAudioInterfaceAsyncOperation_Release(pAsyncOp); + + if (FAILED(hr) || FAILED(activateResult)) { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to activate device.", FAILED(hr) ? ma_result_from_HRESULT(hr) : ma_result_from_HRESULT(activateResult)); + } + + /* Here is where we grab the IAudioClient interface. */ + hr = ma_IUnknown_QueryInterface(pActivatedInterface, &MA_IID_IAudioClient, (void**)ppAudioClient); + if (FAILED(hr)) { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to query IAudioClient interface.", ma_result_from_HRESULT(hr)); + } + + if (ppActivatedInterface) { + *ppActivatedInterface = pActivatedInterface; + } else { + ma_IUnknown_Release(pActivatedInterface); + } + + return MA_SUCCESS; +} +#endif + +static ma_result ma_context_get_IAudioClient__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_IAudioClient** ppAudioClient, ma_WASAPIDeviceInterface** ppDeviceInterface) +{ +#ifdef MA_WIN32_DESKTOP + return ma_context_get_IAudioClient_Desktop__wasapi(pContext, deviceType, pDeviceID, ppAudioClient, ppDeviceInterface); +#else + return ma_context_get_IAudioClient_UWP__wasapi(pContext, deviceType, pDeviceID, ppAudioClient, ppDeviceInterface); +#endif +} + + +static ma_result ma_context_enumerate_devices__wasapi(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + /* Different enumeration for desktop and UWP. 
*/ +#ifdef MA_WIN32_DESKTOP + /* Desktop */ + HRESULT hr; + ma_IMMDeviceEnumerator* pDeviceEnumerator; + + hr = ma_CoCreateInstance(pContext, MA_CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, MA_IID_IMMDeviceEnumerator, (void**)&pDeviceEnumerator); + if (FAILED(hr)) { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create device enumerator.", ma_result_from_HRESULT(hr)); + } + + ma_context_enumerate_devices_by_type__wasapi(pContext, pDeviceEnumerator, ma_device_type_playback, callback, pUserData); + ma_context_enumerate_devices_by_type__wasapi(pContext, pDeviceEnumerator, ma_device_type_capture, callback, pUserData); + + ma_IMMDeviceEnumerator_Release(pDeviceEnumerator); +#else + /* + UWP + + The MMDevice API is only supported on desktop applications. For now, while I'm still figuring out how to properly enumerate + over devices without using MMDevice, I'm restricting devices to defaults. + + Hint: DeviceInformation::FindAllAsync() with DeviceClass.AudioCapture/AudioRender. https://blogs.windows.com/buildingapps/2014/05/15/real-time-audio-in-windows-store-and-windows-phone-apps/ + */ + if (callback) { + ma_bool32 cbResult = MA_TRUE; + + /* Playback. */ + if (cbResult) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + deviceInfo._private.isDefault = MA_TRUE; + cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + } + + /* Capture. */ + if (cbResult) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + deviceInfo._private.isDefault = MA_TRUE; + cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + } + } +#endif + + return MA_SUCCESS; +} + +static ma_result ma_context_get_device_info__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) +{ +#ifdef MA_WIN32_DESKTOP + ma_result result; + ma_IMMDevice* pMMDevice = NULL; + LPWSTR pDefaultDeviceID = NULL; + + result = ma_context_get_MMDevice__wasapi(pContext, deviceType, pDeviceID, &pMMDevice); + if (result != MA_SUCCESS) { + return result; + } + + /* We need the default device ID so we can set the isDefault flag in the device info. */ + pDefaultDeviceID = ma_context_get_default_device_id__wasapi(pContext, deviceType); + + result = ma_context_get_device_info_from_MMDevice__wasapi(pContext, pMMDevice, shareMode, pDefaultDeviceID, MA_FALSE, pDeviceInfo); /* MA_FALSE = !onlySimpleInfo. */ + + if (pDefaultDeviceID != NULL) { + ma_CoTaskMemFree(pContext, pDefaultDeviceID); + pDefaultDeviceID = NULL; + } + + ma_IMMDevice_Release(pMMDevice); + + return result; +#else + ma_IAudioClient* pAudioClient; + ma_result result; + + /* UWP currently only uses default devices. */ + if (deviceType == ma_device_type_playback) { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + } else { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + } + + /* Not currently supporting exclusive mode on UWP. 
*/ + if (shareMode == ma_share_mode_exclusive) { + return MA_ERROR; + } + + result = ma_context_get_IAudioClient_UWP__wasapi(pContext, deviceType, pDeviceID, &pAudioClient, NULL); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_context_get_device_info_from_IAudioClient__wasapi(pContext, NULL, pAudioClient, shareMode, pDeviceInfo); + + pDeviceInfo->_private.isDefault = MA_TRUE; /* UWP only supports default devices. */ + + ma_IAudioClient_Release(pAudioClient); + return result; +#endif +} + +static void ma_device_uninit__wasapi(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + +#ifdef MA_WIN32_DESKTOP + if (pDevice->wasapi.pDeviceEnumerator) { + ((ma_IMMDeviceEnumerator*)pDevice->wasapi.pDeviceEnumerator)->lpVtbl->UnregisterEndpointNotificationCallback((ma_IMMDeviceEnumerator*)pDevice->wasapi.pDeviceEnumerator, &pDevice->wasapi.notificationClient); + ma_IMMDeviceEnumerator_Release((ma_IMMDeviceEnumerator*)pDevice->wasapi.pDeviceEnumerator); + } +#endif + + if (pDevice->wasapi.pRenderClient) { + ma_IAudioRenderClient_Release((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient); + } + if (pDevice->wasapi.pCaptureClient) { + ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient); + } + + if (pDevice->wasapi.pAudioClientPlayback) { + ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback); + } + if (pDevice->wasapi.pAudioClientCapture) { + ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture); + } + + if (pDevice->wasapi.hEventPlayback) { + CloseHandle(pDevice->wasapi.hEventPlayback); + } + if (pDevice->wasapi.hEventCapture) { + CloseHandle(pDevice->wasapi.hEventCapture); + } +} + + +typedef struct +{ + /* Input. */ + ma_format formatIn; + ma_uint32 channelsIn; + ma_uint32 sampleRateIn; + ma_channel channelMapIn[MA_MAX_CHANNELS]; + ma_uint32 periodSizeInFramesIn; + ma_uint32 periodSizeInMillisecondsIn; + ma_uint32 periodsIn; + ma_bool32 usingDefaultFormat; + ma_bool32 usingDefaultChannels; + ma_bool32 usingDefaultSampleRate; + ma_bool32 usingDefaultChannelMap; + ma_share_mode shareMode; + ma_bool32 noAutoConvertSRC; + ma_bool32 noDefaultQualitySRC; + ma_bool32 noHardwareOffloading; + + /* Output. */ + ma_IAudioClient* pAudioClient; + ma_IAudioRenderClient* pRenderClient; + ma_IAudioCaptureClient* pCaptureClient; + ma_format formatOut; + ma_uint32 channelsOut; + ma_uint32 sampleRateOut; + ma_channel channelMapOut[MA_MAX_CHANNELS]; + ma_uint32 periodSizeInFramesOut; + ma_uint32 periodsOut; + ma_bool32 usingAudioClient3; + char deviceName[256]; +} ma_device_init_internal_data__wasapi; + +static ma_result ma_device_init_internal__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_init_internal_data__wasapi* pData) +{ + HRESULT hr; + ma_result result = MA_SUCCESS; + const char* errorMsg = ""; + MA_AUDCLNT_SHAREMODE shareMode = MA_AUDCLNT_SHAREMODE_SHARED; + DWORD streamFlags = 0; + MA_REFERENCE_TIME periodDurationInMicroseconds; + ma_bool32 wasInitializedUsingIAudioClient3 = MA_FALSE; + WAVEFORMATEXTENSIBLE wf; + ma_WASAPIDeviceInterface* pDeviceInterface = NULL; + ma_IAudioClient2* pAudioClient2; + ma_uint32 nativeSampleRate; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pData != NULL); + + /* This function is only used to initialize one device type: either playback, capture or loopback. Never full-duplex. 
*/ + if (deviceType == ma_device_type_duplex) { + return MA_INVALID_ARGS; + } + + pData->pAudioClient = NULL; + pData->pRenderClient = NULL; + pData->pCaptureClient = NULL; + + streamFlags = MA_AUDCLNT_STREAMFLAGS_EVENTCALLBACK; + if (!pData->noAutoConvertSRC && !pData->usingDefaultSampleRate && pData->shareMode != ma_share_mode_exclusive) { /* <-- Exclusive streams must use the native sample rate. */ + streamFlags |= MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM; + } + if (!pData->noDefaultQualitySRC && !pData->usingDefaultSampleRate && (streamFlags & MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM) != 0) { + streamFlags |= MA_AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY; + } + if (deviceType == ma_device_type_loopback) { + streamFlags |= MA_AUDCLNT_STREAMFLAGS_LOOPBACK; + } + + result = ma_context_get_IAudioClient__wasapi(pContext, deviceType, pDeviceID, &pData->pAudioClient, &pDeviceInterface); + if (result != MA_SUCCESS) { + goto done; + } + + MA_ZERO_OBJECT(&wf); + + /* Try enabling hardware offloading. */ + if (!pData->noHardwareOffloading) { + hr = ma_IAudioClient_QueryInterface(pData->pAudioClient, &MA_IID_IAudioClient2, (void**)&pAudioClient2); + if (SUCCEEDED(hr)) { + BOOL isHardwareOffloadingSupported = 0; + hr = ma_IAudioClient2_IsOffloadCapable(pAudioClient2, MA_AudioCategory_Other, &isHardwareOffloadingSupported); + if (SUCCEEDED(hr) && isHardwareOffloadingSupported) { + ma_AudioClientProperties clientProperties; + MA_ZERO_OBJECT(&clientProperties); + clientProperties.cbSize = sizeof(clientProperties); + clientProperties.bIsOffload = 1; + clientProperties.eCategory = MA_AudioCategory_Other; + ma_IAudioClient2_SetClientProperties(pAudioClient2, &clientProperties); + } + + pAudioClient2->lpVtbl->Release(pAudioClient2); + } + } + + /* Here is where we try to determine the best format to use with the device. If the client if wanting exclusive mode, first try finding the best format for that. If this fails, fall back to shared mode. */ + result = MA_FORMAT_NOT_SUPPORTED; + if (pData->shareMode == ma_share_mode_exclusive) { + #ifdef MA_WIN32_DESKTOP + /* In exclusive mode on desktop we always use the backend's native format. */ + ma_IPropertyStore* pStore = NULL; + hr = ma_IMMDevice_OpenPropertyStore(pDeviceInterface, STGM_READ, &pStore); + if (SUCCEEDED(hr)) { + PROPVARIANT prop; + ma_PropVariantInit(&prop); + hr = ma_IPropertyStore_GetValue(pStore, &MA_PKEY_AudioEngine_DeviceFormat, &prop); + if (SUCCEEDED(hr)) { + WAVEFORMATEX* pActualFormat = (WAVEFORMATEX*)prop.blob.pBlobData; + hr = ma_IAudioClient_IsFormatSupported((ma_IAudioClient*)pData->pAudioClient, MA_AUDCLNT_SHAREMODE_EXCLUSIVE, pActualFormat, NULL); + if (SUCCEEDED(hr)) { + MA_COPY_MEMORY(&wf, pActualFormat, sizeof(WAVEFORMATEXTENSIBLE)); + } + + ma_PropVariantClear(pContext, &prop); + } + + ma_IPropertyStore_Release(pStore); + } + #else + /* + I do not know how to query the device's native format on UWP so for now I'm just disabling support for + exclusive mode. The alternative is to enumerate over different formats and check IsFormatSupported() + until you find one that works. + + TODO: Add support for exclusive mode to UWP. + */ + hr = S_FALSE; + #endif + + if (hr == S_OK) { + shareMode = MA_AUDCLNT_SHAREMODE_EXCLUSIVE; + result = MA_SUCCESS; + } else { + result = MA_SHARE_MODE_NOT_SUPPORTED; + } + } else { + /* In shared mode we are always using the format reported by the operating system. 
*/ + WAVEFORMATEXTENSIBLE* pNativeFormat = NULL; + hr = ma_IAudioClient_GetMixFormat((ma_IAudioClient*)pData->pAudioClient, (WAVEFORMATEX**)&pNativeFormat); + if (hr != S_OK) { + result = MA_FORMAT_NOT_SUPPORTED; + } else { + MA_COPY_MEMORY(&wf, pNativeFormat, sizeof(wf)); + result = MA_SUCCESS; + } + + ma_CoTaskMemFree(pContext, pNativeFormat); + + shareMode = MA_AUDCLNT_SHAREMODE_SHARED; + } + + /* Return an error if we still haven't found a format. */ + if (result != MA_SUCCESS) { + errorMsg = "[WASAPI] Failed to find best device mix format."; + goto done; + } + + /* + Override the native sample rate with the one requested by the caller, but only if we're not using the default sample rate. We'll use + WASAPI to perform the sample rate conversion. + */ + nativeSampleRate = wf.Format.nSamplesPerSec; + if (streamFlags & MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM) { + wf.Format.nSamplesPerSec = pData->sampleRateIn; + wf.Format.nAvgBytesPerSec = wf.Format.nSamplesPerSec * wf.Format.nBlockAlign; + } + + pData->formatOut = ma_format_from_WAVEFORMATEX((WAVEFORMATEX*)&wf); + pData->channelsOut = wf.Format.nChannels; + pData->sampleRateOut = wf.Format.nSamplesPerSec; + + /* Get the internal channel map based on the channel mask. */ + ma_channel_mask_to_channel_map__win32(wf.dwChannelMask, pData->channelsOut, pData->channelMapOut); + + /* Period size. */ + pData->periodsOut = pData->periodsIn; + pData->periodSizeInFramesOut = pData->periodSizeInFramesIn; + if (pData->periodSizeInFramesOut == 0) { + pData->periodSizeInFramesOut = ma_calculate_buffer_size_in_frames_from_milliseconds(pData->periodSizeInMillisecondsIn, wf.Format.nSamplesPerSec); + } + + periodDurationInMicroseconds = ((ma_uint64)pData->periodSizeInFramesOut * 1000 * 1000) / wf.Format.nSamplesPerSec; + + + /* Slightly different initialization for shared and exclusive modes. We try exclusive mode first, and if it fails, fall back to shared mode. */ + if (shareMode == MA_AUDCLNT_SHAREMODE_EXCLUSIVE) { + MA_REFERENCE_TIME bufferDuration = periodDurationInMicroseconds * 10; + + /* + If the periodicy is too small, Initialize() will fail with AUDCLNT_E_INVALID_DEVICE_PERIOD. In this case we should just keep increasing + it and trying it again. + */ + hr = E_FAIL; + for (;;) { + hr = ma_IAudioClient_Initialize((ma_IAudioClient*)pData->pAudioClient, shareMode, streamFlags, bufferDuration, bufferDuration, (WAVEFORMATEX*)&wf, NULL); + if (hr == MA_AUDCLNT_E_INVALID_DEVICE_PERIOD) { + if (bufferDuration > 500*10000) { + break; + } else { + if (bufferDuration == 0) { /* <-- Just a sanity check to prevent an infinit loop. Should never happen, but it makes me feel better. */ + break; + } + + bufferDuration = bufferDuration * 2; + continue; + } + } else { + break; + } + } + + if (hr == MA_AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED) { + ma_uint32 bufferSizeInFrames; + hr = ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pData->pAudioClient, &bufferSizeInFrames); + if (SUCCEEDED(hr)) { + bufferDuration = (MA_REFERENCE_TIME)((10000.0 * 1000 / wf.Format.nSamplesPerSec * bufferSizeInFrames) + 0.5); + + /* Unfortunately we need to release and re-acquire the audio client according to MSDN. Seems silly - why not just call IAudioClient_Initialize() again?! 
*/ + ma_IAudioClient_Release((ma_IAudioClient*)pData->pAudioClient); + + #ifdef MA_WIN32_DESKTOP + hr = ma_IMMDevice_Activate(pDeviceInterface, &MA_IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&pData->pAudioClient); + #else + hr = ma_IUnknown_QueryInterface(pDeviceInterface, &MA_IID_IAudioClient, (void**)&pData->pAudioClient); + #endif + + if (SUCCEEDED(hr)) { + hr = ma_IAudioClient_Initialize((ma_IAudioClient*)pData->pAudioClient, shareMode, streamFlags, bufferDuration, bufferDuration, (WAVEFORMATEX*)&wf, NULL); + } + } + } + + if (FAILED(hr)) { + /* Failed to initialize in exclusive mode. Don't fall back to shared mode - instead tell the client about it. They can reinitialize in shared mode if they want. */ + if (hr == E_ACCESSDENIED) { + errorMsg = "[WASAPI] Failed to initialize device in exclusive mode. Access denied.", result = MA_ACCESS_DENIED; + } else if (hr == MA_AUDCLNT_E_DEVICE_IN_USE) { + errorMsg = "[WASAPI] Failed to initialize device in exclusive mode. Device in use.", result = MA_BUSY; + } else { + errorMsg = "[WASAPI] Failed to initialize device in exclusive mode."; result = ma_result_from_HRESULT(hr); + } + goto done; + } + } + + if (shareMode == MA_AUDCLNT_SHAREMODE_SHARED) { + /* + Low latency shared mode via IAudioClient3. + + NOTE + ==== + Contrary to the documentation on MSDN (https://docs.microsoft.com/en-us/windows/win32/api/audioclient/nf-audioclient-iaudioclient3-initializesharedaudiostream), the + use of AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM and AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY with IAudioClient3_InitializeSharedAudioStream() absolutely does not work. Using + any of these flags will result in HRESULT code 0x88890021. The other problem is that calling IAudioClient3_GetSharedModeEnginePeriod() with a sample rate different to + that returned by IAudioClient_GetMixFormat() also results in an error. I'm therefore disabling low-latency shared mode with AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM. + */ +#ifndef MA_WASAPI_NO_LOW_LATENCY_SHARED_MODE + if ((streamFlags & MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM) == 0 || nativeSampleRate == wf.Format.nSamplesPerSec) { + ma_IAudioClient3* pAudioClient3 = NULL; + hr = ma_IAudioClient_QueryInterface(pData->pAudioClient, &MA_IID_IAudioClient3, (void**)&pAudioClient3); + if (SUCCEEDED(hr)) { + UINT32 defaultPeriodInFrames; + UINT32 fundamentalPeriodInFrames; + UINT32 minPeriodInFrames; + UINT32 maxPeriodInFrames; + hr = ma_IAudioClient3_GetSharedModeEnginePeriod(pAudioClient3, (WAVEFORMATEX*)&wf, &defaultPeriodInFrames, &fundamentalPeriodInFrames, &minPeriodInFrames, &maxPeriodInFrames); + if (SUCCEEDED(hr)) { + UINT32 desiredPeriodInFrames = pData->periodSizeInFramesOut; + UINT32 actualPeriodInFrames = desiredPeriodInFrames; + + /* Make sure the period size is a multiple of fundamentalPeriodInFrames. */ + actualPeriodInFrames = actualPeriodInFrames / fundamentalPeriodInFrames; + actualPeriodInFrames = actualPeriodInFrames * fundamentalPeriodInFrames; + + /* The period needs to be clamped between minPeriodInFrames and maxPeriodInFrames. 
*/ + actualPeriodInFrames = ma_clamp(actualPeriodInFrames, minPeriodInFrames, maxPeriodInFrames); + + #if defined(MA_DEBUG_OUTPUT) + printf("[WASAPI] Trying IAudioClient3_InitializeSharedAudioStream(actualPeriodInFrames=%d)\n", actualPeriodInFrames); + printf(" defaultPeriodInFrames=%d\n", defaultPeriodInFrames); + printf(" fundamentalPeriodInFrames=%d\n", fundamentalPeriodInFrames); + printf(" minPeriodInFrames=%d\n", minPeriodInFrames); + printf(" maxPeriodInFrames=%d\n", maxPeriodInFrames); + #endif + + /* If the client requested a largish buffer than we don't actually want to use low latency shared mode because it forces small buffers. */ + if (actualPeriodInFrames >= desiredPeriodInFrames) { + /* + MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM | MA_AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY must not be in the stream flags. If either of these are specified, + IAudioClient3_InitializeSharedAudioStream() will fail. + */ + hr = ma_IAudioClient3_InitializeSharedAudioStream(pAudioClient3, streamFlags & ~(MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM | MA_AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY), actualPeriodInFrames, (WAVEFORMATEX*)&wf, NULL); + if (SUCCEEDED(hr)) { + wasInitializedUsingIAudioClient3 = MA_TRUE; + pData->periodSizeInFramesOut = actualPeriodInFrames; + #if defined(MA_DEBUG_OUTPUT) + printf("[WASAPI] Using IAudioClient3\n"); + printf(" periodSizeInFramesOut=%d\n", pData->periodSizeInFramesOut); + #endif + } else { + #if defined(MA_DEBUG_OUTPUT) + printf("[WASAPI] IAudioClient3_InitializeSharedAudioStream failed. Falling back to IAudioClient.\n"); + #endif + } + } else { + #if defined(MA_DEBUG_OUTPUT) + printf("[WASAPI] Not using IAudioClient3 because the desired period size is larger than the maximum supported by IAudioClient3.\n"); + #endif + } + } else { + #if defined(MA_DEBUG_OUTPUT) + printf("[WASAPI] IAudioClient3_GetSharedModeEnginePeriod failed. Falling back to IAudioClient.\n"); + #endif + } + + ma_IAudioClient3_Release(pAudioClient3); + pAudioClient3 = NULL; + } + } +#else + #if defined(MA_DEBUG_OUTPUT) + printf("[WASAPI] Not using IAudioClient3 because MA_WASAPI_NO_LOW_LATENCY_SHARED_MODE is enabled.\n"); + #endif +#endif + + /* If we don't have an IAudioClient3 then we need to use the normal initialization routine. */ + if (!wasInitializedUsingIAudioClient3) { + MA_REFERENCE_TIME bufferDuration = periodDurationInMicroseconds * pData->periodsOut * 10; /* <-- Multiply by 10 for microseconds to 100-nanoseconds. */ + hr = ma_IAudioClient_Initialize((ma_IAudioClient*)pData->pAudioClient, shareMode, streamFlags, bufferDuration, 0, (WAVEFORMATEX*)&wf, NULL); + if (FAILED(hr)) { + if (hr == E_ACCESSDENIED) { + errorMsg = "[WASAPI] Failed to initialize device. Access denied.", result = MA_ACCESS_DENIED; + } else if (hr == MA_AUDCLNT_E_DEVICE_IN_USE) { + errorMsg = "[WASAPI] Failed to initialize device. 
Device in use.", result = MA_BUSY; + } else { + errorMsg = "[WASAPI] Failed to initialize device.", result = ma_result_from_HRESULT(hr); + } + + goto done; + } + } + } + + if (!wasInitializedUsingIAudioClient3) { + ma_uint32 bufferSizeInFrames; + hr = ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pData->pAudioClient, &bufferSizeInFrames); + if (FAILED(hr)) { + errorMsg = "[WASAPI] Failed to get audio client's actual buffer size.", result = ma_result_from_HRESULT(hr); + goto done; + } + + pData->periodSizeInFramesOut = bufferSizeInFrames / pData->periodsOut; + } + + pData->usingAudioClient3 = wasInitializedUsingIAudioClient3; + + if (deviceType == ma_device_type_playback) { + hr = ma_IAudioClient_GetService((ma_IAudioClient*)pData->pAudioClient, &MA_IID_IAudioRenderClient, (void**)&pData->pRenderClient); + } else { + hr = ma_IAudioClient_GetService((ma_IAudioClient*)pData->pAudioClient, &MA_IID_IAudioCaptureClient, (void**)&pData->pCaptureClient); + } + + if (FAILED(hr)) { + errorMsg = "[WASAPI] Failed to get audio client service.", result = ma_result_from_HRESULT(hr); + goto done; + } + + + /* Grab the name of the device. */ +#ifdef MA_WIN32_DESKTOP + { + ma_IPropertyStore *pProperties; + hr = ma_IMMDevice_OpenPropertyStore(pDeviceInterface, STGM_READ, &pProperties); + if (SUCCEEDED(hr)) { + PROPVARIANT varName; + ma_PropVariantInit(&varName); + hr = ma_IPropertyStore_GetValue(pProperties, &MA_PKEY_Device_FriendlyName, &varName); + if (SUCCEEDED(hr)) { + WideCharToMultiByte(CP_UTF8, 0, varName.pwszVal, -1, pData->deviceName, sizeof(pData->deviceName), 0, FALSE); + ma_PropVariantClear(pContext, &varName); + } + + ma_IPropertyStore_Release(pProperties); + } + } +#endif + +done: + /* Clean up. */ +#ifdef MA_WIN32_DESKTOP + if (pDeviceInterface != NULL) { + ma_IMMDevice_Release(pDeviceInterface); + } +#else + if (pDeviceInterface != NULL) { + ma_IUnknown_Release(pDeviceInterface); + } +#endif + + if (result != MA_SUCCESS) { + if (pData->pRenderClient) { + ma_IAudioRenderClient_Release((ma_IAudioRenderClient*)pData->pRenderClient); + pData->pRenderClient = NULL; + } + if (pData->pCaptureClient) { + ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pData->pCaptureClient); + pData->pCaptureClient = NULL; + } + if (pData->pAudioClient) { + ma_IAudioClient_Release((ma_IAudioClient*)pData->pAudioClient); + pData->pAudioClient = NULL; + } + + if (errorMsg != NULL && errorMsg[0] != '\0') { + ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, errorMsg, result); + } + + return result; + } else { + return MA_SUCCESS; + } +} + +static ma_result ma_device_reinit__wasapi(ma_device* pDevice, ma_device_type deviceType) +{ + ma_device_init_internal_data__wasapi data; + ma_result result; + + MA_ASSERT(pDevice != NULL); + + /* We only re-initialize the playback or capture device. Never a full-duplex device. 
*/ + if (deviceType == ma_device_type_duplex) { + return MA_INVALID_ARGS; + } + + if (deviceType == ma_device_type_playback) { + data.formatIn = pDevice->playback.format; + data.channelsIn = pDevice->playback.channels; + MA_COPY_MEMORY(data.channelMapIn, pDevice->playback.channelMap, sizeof(pDevice->playback.channelMap)); + data.shareMode = pDevice->playback.shareMode; + data.usingDefaultFormat = pDevice->playback.usingDefaultFormat; + data.usingDefaultChannels = pDevice->playback.usingDefaultChannels; + data.usingDefaultChannelMap = pDevice->playback.usingDefaultChannelMap; + } else { + data.formatIn = pDevice->capture.format; + data.channelsIn = pDevice->capture.channels; + MA_COPY_MEMORY(data.channelMapIn, pDevice->capture.channelMap, sizeof(pDevice->capture.channelMap)); + data.shareMode = pDevice->capture.shareMode; + data.usingDefaultFormat = pDevice->capture.usingDefaultFormat; + data.usingDefaultChannels = pDevice->capture.usingDefaultChannels; + data.usingDefaultChannelMap = pDevice->capture.usingDefaultChannelMap; + } + + data.sampleRateIn = pDevice->sampleRate; + data.usingDefaultSampleRate = pDevice->usingDefaultSampleRate; + data.periodSizeInFramesIn = pDevice->wasapi.originalPeriodSizeInFrames; + data.periodSizeInMillisecondsIn = pDevice->wasapi.originalPeriodSizeInMilliseconds; + data.periodsIn = pDevice->wasapi.originalPeriods; + data.noAutoConvertSRC = pDevice->wasapi.noAutoConvertSRC; + data.noDefaultQualitySRC = pDevice->wasapi.noDefaultQualitySRC; + data.noHardwareOffloading = pDevice->wasapi.noHardwareOffloading; + result = ma_device_init_internal__wasapi(pDevice->pContext, deviceType, NULL, &data); + if (result != MA_SUCCESS) { + return result; + } + + /* At this point we have some new objects ready to go. We need to uninitialize the previous ones and then set the new ones. */ + if (deviceType == ma_device_type_capture || deviceType == ma_device_type_loopback) { + if (pDevice->wasapi.pCaptureClient) { + ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient); + pDevice->wasapi.pCaptureClient = NULL; + } + + if (pDevice->wasapi.pAudioClientCapture) { + ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture); + pDevice->wasapi.pAudioClientCapture = NULL; + } + + pDevice->wasapi.pAudioClientCapture = data.pAudioClient; + pDevice->wasapi.pCaptureClient = data.pCaptureClient; + + pDevice->capture.internalFormat = data.formatOut; + pDevice->capture.internalChannels = data.channelsOut; + pDevice->capture.internalSampleRate = data.sampleRateOut; + MA_COPY_MEMORY(pDevice->capture.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut)); + pDevice->capture.internalPeriodSizeInFrames = data.periodSizeInFramesOut; + pDevice->capture.internalPeriods = data.periodsOut; + ma_strcpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), data.deviceName); + + ma_IAudioClient_SetEventHandle((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture, pDevice->wasapi.hEventCapture); + + pDevice->wasapi.periodSizeInFramesCapture = data.periodSizeInFramesOut; + ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture, &pDevice->wasapi.actualPeriodSizeInFramesCapture); + + /* The device may be in a started state. If so we need to immediately restart it. 
*/ + if (pDevice->wasapi.isStartedCapture) { + HRESULT hr = ma_IAudioClient_Start((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture); + if (FAILED(hr)) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to start internal capture device after reinitialization.", ma_result_from_HRESULT(hr)); + } + } + } + + if (deviceType == ma_device_type_playback) { + if (pDevice->wasapi.pRenderClient) { + ma_IAudioRenderClient_Release((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient); + pDevice->wasapi.pRenderClient = NULL; + } + + if (pDevice->wasapi.pAudioClientPlayback) { + ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback); + pDevice->wasapi.pAudioClientPlayback = NULL; + } + + pDevice->wasapi.pAudioClientPlayback = data.pAudioClient; + pDevice->wasapi.pRenderClient = data.pRenderClient; + + pDevice->playback.internalFormat = data.formatOut; + pDevice->playback.internalChannels = data.channelsOut; + pDevice->playback.internalSampleRate = data.sampleRateOut; + MA_COPY_MEMORY(pDevice->playback.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut)); + pDevice->playback.internalPeriodSizeInFrames = data.periodSizeInFramesOut; + pDevice->playback.internalPeriods = data.periodsOut; + ma_strcpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), data.deviceName); + + ma_IAudioClient_SetEventHandle((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, pDevice->wasapi.hEventPlayback); + + pDevice->wasapi.periodSizeInFramesPlayback = data.periodSizeInFramesOut; + ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, &pDevice->wasapi.actualPeriodSizeInFramesPlayback); + + /* The device may be in a started state. If so we need to immediately restart it. */ + if (pDevice->wasapi.isStartedPlayback) { + HRESULT hr = ma_IAudioClient_Start((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback); + if (FAILED(hr)) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to start internal playback device after reinitialization.", ma_result_from_HRESULT(hr)); + } + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device_init__wasapi(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) +{ + ma_result result = MA_SUCCESS; + + (void)pContext; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pDevice != NULL); + + MA_ZERO_OBJECT(&pDevice->wasapi); + pDevice->wasapi.originalPeriodSizeInFrames = pConfig->periodSizeInFrames; + pDevice->wasapi.originalPeriodSizeInMilliseconds = pConfig->periodSizeInMilliseconds; + pDevice->wasapi.originalPeriods = pConfig->periods; + pDevice->wasapi.noAutoConvertSRC = pConfig->wasapi.noAutoConvertSRC; + pDevice->wasapi.noDefaultQualitySRC = pConfig->wasapi.noDefaultQualitySRC; + pDevice->wasapi.noHardwareOffloading = pConfig->wasapi.noHardwareOffloading; + + /* Exclusive mode is not allowed with loopback. 
*/ + if (pConfig->deviceType == ma_device_type_loopback && pConfig->playback.shareMode == ma_share_mode_exclusive) { + return MA_INVALID_DEVICE_CONFIG; + } + + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex || pConfig->deviceType == ma_device_type_loopback) { + ma_device_init_internal_data__wasapi data; + data.formatIn = pConfig->capture.format; + data.channelsIn = pConfig->capture.channels; + data.sampleRateIn = pConfig->sampleRate; + MA_COPY_MEMORY(data.channelMapIn, pConfig->capture.channelMap, sizeof(pConfig->capture.channelMap)); + data.usingDefaultFormat = pDevice->capture.usingDefaultFormat; + data.usingDefaultChannels = pDevice->capture.usingDefaultChannels; + data.usingDefaultSampleRate = pDevice->usingDefaultSampleRate; + data.usingDefaultChannelMap = pDevice->capture.usingDefaultChannelMap; + data.shareMode = pConfig->capture.shareMode; + data.periodSizeInFramesIn = pConfig->periodSizeInFrames; + data.periodSizeInMillisecondsIn = pConfig->periodSizeInMilliseconds; + data.periodsIn = pConfig->periods; + data.noAutoConvertSRC = pConfig->wasapi.noAutoConvertSRC; + data.noDefaultQualitySRC = pConfig->wasapi.noDefaultQualitySRC; + data.noHardwareOffloading = pConfig->wasapi.noHardwareOffloading; + + result = ma_device_init_internal__wasapi(pDevice->pContext, (pConfig->deviceType == ma_device_type_loopback) ? ma_device_type_loopback : ma_device_type_capture, pConfig->capture.pDeviceID, &data); + if (result != MA_SUCCESS) { + return result; + } + + pDevice->wasapi.pAudioClientCapture = data.pAudioClient; + pDevice->wasapi.pCaptureClient = data.pCaptureClient; + + pDevice->capture.internalFormat = data.formatOut; + pDevice->capture.internalChannels = data.channelsOut; + pDevice->capture.internalSampleRate = data.sampleRateOut; + MA_COPY_MEMORY(pDevice->capture.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut)); + pDevice->capture.internalPeriodSizeInFrames = data.periodSizeInFramesOut; + pDevice->capture.internalPeriods = data.periodsOut; + ma_strcpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), data.deviceName); + + /* + The event for capture needs to be manual reset for the same reason as playback. We keep the initial state set to unsignaled, + however, because we want to block until we actually have something for the first call to ma_device_read(). + */ + pDevice->wasapi.hEventCapture = CreateEventW(NULL, FALSE, FALSE, NULL); /* Auto reset, unsignaled by default. 
*/ + if (pDevice->wasapi.hEventCapture == NULL) { + result = ma_result_from_GetLastError(GetLastError()); + + if (pDevice->wasapi.pCaptureClient != NULL) { + ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient); + pDevice->wasapi.pCaptureClient = NULL; + } + if (pDevice->wasapi.pAudioClientCapture != NULL) { + ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture); + pDevice->wasapi.pAudioClientCapture = NULL; + } + + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create event for capture.", result); + } + ma_IAudioClient_SetEventHandle((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture, pDevice->wasapi.hEventCapture); + + pDevice->wasapi.periodSizeInFramesCapture = data.periodSizeInFramesOut; + ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture, &pDevice->wasapi.actualPeriodSizeInFramesCapture); + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + ma_device_init_internal_data__wasapi data; + data.formatIn = pConfig->playback.format; + data.channelsIn = pConfig->playback.channels; + data.sampleRateIn = pConfig->sampleRate; + MA_COPY_MEMORY(data.channelMapIn, pConfig->playback.channelMap, sizeof(pConfig->playback.channelMap)); + data.usingDefaultFormat = pDevice->playback.usingDefaultFormat; + data.usingDefaultChannels = pDevice->playback.usingDefaultChannels; + data.usingDefaultSampleRate = pDevice->usingDefaultSampleRate; + data.usingDefaultChannelMap = pDevice->playback.usingDefaultChannelMap; + data.shareMode = pConfig->playback.shareMode; + data.periodSizeInFramesIn = pConfig->periodSizeInFrames; + data.periodSizeInMillisecondsIn = pConfig->periodSizeInMilliseconds; + data.periodsIn = pConfig->periods; + data.noAutoConvertSRC = pConfig->wasapi.noAutoConvertSRC; + data.noDefaultQualitySRC = pConfig->wasapi.noDefaultQualitySRC; + data.noHardwareOffloading = pConfig->wasapi.noHardwareOffloading; + + result = ma_device_init_internal__wasapi(pDevice->pContext, ma_device_type_playback, pConfig->playback.pDeviceID, &data); + if (result != MA_SUCCESS) { + if (pConfig->deviceType == ma_device_type_duplex) { + if (pDevice->wasapi.pCaptureClient != NULL) { + ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient); + pDevice->wasapi.pCaptureClient = NULL; + } + if (pDevice->wasapi.pAudioClientCapture != NULL) { + ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture); + pDevice->wasapi.pAudioClientCapture = NULL; + } + + CloseHandle(pDevice->wasapi.hEventCapture); + pDevice->wasapi.hEventCapture = NULL; + } + return result; + } + + pDevice->wasapi.pAudioClientPlayback = data.pAudioClient; + pDevice->wasapi.pRenderClient = data.pRenderClient; + + pDevice->playback.internalFormat = data.formatOut; + pDevice->playback.internalChannels = data.channelsOut; + pDevice->playback.internalSampleRate = data.sampleRateOut; + MA_COPY_MEMORY(pDevice->playback.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut)); + pDevice->playback.internalPeriodSizeInFrames = data.periodSizeInFramesOut; + pDevice->playback.internalPeriods = data.periodsOut; + ma_strcpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), data.deviceName); + + /* + The event for playback is needs to be manual reset because we want to explicitly control the fact that it becomes signalled + only after the whole available space has been filled, never before. 
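+
+        For reference, the reset behaviour and the initial state come from the second and third parameters of CreateEventW(). A
+        rough sketch of the two variants that come up here:
+
+            CreateEventW(NULL, TRUE,  TRUE,  NULL);    <-- manual reset, initially signaled
+            CreateEventW(NULL, FALSE, FALSE, NULL);    <-- auto reset, initially unsignaled
+
+        A manual reset event stays signaled until ResetEvent() is called, whereas an auto reset event clears itself as soon as a
+        single wait on it is released.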
+ + The playback event also needs to be initially set to a signaled state so that the first call to ma_device_write() is able + to get passed WaitForMultipleObjects(). + */ + pDevice->wasapi.hEventPlayback = CreateEventW(NULL, FALSE, TRUE, NULL); /* Auto reset, signaled by default. */ + if (pDevice->wasapi.hEventPlayback == NULL) { + result = ma_result_from_GetLastError(GetLastError()); + + if (pConfig->deviceType == ma_device_type_duplex) { + if (pDevice->wasapi.pCaptureClient != NULL) { + ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient); + pDevice->wasapi.pCaptureClient = NULL; + } + if (pDevice->wasapi.pAudioClientCapture != NULL) { + ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture); + pDevice->wasapi.pAudioClientCapture = NULL; + } + + CloseHandle(pDevice->wasapi.hEventCapture); + pDevice->wasapi.hEventCapture = NULL; + } + + if (pDevice->wasapi.pRenderClient != NULL) { + ma_IAudioRenderClient_Release((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient); + pDevice->wasapi.pRenderClient = NULL; + } + if (pDevice->wasapi.pAudioClientPlayback != NULL) { + ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback); + pDevice->wasapi.pAudioClientPlayback = NULL; + } + + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create event for playback.", result); + } + ma_IAudioClient_SetEventHandle((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, pDevice->wasapi.hEventPlayback); + + pDevice->wasapi.periodSizeInFramesPlayback = data.periodSizeInFramesOut; + ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, &pDevice->wasapi.actualPeriodSizeInFramesPlayback); + } + + /* + We need to get notifications of when the default device changes. We do this through a device enumerator by + registering a IMMNotificationClient with it. We only care about this if it's the default device. + */ +#ifdef MA_WIN32_DESKTOP + if (pConfig->wasapi.noAutoStreamRouting == MA_FALSE) { + if ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.pDeviceID == NULL) { + pDevice->wasapi.allowCaptureAutoStreamRouting = MA_TRUE; + } + if ((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.pDeviceID == NULL) { + pDevice->wasapi.allowPlaybackAutoStreamRouting = MA_TRUE; + } + + if (pDevice->wasapi.allowCaptureAutoStreamRouting || pDevice->wasapi.allowPlaybackAutoStreamRouting) { + ma_IMMDeviceEnumerator* pDeviceEnumerator; + HRESULT hr = ma_CoCreateInstance(pContext, MA_CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, MA_IID_IMMDeviceEnumerator, (void**)&pDeviceEnumerator); + if (FAILED(hr)) { + ma_device_uninit__wasapi(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create device enumerator.", ma_result_from_HRESULT(hr)); + } + + pDevice->wasapi.notificationClient.lpVtbl = (void*)&g_maNotificationCientVtbl; + pDevice->wasapi.notificationClient.counter = 1; + pDevice->wasapi.notificationClient.pDevice = pDevice; + + hr = pDeviceEnumerator->lpVtbl->RegisterEndpointNotificationCallback(pDeviceEnumerator, &pDevice->wasapi.notificationClient); + if (SUCCEEDED(hr)) { + pDevice->wasapi.pDeviceEnumerator = (ma_ptr)pDeviceEnumerator; + } else { + /* Not the end of the world if we fail to register the notification callback. We just won't support automatic stream routing. 
*/ + ma_IMMDeviceEnumerator_Release(pDeviceEnumerator); + } + } + } +#endif + + ma_atomic_exchange_32(&pDevice->wasapi.isStartedCapture, MA_FALSE); + ma_atomic_exchange_32(&pDevice->wasapi.isStartedPlayback, MA_FALSE); + + return MA_SUCCESS; +} + +static ma_result ma_device__get_available_frames__wasapi(ma_device* pDevice, ma_IAudioClient* pAudioClient, ma_uint32* pFrameCount) +{ + ma_uint32 paddingFramesCount; + HRESULT hr; + ma_share_mode shareMode; + + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pFrameCount != NULL); + + *pFrameCount = 0; + + if ((ma_ptr)pAudioClient != pDevice->wasapi.pAudioClientPlayback && (ma_ptr)pAudioClient != pDevice->wasapi.pAudioClientCapture) { + return MA_INVALID_OPERATION; + } + + hr = ma_IAudioClient_GetCurrentPadding(pAudioClient, &paddingFramesCount); + if (FAILED(hr)) { + return ma_result_from_HRESULT(hr); + } + + /* Slightly different rules for exclusive and shared modes. */ + shareMode = ((ma_ptr)pAudioClient == pDevice->wasapi.pAudioClientPlayback) ? pDevice->playback.shareMode : pDevice->capture.shareMode; + if (shareMode == ma_share_mode_exclusive) { + *pFrameCount = paddingFramesCount; + } else { + if ((ma_ptr)pAudioClient == pDevice->wasapi.pAudioClientPlayback) { + *pFrameCount = pDevice->wasapi.actualPeriodSizeInFramesPlayback - paddingFramesCount; + } else { + *pFrameCount = paddingFramesCount; + } + } + + return MA_SUCCESS; +} + +static ma_bool32 ma_device_is_reroute_required__wasapi(ma_device* pDevice, ma_device_type deviceType) +{ + MA_ASSERT(pDevice != NULL); + + if (deviceType == ma_device_type_playback) { + return pDevice->wasapi.hasDefaultPlaybackDeviceChanged; + } + + if (deviceType == ma_device_type_capture || deviceType == ma_device_type_loopback) { + return pDevice->wasapi.hasDefaultCaptureDeviceChanged; + } + + return MA_FALSE; +} + +static ma_result ma_device_reroute__wasapi(ma_device* pDevice, ma_device_type deviceType) +{ + ma_result result; + + if (deviceType == ma_device_type_duplex) { + return MA_INVALID_ARGS; + } + + if (deviceType == ma_device_type_playback) { + ma_atomic_exchange_32(&pDevice->wasapi.hasDefaultPlaybackDeviceChanged, MA_FALSE); + } + if (deviceType == ma_device_type_capture || deviceType == ma_device_type_loopback) { + ma_atomic_exchange_32(&pDevice->wasapi.hasDefaultCaptureDeviceChanged, MA_FALSE); + } + + + #ifdef MA_DEBUG_OUTPUT + printf("=== CHANGING DEVICE ===\n"); + #endif + + result = ma_device_reinit__wasapi(pDevice, deviceType); + if (result != MA_SUCCESS) { + return result; + } + + ma_device__post_init_setup(pDevice, deviceType); + + return MA_SUCCESS; +} + + +static ma_result ma_device_stop__wasapi(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + /* + We need to explicitly signal the capture event in loopback mode to ensure we return from WaitForSingleObject() when nothing is being played. When nothing + is being played, the event is never signalled internally by WASAPI which means we will deadlock when stopping the device. 
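+
+    A minimal sketch of the interaction (the handle name here is illustrative only):
+
+        WaitForSingleObject(hEventCapture, INFINITE);    <-- audio thread: blocks until the capture event is signaled.
+        SetEvent(hEventCapture);                         <-- stop path: wakes the audio thread when WASAPI will not.
+
+    Without the explicit SetEvent() the wait above never returns for a silent loopback stream.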
+ */ + if (pDevice->type == ma_device_type_loopback) { + SetEvent((HANDLE)pDevice->wasapi.hEventCapture); + } + + return MA_SUCCESS; +} + + +static ma_result ma_device_main_loop__wasapi(ma_device* pDevice) +{ + ma_result result; + HRESULT hr; + ma_bool32 exitLoop = MA_FALSE; + ma_uint32 framesWrittenToPlaybackDevice = 0; + ma_uint32 mappedDeviceBufferSizeInFramesCapture = 0; + ma_uint32 mappedDeviceBufferSizeInFramesPlayback = 0; + ma_uint32 mappedDeviceBufferFramesRemainingCapture = 0; + ma_uint32 mappedDeviceBufferFramesRemainingPlayback = 0; + BYTE* pMappedDeviceBufferCapture = NULL; + BYTE* pMappedDeviceBufferPlayback = NULL; + ma_uint32 bpfCaptureDevice = ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + ma_uint32 bpfPlaybackDevice = ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + ma_uint32 bpfCaptureClient = ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); + ma_uint32 bpfPlaybackClient = ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); + ma_uint8 inputDataInClientFormat[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 inputDataInClientFormatCap = sizeof(inputDataInClientFormat) / bpfCaptureClient; + ma_uint8 outputDataInClientFormat[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 outputDataInClientFormatCap = sizeof(outputDataInClientFormat) / bpfPlaybackClient; + ma_uint32 outputDataInClientFormatCount = 0; + ma_uint32 outputDataInClientFormatConsumed = 0; + ma_uint32 periodSizeInFramesCapture = 0; + + MA_ASSERT(pDevice != NULL); + + /* The capture device needs to be started immediately. */ + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex || pDevice->type == ma_device_type_loopback) { + periodSizeInFramesCapture = pDevice->capture.internalPeriodSizeInFrames; + + hr = ma_IAudioClient_Start((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture); + if (FAILED(hr)) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to start internal capture device.", ma_result_from_HRESULT(hr)); + } + ma_atomic_exchange_32(&pDevice->wasapi.isStartedCapture, MA_TRUE); + } + + while (ma_device__get_state(pDevice) == MA_STATE_STARTED && !exitLoop) { + /* We may need to reroute the device. */ + if (ma_device_is_reroute_required__wasapi(pDevice, ma_device_type_playback)) { + result = ma_device_reroute__wasapi(pDevice, ma_device_type_playback); + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + } + if (ma_device_is_reroute_required__wasapi(pDevice, ma_device_type_capture)) { + result = ma_device_reroute__wasapi(pDevice, (pDevice->type == ma_device_type_loopback) ? ma_device_type_loopback : ma_device_type_capture); + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + } + + switch (pDevice->type) + { + case ma_device_type_duplex: + { + ma_uint32 framesAvailableCapture; + ma_uint32 framesAvailablePlayback; + DWORD flagsCapture; /* Passed to IAudioCaptureClient_GetBuffer(). */ + + /* The process is to map the playback buffer and fill it as quickly as possible from input data. */ + if (pMappedDeviceBufferPlayback == NULL) { + /* WASAPI is weird with exclusive mode. You need to wait on the event _before_ querying the available frames. */ + if (pDevice->playback.shareMode == ma_share_mode_exclusive) { + if (WaitForSingleObject(pDevice->wasapi.hEventPlayback, INFINITE) == WAIT_FAILED) { + return MA_ERROR; /* Wait failed. 
*/ + } + } + + result = ma_device__get_available_frames__wasapi(pDevice, (ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, &framesAvailablePlayback); + if (result != MA_SUCCESS) { + return result; + } + + /*printf("TRACE 1: framesAvailablePlayback=%d\n", framesAvailablePlayback);*/ + + + /* In exclusive mode, the frame count needs to exactly match the value returned by GetCurrentPadding(). */ + if (pDevice->playback.shareMode != ma_share_mode_exclusive) { + if (framesAvailablePlayback > pDevice->wasapi.periodSizeInFramesPlayback) { + framesAvailablePlayback = pDevice->wasapi.periodSizeInFramesPlayback; + } + } + + /* If there's no frames available in the playback device we need to wait for more. */ + if (framesAvailablePlayback == 0) { + /* In exclusive mode we waited at the top. */ + if (pDevice->playback.shareMode != ma_share_mode_exclusive) { + if (WaitForSingleObject(pDevice->wasapi.hEventPlayback, INFINITE) == WAIT_FAILED) { + return MA_ERROR; /* Wait failed. */ + } + } + + continue; + } + + /* We're ready to map the playback device's buffer. We don't release this until it's been entirely filled. */ + hr = ma_IAudioRenderClient_GetBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, framesAvailablePlayback, &pMappedDeviceBufferPlayback); + if (FAILED(hr)) { + ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve internal buffer from playback device in preparation for writing to the device.", ma_result_from_HRESULT(hr)); + exitLoop = MA_TRUE; + break; + } + + mappedDeviceBufferSizeInFramesPlayback = framesAvailablePlayback; + mappedDeviceBufferFramesRemainingPlayback = framesAvailablePlayback; + } + + /* At this point we should have a buffer available for output. We need to keep writing input samples to it. */ + for (;;) { + /* Try grabbing some captured data if we haven't already got a mapped buffer. */ + if (pMappedDeviceBufferCapture == NULL) { + if (pDevice->capture.shareMode == ma_share_mode_shared) { + if (WaitForSingleObject(pDevice->wasapi.hEventCapture, INFINITE) == WAIT_FAILED) { + return MA_ERROR; /* Wait failed. */ + } + } + + result = ma_device__get_available_frames__wasapi(pDevice, (ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture, &framesAvailableCapture); + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + + /*printf("TRACE 2: framesAvailableCapture=%d\n", framesAvailableCapture);*/ + + /* Wait for more if nothing is available. */ + if (framesAvailableCapture == 0) { + /* In exclusive mode we waited at the top. */ + if (pDevice->capture.shareMode != ma_share_mode_shared) { + if (WaitForSingleObject(pDevice->wasapi.hEventCapture, INFINITE) == WAIT_FAILED) { + return MA_ERROR; /* Wait failed. */ + } + } + + continue; + } + + /* Getting here means there's data available for writing to the output device. */ + mappedDeviceBufferSizeInFramesCapture = ma_min(framesAvailableCapture, periodSizeInFramesCapture); + hr = ma_IAudioCaptureClient_GetBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, (BYTE**)&pMappedDeviceBufferCapture, &mappedDeviceBufferSizeInFramesCapture, &flagsCapture, NULL, NULL); + if (FAILED(hr)) { + ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve internal buffer from capture device in preparation for writing to the device.", ma_result_from_HRESULT(hr)); + exitLoop = MA_TRUE; + break; + } + + + /* Overrun detection. */ + if ((flagsCapture & MA_AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY) != 0) { + /* Glitched. Probably due to an overrun. 
*/ + #ifdef MA_DEBUG_OUTPUT + printf("[WASAPI] Data discontinuity (possible overrun). framesAvailableCapture=%d, mappedBufferSizeInFramesCapture=%d\n", framesAvailableCapture, mappedDeviceBufferSizeInFramesCapture); + #endif + + /* + Exeriment: If we get an overrun it probably means we're straddling the end of the buffer. In order to prevent a never-ending sequence of glitches let's experiment + by dropping every frame until we're left with only a single period. To do this we just keep retrieving and immediately releasing buffers until we're down to the + last period. + */ + if (framesAvailableCapture >= pDevice->wasapi.actualPeriodSizeInFramesCapture) { + #ifdef MA_DEBUG_OUTPUT + printf("[WASAPI] Synchronizing capture stream. "); + #endif + do + { + hr = ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, mappedDeviceBufferSizeInFramesCapture); + if (FAILED(hr)) { + break; + } + + framesAvailableCapture -= mappedDeviceBufferSizeInFramesCapture; + + if (framesAvailableCapture > 0) { + mappedDeviceBufferSizeInFramesCapture = ma_min(framesAvailableCapture, periodSizeInFramesCapture); + hr = ma_IAudioCaptureClient_GetBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, (BYTE**)&pMappedDeviceBufferCapture, &mappedDeviceBufferSizeInFramesCapture, &flagsCapture, NULL, NULL); + if (FAILED(hr)) { + ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve internal buffer from capture device in preparation for writing to the device.", ma_result_from_HRESULT(hr)); + exitLoop = MA_TRUE; + break; + } + } else { + pMappedDeviceBufferCapture = NULL; + mappedDeviceBufferSizeInFramesCapture = 0; + } + } while (framesAvailableCapture > periodSizeInFramesCapture); + #ifdef MA_DEBUG_OUTPUT + printf("framesAvailableCapture=%d, mappedBufferSizeInFramesCapture=%d\n", framesAvailableCapture, mappedDeviceBufferSizeInFramesCapture); + #endif + } + } else { + #ifdef MA_DEBUG_OUTPUT + if (flagsCapture != 0) { + printf("[WASAPI] Capture Flags: %d\n", flagsCapture); + } + #endif + } + + mappedDeviceBufferFramesRemainingCapture = mappedDeviceBufferSizeInFramesCapture; + } + + + /* At this point we should have both input and output data available. We now need to convert the data and post it to the client. */ + for (;;) { + BYTE* pRunningDeviceBufferCapture; + BYTE* pRunningDeviceBufferPlayback; + ma_uint32 framesToProcess; + ma_uint32 framesProcessed; + + pRunningDeviceBufferCapture = pMappedDeviceBufferCapture + ((mappedDeviceBufferSizeInFramesCapture - mappedDeviceBufferFramesRemainingCapture ) * bpfCaptureDevice); + pRunningDeviceBufferPlayback = pMappedDeviceBufferPlayback + ((mappedDeviceBufferSizeInFramesPlayback - mappedDeviceBufferFramesRemainingPlayback) * bpfPlaybackDevice); + + /* There may be some data sitting in the converter that needs to be processed first. Once this is exhaused, run the data callback again. 
*/ + if (!pDevice->playback.converter.isPassthrough && outputDataInClientFormatConsumed < outputDataInClientFormatCount) { + ma_uint64 convertedFrameCountClient = (outputDataInClientFormatCount - outputDataInClientFormatConsumed); + ma_uint64 convertedFrameCountDevice = mappedDeviceBufferFramesRemainingPlayback; + void* pConvertedFramesClient = outputDataInClientFormat + (outputDataInClientFormatConsumed * bpfPlaybackClient); + void* pConvertedFramesDevice = pRunningDeviceBufferPlayback; + result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, pConvertedFramesClient, &convertedFrameCountClient, pConvertedFramesDevice, &convertedFrameCountDevice); + if (result != MA_SUCCESS) { + break; + } + + outputDataInClientFormatConsumed += (ma_uint32)convertedFrameCountClient; /* Safe cast. */ + mappedDeviceBufferFramesRemainingPlayback -= (ma_uint32)convertedFrameCountDevice; /* Safe cast. */ + + if (mappedDeviceBufferFramesRemainingPlayback == 0) { + break; + } + } + + /* + Getting here means we need to fire the callback. If format conversion is unnecessary, we can optimize this by passing the pointers to the internal + buffers directly to the callback. + */ + if (pDevice->capture.converter.isPassthrough && pDevice->playback.converter.isPassthrough) { + /* Optimal path. We can pass mapped pointers directly to the callback. */ + framesToProcess = ma_min(mappedDeviceBufferFramesRemainingCapture, mappedDeviceBufferFramesRemainingPlayback); + framesProcessed = framesToProcess; + + ma_device__on_data(pDevice, pRunningDeviceBufferPlayback, pRunningDeviceBufferCapture, framesToProcess); + + mappedDeviceBufferFramesRemainingCapture -= framesProcessed; + mappedDeviceBufferFramesRemainingPlayback -= framesProcessed; + + if (mappedDeviceBufferFramesRemainingCapture == 0) { + break; /* Exhausted input data. */ + } + if (mappedDeviceBufferFramesRemainingPlayback == 0) { + break; /* Exhausted output data. */ + } + } else if (pDevice->capture.converter.isPassthrough) { + /* The input buffer is a passthrough, but the playback buffer requires a conversion. */ + framesToProcess = ma_min(mappedDeviceBufferFramesRemainingCapture, outputDataInClientFormatCap); + framesProcessed = framesToProcess; + + ma_device__on_data(pDevice, outputDataInClientFormat, pRunningDeviceBufferCapture, framesToProcess); + outputDataInClientFormatCount = framesProcessed; + outputDataInClientFormatConsumed = 0; + + mappedDeviceBufferFramesRemainingCapture -= framesProcessed; + if (mappedDeviceBufferFramesRemainingCapture == 0) { + break; /* Exhausted input data. */ + } + } else if (pDevice->playback.converter.isPassthrough) { + /* The input buffer requires conversion, the playback buffer is passthrough. */ + ma_uint64 capturedDeviceFramesToProcess = mappedDeviceBufferFramesRemainingCapture; + ma_uint64 capturedClientFramesToProcess = ma_min(inputDataInClientFormatCap, mappedDeviceBufferFramesRemainingPlayback); + + result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningDeviceBufferCapture, &capturedDeviceFramesToProcess, inputDataInClientFormat, &capturedClientFramesToProcess); + if (result != MA_SUCCESS) { + break; + } + + if (capturedClientFramesToProcess == 0) { + break; + } + + ma_device__on_data(pDevice, pRunningDeviceBufferPlayback, inputDataInClientFormat, (ma_uint32)capturedClientFramesToProcess); /* Safe cast. 
*/ + + mappedDeviceBufferFramesRemainingCapture -= (ma_uint32)capturedDeviceFramesToProcess; + mappedDeviceBufferFramesRemainingPlayback -= (ma_uint32)capturedClientFramesToProcess; + } else { + ma_uint64 capturedDeviceFramesToProcess = mappedDeviceBufferFramesRemainingCapture; + ma_uint64 capturedClientFramesToProcess = ma_min(inputDataInClientFormatCap, outputDataInClientFormatCap); + + result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningDeviceBufferCapture, &capturedDeviceFramesToProcess, inputDataInClientFormat, &capturedClientFramesToProcess); + if (result != MA_SUCCESS) { + break; + } + + if (capturedClientFramesToProcess == 0) { + break; + } + + ma_device__on_data(pDevice, outputDataInClientFormat, inputDataInClientFormat, (ma_uint32)capturedClientFramesToProcess); + + mappedDeviceBufferFramesRemainingCapture -= (ma_uint32)capturedDeviceFramesToProcess; + outputDataInClientFormatCount = (ma_uint32)capturedClientFramesToProcess; + outputDataInClientFormatConsumed = 0; + } + } + + + /* If at this point we've run out of capture data we need to release the buffer. */ + if (mappedDeviceBufferFramesRemainingCapture == 0 && pMappedDeviceBufferCapture != NULL) { + hr = ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, mappedDeviceBufferSizeInFramesCapture); + if (FAILED(hr)) { + ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to release internal buffer from capture device after reading from the device.", ma_result_from_HRESULT(hr)); + exitLoop = MA_TRUE; + break; + } + + /*printf("TRACE: Released capture buffer\n");*/ + + pMappedDeviceBufferCapture = NULL; + mappedDeviceBufferFramesRemainingCapture = 0; + mappedDeviceBufferSizeInFramesCapture = 0; + } + + /* Get out of this loop if we're run out of room in the playback buffer. */ + if (mappedDeviceBufferFramesRemainingPlayback == 0) { + break; + } + } + + + /* If at this point we've run out of data we need to release the buffer. */ + if (mappedDeviceBufferFramesRemainingPlayback == 0 && pMappedDeviceBufferPlayback != NULL) { + hr = ma_IAudioRenderClient_ReleaseBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, mappedDeviceBufferSizeInFramesPlayback, 0); + if (FAILED(hr)) { + ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to release internal buffer from playback device after writing to the device.", ma_result_from_HRESULT(hr)); + exitLoop = MA_TRUE; + break; + } + + /*printf("TRACE: Released playback buffer\n");*/ + framesWrittenToPlaybackDevice += mappedDeviceBufferSizeInFramesPlayback; + + pMappedDeviceBufferPlayback = NULL; + mappedDeviceBufferFramesRemainingPlayback = 0; + mappedDeviceBufferSizeInFramesPlayback = 0; + } + + if (!pDevice->wasapi.isStartedPlayback) { + ma_uint32 startThreshold = pDevice->playback.internalPeriodSizeInFrames * 1; + + /* Prevent a deadlock. If we don't clamp against the actual buffer size we'll never end up starting the playback device which will result in a deadlock. 
*/ + if (startThreshold > pDevice->wasapi.actualPeriodSizeInFramesPlayback) { + startThreshold = pDevice->wasapi.actualPeriodSizeInFramesPlayback; + } + + if (pDevice->playback.shareMode == ma_share_mode_exclusive || framesWrittenToPlaybackDevice >= startThreshold) { + hr = ma_IAudioClient_Start((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback); + if (FAILED(hr)) { + ma_IAudioClient_Stop((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture); + ma_IAudioClient_Reset((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to start internal playback device.", ma_result_from_HRESULT(hr)); + } + ma_atomic_exchange_32(&pDevice->wasapi.isStartedPlayback, MA_TRUE); + } + } + } break; + + + + case ma_device_type_capture: + case ma_device_type_loopback: + { + ma_uint32 framesAvailableCapture; + DWORD flagsCapture; /* Passed to IAudioCaptureClient_GetBuffer(). */ + + /* Wait for data to become available first. */ + if (WaitForSingleObject(pDevice->wasapi.hEventCapture, INFINITE) == WAIT_FAILED) { + exitLoop = MA_TRUE; + break; /* Wait failed. */ + } + + /* See how many frames are available. Since we waited at the top, I don't think this should ever return 0. I'm checking for this anyway. */ + result = ma_device__get_available_frames__wasapi(pDevice, (ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture, &framesAvailableCapture); + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + + if (framesAvailableCapture < pDevice->wasapi.periodSizeInFramesCapture) { + continue; /* Nothing available. Keep waiting. */ + } + + /* Map the data buffer in preparation for sending to the client. */ + mappedDeviceBufferSizeInFramesCapture = framesAvailableCapture; + hr = ma_IAudioCaptureClient_GetBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, (BYTE**)&pMappedDeviceBufferCapture, &mappedDeviceBufferSizeInFramesCapture, &flagsCapture, NULL, NULL); + if (FAILED(hr)) { + ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve internal buffer from capture device in preparation for writing to the device.", ma_result_from_HRESULT(hr)); + exitLoop = MA_TRUE; + break; + } + + /* Overrun detection. */ + if ((flagsCapture & MA_AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY) != 0) { + /* Glitched. Probably due to an overrun. */ + #ifdef MA_DEBUG_OUTPUT + printf("[WASAPI] Data discontinuity (possible overrun). framesAvailableCapture=%d, mappedBufferSizeInFramesCapture=%d\n", framesAvailableCapture, mappedDeviceBufferSizeInFramesCapture); + #endif + + /* + Exeriment: If we get an overrun it probably means we're straddling the end of the buffer. In order to prevent a never-ending sequence of glitches let's experiment + by dropping every frame until we're left with only a single period. To do this we just keep retrieving and immediately releasing buffers until we're down to the + last period. + */ + if (framesAvailableCapture >= pDevice->wasapi.actualPeriodSizeInFramesCapture) { + #ifdef MA_DEBUG_OUTPUT + printf("[WASAPI] Synchronizing capture stream. 
"); + #endif + do + { + hr = ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, mappedDeviceBufferSizeInFramesCapture); + if (FAILED(hr)) { + break; + } + + framesAvailableCapture -= mappedDeviceBufferSizeInFramesCapture; + + if (framesAvailableCapture > 0) { + mappedDeviceBufferSizeInFramesCapture = ma_min(framesAvailableCapture, periodSizeInFramesCapture); + hr = ma_IAudioCaptureClient_GetBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, (BYTE**)&pMappedDeviceBufferCapture, &mappedDeviceBufferSizeInFramesCapture, &flagsCapture, NULL, NULL); + if (FAILED(hr)) { + ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve internal buffer from capture device in preparation for writing to the device.", ma_result_from_HRESULT(hr)); + exitLoop = MA_TRUE; + break; + } + } else { + pMappedDeviceBufferCapture = NULL; + mappedDeviceBufferSizeInFramesCapture = 0; + } + } while (framesAvailableCapture > periodSizeInFramesCapture); + #ifdef MA_DEBUG_OUTPUT + printf("framesAvailableCapture=%d, mappedBufferSizeInFramesCapture=%d\n", framesAvailableCapture, mappedDeviceBufferSizeInFramesCapture); + #endif + } + } else { + #ifdef MA_DEBUG_OUTPUT + if (flagsCapture != 0) { + printf("[WASAPI] Capture Flags: %d\n", flagsCapture); + } + #endif + } + + /* We should have a buffer at this point, but let's just do a sanity check anyway. */ + if (mappedDeviceBufferSizeInFramesCapture > 0 && pMappedDeviceBufferCapture != NULL) { + ma_device__send_frames_to_client(pDevice, mappedDeviceBufferSizeInFramesCapture, pMappedDeviceBufferCapture); + + /* At this point we're done with the buffer. */ + hr = ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, mappedDeviceBufferSizeInFramesCapture); + pMappedDeviceBufferCapture = NULL; /* <-- Important. Not doing this can result in an error once we leave this loop because it will use this to know whether or not a final ReleaseBuffer() needs to be called. */ + mappedDeviceBufferSizeInFramesCapture = 0; + if (FAILED(hr)) { + ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to release internal buffer from capture device after reading from the device.", ma_result_from_HRESULT(hr)); + exitLoop = MA_TRUE; + break; + } + } + } break; + + + + case ma_device_type_playback: + { + ma_uint32 framesAvailablePlayback; + + /* Wait for space to become available first. */ + if (WaitForSingleObject(pDevice->wasapi.hEventPlayback, INFINITE) == WAIT_FAILED) { + exitLoop = MA_TRUE; + break; /* Wait failed. */ + } + + /* Check how much space is available. If this returns 0 we just keep waiting. */ + result = ma_device__get_available_frames__wasapi(pDevice, (ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, &framesAvailablePlayback); + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + + if (framesAvailablePlayback < pDevice->wasapi.periodSizeInFramesPlayback) { + continue; /* No space available. */ + } + + /* Map a the data buffer in preparation for the callback. */ + hr = ma_IAudioRenderClient_GetBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, framesAvailablePlayback, &pMappedDeviceBufferPlayback); + if (FAILED(hr)) { + ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve internal buffer from playback device in preparation for writing to the device.", ma_result_from_HRESULT(hr)); + exitLoop = MA_TRUE; + break; + } + + /* We should have a buffer at this point. 
*/ + ma_device__read_frames_from_client(pDevice, framesAvailablePlayback, pMappedDeviceBufferPlayback); + + /* At this point we're done writing to the device and we just need to release the buffer. */ + hr = ma_IAudioRenderClient_ReleaseBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, framesAvailablePlayback, 0); + pMappedDeviceBufferPlayback = NULL; /* <-- Important. Not doing this can result in an error once we leave this loop because it will use this to know whether or not a final ReleaseBuffer() needs to be called. */ + mappedDeviceBufferSizeInFramesPlayback = 0; + + if (FAILED(hr)) { + ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to release internal buffer from playback device after writing to the device.", ma_result_from_HRESULT(hr)); + exitLoop = MA_TRUE; + break; + } + + framesWrittenToPlaybackDevice += framesAvailablePlayback; + if (!pDevice->wasapi.isStartedPlayback) { + if (pDevice->playback.shareMode == ma_share_mode_exclusive || framesWrittenToPlaybackDevice >= pDevice->playback.internalPeriodSizeInFrames*1) { + hr = ma_IAudioClient_Start((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback); + if (FAILED(hr)) { + ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to start internal playback device.", ma_result_from_HRESULT(hr)); + exitLoop = MA_TRUE; + break; + } + ma_atomic_exchange_32(&pDevice->wasapi.isStartedPlayback, MA_TRUE); + } + } + } break; + + default: return MA_INVALID_ARGS; + } + } + + /* Here is where the device needs to be stopped. */ + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex || pDevice->type == ma_device_type_loopback) { + /* Any mapped buffers need to be released. */ + if (pMappedDeviceBufferCapture != NULL) { + hr = ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, mappedDeviceBufferSizeInFramesCapture); + } + + hr = ma_IAudioClient_Stop((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture); + if (FAILED(hr)) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to stop internal capture device.", ma_result_from_HRESULT(hr)); + } + + /* The audio client needs to be reset otherwise restarting will fail. */ + hr = ma_IAudioClient_Reset((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture); + if (FAILED(hr)) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to reset internal capture device.", ma_result_from_HRESULT(hr)); + } + + ma_atomic_exchange_32(&pDevice->wasapi.isStartedCapture, MA_FALSE); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + /* Any mapped buffers need to be released. */ + if (pMappedDeviceBufferPlayback != NULL) { + hr = ma_IAudioRenderClient_ReleaseBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, mappedDeviceBufferSizeInFramesPlayback, 0); + } + + /* + The buffer needs to be drained before stopping the device. Not doing this will result in the last few frames not getting output to + the speakers. This is a problem for very short sounds because it'll result in a significant portion of it not getting played. 
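+
+        As a rough way to picture the loop below: for shared mode the writable space reported for the playback client is
+
+            framesAvailable = actualPeriodSizeInFramesPlayback - paddingFrames;    <-- paddingFrames comes from GetCurrentPadding().
+
+        so the stream is considered drained once framesAvailable climbs back up to the full buffer size, which is the condition the
+        loop waits on between signals of the playback event.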
+ */ + if (pDevice->wasapi.isStartedPlayback) { + if (pDevice->playback.shareMode == ma_share_mode_exclusive) { + WaitForSingleObject(pDevice->wasapi.hEventPlayback, INFINITE); + } else { + ma_uint32 prevFramesAvaialablePlayback = (ma_uint32)-1; + ma_uint32 framesAvailablePlayback; + for (;;) { + result = ma_device__get_available_frames__wasapi(pDevice, (ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, &framesAvailablePlayback); + if (result != MA_SUCCESS) { + break; + } + + if (framesAvailablePlayback >= pDevice->wasapi.actualPeriodSizeInFramesPlayback) { + break; + } + + /* + Just a safety check to avoid an infinite loop. If this iteration results in a situation where the number of available frames + has not changed, get out of the loop. I don't think this should ever happen, but I think it's nice to have just in case. + */ + if (framesAvailablePlayback == prevFramesAvaialablePlayback) { + break; + } + prevFramesAvaialablePlayback = framesAvailablePlayback; + + WaitForSingleObject(pDevice->wasapi.hEventPlayback, INFINITE); + ResetEvent(pDevice->wasapi.hEventPlayback); /* Manual reset. */ + } + } + } + + hr = ma_IAudioClient_Stop((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback); + if (FAILED(hr)) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to stop internal playback device.", ma_result_from_HRESULT(hr)); + } + + /* The audio client needs to be reset otherwise restarting will fail. */ + hr = ma_IAudioClient_Reset((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback); + if (FAILED(hr)) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to reset internal playback device.", ma_result_from_HRESULT(hr)); + } + + ma_atomic_exchange_32(&pDevice->wasapi.isStartedPlayback, MA_FALSE); + } + + return MA_SUCCESS; +} + +static ma_result ma_context_uninit__wasapi(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_wasapi); + (void)pContext; + + return MA_SUCCESS; +} + +static ma_result ma_context_init__wasapi(const ma_context_config* pConfig, ma_context* pContext) +{ + ma_result result = MA_SUCCESS; + + MA_ASSERT(pContext != NULL); + + (void)pConfig; + +#ifdef MA_WIN32_DESKTOP + /* + WASAPI is only supported in Vista SP1 and newer. The reason for SP1 and not the base version of Vista is that event-driven + exclusive mode does not work until SP1. + + Unfortunately older compilers don't define these functions so we need to dynamically load them in order to avoid a lin error. 
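+
+    This is the usual run-time binding pattern. A minimal sketch with the raw Win32 calls (the code below goes through the
+    ma_dlopen()/ma_dlsym() wrappers instead):
+
+        HMODULE hKernel32 = LoadLibraryW(L"kernel32.dll");
+        if (hKernel32 != NULL) {
+            FARPROC pProc = GetProcAddress(hKernel32, "VerifyVersionInfoW");
+            ... cast pProc to the correct function pointer type before calling it ...
+            FreeLibrary(hKernel32);
+        }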
+ */ + { + ma_OSVERSIONINFOEXW osvi; + ma_handle kernel32DLL; + ma_PFNVerifyVersionInfoW _VerifyVersionInfoW; + ma_PFNVerSetConditionMask _VerSetConditionMask; + + kernel32DLL = ma_dlopen(pContext, "kernel32.dll"); + if (kernel32DLL == NULL) { + return MA_NO_BACKEND; + } + + _VerifyVersionInfoW = (ma_PFNVerifyVersionInfoW)ma_dlsym(pContext, kernel32DLL, "VerifyVersionInfoW"); + _VerSetConditionMask = (ma_PFNVerSetConditionMask)ma_dlsym(pContext, kernel32DLL, "VerSetConditionMask"); + if (_VerifyVersionInfoW == NULL || _VerSetConditionMask == NULL) { + ma_dlclose(pContext, kernel32DLL); + return MA_NO_BACKEND; + } + + MA_ZERO_OBJECT(&osvi); + osvi.dwOSVersionInfoSize = sizeof(osvi); + osvi.dwMajorVersion = ((MA_WIN32_WINNT_VISTA >> 8) & 0xFF); + osvi.dwMinorVersion = ((MA_WIN32_WINNT_VISTA >> 0) & 0xFF); + osvi.wServicePackMajor = 1; + if (_VerifyVersionInfoW(&osvi, MA_VER_MAJORVERSION | MA_VER_MINORVERSION | MA_VER_SERVICEPACKMAJOR, _VerSetConditionMask(_VerSetConditionMask(_VerSetConditionMask(0, MA_VER_MAJORVERSION, MA_VER_GREATER_EQUAL), MA_VER_MINORVERSION, MA_VER_GREATER_EQUAL), MA_VER_SERVICEPACKMAJOR, MA_VER_GREATER_EQUAL))) { + result = MA_SUCCESS; + } else { + result = MA_NO_BACKEND; + } + + ma_dlclose(pContext, kernel32DLL); + } +#endif + + if (result != MA_SUCCESS) { + return result; + } + + pContext->onUninit = ma_context_uninit__wasapi; + pContext->onDeviceIDEqual = ma_context_is_device_id_equal__wasapi; + pContext->onEnumDevices = ma_context_enumerate_devices__wasapi; + pContext->onGetDeviceInfo = ma_context_get_device_info__wasapi; + pContext->onDeviceInit = ma_device_init__wasapi; + pContext->onDeviceUninit = ma_device_uninit__wasapi; + pContext->onDeviceStart = NULL; /* Not used. Started in onDeviceMainLoop. */ + pContext->onDeviceStop = ma_device_stop__wasapi; /* Required to ensure the capture event is signalled when stopping a loopback device while nothing is playing. */ + pContext->onDeviceMainLoop = ma_device_main_loop__wasapi; + + return result; +} +#endif + +/****************************************************************************** + +DirectSound Backend + +******************************************************************************/ +#ifdef MA_HAS_DSOUND +/*#include */ + +static const GUID MA_GUID_IID_DirectSoundNotify = {0xb0210783, 0x89cd, 0x11d0, {0xaf, 0x08, 0x00, 0xa0, 0xc9, 0x25, 0xcd, 0x16}}; + +/* miniaudio only uses priority or exclusive modes. 
*/ +#define MA_DSSCL_NORMAL 1 +#define MA_DSSCL_PRIORITY 2 +#define MA_DSSCL_EXCLUSIVE 3 +#define MA_DSSCL_WRITEPRIMARY 4 + +#define MA_DSCAPS_PRIMARYMONO 0x00000001 +#define MA_DSCAPS_PRIMARYSTEREO 0x00000002 +#define MA_DSCAPS_PRIMARY8BIT 0x00000004 +#define MA_DSCAPS_PRIMARY16BIT 0x00000008 +#define MA_DSCAPS_CONTINUOUSRATE 0x00000010 +#define MA_DSCAPS_EMULDRIVER 0x00000020 +#define MA_DSCAPS_CERTIFIED 0x00000040 +#define MA_DSCAPS_SECONDARYMONO 0x00000100 +#define MA_DSCAPS_SECONDARYSTEREO 0x00000200 +#define MA_DSCAPS_SECONDARY8BIT 0x00000400 +#define MA_DSCAPS_SECONDARY16BIT 0x00000800 + +#define MA_DSBCAPS_PRIMARYBUFFER 0x00000001 +#define MA_DSBCAPS_STATIC 0x00000002 +#define MA_DSBCAPS_LOCHARDWARE 0x00000004 +#define MA_DSBCAPS_LOCSOFTWARE 0x00000008 +#define MA_DSBCAPS_CTRL3D 0x00000010 +#define MA_DSBCAPS_CTRLFREQUENCY 0x00000020 +#define MA_DSBCAPS_CTRLPAN 0x00000040 +#define MA_DSBCAPS_CTRLVOLUME 0x00000080 +#define MA_DSBCAPS_CTRLPOSITIONNOTIFY 0x00000100 +#define MA_DSBCAPS_CTRLFX 0x00000200 +#define MA_DSBCAPS_STICKYFOCUS 0x00004000 +#define MA_DSBCAPS_GLOBALFOCUS 0x00008000 +#define MA_DSBCAPS_GETCURRENTPOSITION2 0x00010000 +#define MA_DSBCAPS_MUTE3DATMAXDISTANCE 0x00020000 +#define MA_DSBCAPS_LOCDEFER 0x00040000 +#define MA_DSBCAPS_TRUEPLAYPOSITION 0x00080000 + +#define MA_DSBPLAY_LOOPING 0x00000001 +#define MA_DSBPLAY_LOCHARDWARE 0x00000002 +#define MA_DSBPLAY_LOCSOFTWARE 0x00000004 +#define MA_DSBPLAY_TERMINATEBY_TIME 0x00000008 +#define MA_DSBPLAY_TERMINATEBY_DISTANCE 0x00000010 +#define MA_DSBPLAY_TERMINATEBY_PRIORITY 0x00000020 + +#define MA_DSCBSTART_LOOPING 0x00000001 + +typedef struct +{ + DWORD dwSize; + DWORD dwFlags; + DWORD dwBufferBytes; + DWORD dwReserved; + WAVEFORMATEX* lpwfxFormat; + GUID guid3DAlgorithm; +} MA_DSBUFFERDESC; + +typedef struct +{ + DWORD dwSize; + DWORD dwFlags; + DWORD dwBufferBytes; + DWORD dwReserved; + WAVEFORMATEX* lpwfxFormat; + DWORD dwFXCount; + void* lpDSCFXDesc; /* <-- miniaudio doesn't use this, so set to void*. 
*/ +} MA_DSCBUFFERDESC; + +typedef struct +{ + DWORD dwSize; + DWORD dwFlags; + DWORD dwMinSecondarySampleRate; + DWORD dwMaxSecondarySampleRate; + DWORD dwPrimaryBuffers; + DWORD dwMaxHwMixingAllBuffers; + DWORD dwMaxHwMixingStaticBuffers; + DWORD dwMaxHwMixingStreamingBuffers; + DWORD dwFreeHwMixingAllBuffers; + DWORD dwFreeHwMixingStaticBuffers; + DWORD dwFreeHwMixingStreamingBuffers; + DWORD dwMaxHw3DAllBuffers; + DWORD dwMaxHw3DStaticBuffers; + DWORD dwMaxHw3DStreamingBuffers; + DWORD dwFreeHw3DAllBuffers; + DWORD dwFreeHw3DStaticBuffers; + DWORD dwFreeHw3DStreamingBuffers; + DWORD dwTotalHwMemBytes; + DWORD dwFreeHwMemBytes; + DWORD dwMaxContigFreeHwMemBytes; + DWORD dwUnlockTransferRateHwBuffers; + DWORD dwPlayCpuOverheadSwBuffers; + DWORD dwReserved1; + DWORD dwReserved2; +} MA_DSCAPS; + +typedef struct +{ + DWORD dwSize; + DWORD dwFlags; + DWORD dwBufferBytes; + DWORD dwUnlockTransferRate; + DWORD dwPlayCpuOverhead; +} MA_DSBCAPS; + +typedef struct +{ + DWORD dwSize; + DWORD dwFlags; + DWORD dwFormats; + DWORD dwChannels; +} MA_DSCCAPS; + +typedef struct +{ + DWORD dwSize; + DWORD dwFlags; + DWORD dwBufferBytes; + DWORD dwReserved; +} MA_DSCBCAPS; + +typedef struct +{ + DWORD dwOffset; + HANDLE hEventNotify; +} MA_DSBPOSITIONNOTIFY; + +typedef struct ma_IDirectSound ma_IDirectSound; +typedef struct ma_IDirectSoundBuffer ma_IDirectSoundBuffer; +typedef struct ma_IDirectSoundCapture ma_IDirectSoundCapture; +typedef struct ma_IDirectSoundCaptureBuffer ma_IDirectSoundCaptureBuffer; +typedef struct ma_IDirectSoundNotify ma_IDirectSoundNotify; + + +/* +COM objects. The way these work is that you have a vtable (a list of function pointers, kind of +like how C++ works internally), and then you have a structure with a single member, which is a +pointer to the vtable. The vtable is where the methods of the object are defined. Methods need +to be in a specific order, and parent classes need to have their methods declared first. 
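+
+A stripped down sketch of the pattern used for every interface below (ma_IThing is a made-up name, purely for illustration):
+
+    typedef struct ma_IThing ma_IThing;
+    typedef struct
+    {
+        HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IThing* pThis, const IID* const riid, void** ppObject);
+        ULONG   (STDMETHODCALLTYPE * AddRef)        (ma_IThing* pThis);
+        ULONG   (STDMETHODCALLTYPE * Release)       (ma_IThing* pThis);
+        HRESULT (STDMETHODCALLTYPE * DoSomething)   (ma_IThing* pThis, DWORD dwFlags);    <-- interface-specific methods follow the IUnknown ones, in declared order.
+    } ma_IThingVtbl;
+
+    struct ma_IThing
+    {
+        ma_IThingVtbl* lpVtbl;
+    };
+
+Calling a method goes through the vtable, with the object itself passed explicitly as the first argument:
+
+    pThing->lpVtbl->DoSomething(pThing, 0);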
+*/ + +/* IDirectSound */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IDirectSound* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IDirectSound* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IDirectSound* pThis); + + /* IDirectSound */ + HRESULT (STDMETHODCALLTYPE * CreateSoundBuffer) (ma_IDirectSound* pThis, const MA_DSBUFFERDESC* pDSBufferDesc, ma_IDirectSoundBuffer** ppDSBuffer, void* pUnkOuter); + HRESULT (STDMETHODCALLTYPE * GetCaps) (ma_IDirectSound* pThis, MA_DSCAPS* pDSCaps); + HRESULT (STDMETHODCALLTYPE * DuplicateSoundBuffer)(ma_IDirectSound* pThis, ma_IDirectSoundBuffer* pDSBufferOriginal, ma_IDirectSoundBuffer** ppDSBufferDuplicate); + HRESULT (STDMETHODCALLTYPE * SetCooperativeLevel) (ma_IDirectSound* pThis, HWND hwnd, DWORD dwLevel); + HRESULT (STDMETHODCALLTYPE * Compact) (ma_IDirectSound* pThis); + HRESULT (STDMETHODCALLTYPE * GetSpeakerConfig) (ma_IDirectSound* pThis, DWORD* pSpeakerConfig); + HRESULT (STDMETHODCALLTYPE * SetSpeakerConfig) (ma_IDirectSound* pThis, DWORD dwSpeakerConfig); + HRESULT (STDMETHODCALLTYPE * Initialize) (ma_IDirectSound* pThis, const GUID* pGuidDevice); +} ma_IDirectSoundVtbl; +struct ma_IDirectSound +{ + ma_IDirectSoundVtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IDirectSound_QueryInterface(ma_IDirectSound* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IDirectSound_AddRef(ma_IDirectSound* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG ma_IDirectSound_Release(ma_IDirectSound* pThis) { return pThis->lpVtbl->Release(pThis); } +static MA_INLINE HRESULT ma_IDirectSound_CreateSoundBuffer(ma_IDirectSound* pThis, const MA_DSBUFFERDESC* pDSBufferDesc, ma_IDirectSoundBuffer** ppDSBuffer, void* pUnkOuter) { return pThis->lpVtbl->CreateSoundBuffer(pThis, pDSBufferDesc, ppDSBuffer, pUnkOuter); } +static MA_INLINE HRESULT ma_IDirectSound_GetCaps(ma_IDirectSound* pThis, MA_DSCAPS* pDSCaps) { return pThis->lpVtbl->GetCaps(pThis, pDSCaps); } +static MA_INLINE HRESULT ma_IDirectSound_DuplicateSoundBuffer(ma_IDirectSound* pThis, ma_IDirectSoundBuffer* pDSBufferOriginal, ma_IDirectSoundBuffer** ppDSBufferDuplicate) { return pThis->lpVtbl->DuplicateSoundBuffer(pThis, pDSBufferOriginal, ppDSBufferDuplicate); } +static MA_INLINE HRESULT ma_IDirectSound_SetCooperativeLevel(ma_IDirectSound* pThis, HWND hwnd, DWORD dwLevel) { return pThis->lpVtbl->SetCooperativeLevel(pThis, hwnd, dwLevel); } +static MA_INLINE HRESULT ma_IDirectSound_Compact(ma_IDirectSound* pThis) { return pThis->lpVtbl->Compact(pThis); } +static MA_INLINE HRESULT ma_IDirectSound_GetSpeakerConfig(ma_IDirectSound* pThis, DWORD* pSpeakerConfig) { return pThis->lpVtbl->GetSpeakerConfig(pThis, pSpeakerConfig); } +static MA_INLINE HRESULT ma_IDirectSound_SetSpeakerConfig(ma_IDirectSound* pThis, DWORD dwSpeakerConfig) { return pThis->lpVtbl->SetSpeakerConfig(pThis, dwSpeakerConfig); } +static MA_INLINE HRESULT ma_IDirectSound_Initialize(ma_IDirectSound* pThis, const GUID* pGuidDevice) { return pThis->lpVtbl->Initialize(pThis, pGuidDevice); } + + +/* IDirectSoundBuffer */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IDirectSoundBuffer* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IDirectSoundBuffer* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IDirectSoundBuffer* pThis); + + /* IDirectSoundBuffer */ + HRESULT 
(STDMETHODCALLTYPE * GetCaps) (ma_IDirectSoundBuffer* pThis, MA_DSBCAPS* pDSBufferCaps); + HRESULT (STDMETHODCALLTYPE * GetCurrentPosition)(ma_IDirectSoundBuffer* pThis, DWORD* pCurrentPlayCursor, DWORD* pCurrentWriteCursor); + HRESULT (STDMETHODCALLTYPE * GetFormat) (ma_IDirectSoundBuffer* pThis, WAVEFORMATEX* pFormat, DWORD dwSizeAllocated, DWORD* pSizeWritten); + HRESULT (STDMETHODCALLTYPE * GetVolume) (ma_IDirectSoundBuffer* pThis, LONG* pVolume); + HRESULT (STDMETHODCALLTYPE * GetPan) (ma_IDirectSoundBuffer* pThis, LONG* pPan); + HRESULT (STDMETHODCALLTYPE * GetFrequency) (ma_IDirectSoundBuffer* pThis, DWORD* pFrequency); + HRESULT (STDMETHODCALLTYPE * GetStatus) (ma_IDirectSoundBuffer* pThis, DWORD* pStatus); + HRESULT (STDMETHODCALLTYPE * Initialize) (ma_IDirectSoundBuffer* pThis, ma_IDirectSound* pDirectSound, const MA_DSBUFFERDESC* pDSBufferDesc); + HRESULT (STDMETHODCALLTYPE * Lock) (ma_IDirectSoundBuffer* pThis, DWORD dwOffset, DWORD dwBytes, void** ppAudioPtr1, DWORD* pAudioBytes1, void** ppAudioPtr2, DWORD* pAudioBytes2, DWORD dwFlags); + HRESULT (STDMETHODCALLTYPE * Play) (ma_IDirectSoundBuffer* pThis, DWORD dwReserved1, DWORD dwPriority, DWORD dwFlags); + HRESULT (STDMETHODCALLTYPE * SetCurrentPosition)(ma_IDirectSoundBuffer* pThis, DWORD dwNewPosition); + HRESULT (STDMETHODCALLTYPE * SetFormat) (ma_IDirectSoundBuffer* pThis, const WAVEFORMATEX* pFormat); + HRESULT (STDMETHODCALLTYPE * SetVolume) (ma_IDirectSoundBuffer* pThis, LONG volume); + HRESULT (STDMETHODCALLTYPE * SetPan) (ma_IDirectSoundBuffer* pThis, LONG pan); + HRESULT (STDMETHODCALLTYPE * SetFrequency) (ma_IDirectSoundBuffer* pThis, DWORD dwFrequency); + HRESULT (STDMETHODCALLTYPE * Stop) (ma_IDirectSoundBuffer* pThis); + HRESULT (STDMETHODCALLTYPE * Unlock) (ma_IDirectSoundBuffer* pThis, void* pAudioPtr1, DWORD dwAudioBytes1, void* pAudioPtr2, DWORD dwAudioBytes2); + HRESULT (STDMETHODCALLTYPE * Restore) (ma_IDirectSoundBuffer* pThis); +} ma_IDirectSoundBufferVtbl; +struct ma_IDirectSoundBuffer +{ + ma_IDirectSoundBufferVtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IDirectSoundBuffer_QueryInterface(ma_IDirectSoundBuffer* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IDirectSoundBuffer_AddRef(ma_IDirectSoundBuffer* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG ma_IDirectSoundBuffer_Release(ma_IDirectSoundBuffer* pThis) { return pThis->lpVtbl->Release(pThis); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_GetCaps(ma_IDirectSoundBuffer* pThis, MA_DSBCAPS* pDSBufferCaps) { return pThis->lpVtbl->GetCaps(pThis, pDSBufferCaps); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_GetCurrentPosition(ma_IDirectSoundBuffer* pThis, DWORD* pCurrentPlayCursor, DWORD* pCurrentWriteCursor) { return pThis->lpVtbl->GetCurrentPosition(pThis, pCurrentPlayCursor, pCurrentWriteCursor); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_GetFormat(ma_IDirectSoundBuffer* pThis, WAVEFORMATEX* pFormat, DWORD dwSizeAllocated, DWORD* pSizeWritten) { return pThis->lpVtbl->GetFormat(pThis, pFormat, dwSizeAllocated, pSizeWritten); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_GetVolume(ma_IDirectSoundBuffer* pThis, LONG* pVolume) { return pThis->lpVtbl->GetVolume(pThis, pVolume); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_GetPan(ma_IDirectSoundBuffer* pThis, LONG* pPan) { return pThis->lpVtbl->GetPan(pThis, pPan); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_GetFrequency(ma_IDirectSoundBuffer* pThis, DWORD* 
pFrequency) { return pThis->lpVtbl->GetFrequency(pThis, pFrequency); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_GetStatus(ma_IDirectSoundBuffer* pThis, DWORD* pStatus) { return pThis->lpVtbl->GetStatus(pThis, pStatus); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_Initialize(ma_IDirectSoundBuffer* pThis, ma_IDirectSound* pDirectSound, const MA_DSBUFFERDESC* pDSBufferDesc) { return pThis->lpVtbl->Initialize(pThis, pDirectSound, pDSBufferDesc); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_Lock(ma_IDirectSoundBuffer* pThis, DWORD dwOffset, DWORD dwBytes, void** ppAudioPtr1, DWORD* pAudioBytes1, void** ppAudioPtr2, DWORD* pAudioBytes2, DWORD dwFlags) { return pThis->lpVtbl->Lock(pThis, dwOffset, dwBytes, ppAudioPtr1, pAudioBytes1, ppAudioPtr2, pAudioBytes2, dwFlags); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_Play(ma_IDirectSoundBuffer* pThis, DWORD dwReserved1, DWORD dwPriority, DWORD dwFlags) { return pThis->lpVtbl->Play(pThis, dwReserved1, dwPriority, dwFlags); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_SetCurrentPosition(ma_IDirectSoundBuffer* pThis, DWORD dwNewPosition) { return pThis->lpVtbl->SetCurrentPosition(pThis, dwNewPosition); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_SetFormat(ma_IDirectSoundBuffer* pThis, const WAVEFORMATEX* pFormat) { return pThis->lpVtbl->SetFormat(pThis, pFormat); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_SetVolume(ma_IDirectSoundBuffer* pThis, LONG volume) { return pThis->lpVtbl->SetVolume(pThis, volume); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_SetPan(ma_IDirectSoundBuffer* pThis, LONG pan) { return pThis->lpVtbl->SetPan(pThis, pan); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_SetFrequency(ma_IDirectSoundBuffer* pThis, DWORD dwFrequency) { return pThis->lpVtbl->SetFrequency(pThis, dwFrequency); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_Stop(ma_IDirectSoundBuffer* pThis) { return pThis->lpVtbl->Stop(pThis); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_Unlock(ma_IDirectSoundBuffer* pThis, void* pAudioPtr1, DWORD dwAudioBytes1, void* pAudioPtr2, DWORD dwAudioBytes2) { return pThis->lpVtbl->Unlock(pThis, pAudioPtr1, dwAudioBytes1, pAudioPtr2, dwAudioBytes2); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_Restore(ma_IDirectSoundBuffer* pThis) { return pThis->lpVtbl->Restore(pThis); } + + +/* IDirectSoundCapture */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IDirectSoundCapture* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IDirectSoundCapture* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IDirectSoundCapture* pThis); + + /* IDirectSoundCapture */ + HRESULT (STDMETHODCALLTYPE * CreateCaptureBuffer)(ma_IDirectSoundCapture* pThis, const MA_DSCBUFFERDESC* pDSCBufferDesc, ma_IDirectSoundCaptureBuffer** ppDSCBuffer, void* pUnkOuter); + HRESULT (STDMETHODCALLTYPE * GetCaps) (ma_IDirectSoundCapture* pThis, MA_DSCCAPS* pDSCCaps); + HRESULT (STDMETHODCALLTYPE * Initialize) (ma_IDirectSoundCapture* pThis, const GUID* pGuidDevice); +} ma_IDirectSoundCaptureVtbl; +struct ma_IDirectSoundCapture +{ + ma_IDirectSoundCaptureVtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IDirectSoundCapture_QueryInterface(ma_IDirectSoundCapture* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IDirectSoundCapture_AddRef(ma_IDirectSoundCapture* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG 
ma_IDirectSoundCapture_Release(ma_IDirectSoundCapture* pThis) { return pThis->lpVtbl->Release(pThis); } +static MA_INLINE HRESULT ma_IDirectSoundCapture_CreateCaptureBuffer(ma_IDirectSoundCapture* pThis, const MA_DSCBUFFERDESC* pDSCBufferDesc, ma_IDirectSoundCaptureBuffer** ppDSCBuffer, void* pUnkOuter) { return pThis->lpVtbl->CreateCaptureBuffer(pThis, pDSCBufferDesc, ppDSCBuffer, pUnkOuter); } +static MA_INLINE HRESULT ma_IDirectSoundCapture_GetCaps (ma_IDirectSoundCapture* pThis, MA_DSCCAPS* pDSCCaps) { return pThis->lpVtbl->GetCaps(pThis, pDSCCaps); } +static MA_INLINE HRESULT ma_IDirectSoundCapture_Initialize (ma_IDirectSoundCapture* pThis, const GUID* pGuidDevice) { return pThis->lpVtbl->Initialize(pThis, pGuidDevice); } + + +/* IDirectSoundCaptureBuffer */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IDirectSoundCaptureBuffer* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IDirectSoundCaptureBuffer* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IDirectSoundCaptureBuffer* pThis); + + /* IDirectSoundCaptureBuffer */ + HRESULT (STDMETHODCALLTYPE * GetCaps) (ma_IDirectSoundCaptureBuffer* pThis, MA_DSCBCAPS* pDSCBCaps); + HRESULT (STDMETHODCALLTYPE * GetCurrentPosition)(ma_IDirectSoundCaptureBuffer* pThis, DWORD* pCapturePosition, DWORD* pReadPosition); + HRESULT (STDMETHODCALLTYPE * GetFormat) (ma_IDirectSoundCaptureBuffer* pThis, WAVEFORMATEX* pFormat, DWORD dwSizeAllocated, DWORD* pSizeWritten); + HRESULT (STDMETHODCALLTYPE * GetStatus) (ma_IDirectSoundCaptureBuffer* pThis, DWORD* pStatus); + HRESULT (STDMETHODCALLTYPE * Initialize) (ma_IDirectSoundCaptureBuffer* pThis, ma_IDirectSoundCapture* pDirectSoundCapture, const MA_DSCBUFFERDESC* pDSCBufferDesc); + HRESULT (STDMETHODCALLTYPE * Lock) (ma_IDirectSoundCaptureBuffer* pThis, DWORD dwOffset, DWORD dwBytes, void** ppAudioPtr1, DWORD* pAudioBytes1, void** ppAudioPtr2, DWORD* pAudioBytes2, DWORD dwFlags); + HRESULT (STDMETHODCALLTYPE * Start) (ma_IDirectSoundCaptureBuffer* pThis, DWORD dwFlags); + HRESULT (STDMETHODCALLTYPE * Stop) (ma_IDirectSoundCaptureBuffer* pThis); + HRESULT (STDMETHODCALLTYPE * Unlock) (ma_IDirectSoundCaptureBuffer* pThis, void* pAudioPtr1, DWORD dwAudioBytes1, void* pAudioPtr2, DWORD dwAudioBytes2); +} ma_IDirectSoundCaptureBufferVtbl; +struct ma_IDirectSoundCaptureBuffer +{ + ma_IDirectSoundCaptureBufferVtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_QueryInterface(ma_IDirectSoundCaptureBuffer* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IDirectSoundCaptureBuffer_AddRef(ma_IDirectSoundCaptureBuffer* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG ma_IDirectSoundCaptureBuffer_Release(ma_IDirectSoundCaptureBuffer* pThis) { return pThis->lpVtbl->Release(pThis); } +static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_GetCaps(ma_IDirectSoundCaptureBuffer* pThis, MA_DSCBCAPS* pDSCBCaps) { return pThis->lpVtbl->GetCaps(pThis, pDSCBCaps); } +static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_GetCurrentPosition(ma_IDirectSoundCaptureBuffer* pThis, DWORD* pCapturePosition, DWORD* pReadPosition) { return pThis->lpVtbl->GetCurrentPosition(pThis, pCapturePosition, pReadPosition); } +static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_GetFormat(ma_IDirectSoundCaptureBuffer* pThis, WAVEFORMATEX* pFormat, DWORD dwSizeAllocated, DWORD* pSizeWritten) { return pThis->lpVtbl->GetFormat(pThis, 
pFormat, dwSizeAllocated, pSizeWritten); } +static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_GetStatus(ma_IDirectSoundCaptureBuffer* pThis, DWORD* pStatus) { return pThis->lpVtbl->GetStatus(pThis, pStatus); } +static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_Initialize(ma_IDirectSoundCaptureBuffer* pThis, ma_IDirectSoundCapture* pDirectSoundCapture, const MA_DSCBUFFERDESC* pDSCBufferDesc) { return pThis->lpVtbl->Initialize(pThis, pDirectSoundCapture, pDSCBufferDesc); } +static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_Lock(ma_IDirectSoundCaptureBuffer* pThis, DWORD dwOffset, DWORD dwBytes, void** ppAudioPtr1, DWORD* pAudioBytes1, void** ppAudioPtr2, DWORD* pAudioBytes2, DWORD dwFlags) { return pThis->lpVtbl->Lock(pThis, dwOffset, dwBytes, ppAudioPtr1, pAudioBytes1, ppAudioPtr2, pAudioBytes2, dwFlags); } +static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_Start(ma_IDirectSoundCaptureBuffer* pThis, DWORD dwFlags) { return pThis->lpVtbl->Start(pThis, dwFlags); } +static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_Stop(ma_IDirectSoundCaptureBuffer* pThis) { return pThis->lpVtbl->Stop(pThis); } +static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_Unlock(ma_IDirectSoundCaptureBuffer* pThis, void* pAudioPtr1, DWORD dwAudioBytes1, void* pAudioPtr2, DWORD dwAudioBytes2) { return pThis->lpVtbl->Unlock(pThis, pAudioPtr1, dwAudioBytes1, pAudioPtr2, dwAudioBytes2); } + + +/* IDirectSoundNotify */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IDirectSoundNotify* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IDirectSoundNotify* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IDirectSoundNotify* pThis); + + /* IDirectSoundNotify */ + HRESULT (STDMETHODCALLTYPE * SetNotificationPositions)(ma_IDirectSoundNotify* pThis, DWORD dwPositionNotifies, const MA_DSBPOSITIONNOTIFY* pPositionNotifies); +} ma_IDirectSoundNotifyVtbl; +struct ma_IDirectSoundNotify +{ + ma_IDirectSoundNotifyVtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IDirectSoundNotify_QueryInterface(ma_IDirectSoundNotify* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IDirectSoundNotify_AddRef(ma_IDirectSoundNotify* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG ma_IDirectSoundNotify_Release(ma_IDirectSoundNotify* pThis) { return pThis->lpVtbl->Release(pThis); } +static MA_INLINE HRESULT ma_IDirectSoundNotify_SetNotificationPositions(ma_IDirectSoundNotify* pThis, DWORD dwPositionNotifies, const MA_DSBPOSITIONNOTIFY* pPositionNotifies) { return pThis->lpVtbl->SetNotificationPositions(pThis, dwPositionNotifies, pPositionNotifies); } + + +typedef BOOL (CALLBACK * ma_DSEnumCallbackAProc) (LPGUID pDeviceGUID, LPCSTR pDeviceDescription, LPCSTR pModule, LPVOID pContext); +typedef HRESULT (WINAPI * ma_DirectSoundCreateProc) (const GUID* pcGuidDevice, ma_IDirectSound** ppDS8, LPUNKNOWN pUnkOuter); +typedef HRESULT (WINAPI * ma_DirectSoundEnumerateAProc) (ma_DSEnumCallbackAProc pDSEnumCallback, LPVOID pContext); +typedef HRESULT (WINAPI * ma_DirectSoundCaptureCreateProc) (const GUID* pcGuidDevice, ma_IDirectSoundCapture** ppDSC8, LPUNKNOWN pUnkOuter); +typedef HRESULT (WINAPI * ma_DirectSoundCaptureEnumerateAProc)(ma_DSEnumCallbackAProc pDSEnumCallback, LPVOID pContext); + +static ma_uint32 ma_get_best_sample_rate_within_range(ma_uint32 sampleRateMin, ma_uint32 sampleRateMax) +{ + /* Normalize the range in case we were given something 
stupid. */ + if (sampleRateMin < MA_MIN_SAMPLE_RATE) { + sampleRateMin = MA_MIN_SAMPLE_RATE; + } + if (sampleRateMax > MA_MAX_SAMPLE_RATE) { + sampleRateMax = MA_MAX_SAMPLE_RATE; + } + if (sampleRateMin > sampleRateMax) { + sampleRateMin = sampleRateMax; + } + + if (sampleRateMin == sampleRateMax) { + return sampleRateMax; + } else { + size_t iStandardRate; + for (iStandardRate = 0; iStandardRate < ma_countof(g_maStandardSampleRatePriorities); ++iStandardRate) { + ma_uint32 standardRate = g_maStandardSampleRatePriorities[iStandardRate]; + if (standardRate >= sampleRateMin && standardRate <= sampleRateMax) { + return standardRate; + } + } + } + + /* Should never get here. */ + MA_ASSERT(MA_FALSE); + return 0; +} + +/* +Retrieves the channel count and channel map for the given speaker configuration. If the speaker configuration is unknown, +the channel count and channel map will be left unmodified. +*/ +static void ma_get_channels_from_speaker_config__dsound(DWORD speakerConfig, WORD* pChannelsOut, DWORD* pChannelMapOut) +{ + WORD channels; + DWORD channelMap; + + channels = 0; + if (pChannelsOut != NULL) { + channels = *pChannelsOut; + } + + channelMap = 0; + if (pChannelMapOut != NULL) { + channelMap = *pChannelMapOut; + } + + /* + The speaker configuration is a combination of speaker config and speaker geometry. The lower 8 bits is what we care about. The upper + 16 bits is for the geometry. + */ + switch ((BYTE)(speakerConfig)) { + case 1 /*DSSPEAKER_HEADPHONE*/: channels = 2; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT; break; + case 2 /*DSSPEAKER_MONO*/: channels = 1; channelMap = SPEAKER_FRONT_CENTER; break; + case 3 /*DSSPEAKER_QUAD*/: channels = 4; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT; break; + case 4 /*DSSPEAKER_STEREO*/: channels = 2; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT; break; + case 5 /*DSSPEAKER_SURROUND*/: channels = 4; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_BACK_CENTER; break; + case 6 /*DSSPEAKER_5POINT1_BACK*/ /*DSSPEAKER_5POINT1*/: channels = 6; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT; break; + case 7 /*DSSPEAKER_7POINT1_WIDE*/ /*DSSPEAKER_7POINT1*/: channels = 8; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT | SPEAKER_FRONT_LEFT_OF_CENTER | SPEAKER_FRONT_RIGHT_OF_CENTER; break; + case 8 /*DSSPEAKER_7POINT1_SURROUND*/: channels = 8; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT | SPEAKER_SIDE_LEFT | SPEAKER_SIDE_RIGHT; break; + case 9 /*DSSPEAKER_5POINT1_SURROUND*/: channels = 6; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY | SPEAKER_SIDE_LEFT | SPEAKER_SIDE_RIGHT; break; + default: break; + } + + if (pChannelsOut != NULL) { + *pChannelsOut = channels; + } + + if (pChannelMapOut != NULL) { + *pChannelMapOut = channelMap; + } +} + + +static ma_result ma_context_create_IDirectSound__dsound(ma_context* pContext, ma_share_mode shareMode, const ma_device_id* pDeviceID, ma_IDirectSound** ppDirectSound) +{ + ma_IDirectSound* pDirectSound; + HWND hWnd; + HRESULT hr; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(ppDirectSound != NULL); + + *ppDirectSound = NULL; + pDirectSound = NULL; + + if 
(FAILED(((ma_DirectSoundCreateProc)pContext->dsound.DirectSoundCreate)((pDeviceID == NULL) ? NULL : (const GUID*)pDeviceID->dsound, &pDirectSound, NULL))) { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[DirectSound] DirectSoundCreate() failed for playback device.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); + } + + /* The cooperative level must be set before doing anything else. */ + hWnd = ((MA_PFN_GetForegroundWindow)pContext->win32.GetForegroundWindow)(); + if (hWnd == NULL) { + hWnd = ((MA_PFN_GetDesktopWindow)pContext->win32.GetDesktopWindow)(); + } + + hr = ma_IDirectSound_SetCooperativeLevel(pDirectSound, hWnd, (shareMode == ma_share_mode_exclusive) ? MA_DSSCL_EXCLUSIVE : MA_DSSCL_PRIORITY); + if (FAILED(hr)) { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSound_SetCooperateiveLevel() failed for playback device.", ma_result_from_HRESULT(hr)); + } + + *ppDirectSound = pDirectSound; + return MA_SUCCESS; +} + +static ma_result ma_context_create_IDirectSoundCapture__dsound(ma_context* pContext, ma_share_mode shareMode, const ma_device_id* pDeviceID, ma_IDirectSoundCapture** ppDirectSoundCapture) +{ + ma_IDirectSoundCapture* pDirectSoundCapture; + HRESULT hr; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(ppDirectSoundCapture != NULL); + + /* DirectSound does not support exclusive mode for capture. */ + if (shareMode == ma_share_mode_exclusive) { + return MA_SHARE_MODE_NOT_SUPPORTED; + } + + *ppDirectSoundCapture = NULL; + pDirectSoundCapture = NULL; + + hr = ((ma_DirectSoundCaptureCreateProc)pContext->dsound.DirectSoundCaptureCreate)((pDeviceID == NULL) ? NULL : (const GUID*)pDeviceID->dsound, &pDirectSoundCapture, NULL); + if (FAILED(hr)) { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[DirectSound] DirectSoundCaptureCreate() failed for capture device.", ma_result_from_HRESULT(hr)); + } + + *ppDirectSoundCapture = pDirectSoundCapture; + return MA_SUCCESS; +} + +static ma_result ma_context_get_format_info_for_IDirectSoundCapture__dsound(ma_context* pContext, ma_IDirectSoundCapture* pDirectSoundCapture, WORD* pChannels, WORD* pBitsPerSample, DWORD* pSampleRate) +{ + HRESULT hr; + MA_DSCCAPS caps; + WORD bitsPerSample; + DWORD sampleRate; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pDirectSoundCapture != NULL); + + if (pChannels) { + *pChannels = 0; + } + if (pBitsPerSample) { + *pBitsPerSample = 0; + } + if (pSampleRate) { + *pSampleRate = 0; + } + + MA_ZERO_OBJECT(&caps); + caps.dwSize = sizeof(caps); + hr = ma_IDirectSoundCapture_GetCaps(pDirectSoundCapture, &caps); + if (FAILED(hr)) { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundCapture_GetCaps() failed for capture device.", ma_result_from_HRESULT(hr)); + } + + if (pChannels) { + *pChannels = (WORD)caps.dwChannels; + } + + /* The device can support multiple formats. We just go through the different formats in order of priority and pick the first one. This the same type of system as the WinMM backend. 
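+ As a concrete example of the priority order used below: a stereo capture device whose caps
+ advertise only WAVE_FORMAT_44S16 and WAVE_FORMAT_2S08 resolves to 16 bits per sample at
+ 44100 Hz, because the 16-bit rates are tried first (48000, 44100, 22050, 11025, 96000) and
+ the 8-bit formats are only considered when none of those are available.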
*/ + bitsPerSample = 16; + sampleRate = 48000; + + if (caps.dwChannels == 1) { + if ((caps.dwFormats & WAVE_FORMAT_48M16) != 0) { + sampleRate = 48000; + } else if ((caps.dwFormats & WAVE_FORMAT_44M16) != 0) { + sampleRate = 44100; + } else if ((caps.dwFormats & WAVE_FORMAT_2M16) != 0) { + sampleRate = 22050; + } else if ((caps.dwFormats & WAVE_FORMAT_1M16) != 0) { + sampleRate = 11025; + } else if ((caps.dwFormats & WAVE_FORMAT_96M16) != 0) { + sampleRate = 96000; + } else { + bitsPerSample = 8; + if ((caps.dwFormats & WAVE_FORMAT_48M08) != 0) { + sampleRate = 48000; + } else if ((caps.dwFormats & WAVE_FORMAT_44M08) != 0) { + sampleRate = 44100; + } else if ((caps.dwFormats & WAVE_FORMAT_2M08) != 0) { + sampleRate = 22050; + } else if ((caps.dwFormats & WAVE_FORMAT_1M08) != 0) { + sampleRate = 11025; + } else if ((caps.dwFormats & WAVE_FORMAT_96M08) != 0) { + sampleRate = 96000; + } else { + bitsPerSample = 16; /* Didn't find it. Just fall back to 16-bit. */ + } + } + } else if (caps.dwChannels == 2) { + if ((caps.dwFormats & WAVE_FORMAT_48S16) != 0) { + sampleRate = 48000; + } else if ((caps.dwFormats & WAVE_FORMAT_44S16) != 0) { + sampleRate = 44100; + } else if ((caps.dwFormats & WAVE_FORMAT_2S16) != 0) { + sampleRate = 22050; + } else if ((caps.dwFormats & WAVE_FORMAT_1S16) != 0) { + sampleRate = 11025; + } else if ((caps.dwFormats & WAVE_FORMAT_96S16) != 0) { + sampleRate = 96000; + } else { + bitsPerSample = 8; + if ((caps.dwFormats & WAVE_FORMAT_48S08) != 0) { + sampleRate = 48000; + } else if ((caps.dwFormats & WAVE_FORMAT_44S08) != 0) { + sampleRate = 44100; + } else if ((caps.dwFormats & WAVE_FORMAT_2S08) != 0) { + sampleRate = 22050; + } else if ((caps.dwFormats & WAVE_FORMAT_1S08) != 0) { + sampleRate = 11025; + } else if ((caps.dwFormats & WAVE_FORMAT_96S08) != 0) { + sampleRate = 96000; + } else { + bitsPerSample = 16; /* Didn't find it. Just fall back to 16-bit. */ + } + } + } + + if (pBitsPerSample) { + *pBitsPerSample = bitsPerSample; + } + if (pSampleRate) { + *pSampleRate = sampleRate; + } + + return MA_SUCCESS; +} + +static ma_bool32 ma_context_is_device_id_equal__dsound(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pID0 != NULL); + MA_ASSERT(pID1 != NULL); + (void)pContext; + + return memcmp(pID0->dsound, pID1->dsound, sizeof(pID0->dsound)) == 0; +} + + +typedef struct +{ + ma_context* pContext; + ma_device_type deviceType; + ma_enum_devices_callback_proc callback; + void* pUserData; + ma_bool32 terminated; +} ma_context_enumerate_devices_callback_data__dsound; + +static BOOL CALLBACK ma_context_enumerate_devices_callback__dsound(LPGUID lpGuid, LPCSTR lpcstrDescription, LPCSTR lpcstrModule, LPVOID lpContext) +{ + ma_context_enumerate_devices_callback_data__dsound* pData = (ma_context_enumerate_devices_callback_data__dsound*)lpContext; + ma_device_info deviceInfo; + + MA_ZERO_OBJECT(&deviceInfo); + + /* ID. */ + if (lpGuid != NULL) { + MA_COPY_MEMORY(deviceInfo.id.dsound, lpGuid, 16); + } else { + MA_ZERO_MEMORY(deviceInfo.id.dsound, 16); + } + + /* Name / Description */ + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), lpcstrDescription, (size_t)-1); + + + /* Call the callback function, but make sure we stop enumerating if the callee requested so. */ + MA_ASSERT(pData != NULL); + pData->terminated = !pData->callback(pData->pContext, pData->deviceType, &deviceInfo, pData->pUserData); + if (pData->terminated) { + return FALSE; /* Stop enumeration. 
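+ Returning FALSE from a DirectSoundEnumerate callback is what tells DirectSound to stop
+ walking the device list, so a stop request from the user's callback is propagated simply by
+ returning FALSE from here.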
*/ + } else { + return TRUE; /* Continue enumeration. */ + } + + (void)lpcstrModule; +} + +static ma_result ma_context_enumerate_devices__dsound(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + ma_context_enumerate_devices_callback_data__dsound data; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + data.pContext = pContext; + data.callback = callback; + data.pUserData = pUserData; + data.terminated = MA_FALSE; + + /* Playback. */ + if (!data.terminated) { + data.deviceType = ma_device_type_playback; + ((ma_DirectSoundEnumerateAProc)pContext->dsound.DirectSoundEnumerateA)(ma_context_enumerate_devices_callback__dsound, &data); + } + + /* Capture. */ + if (!data.terminated) { + data.deviceType = ma_device_type_capture; + ((ma_DirectSoundCaptureEnumerateAProc)pContext->dsound.DirectSoundCaptureEnumerateA)(ma_context_enumerate_devices_callback__dsound, &data); + } + + return MA_SUCCESS; +} + + +typedef struct +{ + const ma_device_id* pDeviceID; + ma_device_info* pDeviceInfo; + ma_bool32 found; +} ma_context_get_device_info_callback_data__dsound; + +static BOOL CALLBACK ma_context_get_device_info_callback__dsound(LPGUID lpGuid, LPCSTR lpcstrDescription, LPCSTR lpcstrModule, LPVOID lpContext) +{ + ma_context_get_device_info_callback_data__dsound* pData = (ma_context_get_device_info_callback_data__dsound*)lpContext; + MA_ASSERT(pData != NULL); + + if ((pData->pDeviceID == NULL || ma_is_guid_equal(pData->pDeviceID->dsound, &MA_GUID_NULL)) && (lpGuid == NULL || ma_is_guid_equal(lpGuid, &MA_GUID_NULL))) { + /* Default device. */ + ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), lpcstrDescription, (size_t)-1); + pData->found = MA_TRUE; + return FALSE; /* Stop enumeration. */ + } else { + /* Not the default device. */ + if (lpGuid != NULL && pData->pDeviceID != NULL) { + if (memcmp(pData->pDeviceID->dsound, lpGuid, sizeof(pData->pDeviceID->dsound)) == 0) { + ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), lpcstrDescription, (size_t)-1); + pData->found = MA_TRUE; + return FALSE; /* Stop enumeration. */ + } + } + } + + (void)lpcstrModule; + return TRUE; +} + +static ma_result ma_context_get_device_info__dsound(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) +{ + ma_result result; + HRESULT hr; + + /* Exclusive mode and capture not supported with DirectSound. */ + if (deviceType == ma_device_type_capture && shareMode == ma_share_mode_exclusive) { + return MA_SHARE_MODE_NOT_SUPPORTED; + } + + if (pDeviceID != NULL) { + ma_context_get_device_info_callback_data__dsound data; + + /* ID. */ + MA_COPY_MEMORY(pDeviceInfo->id.dsound, pDeviceID->dsound, 16); + + /* Name / Description. This is retrieved by enumerating over each device until we find that one that matches the input ID. */ + data.pDeviceID = pDeviceID; + data.pDeviceInfo = pDeviceInfo; + data.found = MA_FALSE; + if (deviceType == ma_device_type_playback) { + ((ma_DirectSoundEnumerateAProc)pContext->dsound.DirectSoundEnumerateA)(ma_context_get_device_info_callback__dsound, &data); + } else { + ((ma_DirectSoundCaptureEnumerateAProc)pContext->dsound.DirectSoundCaptureEnumerateA)(ma_context_get_device_info_callback__dsound, &data); + } + + if (!data.found) { + return MA_NO_DEVICE; + } + } else { + /* I don't think there's a way to get the name of the default device with DirectSound. In this case we just need to use defaults. 
*/ + + /* ID */ + MA_ZERO_MEMORY(pDeviceInfo->id.dsound, 16); + + /* Name / Description */ + if (deviceType == ma_device_type_playback) { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + } else { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + } + } + + /* Retrieving detailed information is slightly different depending on the device type. */ + if (deviceType == ma_device_type_playback) { + /* Playback. */ + ma_IDirectSound* pDirectSound; + MA_DSCAPS caps; + ma_uint32 iFormat; + + result = ma_context_create_IDirectSound__dsound(pContext, shareMode, pDeviceID, &pDirectSound); + if (result != MA_SUCCESS) { + return result; + } + + MA_ZERO_OBJECT(&caps); + caps.dwSize = sizeof(caps); + hr = ma_IDirectSound_GetCaps(pDirectSound, &caps); + if (FAILED(hr)) { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSound_GetCaps() failed for playback device.", ma_result_from_HRESULT(hr)); + } + + if ((caps.dwFlags & MA_DSCAPS_PRIMARYSTEREO) != 0) { + /* It supports at least stereo, but could support more. */ + WORD channels = 2; + + /* Look at the speaker configuration to get a better idea on the channel count. */ + DWORD speakerConfig; + hr = ma_IDirectSound_GetSpeakerConfig(pDirectSound, &speakerConfig); + if (SUCCEEDED(hr)) { + ma_get_channels_from_speaker_config__dsound(speakerConfig, &channels, NULL); + } + + pDeviceInfo->minChannels = channels; + pDeviceInfo->maxChannels = channels; + } else { + /* It does not support stereo, which means we are stuck with mono. */ + pDeviceInfo->minChannels = 1; + pDeviceInfo->maxChannels = 1; + } + + /* Sample rate. */ + if ((caps.dwFlags & MA_DSCAPS_CONTINUOUSRATE) != 0) { + pDeviceInfo->minSampleRate = caps.dwMinSecondarySampleRate; + pDeviceInfo->maxSampleRate = caps.dwMaxSecondarySampleRate; + + /* + On my machine the min and max sample rates can return 100 and 200000 respectively. I'd rather these be within + the range of our standard sample rates so I'm clamping. + */ + if (caps.dwMinSecondarySampleRate < MA_MIN_SAMPLE_RATE && caps.dwMaxSecondarySampleRate >= MA_MIN_SAMPLE_RATE) { + pDeviceInfo->minSampleRate = MA_MIN_SAMPLE_RATE; + } + if (caps.dwMaxSecondarySampleRate > MA_MAX_SAMPLE_RATE && caps.dwMinSecondarySampleRate <= MA_MAX_SAMPLE_RATE) { + pDeviceInfo->maxSampleRate = MA_MAX_SAMPLE_RATE; + } + } else { + /* Only supports a single sample rate. Set both min an max to the same thing. Do not clamp within the standard rates. */ + pDeviceInfo->minSampleRate = caps.dwMaxSecondarySampleRate; + pDeviceInfo->maxSampleRate = caps.dwMaxSecondarySampleRate; + } + + /* DirectSound can support all formats. */ + pDeviceInfo->formatCount = ma_format_count - 1; /* Minus one because we don't want to include ma_format_unknown. */ + for (iFormat = 0; iFormat < pDeviceInfo->formatCount; ++iFormat) { + pDeviceInfo->formats[iFormat] = (ma_format)(iFormat + 1); /* +1 to skip over ma_format_unknown. */ + } + + ma_IDirectSound_Release(pDirectSound); + } else { + /* + Capture. This is a little different to playback due to the say the supported formats are reported. Technically capture + devices can support a number of different formats, but for simplicity and consistency with ma_device_init() I'm just + reporting the best format. 
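+ For instance, a capture device whose caps report dwChannels = 2 and a format mask that
+ includes WAVE_FORMAT_48S16 ends up reported as a single entry: ma_format_s16, 2 channels,
+ with minSampleRate and maxSampleRate both set to 48000.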
+ */ + ma_IDirectSoundCapture* pDirectSoundCapture; + WORD channels; + WORD bitsPerSample; + DWORD sampleRate; + + result = ma_context_create_IDirectSoundCapture__dsound(pContext, shareMode, pDeviceID, &pDirectSoundCapture); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_context_get_format_info_for_IDirectSoundCapture__dsound(pContext, pDirectSoundCapture, &channels, &bitsPerSample, &sampleRate); + if (result != MA_SUCCESS) { + ma_IDirectSoundCapture_Release(pDirectSoundCapture); + return result; + } + + pDeviceInfo->minChannels = channels; + pDeviceInfo->maxChannels = channels; + pDeviceInfo->minSampleRate = sampleRate; + pDeviceInfo->maxSampleRate = sampleRate; + pDeviceInfo->formatCount = 1; + if (bitsPerSample == 8) { + pDeviceInfo->formats[0] = ma_format_u8; + } else if (bitsPerSample == 16) { + pDeviceInfo->formats[0] = ma_format_s16; + } else if (bitsPerSample == 24) { + pDeviceInfo->formats[0] = ma_format_s24; + } else if (bitsPerSample == 32) { + pDeviceInfo->formats[0] = ma_format_s32; + } else { + ma_IDirectSoundCapture_Release(pDirectSoundCapture); + return MA_FORMAT_NOT_SUPPORTED; + } + + ma_IDirectSoundCapture_Release(pDirectSoundCapture); + } + + return MA_SUCCESS; +} + + + +static void ma_device_uninit__dsound(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + if (pDevice->dsound.pCaptureBuffer != NULL) { + ma_IDirectSoundCaptureBuffer_Release((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer); + } + if (pDevice->dsound.pCapture != NULL) { + ma_IDirectSoundCapture_Release((ma_IDirectSoundCapture*)pDevice->dsound.pCapture); + } + + if (pDevice->dsound.pPlaybackBuffer != NULL) { + ma_IDirectSoundBuffer_Release((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer); + } + if (pDevice->dsound.pPlaybackPrimaryBuffer != NULL) { + ma_IDirectSoundBuffer_Release((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackPrimaryBuffer); + } + if (pDevice->dsound.pPlayback != NULL) { + ma_IDirectSound_Release((ma_IDirectSound*)pDevice->dsound.pPlayback); + } +} + +static ma_result ma_config_to_WAVEFORMATEXTENSIBLE(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, const ma_channel* pChannelMap, WAVEFORMATEXTENSIBLE* pWF) +{ + GUID subformat; + + switch (format) + { + case ma_format_u8: + case ma_format_s16: + case ma_format_s24: + /*case ma_format_s24_32:*/ + case ma_format_s32: + { + subformat = MA_GUID_KSDATAFORMAT_SUBTYPE_PCM; + } break; + + case ma_format_f32: + { + subformat = MA_GUID_KSDATAFORMAT_SUBTYPE_IEEE_FLOAT; + } break; + + default: + return MA_FORMAT_NOT_SUPPORTED; + } + + MA_ZERO_OBJECT(pWF); + pWF->Format.cbSize = sizeof(*pWF); + pWF->Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE; + pWF->Format.nChannels = (WORD)channels; + pWF->Format.nSamplesPerSec = (DWORD)sampleRate; + pWF->Format.wBitsPerSample = (WORD)ma_get_bytes_per_sample(format)*8; + pWF->Format.nBlockAlign = (pWF->Format.nChannels * pWF->Format.wBitsPerSample) / 8; + pWF->Format.nAvgBytesPerSec = pWF->Format.nBlockAlign * pWF->Format.nSamplesPerSec; + pWF->Samples.wValidBitsPerSample = pWF->Format.wBitsPerSample; + pWF->dwChannelMask = ma_channel_map_to_channel_mask__win32(pChannelMap, channels); + pWF->SubFormat = subformat; + + return MA_SUCCESS; +} + +static ma_result ma_device_init__dsound(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) +{ + ma_result result; + HRESULT hr; + ma_uint32 periodSizeInMilliseconds; + + MA_ASSERT(pDevice != NULL); + MA_ZERO_OBJECT(&pDevice->dsound); + + if (pConfig->deviceType == ma_device_type_loopback) { + return 
MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + periodSizeInMilliseconds = pConfig->periodSizeInMilliseconds; + if (periodSizeInMilliseconds == 0) { + periodSizeInMilliseconds = ma_calculate_buffer_size_in_milliseconds_from_frames(pConfig->periodSizeInFrames, pConfig->sampleRate); + } + + /* DirectSound should use a latency of about 20ms per period for low latency mode. */ + if (pDevice->usingDefaultBufferSize) { + if (pConfig->performanceProfile == ma_performance_profile_low_latency) { + periodSizeInMilliseconds = 20; + } else { + periodSizeInMilliseconds = 200; + } + } + + /* DirectSound breaks down with tiny buffer sizes (bad glitching and silent output). I am therefore restricting the size of the buffer to a minimum of 20 milliseconds. */ + if (periodSizeInMilliseconds < 20) { + periodSizeInMilliseconds = 20; + } + + /* + Unfortunately DirectSound uses different APIs and data structures for playback and catpure devices. We need to initialize + the capture device first because we'll want to match it's buffer size and period count on the playback side if we're using + full-duplex mode. + */ + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + WAVEFORMATEXTENSIBLE wf; + MA_DSCBUFFERDESC descDS; + ma_uint32 periodSizeInFrames; + char rawdata[1024]; /* <-- Ugly hack to avoid a malloc() due to a crappy DirectSound API. */ + WAVEFORMATEXTENSIBLE* pActualFormat; + + result = ma_config_to_WAVEFORMATEXTENSIBLE(pConfig->capture.format, pConfig->capture.channels, pConfig->sampleRate, pConfig->capture.channelMap, &wf); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_context_create_IDirectSoundCapture__dsound(pContext, pConfig->capture.shareMode, pConfig->capture.pDeviceID, (ma_IDirectSoundCapture**)&pDevice->dsound.pCapture); + if (result != MA_SUCCESS) { + ma_device_uninit__dsound(pDevice); + return result; + } + + result = ma_context_get_format_info_for_IDirectSoundCapture__dsound(pContext, (ma_IDirectSoundCapture*)pDevice->dsound.pCapture, &wf.Format.nChannels, &wf.Format.wBitsPerSample, &wf.Format.nSamplesPerSec); + if (result != MA_SUCCESS) { + ma_device_uninit__dsound(pDevice); + return result; + } + + wf.Format.nBlockAlign = (wf.Format.nChannels * wf.Format.wBitsPerSample) / 8; + wf.Format.nAvgBytesPerSec = wf.Format.nBlockAlign * wf.Format.nSamplesPerSec; + wf.Samples.wValidBitsPerSample = wf.Format.wBitsPerSample; + wf.SubFormat = MA_GUID_KSDATAFORMAT_SUBTYPE_PCM; + + /* The size of the buffer must be a clean multiple of the period count. */ + periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(periodSizeInMilliseconds, wf.Format.nSamplesPerSec); + + MA_ZERO_OBJECT(&descDS); + descDS.dwSize = sizeof(descDS); + descDS.dwFlags = 0; + descDS.dwBufferBytes = periodSizeInFrames * pConfig->periods * ma_get_bytes_per_frame(pDevice->capture.internalFormat, wf.Format.nChannels); + descDS.lpwfxFormat = (WAVEFORMATEX*)&wf; + hr = ma_IDirectSoundCapture_CreateCaptureBuffer((ma_IDirectSoundCapture*)pDevice->dsound.pCapture, &descDS, (ma_IDirectSoundCaptureBuffer**)&pDevice->dsound.pCaptureBuffer, NULL); + if (FAILED(hr)) { + ma_device_uninit__dsound(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundCapture_CreateCaptureBuffer() failed for capture device.", ma_result_from_HRESULT(hr)); + } + + /* Get the _actual_ properties of the buffer. 
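+ As a rough illustration of the sizing requested above: 20 ms periods at 48000 Hz come out to
+ 960 frames, so with, say, three periods and an assumed 16-bit stereo internal format,
+ dwBufferBytes works out to 960 * 3 * 4 = 11520 bytes. The GetFormat() call below then reports
+ what the driver actually gave us, which may differ from the request.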
*/ + pActualFormat = (WAVEFORMATEXTENSIBLE*)rawdata; + hr = ma_IDirectSoundCaptureBuffer_GetFormat((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, (WAVEFORMATEX*)pActualFormat, sizeof(rawdata), NULL); + if (FAILED(hr)) { + ma_device_uninit__dsound(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to retrieve the actual format of the capture device's buffer.", ma_result_from_HRESULT(hr)); + } + + pDevice->capture.internalFormat = ma_format_from_WAVEFORMATEX((WAVEFORMATEX*)pActualFormat); + pDevice->capture.internalChannels = pActualFormat->Format.nChannels; + pDevice->capture.internalSampleRate = pActualFormat->Format.nSamplesPerSec; + + /* Get the internal channel map based on the channel mask. */ + if (pActualFormat->Format.wFormatTag == WAVE_FORMAT_EXTENSIBLE) { + ma_channel_mask_to_channel_map__win32(pActualFormat->dwChannelMask, pDevice->capture.internalChannels, pDevice->capture.internalChannelMap); + } else { + ma_channel_mask_to_channel_map__win32(wf.dwChannelMask, pDevice->capture.internalChannels, pDevice->capture.internalChannelMap); + } + + /* + After getting the actual format the size of the buffer in frames may have actually changed. However, we want this to be as close to what the + user has asked for as possible, so let's go ahead and release the old capture buffer and create a new one in this case. + */ + if (periodSizeInFrames != (descDS.dwBufferBytes / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels) / pConfig->periods)) { + descDS.dwBufferBytes = periodSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.internalFormat, wf.Format.nChannels) * pConfig->periods; + ma_IDirectSoundCaptureBuffer_Release((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer); + + hr = ma_IDirectSoundCapture_CreateCaptureBuffer((ma_IDirectSoundCapture*)pDevice->dsound.pCapture, &descDS, (ma_IDirectSoundCaptureBuffer**)&pDevice->dsound.pCaptureBuffer, NULL); + if (FAILED(hr)) { + ma_device_uninit__dsound(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Second attempt at IDirectSoundCapture_CreateCaptureBuffer() failed for capture device.", ma_result_from_HRESULT(hr)); + } + } + + /* DirectSound should give us a buffer exactly the size we asked for. */ + pDevice->capture.internalPeriodSizeInFrames = periodSizeInFrames; + pDevice->capture.internalPeriods = pConfig->periods; + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + WAVEFORMATEXTENSIBLE wf; + MA_DSBUFFERDESC descDSPrimary; + MA_DSCAPS caps; + char rawdata[1024]; /* <-- Ugly hack to avoid a malloc() due to a crappy DirectSound API. 
*/ + WAVEFORMATEXTENSIBLE* pActualFormat; + ma_uint32 periodSizeInFrames; + MA_DSBUFFERDESC descDS; + + result = ma_config_to_WAVEFORMATEXTENSIBLE(pConfig->playback.format, pConfig->playback.channels, pConfig->sampleRate, pConfig->playback.channelMap, &wf); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_context_create_IDirectSound__dsound(pContext, pConfig->playback.shareMode, pConfig->playback.pDeviceID, (ma_IDirectSound**)&pDevice->dsound.pPlayback); + if (result != MA_SUCCESS) { + ma_device_uninit__dsound(pDevice); + return result; + } + + MA_ZERO_OBJECT(&descDSPrimary); + descDSPrimary.dwSize = sizeof(MA_DSBUFFERDESC); + descDSPrimary.dwFlags = MA_DSBCAPS_PRIMARYBUFFER | MA_DSBCAPS_CTRLVOLUME; + hr = ma_IDirectSound_CreateSoundBuffer((ma_IDirectSound*)pDevice->dsound.pPlayback, &descDSPrimary, (ma_IDirectSoundBuffer**)&pDevice->dsound.pPlaybackPrimaryBuffer, NULL); + if (FAILED(hr)) { + ma_device_uninit__dsound(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSound_CreateSoundBuffer() failed for playback device's primary buffer.", ma_result_from_HRESULT(hr)); + } + + + /* We may want to make some adjustments to the format if we are using defaults. */ + MA_ZERO_OBJECT(&caps); + caps.dwSize = sizeof(caps); + hr = ma_IDirectSound_GetCaps((ma_IDirectSound*)pDevice->dsound.pPlayback, &caps); + if (FAILED(hr)) { + ma_device_uninit__dsound(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSound_GetCaps() failed for playback device.", ma_result_from_HRESULT(hr)); + } + + if (pDevice->playback.usingDefaultChannels) { + if ((caps.dwFlags & MA_DSCAPS_PRIMARYSTEREO) != 0) { + DWORD speakerConfig; + + /* It supports at least stereo, but could support more. */ + wf.Format.nChannels = 2; + + /* Look at the speaker configuration to get a better idea on the channel count. */ + if (SUCCEEDED(ma_IDirectSound_GetSpeakerConfig((ma_IDirectSound*)pDevice->dsound.pPlayback, &speakerConfig))) { + ma_get_channels_from_speaker_config__dsound(speakerConfig, &wf.Format.nChannels, &wf.dwChannelMask); + } + } else { + /* It does not support stereo, which means we are stuck with mono. */ + wf.Format.nChannels = 1; + } + } + + if (pDevice->usingDefaultSampleRate) { + /* We base the sample rate on the values returned by GetCaps(). */ + if ((caps.dwFlags & MA_DSCAPS_CONTINUOUSRATE) != 0) { + wf.Format.nSamplesPerSec = ma_get_best_sample_rate_within_range(caps.dwMinSecondarySampleRate, caps.dwMaxSecondarySampleRate); + } else { + wf.Format.nSamplesPerSec = caps.dwMaxSecondarySampleRate; + } + } + + wf.Format.nBlockAlign = (wf.Format.nChannels * wf.Format.wBitsPerSample) / 8; + wf.Format.nAvgBytesPerSec = wf.Format.nBlockAlign * wf.Format.nSamplesPerSec; + + /* + From MSDN: + + The method succeeds even if the hardware does not support the requested format; DirectSound sets the buffer to the closest + supported format. To determine whether this has happened, an application can call the GetFormat method for the primary buffer + and compare the result with the format that was requested with the SetFormat method. + */ + hr = ma_IDirectSoundBuffer_SetFormat((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackPrimaryBuffer, (WAVEFORMATEX*)&wf); + if (FAILED(hr)) { + ma_device_uninit__dsound(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to set format of playback device's primary buffer.", ma_result_from_HRESULT(hr)); + } + + /* Get the _actual_ properties of the buffer. 
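+ As a worked example of the calculation above: for 2 channels at 16 bits per sample the block
+ align is (2 * 16) / 8 = 4 bytes, and at 48000 Hz nAvgBytesPerSec is 4 * 48000 = 192000. If the
+ hardware cannot take the requested format, the GetFormat() call below is what reveals the
+ format the primary buffer was actually given.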
*/ + pActualFormat = (WAVEFORMATEXTENSIBLE*)rawdata; + hr = ma_IDirectSoundBuffer_GetFormat((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackPrimaryBuffer, (WAVEFORMATEX*)pActualFormat, sizeof(rawdata), NULL); + if (FAILED(hr)) { + ma_device_uninit__dsound(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to retrieve the actual format of the playback device's primary buffer.", ma_result_from_HRESULT(hr)); + } + + pDevice->playback.internalFormat = ma_format_from_WAVEFORMATEX((WAVEFORMATEX*)pActualFormat); + pDevice->playback.internalChannels = pActualFormat->Format.nChannels; + pDevice->playback.internalSampleRate = pActualFormat->Format.nSamplesPerSec; + + /* Get the internal channel map based on the channel mask. */ + if (pActualFormat->Format.wFormatTag == WAVE_FORMAT_EXTENSIBLE) { + ma_channel_mask_to_channel_map__win32(pActualFormat->dwChannelMask, pDevice->playback.internalChannels, pDevice->playback.internalChannelMap); + } else { + ma_channel_mask_to_channel_map__win32(wf.dwChannelMask, pDevice->playback.internalChannels, pDevice->playback.internalChannelMap); + } + + /* The size of the buffer must be a clean multiple of the period count. */ + periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(periodSizeInMilliseconds, pDevice->playback.internalSampleRate); + + /* + Meaning of dwFlags (from MSDN): + + DSBCAPS_CTRLPOSITIONNOTIFY + The buffer has position notification capability. + + DSBCAPS_GLOBALFOCUS + With this flag set, an application using DirectSound can continue to play its buffers if the user switches focus to + another application, even if the new application uses DirectSound. + + DSBCAPS_GETCURRENTPOSITION2 + In the first version of DirectSound, the play cursor was significantly ahead of the actual playing sound on emulated + sound cards; it was directly behind the write cursor. Now, if the DSBCAPS_GETCURRENTPOSITION2 flag is specified, the + application can get a more accurate play cursor. + */ + MA_ZERO_OBJECT(&descDS); + descDS.dwSize = sizeof(descDS); + descDS.dwFlags = MA_DSBCAPS_CTRLPOSITIONNOTIFY | MA_DSBCAPS_GLOBALFOCUS | MA_DSBCAPS_GETCURRENTPOSITION2; + descDS.dwBufferBytes = periodSizeInFrames * pConfig->periods * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + descDS.lpwfxFormat = (WAVEFORMATEX*)&wf; + hr = ma_IDirectSound_CreateSoundBuffer((ma_IDirectSound*)pDevice->dsound.pPlayback, &descDS, (ma_IDirectSoundBuffer**)&pDevice->dsound.pPlaybackBuffer, NULL); + if (FAILED(hr)) { + ma_device_uninit__dsound(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSound_CreateSoundBuffer() failed for playback device's secondary buffer.", ma_result_from_HRESULT(hr)); + } + + /* DirectSound should give us a buffer exactly the size we asked for. 
*/ + pDevice->playback.internalPeriodSizeInFrames = periodSizeInFrames; + pDevice->playback.internalPeriods = pConfig->periods; + } + + (void)pContext; + return MA_SUCCESS; +} + + +static ma_result ma_device_main_loop__dsound(ma_device* pDevice) +{ + ma_result result = MA_SUCCESS; + ma_uint32 bpfDeviceCapture = ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + ma_uint32 bpfDevicePlayback = ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + HRESULT hr; + DWORD lockOffsetInBytesCapture; + DWORD lockSizeInBytesCapture; + DWORD mappedSizeInBytesCapture; + DWORD mappedDeviceFramesProcessedCapture; + void* pMappedDeviceBufferCapture; + DWORD lockOffsetInBytesPlayback; + DWORD lockSizeInBytesPlayback; + DWORD mappedSizeInBytesPlayback; + void* pMappedDeviceBufferPlayback; + DWORD prevReadCursorInBytesCapture = 0; + DWORD prevPlayCursorInBytesPlayback = 0; + ma_bool32 physicalPlayCursorLoopFlagPlayback = 0; + DWORD virtualWriteCursorInBytesPlayback = 0; + ma_bool32 virtualWriteCursorLoopFlagPlayback = 0; + ma_bool32 isPlaybackDeviceStarted = MA_FALSE; + ma_uint32 framesWrittenToPlaybackDevice = 0; /* For knowing whether or not the playback device needs to be started. */ + ma_uint32 waitTimeInMilliseconds = 1; + + MA_ASSERT(pDevice != NULL); + + /* The first thing to do is start the capture device. The playback device is only started after the first period is written. */ + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + if (FAILED(ma_IDirectSoundCaptureBuffer_Start((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, MA_DSCBSTART_LOOPING))) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundCaptureBuffer_Start() failed.", MA_FAILED_TO_START_BACKEND_DEVICE); + } + } + + while (ma_device__get_state(pDevice) == MA_STATE_STARTED) { + switch (pDevice->type) + { + case ma_device_type_duplex: + { + DWORD physicalCaptureCursorInBytes; + DWORD physicalReadCursorInBytes; + hr = ma_IDirectSoundCaptureBuffer_GetCurrentPosition((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, &physicalCaptureCursorInBytes, &physicalReadCursorInBytes); + if (FAILED(hr)) { + return ma_result_from_HRESULT(hr); + } + + /* If nothing is available we just sleep for a bit and return from this iteration. */ + if (physicalReadCursorInBytes == prevReadCursorInBytesCapture) { + ma_sleep(waitTimeInMilliseconds); + continue; /* Nothing is available in the capture buffer. */ + } + + /* + The current position has moved. We need to map all of the captured samples and write them to the playback device, making sure + we don't return until every frame has been copied over. + */ + if (prevReadCursorInBytesCapture < physicalReadCursorInBytes) { + /* The capture position has not looped. This is the simple case. */ + lockOffsetInBytesCapture = prevReadCursorInBytesCapture; + lockSizeInBytesCapture = (physicalReadCursorInBytes - prevReadCursorInBytesCapture); + } else { + /* + The capture position has looped. This is the more complex case. Map to the end of the buffer. If this does not return anything, + do it again from the start. + */ + if (prevReadCursorInBytesCapture < pDevice->capture.internalPeriodSizeInFrames*pDevice->capture.internalPeriods*bpfDeviceCapture) { + /* Lock up to the end of the buffer. 
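+ For example, with a 34560 byte capture buffer (three 11520 byte periods), if the previous read
+ cursor was at 30000 and the new read cursor has wrapped around to 2000, this branch locks the
+ 4560 bytes from 30000 up to the end of the buffer; the 2000 bytes at the front are picked up
+ on the next pass, which locks from offset 0.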
*/ + lockOffsetInBytesCapture = prevReadCursorInBytesCapture; + lockSizeInBytesCapture = (pDevice->capture.internalPeriodSizeInFrames*pDevice->capture.internalPeriods*bpfDeviceCapture) - prevReadCursorInBytesCapture; + } else { + /* Lock starting from the start of the buffer. */ + lockOffsetInBytesCapture = 0; + lockSizeInBytesCapture = physicalReadCursorInBytes; + } + } + + if (lockSizeInBytesCapture == 0) { + ma_sleep(waitTimeInMilliseconds); + continue; /* Nothing is available in the capture buffer. */ + } + + hr = ma_IDirectSoundCaptureBuffer_Lock((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, lockOffsetInBytesCapture, lockSizeInBytesCapture, &pMappedDeviceBufferCapture, &mappedSizeInBytesCapture, NULL, NULL, 0); + if (FAILED(hr)) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to map buffer from capture device in preparation for writing to the device.", ma_result_from_HRESULT(hr)); + } + + + /* At this point we have some input data that we need to output. We do not return until every mapped frame of the input data is written to the playback device. */ + mappedDeviceFramesProcessedCapture = 0; + + for (;;) { /* Keep writing to the playback device. */ + ma_uint8 inputFramesInClientFormat[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 inputFramesInClientFormatCap = sizeof(inputFramesInClientFormat) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); + ma_uint8 outputFramesInClientFormat[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 outputFramesInClientFormatCap = sizeof(outputFramesInClientFormat) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); + ma_uint32 outputFramesInClientFormatCount; + ma_uint32 outputFramesInClientFormatConsumed = 0; + ma_uint64 clientCapturedFramesToProcess = ma_min(inputFramesInClientFormatCap, outputFramesInClientFormatCap); + ma_uint64 deviceCapturedFramesToProcess = (mappedSizeInBytesCapture / bpfDeviceCapture) - mappedDeviceFramesProcessedCapture; + void* pRunningMappedDeviceBufferCapture = ma_offset_ptr(pMappedDeviceBufferCapture, mappedDeviceFramesProcessedCapture * bpfDeviceCapture); + + result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningMappedDeviceBufferCapture, &deviceCapturedFramesToProcess, inputFramesInClientFormat, &clientCapturedFramesToProcess); + if (result != MA_SUCCESS) { + break; + } + + outputFramesInClientFormatCount = (ma_uint32)clientCapturedFramesToProcess; + mappedDeviceFramesProcessedCapture += (ma_uint32)deviceCapturedFramesToProcess; + + ma_device__on_data(pDevice, outputFramesInClientFormat, inputFramesInClientFormat, (ma_uint32)clientCapturedFramesToProcess); + + /* At this point we have input and output data in client format. All we need to do now is convert it to the output device format. This may take a few passes. */ + for (;;) { + ma_uint32 framesWrittenThisIteration; + DWORD physicalPlayCursorInBytes; + DWORD physicalWriteCursorInBytes; + DWORD availableBytesPlayback; + DWORD silentPaddingInBytes = 0; /* <-- Must be initialized to 0. */ + + /* We need the physical play and write cursors. 
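+ (In DirectSound terms the play cursor is where the device is currently reading from and the
+ write cursor runs a little ahead of it, marking the earliest position that is safe to write
+ to. The logic below only needs the play cursor, combined with our own virtual write cursor
+ and loop flags.)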
*/ + if (FAILED(ma_IDirectSoundBuffer_GetCurrentPosition((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, &physicalPlayCursorInBytes, &physicalWriteCursorInBytes))) { + break; + } + + if (physicalPlayCursorInBytes < prevPlayCursorInBytesPlayback) { + physicalPlayCursorLoopFlagPlayback = !physicalPlayCursorLoopFlagPlayback; + } + prevPlayCursorInBytesPlayback = physicalPlayCursorInBytes; + + /* If there's any bytes available for writing we can do that now. The space between the virtual cursor position and play cursor. */ + if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) { + /* Same loop iteration. The available bytes wraps all the way around from the virtual write cursor to the physical play cursor. */ + if (physicalPlayCursorInBytes <= virtualWriteCursorInBytesPlayback) { + availableBytesPlayback = (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) - virtualWriteCursorInBytesPlayback; + availableBytesPlayback += physicalPlayCursorInBytes; /* Wrap around. */ + } else { + /* This is an error. */ + #ifdef MA_DEBUG_OUTPUT + printf("[DirectSound] (Duplex/Playback) WARNING: Play cursor has moved in front of the write cursor (same loop iterations). physicalPlayCursorInBytes=%d, virtualWriteCursorInBytes=%d.\n", physicalPlayCursorInBytes, virtualWriteCursorInBytesPlayback); + #endif + availableBytesPlayback = 0; + } + } else { + /* Different loop iterations. The available bytes only goes from the virtual write cursor to the physical play cursor. */ + if (physicalPlayCursorInBytes >= virtualWriteCursorInBytesPlayback) { + availableBytesPlayback = physicalPlayCursorInBytes - virtualWriteCursorInBytesPlayback; + } else { + /* This is an error. */ + #ifdef MA_DEBUG_OUTPUT + printf("[DirectSound] (Duplex/Playback) WARNING: Write cursor has moved behind the play cursor (different loop iterations). physicalPlayCursorInBytes=%d, virtualWriteCursorInBytes=%d.\n", physicalPlayCursorInBytes, virtualWriteCursorInBytesPlayback); + #endif + availableBytesPlayback = 0; + } + } + + #ifdef MA_DEBUG_OUTPUT + /*printf("[DirectSound] (Duplex/Playback) physicalPlayCursorInBytes=%d, availableBytesPlayback=%d\n", physicalPlayCursorInBytes, availableBytesPlayback);*/ + #endif + + /* If there's no room available for writing we need to wait for more. */ + if (availableBytesPlayback == 0) { + /* If we haven't started the device yet, this will never get beyond 0. In this case we need to get the device started. */ + if (!isPlaybackDeviceStarted) { + hr = ma_IDirectSoundBuffer_Play((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, 0, 0, MA_DSBPLAY_LOOPING); + if (FAILED(hr)) { + ma_IDirectSoundCaptureBuffer_Stop((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundBuffer_Play() failed.", ma_result_from_HRESULT(hr)); + } + isPlaybackDeviceStarted = MA_TRUE; + } else { + ma_sleep(waitTimeInMilliseconds); + continue; + } + } + + + /* Getting here means there room available somewhere. We limit this to either the end of the buffer or the physical play cursor, whichever is closest. */ + lockOffsetInBytesPlayback = virtualWriteCursorInBytesPlayback; + if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) { + /* Same loop iteration. Go up to the end of the buffer. 
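+ For instance, with a 46080 byte ring (four 11520 byte periods), a virtual write cursor at
+ 40000 on the same lap as a play cursor at 8000 means this lock only covers the 6080 bytes up
+ to the end of the buffer; the 8000 bytes at the front only become lockable after the write
+ cursor wraps to 0 and the loop flags differ, at which point the lock runs up to the play
+ cursor.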
*/ + lockSizeInBytesPlayback = (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) - virtualWriteCursorInBytesPlayback; + } else { + /* Different loop iterations. Go up to the physical play cursor. */ + lockSizeInBytesPlayback = physicalPlayCursorInBytes - virtualWriteCursorInBytesPlayback; + } + + hr = ma_IDirectSoundBuffer_Lock((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, lockOffsetInBytesPlayback, lockSizeInBytesPlayback, &pMappedDeviceBufferPlayback, &mappedSizeInBytesPlayback, NULL, NULL, 0); + if (FAILED(hr)) { + result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to map buffer from playback device in preparation for writing to the device.", ma_result_from_HRESULT(hr)); + break; + } + + /* + Experiment: If the playback buffer is being starved, pad it with some silence to get it back in sync. This will cause a glitch, but it may prevent + endless glitching due to it constantly running out of data. + */ + if (isPlaybackDeviceStarted) { + DWORD bytesQueuedForPlayback = (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) - availableBytesPlayback; + if (bytesQueuedForPlayback < (pDevice->playback.internalPeriodSizeInFrames*bpfDevicePlayback)) { + silentPaddingInBytes = (pDevice->playback.internalPeriodSizeInFrames*2*bpfDevicePlayback) - bytesQueuedForPlayback; + if (silentPaddingInBytes > lockSizeInBytesPlayback) { + silentPaddingInBytes = lockSizeInBytesPlayback; + } + + #ifdef MA_DEBUG_OUTPUT + printf("[DirectSound] (Duplex/Playback) Playback buffer starved. availableBytesPlayback=%d, silentPaddingInBytes=%d\n", availableBytesPlayback, silentPaddingInBytes); + #endif + } + } + + /* At this point we have a buffer for output. */ + if (silentPaddingInBytes > 0) { + MA_ZERO_MEMORY(pMappedDeviceBufferPlayback, silentPaddingInBytes); + framesWrittenThisIteration = silentPaddingInBytes/bpfDevicePlayback; + } else { + ma_uint64 convertedFrameCountIn = (outputFramesInClientFormatCount - outputFramesInClientFormatConsumed); + ma_uint64 convertedFrameCountOut = mappedSizeInBytesPlayback/bpfDevicePlayback; + void* pConvertedFramesIn = ma_offset_ptr(outputFramesInClientFormat, outputFramesInClientFormatConsumed * bpfDevicePlayback); + void* pConvertedFramesOut = pMappedDeviceBufferPlayback; + + result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, pConvertedFramesIn, &convertedFrameCountIn, pConvertedFramesOut, &convertedFrameCountOut); + if (result != MA_SUCCESS) { + break; + } + + outputFramesInClientFormatConsumed += (ma_uint32)convertedFrameCountOut; + framesWrittenThisIteration = (ma_uint32)convertedFrameCountOut; + } + + + hr = ma_IDirectSoundBuffer_Unlock((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, pMappedDeviceBufferPlayback, framesWrittenThisIteration*bpfDevicePlayback, NULL, 0); + if (FAILED(hr)) { + result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to unlock internal buffer from playback device after writing to the device.", ma_result_from_HRESULT(hr)); + break; + } + + virtualWriteCursorInBytesPlayback += framesWrittenThisIteration*bpfDevicePlayback; + if ((virtualWriteCursorInBytesPlayback/bpfDevicePlayback) == pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods) { + virtualWriteCursorInBytesPlayback = 0; + virtualWriteCursorLoopFlagPlayback = !virtualWriteCursorLoopFlagPlayback; + } + + /* + We may need to start the device. 
We want two full periods to be written before starting the playback device. Having an extra period adds + a bit of a buffer to prevent the playback buffer from getting starved. + */ + framesWrittenToPlaybackDevice += framesWrittenThisIteration; + if (!isPlaybackDeviceStarted && framesWrittenToPlaybackDevice >= (pDevice->playback.internalPeriodSizeInFrames*2)) { + hr = ma_IDirectSoundBuffer_Play((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, 0, 0, MA_DSBPLAY_LOOPING); + if (FAILED(hr)) { + ma_IDirectSoundCaptureBuffer_Stop((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundBuffer_Play() failed.", ma_result_from_HRESULT(hr)); + } + isPlaybackDeviceStarted = MA_TRUE; + } + + if (framesWrittenThisIteration < mappedSizeInBytesPlayback/bpfDevicePlayback) { + break; /* We're finished with the output data.*/ + } + } + + if (clientCapturedFramesToProcess == 0) { + break; /* We just consumed every input sample. */ + } + } + + + /* At this point we're done with the mapped portion of the capture buffer. */ + hr = ma_IDirectSoundCaptureBuffer_Unlock((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, pMappedDeviceBufferCapture, mappedSizeInBytesCapture, NULL, 0); + if (FAILED(hr)) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to unlock internal buffer from capture device after reading from the device.", ma_result_from_HRESULT(hr)); + } + prevReadCursorInBytesCapture = (lockOffsetInBytesCapture + mappedSizeInBytesCapture); + } break; + + + + case ma_device_type_capture: + { + DWORD physicalCaptureCursorInBytes; + DWORD physicalReadCursorInBytes; + hr = ma_IDirectSoundCaptureBuffer_GetCurrentPosition((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, &physicalCaptureCursorInBytes, &physicalReadCursorInBytes); + if (FAILED(hr)) { + return MA_ERROR; + } + + /* If the previous capture position is the same as the current position we need to wait a bit longer. */ + if (prevReadCursorInBytesCapture == physicalReadCursorInBytes) { + ma_sleep(waitTimeInMilliseconds); + continue; + } + + /* Getting here means we have capture data available. */ + if (prevReadCursorInBytesCapture < physicalReadCursorInBytes) { + /* The capture position has not looped. This is the simple case. */ + lockOffsetInBytesCapture = prevReadCursorInBytesCapture; + lockSizeInBytesCapture = (physicalReadCursorInBytes - prevReadCursorInBytesCapture); + } else { + /* + The capture position has looped. This is the more complex case. Map to the end of the buffer. If this does not return anything, + do it again from the start. + */ + if (prevReadCursorInBytesCapture < pDevice->capture.internalPeriodSizeInFrames*pDevice->capture.internalPeriods*bpfDeviceCapture) { + /* Lock up to the end of the buffer. */ + lockOffsetInBytesCapture = prevReadCursorInBytesCapture; + lockSizeInBytesCapture = (pDevice->capture.internalPeriodSizeInFrames*pDevice->capture.internalPeriods*bpfDeviceCapture) - prevReadCursorInBytesCapture; + } else { + /* Lock starting from the start of the buffer. 
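+
+                In other words, once the hardware read cursor has wrapped, the outstanding data is consumed in
+                two locks: first from the previous read position to the end of the ring, then from offset 0 up
+                to the current read cursor. Roughly, with totalBytes, prevRead and readCursor standing in for
+                the expressions used above:
+
+                    if (prevRead < totalBytes) {
+                        lockOffset = prevRead;               // tail of the ring
+                        lockSize   = totalBytes - prevRead;
+                    } else {
+                        lockOffset = 0;                      // head of the ring, up to the hardware cursor
+                        lockSize   = readCursor;
+                    }
+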
*/ + lockOffsetInBytesCapture = 0; + lockSizeInBytesCapture = physicalReadCursorInBytes; + } + } + + #ifdef MA_DEBUG_OUTPUT + /*printf("[DirectSound] (Capture) physicalCaptureCursorInBytes=%d, physicalReadCursorInBytes=%d\n", physicalCaptureCursorInBytes, physicalReadCursorInBytes);*/ + /*printf("[DirectSound] (Capture) lockOffsetInBytesCapture=%d, lockSizeInBytesCapture=%d\n", lockOffsetInBytesCapture, lockSizeInBytesCapture);*/ + #endif + + if (lockSizeInBytesCapture < pDevice->capture.internalPeriodSizeInFrames) { + ma_sleep(waitTimeInMilliseconds); + continue; /* Nothing is available in the capture buffer. */ + } + + hr = ma_IDirectSoundCaptureBuffer_Lock((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, lockOffsetInBytesCapture, lockSizeInBytesCapture, &pMappedDeviceBufferCapture, &mappedSizeInBytesCapture, NULL, NULL, 0); + if (FAILED(hr)) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to map buffer from capture device in preparation for writing to the device.", ma_result_from_HRESULT(hr)); + } + + #ifdef MA_DEBUG_OUTPUT + if (lockSizeInBytesCapture != mappedSizeInBytesCapture) { + printf("[DirectSound] (Capture) lockSizeInBytesCapture=%d != mappedSizeInBytesCapture=%d\n", lockSizeInBytesCapture, mappedSizeInBytesCapture); + } + #endif + + ma_device__send_frames_to_client(pDevice, mappedSizeInBytesCapture/bpfDeviceCapture, pMappedDeviceBufferCapture); + + hr = ma_IDirectSoundCaptureBuffer_Unlock((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, pMappedDeviceBufferCapture, mappedSizeInBytesCapture, NULL, 0); + if (FAILED(hr)) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to unlock internal buffer from capture device after reading from the device.", ma_result_from_HRESULT(hr)); + } + prevReadCursorInBytesCapture = lockOffsetInBytesCapture + mappedSizeInBytesCapture; + + if (prevReadCursorInBytesCapture == (pDevice->capture.internalPeriodSizeInFrames*pDevice->capture.internalPeriods*bpfDeviceCapture)) { + prevReadCursorInBytesCapture = 0; + } + } break; + + + + case ma_device_type_playback: + { + DWORD availableBytesPlayback; + DWORD physicalPlayCursorInBytes; + DWORD physicalWriteCursorInBytes; + hr = ma_IDirectSoundBuffer_GetCurrentPosition((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, &physicalPlayCursorInBytes, &physicalWriteCursorInBytes); + if (FAILED(hr)) { + break; + } + + if (physicalPlayCursorInBytes < prevPlayCursorInBytesPlayback) { + physicalPlayCursorLoopFlagPlayback = !physicalPlayCursorLoopFlagPlayback; + } + prevPlayCursorInBytesPlayback = physicalPlayCursorInBytes; + + /* If there's any bytes available for writing we can do that now. The space between the virtual cursor position and play cursor. */ + if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) { + /* Same loop iteration. The available bytes wraps all the way around from the virtual write cursor to the physical play cursor. */ + if (physicalPlayCursorInBytes <= virtualWriteCursorInBytesPlayback) { + availableBytesPlayback = (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) - virtualWriteCursorInBytesPlayback; + availableBytesPlayback += physicalPlayCursorInBytes; /* Wrap around. */ + } else { + /* This is an error. */ + #ifdef MA_DEBUG_OUTPUT + printf("[DirectSound] (Playback) WARNING: Play cursor has moved in front of the write cursor (same loop iterations). 
physicalPlayCursorInBytes=%d, virtualWriteCursorInBytes=%d.\n", physicalPlayCursorInBytes, virtualWriteCursorInBytesPlayback); + #endif + availableBytesPlayback = 0; + } + } else { + /* Different loop iterations. The available bytes only goes from the virtual write cursor to the physical play cursor. */ + if (physicalPlayCursorInBytes >= virtualWriteCursorInBytesPlayback) { + availableBytesPlayback = physicalPlayCursorInBytes - virtualWriteCursorInBytesPlayback; + } else { + /* This is an error. */ + #ifdef MA_DEBUG_OUTPUT + printf("[DirectSound] (Playback) WARNING: Write cursor has moved behind the play cursor (different loop iterations). physicalPlayCursorInBytes=%d, virtualWriteCursorInBytes=%d.\n", physicalPlayCursorInBytes, virtualWriteCursorInBytesPlayback); + #endif + availableBytesPlayback = 0; + } + } + + #ifdef MA_DEBUG_OUTPUT + /*printf("[DirectSound] (Playback) physicalPlayCursorInBytes=%d, availableBytesPlayback=%d\n", physicalPlayCursorInBytes, availableBytesPlayback);*/ + #endif + + /* If there's no room available for writing we need to wait for more. */ + if (availableBytesPlayback < pDevice->playback.internalPeriodSizeInFrames) { + /* If we haven't started the device yet, this will never get beyond 0. In this case we need to get the device started. */ + if (availableBytesPlayback == 0 && !isPlaybackDeviceStarted) { + hr = ma_IDirectSoundBuffer_Play((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, 0, 0, MA_DSBPLAY_LOOPING); + if (FAILED(hr)) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundBuffer_Play() failed.", ma_result_from_HRESULT(hr)); + } + isPlaybackDeviceStarted = MA_TRUE; + } else { + ma_sleep(waitTimeInMilliseconds); + continue; + } + } + + /* Getting here means there room available somewhere. We limit this to either the end of the buffer or the physical play cursor, whichever is closest. */ + lockOffsetInBytesPlayback = virtualWriteCursorInBytesPlayback; + if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) { + /* Same loop iteration. Go up to the end of the buffer. */ + lockSizeInBytesPlayback = (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) - virtualWriteCursorInBytesPlayback; + } else { + /* Different loop iterations. Go up to the physical play cursor. */ + lockSizeInBytesPlayback = physicalPlayCursorInBytes - virtualWriteCursorInBytesPlayback; + } + + hr = ma_IDirectSoundBuffer_Lock((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, lockOffsetInBytesPlayback, lockSizeInBytesPlayback, &pMappedDeviceBufferPlayback, &mappedSizeInBytesPlayback, NULL, NULL, 0); + if (FAILED(hr)) { + result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to map buffer from playback device in preparation for writing to the device.", ma_result_from_HRESULT(hr)); + break; + } + + /* At this point we have a buffer for output. 
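+
+            For reference, the full submit cycle (lock, fill from the client, unlock, advance the virtual
+            write cursor, toggling the loop flag when the ring wraps) looks roughly like the following, with
+            pBuffer, offset, size, pMapped, mappedBytes and totalBytes standing in for the locals used below:
+
+                ma_IDirectSoundBuffer_Lock(pBuffer, offset, size, &pMapped, &mappedBytes, NULL, NULL, 0);
+                ma_device__read_frames_from_client(pDevice, mappedBytes/bpfDevicePlayback, pMapped);
+                ma_IDirectSoundBuffer_Unlock(pBuffer, pMapped, mappedBytes, NULL, 0);
+                virtualWriteCursorInBytesPlayback += mappedBytes;
+                if (virtualWriteCursorInBytesPlayback == totalBytes) {
+                    virtualWriteCursorInBytesPlayback = 0;
+                    virtualWriteCursorLoopFlagPlayback = !virtualWriteCursorLoopFlagPlayback;
+                }
+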
*/ + ma_device__read_frames_from_client(pDevice, (mappedSizeInBytesPlayback/bpfDevicePlayback), pMappedDeviceBufferPlayback); + + hr = ma_IDirectSoundBuffer_Unlock((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, pMappedDeviceBufferPlayback, mappedSizeInBytesPlayback, NULL, 0); + if (FAILED(hr)) { + result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to unlock internal buffer from playback device after writing to the device.", ma_result_from_HRESULT(hr)); + break; + } + + virtualWriteCursorInBytesPlayback += mappedSizeInBytesPlayback; + if (virtualWriteCursorInBytesPlayback == pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) { + virtualWriteCursorInBytesPlayback = 0; + virtualWriteCursorLoopFlagPlayback = !virtualWriteCursorLoopFlagPlayback; + } + + /* + We may need to start the device. We want two full periods to be written before starting the playback device. Having an extra period adds + a bit of a buffer to prevent the playback buffer from getting starved. + */ + framesWrittenToPlaybackDevice += mappedSizeInBytesPlayback/bpfDevicePlayback; + if (!isPlaybackDeviceStarted && framesWrittenToPlaybackDevice >= pDevice->playback.internalPeriodSizeInFrames) { + hr = ma_IDirectSoundBuffer_Play((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, 0, 0, MA_DSBPLAY_LOOPING); + if (FAILED(hr)) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundBuffer_Play() failed.", ma_result_from_HRESULT(hr)); + } + isPlaybackDeviceStarted = MA_TRUE; + } + } break; + + + default: return MA_INVALID_ARGS; /* Invalid device type. */ + } + + if (result != MA_SUCCESS) { + return result; + } + } + + /* Getting here means the device is being stopped. */ + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + hr = ma_IDirectSoundCaptureBuffer_Stop((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer); + if (FAILED(hr)) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundCaptureBuffer_Stop() failed.", ma_result_from_HRESULT(hr)); + } + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + /* The playback device should be drained before stopping. All we do is wait until the available bytes is equal to the size of the buffer. */ + if (isPlaybackDeviceStarted) { + for (;;) { + DWORD availableBytesPlayback = 0; + DWORD physicalPlayCursorInBytes; + DWORD physicalWriteCursorInBytes; + hr = ma_IDirectSoundBuffer_GetCurrentPosition((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, &physicalPlayCursorInBytes, &physicalWriteCursorInBytes); + if (FAILED(hr)) { + break; + } + + if (physicalPlayCursorInBytes < prevPlayCursorInBytesPlayback) { + physicalPlayCursorLoopFlagPlayback = !physicalPlayCursorLoopFlagPlayback; + } + prevPlayCursorInBytesPlayback = physicalPlayCursorInBytes; + + if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) { + /* Same loop iteration. The available bytes wraps all the way around from the virtual write cursor to the physical play cursor. */ + if (physicalPlayCursorInBytes <= virtualWriteCursorInBytesPlayback) { + availableBytesPlayback = (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) - virtualWriteCursorInBytesPlayback; + availableBytesPlayback += physicalPlayCursorInBytes; /* Wrap around. */ + } else { + break; + } + } else { + /* Different loop iterations. 
The available bytes only goes from the virtual write cursor to the physical play cursor. */ + if (physicalPlayCursorInBytes >= virtualWriteCursorInBytesPlayback) { + availableBytesPlayback = physicalPlayCursorInBytes - virtualWriteCursorInBytesPlayback; + } else { + break; + } + } + + if (availableBytesPlayback >= (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback)) { + break; + } + + ma_sleep(waitTimeInMilliseconds); + } + } + + hr = ma_IDirectSoundBuffer_Stop((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer); + if (FAILED(hr)) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundBuffer_Stop() failed.", ma_result_from_HRESULT(hr)); + } + + ma_IDirectSoundBuffer_SetCurrentPosition((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, 0); + } + + return MA_SUCCESS; +} + +static ma_result ma_context_uninit__dsound(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_dsound); + + ma_dlclose(pContext, pContext->dsound.hDSoundDLL); + + return MA_SUCCESS; +} + +static ma_result ma_context_init__dsound(const ma_context_config* pConfig, ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + + (void)pConfig; + + pContext->dsound.hDSoundDLL = ma_dlopen(pContext, "dsound.dll"); + if (pContext->dsound.hDSoundDLL == NULL) { + return MA_API_NOT_FOUND; + } + + pContext->dsound.DirectSoundCreate = ma_dlsym(pContext, pContext->dsound.hDSoundDLL, "DirectSoundCreate"); + pContext->dsound.DirectSoundEnumerateA = ma_dlsym(pContext, pContext->dsound.hDSoundDLL, "DirectSoundEnumerateA"); + pContext->dsound.DirectSoundCaptureCreate = ma_dlsym(pContext, pContext->dsound.hDSoundDLL, "DirectSoundCaptureCreate"); + pContext->dsound.DirectSoundCaptureEnumerateA = ma_dlsym(pContext, pContext->dsound.hDSoundDLL, "DirectSoundCaptureEnumerateA"); + + pContext->onUninit = ma_context_uninit__dsound; + pContext->onDeviceIDEqual = ma_context_is_device_id_equal__dsound; + pContext->onEnumDevices = ma_context_enumerate_devices__dsound; + pContext->onGetDeviceInfo = ma_context_get_device_info__dsound; + pContext->onDeviceInit = ma_device_init__dsound; + pContext->onDeviceUninit = ma_device_uninit__dsound; + pContext->onDeviceStart = NULL; /* Not used. Started in onDeviceMainLoop. */ + pContext->onDeviceStop = NULL; /* Not used. Stopped in onDeviceMainLoop. */ + pContext->onDeviceMainLoop = ma_device_main_loop__dsound; + + return MA_SUCCESS; +} +#endif + + + +/****************************************************************************** + +WinMM Backend + +******************************************************************************/ +#ifdef MA_HAS_WINMM + +/* +Some older compilers don't have WAVEOUTCAPS2A and WAVEINCAPS2A, so we'll need to write this ourselves. These structures +are exactly the same as the older ones but they have a few GUIDs for manufacturer/product/name identification. I'm keeping +the names the same as the Win32 library for consistency, but namespaced to avoid naming conflicts with the Win32 version. 
+*/ +typedef struct +{ + WORD wMid; + WORD wPid; + MMVERSION vDriverVersion; + CHAR szPname[MAXPNAMELEN]; + DWORD dwFormats; + WORD wChannels; + WORD wReserved1; + DWORD dwSupport; + GUID ManufacturerGuid; + GUID ProductGuid; + GUID NameGuid; +} MA_WAVEOUTCAPS2A; +typedef struct +{ + WORD wMid; + WORD wPid; + MMVERSION vDriverVersion; + CHAR szPname[MAXPNAMELEN]; + DWORD dwFormats; + WORD wChannels; + WORD wReserved1; + GUID ManufacturerGuid; + GUID ProductGuid; + GUID NameGuid; +} MA_WAVEINCAPS2A; + +typedef UINT (WINAPI * MA_PFN_waveOutGetNumDevs)(void); +typedef MMRESULT (WINAPI * MA_PFN_waveOutGetDevCapsA)(ma_uintptr uDeviceID, LPWAVEOUTCAPSA pwoc, UINT cbwoc); +typedef MMRESULT (WINAPI * MA_PFN_waveOutOpen)(LPHWAVEOUT phwo, UINT uDeviceID, LPCWAVEFORMATEX pwfx, DWORD_PTR dwCallback, DWORD_PTR dwInstance, DWORD fdwOpen); +typedef MMRESULT (WINAPI * MA_PFN_waveOutClose)(HWAVEOUT hwo); +typedef MMRESULT (WINAPI * MA_PFN_waveOutPrepareHeader)(HWAVEOUT hwo, LPWAVEHDR pwh, UINT cbwh); +typedef MMRESULT (WINAPI * MA_PFN_waveOutUnprepareHeader)(HWAVEOUT hwo, LPWAVEHDR pwh, UINT cbwh); +typedef MMRESULT (WINAPI * MA_PFN_waveOutWrite)(HWAVEOUT hwo, LPWAVEHDR pwh, UINT cbwh); +typedef MMRESULT (WINAPI * MA_PFN_waveOutReset)(HWAVEOUT hwo); +typedef UINT (WINAPI * MA_PFN_waveInGetNumDevs)(void); +typedef MMRESULT (WINAPI * MA_PFN_waveInGetDevCapsA)(ma_uintptr uDeviceID, LPWAVEINCAPSA pwic, UINT cbwic); +typedef MMRESULT (WINAPI * MA_PFN_waveInOpen)(LPHWAVEIN phwi, UINT uDeviceID, LPCWAVEFORMATEX pwfx, DWORD_PTR dwCallback, DWORD_PTR dwInstance, DWORD fdwOpen); +typedef MMRESULT (WINAPI * MA_PFN_waveInClose)(HWAVEIN hwi); +typedef MMRESULT (WINAPI * MA_PFN_waveInPrepareHeader)(HWAVEIN hwi, LPWAVEHDR pwh, UINT cbwh); +typedef MMRESULT (WINAPI * MA_PFN_waveInUnprepareHeader)(HWAVEIN hwi, LPWAVEHDR pwh, UINT cbwh); +typedef MMRESULT (WINAPI * MA_PFN_waveInAddBuffer)(HWAVEIN hwi, LPWAVEHDR pwh, UINT cbwh); +typedef MMRESULT (WINAPI * MA_PFN_waveInStart)(HWAVEIN hwi); +typedef MMRESULT (WINAPI * MA_PFN_waveInReset)(HWAVEIN hwi); + +static ma_result ma_result_from_MMRESULT(MMRESULT resultMM) +{ + switch (resultMM) { + case MMSYSERR_NOERROR: return MA_SUCCESS; + case MMSYSERR_BADDEVICEID: return MA_INVALID_ARGS; + case MMSYSERR_INVALHANDLE: return MA_INVALID_ARGS; + case MMSYSERR_NOMEM: return MA_OUT_OF_MEMORY; + case MMSYSERR_INVALFLAG: return MA_INVALID_ARGS; + case MMSYSERR_INVALPARAM: return MA_INVALID_ARGS; + case MMSYSERR_HANDLEBUSY: return MA_BUSY; + case MMSYSERR_ERROR: return MA_ERROR; + default: return MA_ERROR; + } +} + +static char* ma_find_last_character(char* str, char ch) +{ + char* last; + + if (str == NULL) { + return NULL; + } + + last = NULL; + while (*str != '\0') { + if (*str == ch) { + last = str; + } + + str += 1; + } + + return last; +} + +static ma_uint32 ma_get_period_size_in_bytes(ma_uint32 periodSizeInFrames, ma_format format, ma_uint32 channels) +{ + return periodSizeInFrames * ma_get_bytes_per_frame(format, channels); +} + + +/* +Our own "WAVECAPS" structure that contains generic information shared between WAVEOUTCAPS2 and WAVEINCAPS2 so +we can do things generically and typesafely. Names are being kept the same for consistency. 
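+
+Concretely, the helper functions further below reduce either extended caps structure to this common shape
+before building the device info, along these lines (pCaps2 here stands for either a MA_WAVEOUTCAPS2A or a
+MA_WAVEINCAPS2A, it is not a variable from this file):
+
+    MA_WAVECAPSA caps;
+    MA_COPY_MEMORY(caps.szPname, pCaps2->szPname, sizeof(caps.szPname));
+    caps.dwFormats = pCaps2->dwFormats;
+    caps.wChannels = pCaps2->wChannels;
+    caps.NameGuid  = pCaps2->NameGuid;
+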
+*/ +typedef struct +{ + CHAR szPname[MAXPNAMELEN]; + DWORD dwFormats; + WORD wChannels; + GUID NameGuid; +} MA_WAVECAPSA; + +static ma_result ma_get_best_info_from_formats_flags__winmm(DWORD dwFormats, WORD channels, WORD* pBitsPerSample, DWORD* pSampleRate) +{ + WORD bitsPerSample = 0; + DWORD sampleRate = 0; + + if (pBitsPerSample) { + *pBitsPerSample = 0; + } + if (pSampleRate) { + *pSampleRate = 0; + } + + if (channels == 1) { + bitsPerSample = 16; + if ((dwFormats & WAVE_FORMAT_48M16) != 0) { + sampleRate = 48000; + } else if ((dwFormats & WAVE_FORMAT_44M16) != 0) { + sampleRate = 44100; + } else if ((dwFormats & WAVE_FORMAT_2M16) != 0) { + sampleRate = 22050; + } else if ((dwFormats & WAVE_FORMAT_1M16) != 0) { + sampleRate = 11025; + } else if ((dwFormats & WAVE_FORMAT_96M16) != 0) { + sampleRate = 96000; + } else { + bitsPerSample = 8; + if ((dwFormats & WAVE_FORMAT_48M08) != 0) { + sampleRate = 48000; + } else if ((dwFormats & WAVE_FORMAT_44M08) != 0) { + sampleRate = 44100; + } else if ((dwFormats & WAVE_FORMAT_2M08) != 0) { + sampleRate = 22050; + } else if ((dwFormats & WAVE_FORMAT_1M08) != 0) { + sampleRate = 11025; + } else if ((dwFormats & WAVE_FORMAT_96M08) != 0) { + sampleRate = 96000; + } else { + return MA_FORMAT_NOT_SUPPORTED; + } + } + } else { + bitsPerSample = 16; + if ((dwFormats & WAVE_FORMAT_48S16) != 0) { + sampleRate = 48000; + } else if ((dwFormats & WAVE_FORMAT_44S16) != 0) { + sampleRate = 44100; + } else if ((dwFormats & WAVE_FORMAT_2S16) != 0) { + sampleRate = 22050; + } else if ((dwFormats & WAVE_FORMAT_1S16) != 0) { + sampleRate = 11025; + } else if ((dwFormats & WAVE_FORMAT_96S16) != 0) { + sampleRate = 96000; + } else { + bitsPerSample = 8; + if ((dwFormats & WAVE_FORMAT_48S08) != 0) { + sampleRate = 48000; + } else if ((dwFormats & WAVE_FORMAT_44S08) != 0) { + sampleRate = 44100; + } else if ((dwFormats & WAVE_FORMAT_2S08) != 0) { + sampleRate = 22050; + } else if ((dwFormats & WAVE_FORMAT_1S08) != 0) { + sampleRate = 11025; + } else if ((dwFormats & WAVE_FORMAT_96S08) != 0) { + sampleRate = 96000; + } else { + return MA_FORMAT_NOT_SUPPORTED; + } + } + } + + if (pBitsPerSample) { + *pBitsPerSample = bitsPerSample; + } + if (pSampleRate) { + *pSampleRate = sampleRate; + } + + return MA_SUCCESS; +} + +static ma_result ma_formats_flags_to_WAVEFORMATEX__winmm(DWORD dwFormats, WORD channels, WAVEFORMATEX* pWF) +{ + MA_ASSERT(pWF != NULL); + + MA_ZERO_OBJECT(pWF); + pWF->cbSize = sizeof(*pWF); + pWF->wFormatTag = WAVE_FORMAT_PCM; + pWF->nChannels = (WORD)channels; + if (pWF->nChannels > 2) { + pWF->nChannels = 2; + } + + if (channels == 1) { + pWF->wBitsPerSample = 16; + if ((dwFormats & WAVE_FORMAT_48M16) != 0) { + pWF->nSamplesPerSec = 48000; + } else if ((dwFormats & WAVE_FORMAT_44M16) != 0) { + pWF->nSamplesPerSec = 44100; + } else if ((dwFormats & WAVE_FORMAT_2M16) != 0) { + pWF->nSamplesPerSec = 22050; + } else if ((dwFormats & WAVE_FORMAT_1M16) != 0) { + pWF->nSamplesPerSec = 11025; + } else if ((dwFormats & WAVE_FORMAT_96M16) != 0) { + pWF->nSamplesPerSec = 96000; + } else { + pWF->wBitsPerSample = 8; + if ((dwFormats & WAVE_FORMAT_48M08) != 0) { + pWF->nSamplesPerSec = 48000; + } else if ((dwFormats & WAVE_FORMAT_44M08) != 0) { + pWF->nSamplesPerSec = 44100; + } else if ((dwFormats & WAVE_FORMAT_2M08) != 0) { + pWF->nSamplesPerSec = 22050; + } else if ((dwFormats & WAVE_FORMAT_1M08) != 0) { + pWF->nSamplesPerSec = 11025; + } else if ((dwFormats & WAVE_FORMAT_96M08) != 0) { + pWF->nSamplesPerSec = 96000; + } else { + return MA_FORMAT_NOT_SUPPORTED; + 
            }
+        }
+    } else {
+        pWF->wBitsPerSample = 16;
+        if ((dwFormats & WAVE_FORMAT_48S16) != 0) {
+            pWF->nSamplesPerSec = 48000;
+        } else if ((dwFormats & WAVE_FORMAT_44S16) != 0) {
+            pWF->nSamplesPerSec = 44100;
+        } else if ((dwFormats & WAVE_FORMAT_2S16) != 0) {
+            pWF->nSamplesPerSec = 22050;
+        } else if ((dwFormats & WAVE_FORMAT_1S16) != 0) {
+            pWF->nSamplesPerSec = 11025;
+        } else if ((dwFormats & WAVE_FORMAT_96S16) != 0) {
+            pWF->nSamplesPerSec = 96000;
+        } else {
+            pWF->wBitsPerSample = 8;
+            if ((dwFormats & WAVE_FORMAT_48S08) != 0) {
+                pWF->nSamplesPerSec = 48000;
+            } else if ((dwFormats & WAVE_FORMAT_44S08) != 0) {
+                pWF->nSamplesPerSec = 44100;
+            } else if ((dwFormats & WAVE_FORMAT_2S08) != 0) {
+                pWF->nSamplesPerSec = 22050;
+            } else if ((dwFormats & WAVE_FORMAT_1S08) != 0) {
+                pWF->nSamplesPerSec = 11025;
+            } else if ((dwFormats & WAVE_FORMAT_96S08) != 0) {
+                pWF->nSamplesPerSec = 96000;
+            } else {
+                return MA_FORMAT_NOT_SUPPORTED;
+            }
+        }
+    }
+
+    pWF->nBlockAlign = (pWF->nChannels * pWF->wBitsPerSample) / 8;
+    pWF->nAvgBytesPerSec = pWF->nBlockAlign * pWF->nSamplesPerSec;
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_context_get_device_info_from_WAVECAPS(ma_context* pContext, MA_WAVECAPSA* pCaps, ma_device_info* pDeviceInfo)
+{
+    WORD bitsPerSample;
+    DWORD sampleRate;
+    ma_result result;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pCaps != NULL);
+    MA_ASSERT(pDeviceInfo != NULL);
+
+    /*
+    Name / Description
+
+    Unfortunately the name specified in WAVE(OUT/IN)CAPS2 is limited to 31 characters. This results in an unprofessional looking
+    situation where the names of the devices are truncated. To help work around this, we need to look at the name GUID and try
+    looking in the registry for the full name. If we can't find it there, we need to just fall back to the default name.
+    */
+
+    /* Set the default to begin with. */
+    ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), pCaps->szPname, (size_t)-1);
+
+    /*
+    Now try the registry. There's a few things to consider here:
+    - The name GUID can be null, in which case we just need to stick to the original 31 characters.
+    - If the name GUID is not present in the registry we'll also need to stick to the original 31 characters.
+    - I like consistency, so I want the returned device names to be consistent with those returned by WASAPI and DirectSound. The
+      problem, however, is that WASAPI and DirectSound use "<component> (<name>)" format (such as "Speakers (High Definition Audio)"),
+      but WinMM does not specify the component name. From my admittedly limited testing, I've noticed the component name seems to
+      usually fit within the 31 characters of the fixed-size buffer, so what I'm going to do is parse that string for the component
+      name, and then concatenate the name from the registry.
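+
+    Ignoring the dynamic symbol loading used below, the registry lookup itself amounts to roughly the
+    following (keyStr is built as "SYSTEM\\CurrentControlSet\\Control\\MediaCategories\\" followed by the
+    name GUID in string form):
+
+        HKEY hKey;
+        if (RegOpenKeyExA(HKEY_LOCAL_MACHINE, keyStr, 0, KEY_READ, &hKey) == ERROR_SUCCESS) {
+            BYTE  nameFromReg[512];
+            DWORD nameFromRegSize = sizeof(nameFromReg);
+            if (RegQueryValueExA(hKey, "Name", 0, NULL, nameFromReg, &nameFromRegSize) == ERROR_SUCCESS) {
+                // nameFromReg now holds the friendly name; it gets spliced in after the "(" of the default name.
+            }
+            RegCloseKey(hKey);
+        }
+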
+ */ + if (!ma_is_guid_equal(&pCaps->NameGuid, &MA_GUID_NULL)) { + wchar_t guidStrW[256]; + if (((MA_PFN_StringFromGUID2)pContext->win32.StringFromGUID2)(&pCaps->NameGuid, guidStrW, ma_countof(guidStrW)) > 0) { + char guidStr[256]; + char keyStr[1024]; + HKEY hKey; + + WideCharToMultiByte(CP_UTF8, 0, guidStrW, -1, guidStr, sizeof(guidStr), 0, FALSE); + + ma_strcpy_s(keyStr, sizeof(keyStr), "SYSTEM\\CurrentControlSet\\Control\\MediaCategories\\"); + ma_strcat_s(keyStr, sizeof(keyStr), guidStr); + + if (((MA_PFN_RegOpenKeyExA)pContext->win32.RegOpenKeyExA)(HKEY_LOCAL_MACHINE, keyStr, 0, KEY_READ, &hKey) == ERROR_SUCCESS) { + BYTE nameFromReg[512]; + DWORD nameFromRegSize = sizeof(nameFromReg); + result = ((MA_PFN_RegQueryValueExA)pContext->win32.RegQueryValueExA)(hKey, "Name", 0, NULL, (LPBYTE)nameFromReg, (LPDWORD)&nameFromRegSize); + ((MA_PFN_RegCloseKey)pContext->win32.RegCloseKey)(hKey); + + if (result == ERROR_SUCCESS) { + /* We have the value from the registry, so now we need to construct the name string. */ + char name[1024]; + if (ma_strcpy_s(name, sizeof(name), pDeviceInfo->name) == 0) { + char* nameBeg = ma_find_last_character(name, '('); + if (nameBeg != NULL) { + size_t leadingLen = (nameBeg - name); + ma_strncpy_s(nameBeg + 1, sizeof(name) - leadingLen, (const char*)nameFromReg, (size_t)-1); + + /* The closing ")", if it can fit. */ + if (leadingLen + nameFromRegSize < sizeof(name)-1) { + ma_strcat_s(name, sizeof(name), ")"); + } + + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), name, (size_t)-1); + } + } + } + } + } + } + + + result = ma_get_best_info_from_formats_flags__winmm(pCaps->dwFormats, pCaps->wChannels, &bitsPerSample, &sampleRate); + if (result != MA_SUCCESS) { + return result; + } + + pDeviceInfo->minChannels = pCaps->wChannels; + pDeviceInfo->maxChannels = pCaps->wChannels; + pDeviceInfo->minSampleRate = sampleRate; + pDeviceInfo->maxSampleRate = sampleRate; + pDeviceInfo->formatCount = 1; + if (bitsPerSample == 8) { + pDeviceInfo->formats[0] = ma_format_u8; + } else if (bitsPerSample == 16) { + pDeviceInfo->formats[0] = ma_format_s16; + } else if (bitsPerSample == 24) { + pDeviceInfo->formats[0] = ma_format_s24; + } else if (bitsPerSample == 32) { + pDeviceInfo->formats[0] = ma_format_s32; + } else { + return MA_FORMAT_NOT_SUPPORTED; + } + + return MA_SUCCESS; +} + +static ma_result ma_context_get_device_info_from_WAVEOUTCAPS2(ma_context* pContext, MA_WAVEOUTCAPS2A* pCaps, ma_device_info* pDeviceInfo) +{ + MA_WAVECAPSA caps; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pCaps != NULL); + MA_ASSERT(pDeviceInfo != NULL); + + MA_COPY_MEMORY(caps.szPname, pCaps->szPname, sizeof(caps.szPname)); + caps.dwFormats = pCaps->dwFormats; + caps.wChannels = pCaps->wChannels; + caps.NameGuid = pCaps->NameGuid; + return ma_context_get_device_info_from_WAVECAPS(pContext, &caps, pDeviceInfo); +} + +static ma_result ma_context_get_device_info_from_WAVEINCAPS2(ma_context* pContext, MA_WAVEINCAPS2A* pCaps, ma_device_info* pDeviceInfo) +{ + MA_WAVECAPSA caps; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pCaps != NULL); + MA_ASSERT(pDeviceInfo != NULL); + + MA_COPY_MEMORY(caps.szPname, pCaps->szPname, sizeof(caps.szPname)); + caps.dwFormats = pCaps->dwFormats; + caps.wChannels = pCaps->wChannels; + caps.NameGuid = pCaps->NameGuid; + return ma_context_get_device_info_from_WAVECAPS(pContext, &caps, pDeviceInfo); +} + + +static ma_bool32 ma_context_is_device_id_equal__winmm(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) +{ + MA_ASSERT(pContext != 
NULL); + MA_ASSERT(pID0 != NULL); + MA_ASSERT(pID1 != NULL); + (void)pContext; + + return pID0->winmm == pID1->winmm; +} + +static ma_result ma_context_enumerate_devices__winmm(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + UINT playbackDeviceCount; + UINT captureDeviceCount; + UINT iPlaybackDevice; + UINT iCaptureDevice; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + /* Playback. */ + playbackDeviceCount = ((MA_PFN_waveOutGetNumDevs)pContext->winmm.waveOutGetNumDevs)(); + for (iPlaybackDevice = 0; iPlaybackDevice < playbackDeviceCount; ++iPlaybackDevice) { + MMRESULT result; + MA_WAVEOUTCAPS2A caps; + + MA_ZERO_OBJECT(&caps); + + result = ((MA_PFN_waveOutGetDevCapsA)pContext->winmm.waveOutGetDevCapsA)(iPlaybackDevice, (WAVEOUTCAPSA*)&caps, sizeof(caps)); + if (result == MMSYSERR_NOERROR) { + ma_device_info deviceInfo; + + MA_ZERO_OBJECT(&deviceInfo); + deviceInfo.id.winmm = iPlaybackDevice; + + if (ma_context_get_device_info_from_WAVEOUTCAPS2(pContext, &caps, &deviceInfo) == MA_SUCCESS) { + ma_bool32 cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + if (cbResult == MA_FALSE) { + return MA_SUCCESS; /* Enumeration was stopped. */ + } + } + } + } + + /* Capture. */ + captureDeviceCount = ((MA_PFN_waveInGetNumDevs)pContext->winmm.waveInGetNumDevs)(); + for (iCaptureDevice = 0; iCaptureDevice < captureDeviceCount; ++iCaptureDevice) { + MMRESULT result; + MA_WAVEINCAPS2A caps; + + MA_ZERO_OBJECT(&caps); + + result = ((MA_PFN_waveInGetDevCapsA)pContext->winmm.waveInGetDevCapsA)(iCaptureDevice, (WAVEINCAPSA*)&caps, sizeof(caps)); + if (result == MMSYSERR_NOERROR) { + ma_device_info deviceInfo; + + MA_ZERO_OBJECT(&deviceInfo); + deviceInfo.id.winmm = iCaptureDevice; + + if (ma_context_get_device_info_from_WAVEINCAPS2(pContext, &caps, &deviceInfo) == MA_SUCCESS) { + ma_bool32 cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + if (cbResult == MA_FALSE) { + return MA_SUCCESS; /* Enumeration was stopped. 
*/ + } + } + } + } + + return MA_SUCCESS; +} + +static ma_result ma_context_get_device_info__winmm(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) +{ + UINT winMMDeviceID; + + MA_ASSERT(pContext != NULL); + + if (shareMode == ma_share_mode_exclusive) { + return MA_SHARE_MODE_NOT_SUPPORTED; + } + + winMMDeviceID = 0; + if (pDeviceID != NULL) { + winMMDeviceID = (UINT)pDeviceID->winmm; + } + + pDeviceInfo->id.winmm = winMMDeviceID; + + if (deviceType == ma_device_type_playback) { + MMRESULT result; + MA_WAVEOUTCAPS2A caps; + + MA_ZERO_OBJECT(&caps); + + result = ((MA_PFN_waveOutGetDevCapsA)pContext->winmm.waveOutGetDevCapsA)(winMMDeviceID, (WAVEOUTCAPSA*)&caps, sizeof(caps)); + if (result == MMSYSERR_NOERROR) { + return ma_context_get_device_info_from_WAVEOUTCAPS2(pContext, &caps, pDeviceInfo); + } + } else { + MMRESULT result; + MA_WAVEINCAPS2A caps; + + MA_ZERO_OBJECT(&caps); + + result = ((MA_PFN_waveInGetDevCapsA)pContext->winmm.waveInGetDevCapsA)(winMMDeviceID, (WAVEINCAPSA*)&caps, sizeof(caps)); + if (result == MMSYSERR_NOERROR) { + return ma_context_get_device_info_from_WAVEINCAPS2(pContext, &caps, pDeviceInfo); + } + } + + return MA_NO_DEVICE; +} + + +static void ma_device_uninit__winmm(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + ((MA_PFN_waveInClose)pDevice->pContext->winmm.waveInClose)((HWAVEIN)pDevice->winmm.hDeviceCapture); + CloseHandle((HANDLE)pDevice->winmm.hEventCapture); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ((MA_PFN_waveOutReset)pDevice->pContext->winmm.waveOutReset)((HWAVEOUT)pDevice->winmm.hDevicePlayback); + ((MA_PFN_waveOutClose)pDevice->pContext->winmm.waveOutClose)((HWAVEOUT)pDevice->winmm.hDevicePlayback); + CloseHandle((HANDLE)pDevice->winmm.hEventPlayback); + } + + ma__free_from_callbacks(pDevice->winmm._pHeapData, &pDevice->pContext->allocationCallbacks); + + MA_ZERO_OBJECT(&pDevice->winmm); /* Safety. */ +} + +static ma_result ma_device_init__winmm(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) +{ + const char* errorMsg = ""; + ma_result errorCode = MA_ERROR; + ma_result result = MA_SUCCESS; + ma_uint32 heapSize; + UINT winMMDeviceIDPlayback = 0; + UINT winMMDeviceIDCapture = 0; + ma_uint32 periodSizeInMilliseconds; + + MA_ASSERT(pDevice != NULL); + MA_ZERO_OBJECT(&pDevice->winmm); + + if (pConfig->deviceType == ma_device_type_loopback) { + return MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + /* No exlusive mode with WinMM. */ + if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.shareMode == ma_share_mode_exclusive) || + ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.shareMode == ma_share_mode_exclusive)) { + return MA_SHARE_MODE_NOT_SUPPORTED; + } + + periodSizeInMilliseconds = pConfig->periodSizeInMilliseconds; + if (periodSizeInMilliseconds == 0) { + periodSizeInMilliseconds = ma_calculate_buffer_size_in_milliseconds_from_frames(pConfig->periodSizeInFrames, pConfig->sampleRate); + } + + /* WinMM has horrible latency. 
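+
+    When the user has not asked for a specific buffer size, the defaults chosen below are therefore large
+    (40 ms in low-latency mode, 400 ms otherwise). The millisecond value is later converted to frames
+    against the device's native rate, which boils down to:
+
+        periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(periodSizeInMilliseconds, sampleRate);
+        // roughly (periodSizeInMilliseconds * sampleRate) / 1000, e.g. 400 ms at 48000 Hz comes to 19200 frames.
+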
*/ + if (pDevice->usingDefaultBufferSize) { + if (pConfig->performanceProfile == ma_performance_profile_low_latency) { + periodSizeInMilliseconds = 40; + } else { + periodSizeInMilliseconds = 400; + } + } + + + if (pConfig->playback.pDeviceID != NULL) { + winMMDeviceIDPlayback = (UINT)pConfig->playback.pDeviceID->winmm; + } + if (pConfig->capture.pDeviceID != NULL) { + winMMDeviceIDCapture = (UINT)pConfig->capture.pDeviceID->winmm; + } + + /* The capture device needs to be initialized first. */ + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + WAVEINCAPSA caps; + WAVEFORMATEX wf; + MMRESULT resultMM; + + /* We use an event to know when a new fragment needs to be enqueued. */ + pDevice->winmm.hEventCapture = (ma_handle)CreateEventW(NULL, TRUE, TRUE, NULL); + if (pDevice->winmm.hEventCapture == NULL) { + errorMsg = "[WinMM] Failed to create event for fragment enqueing for the capture device.", errorCode = ma_result_from_GetLastError(GetLastError()); + goto on_error; + } + + /* The format should be based on the device's actual format. */ + if (((MA_PFN_waveInGetDevCapsA)pContext->winmm.waveInGetDevCapsA)(winMMDeviceIDCapture, &caps, sizeof(caps)) != MMSYSERR_NOERROR) { + errorMsg = "[WinMM] Failed to retrieve internal device caps.", errorCode = MA_FORMAT_NOT_SUPPORTED; + goto on_error; + } + + result = ma_formats_flags_to_WAVEFORMATEX__winmm(caps.dwFormats, caps.wChannels, &wf); + if (result != MA_SUCCESS) { + errorMsg = "[WinMM] Could not find appropriate format for internal device.", errorCode = result; + goto on_error; + } + + resultMM = ((MA_PFN_waveInOpen)pDevice->pContext->winmm.waveInOpen)((LPHWAVEIN)&pDevice->winmm.hDeviceCapture, winMMDeviceIDCapture, &wf, (DWORD_PTR)pDevice->winmm.hEventCapture, (DWORD_PTR)pDevice, CALLBACK_EVENT | WAVE_ALLOWSYNC); + if (resultMM != MMSYSERR_NOERROR) { + errorMsg = "[WinMM] Failed to open capture device.", errorCode = MA_FAILED_TO_OPEN_BACKEND_DEVICE; + goto on_error; + } + + pDevice->capture.internalFormat = ma_format_from_WAVEFORMATEX(&wf); + pDevice->capture.internalChannels = wf.nChannels; + pDevice->capture.internalSampleRate = wf.nSamplesPerSec; + ma_get_standard_channel_map(ma_standard_channel_map_microsoft, pDevice->capture.internalChannels, pDevice->capture.internalChannelMap); + pDevice->capture.internalPeriods = pConfig->periods; + pDevice->capture.internalPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(periodSizeInMilliseconds, pDevice->capture.internalSampleRate); + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + WAVEOUTCAPSA caps; + WAVEFORMATEX wf; + MMRESULT resultMM; + + /* We use an event to know when a new fragment needs to be enqueued. */ + pDevice->winmm.hEventPlayback = (ma_handle)CreateEvent(NULL, TRUE, TRUE, NULL); + if (pDevice->winmm.hEventPlayback == NULL) { + errorMsg = "[WinMM] Failed to create event for fragment enqueing for the playback device.", errorCode = ma_result_from_GetLastError(GetLastError()); + goto on_error; + } + + /* The format should be based on the device's actual format. 
*/ + if (((MA_PFN_waveOutGetDevCapsA)pContext->winmm.waveOutGetDevCapsA)(winMMDeviceIDPlayback, &caps, sizeof(caps)) != MMSYSERR_NOERROR) { + errorMsg = "[WinMM] Failed to retrieve internal device caps.", errorCode = MA_FORMAT_NOT_SUPPORTED; + goto on_error; + } + + result = ma_formats_flags_to_WAVEFORMATEX__winmm(caps.dwFormats, caps.wChannels, &wf); + if (result != MA_SUCCESS) { + errorMsg = "[WinMM] Could not find appropriate format for internal device.", errorCode = result; + goto on_error; + } + + resultMM = ((MA_PFN_waveOutOpen)pContext->winmm.waveOutOpen)((LPHWAVEOUT)&pDevice->winmm.hDevicePlayback, winMMDeviceIDPlayback, &wf, (DWORD_PTR)pDevice->winmm.hEventPlayback, (DWORD_PTR)pDevice, CALLBACK_EVENT | WAVE_ALLOWSYNC); + if (resultMM != MMSYSERR_NOERROR) { + errorMsg = "[WinMM] Failed to open playback device.", errorCode = MA_FAILED_TO_OPEN_BACKEND_DEVICE; + goto on_error; + } + + pDevice->playback.internalFormat = ma_format_from_WAVEFORMATEX(&wf); + pDevice->playback.internalChannels = wf.nChannels; + pDevice->playback.internalSampleRate = wf.nSamplesPerSec; + ma_get_standard_channel_map(ma_standard_channel_map_microsoft, pDevice->playback.internalChannels, pDevice->playback.internalChannelMap); + pDevice->playback.internalPeriods = pConfig->periods; + pDevice->playback.internalPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(periodSizeInMilliseconds, pDevice->playback.internalSampleRate); + } + + /* + The heap allocated data is allocated like so: + + [Capture WAVEHDRs][Playback WAVEHDRs][Capture Intermediary Buffer][Playback Intermediary Buffer] + */ + heapSize = 0; + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + heapSize += sizeof(WAVEHDR)*pDevice->capture.internalPeriods + (pDevice->capture.internalPeriodSizeInFrames*pDevice->capture.internalPeriods*ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels)); + } + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + heapSize += sizeof(WAVEHDR)*pDevice->playback.internalPeriods + (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels)); + } + + pDevice->winmm._pHeapData = (ma_uint8*)ma__calloc_from_callbacks(heapSize, &pContext->allocationCallbacks); + if (pDevice->winmm._pHeapData == NULL) { + errorMsg = "[WinMM] Failed to allocate memory for the intermediary buffer.", errorCode = MA_OUT_OF_MEMORY; + goto on_error; + } + + MA_ZERO_MEMORY(pDevice->winmm._pHeapData, heapSize); + + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + ma_uint32 iPeriod; + + if (pConfig->deviceType == ma_device_type_capture) { + pDevice->winmm.pWAVEHDRCapture = pDevice->winmm._pHeapData; + pDevice->winmm.pIntermediaryBufferCapture = pDevice->winmm._pHeapData + (sizeof(WAVEHDR)*(pDevice->capture.internalPeriods)); + } else { + pDevice->winmm.pWAVEHDRCapture = pDevice->winmm._pHeapData; + pDevice->winmm.pIntermediaryBufferCapture = pDevice->winmm._pHeapData + (sizeof(WAVEHDR)*(pDevice->capture.internalPeriods + pDevice->playback.internalPeriods)); + } + + /* Prepare headers. 
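+
+        (For the duplex case the pointers being set up here follow the heap layout described above, i.e. in
+        byte offsets from the start of _pHeapData, with base standing for _pHeapData and capPeriods,
+        playPeriods and capPeriodBytes standing in for the longer expressions used in the code:
+
+            pWAVEHDRCapture             = base
+            pWAVEHDRPlayback            = base + sizeof(WAVEHDR) * capPeriods
+            pIntermediaryBufferCapture  = base + sizeof(WAVEHDR) * (capPeriods + playPeriods)
+            pIntermediaryBufferPlayback = base + sizeof(WAVEHDR) * (capPeriods + playPeriods) + capPeriods * capPeriodBytes
+
+        In the single-direction cases the unused halves simply collapse out of the layout.)
+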
*/
+            for (iPeriod = 0; iPeriod < pDevice->capture.internalPeriods; ++iPeriod) {
+                ma_uint32 periodSizeInBytes = ma_get_period_size_in_bytes(pDevice->capture.internalPeriodSizeInFrames, pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+
+                ((WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod].lpData = (LPSTR)(pDevice->winmm.pIntermediaryBufferCapture + (periodSizeInBytes*iPeriod));
+                ((WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod].dwBufferLength = periodSizeInBytes;
+                ((WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod].dwFlags = 0L;
+                ((WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod].dwLoops = 0L;
+                ((MA_PFN_waveInPrepareHeader)pContext->winmm.waveInPrepareHeader)((HWAVEIN)pDevice->winmm.hDeviceCapture, &((WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod], sizeof(WAVEHDR));
+
+                /*
+                The user data of the WAVEHDR structure is a single flag that controls whether or not it is ready for writing. Consider it to be named "isLocked". A value of 0 means
+                it's unlocked and available for writing. A value of 1 means it's locked.
+                */
+                ((WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod].dwUser = 0;
+            }
+        }
+        if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+            ma_uint32 iPeriod;
+
+            if (pConfig->deviceType == ma_device_type_playback) {
+                pDevice->winmm.pWAVEHDRPlayback = pDevice->winmm._pHeapData;
+                pDevice->winmm.pIntermediaryBufferPlayback = pDevice->winmm._pHeapData + (sizeof(WAVEHDR)*pDevice->playback.internalPeriods);
+            } else {
+                pDevice->winmm.pWAVEHDRPlayback = pDevice->winmm._pHeapData + (sizeof(WAVEHDR)*(pDevice->capture.internalPeriods));
+                pDevice->winmm.pIntermediaryBufferPlayback = pDevice->winmm._pHeapData + (sizeof(WAVEHDR)*(pDevice->capture.internalPeriods + pDevice->playback.internalPeriods)) + (pDevice->capture.internalPeriodSizeInFrames*pDevice->capture.internalPeriods*ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels));
+            }
+
+            /* Prepare headers. */
+            for (iPeriod = 0; iPeriod < pDevice->playback.internalPeriods; ++iPeriod) {
+                ma_uint32 periodSizeInBytes = ma_get_period_size_in_bytes(pDevice->playback.internalPeriodSizeInFrames, pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+
+                ((WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod].lpData = (LPSTR)(pDevice->winmm.pIntermediaryBufferPlayback + (periodSizeInBytes*iPeriod));
+                ((WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod].dwBufferLength = periodSizeInBytes;
+                ((WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod].dwFlags = 0L;
+                ((WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod].dwLoops = 0L;
+                ((MA_PFN_waveOutPrepareHeader)pContext->winmm.waveOutPrepareHeader)((HWAVEOUT)pDevice->winmm.hDevicePlayback, &((WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod], sizeof(WAVEHDR));
+
+                /*
+                The user data of the WAVEHDR structure is a single flag that controls whether or not it is ready for writing. Consider it to be named "isLocked". A value of 0 means
+                it's unlocked and available for writing. A value of 1 means it's locked.
+ */ + ((WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod].dwUser = 0; + } + } + + return MA_SUCCESS; + +on_error: + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + if (pDevice->winmm.pWAVEHDRCapture != NULL) { + ma_uint32 iPeriod; + for (iPeriod = 0; iPeriod < pDevice->capture.internalPeriods; ++iPeriod) { + ((MA_PFN_waveInUnprepareHeader)pContext->winmm.waveInUnprepareHeader)((HWAVEIN)pDevice->winmm.hDeviceCapture, &((WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod], sizeof(WAVEHDR)); + } + } + + ((MA_PFN_waveInClose)pContext->winmm.waveInClose)((HWAVEIN)pDevice->winmm.hDeviceCapture); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + if (pDevice->winmm.pWAVEHDRCapture != NULL) { + ma_uint32 iPeriod; + for (iPeriod = 0; iPeriod < pDevice->playback.internalPeriods; ++iPeriod) { + ((MA_PFN_waveOutUnprepareHeader)pContext->winmm.waveOutUnprepareHeader)((HWAVEOUT)pDevice->winmm.hDevicePlayback, &((WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod], sizeof(WAVEHDR)); + } + } + + ((MA_PFN_waveOutClose)pContext->winmm.waveOutClose)((HWAVEOUT)pDevice->winmm.hDevicePlayback); + } + + ma__free_from_callbacks(pDevice->winmm._pHeapData, &pContext->allocationCallbacks); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, errorMsg, errorCode); +} + +static ma_result ma_device_stop__winmm(ma_device* pDevice) +{ + MMRESULT resultMM; + + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + if (pDevice->winmm.hDeviceCapture == NULL) { + return MA_INVALID_ARGS; + } + + resultMM = ((MA_PFN_waveInReset)pDevice->pContext->winmm.waveInReset)((HWAVEIN)pDevice->winmm.hDeviceCapture); + if (resultMM != MMSYSERR_NOERROR) { + ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WinMM] WARNING: Failed to reset capture device.", ma_result_from_MMRESULT(resultMM)); + } + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ma_uint32 iPeriod; + WAVEHDR* pWAVEHDR; + + if (pDevice->winmm.hDevicePlayback == NULL) { + return MA_INVALID_ARGS; + } + + /* We need to drain the device. To do this we just loop over each header and if it's locked just wait for the event. */ + pWAVEHDR = (WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback; + for (iPeriod = 0; iPeriod < pDevice->playback.internalPeriods; iPeriod += 1) { + if (pWAVEHDR[iPeriod].dwUser == 1) { /* 1 = locked. */ + if (WaitForSingleObject((HANDLE)pDevice->winmm.hEventPlayback, INFINITE) != WAIT_OBJECT_0) { + break; /* An error occurred so just abandon ship and stop the device without draining. */ + } + + pWAVEHDR[iPeriod].dwUser = 0; + } + } + + resultMM = ((MA_PFN_waveOutReset)pDevice->pContext->winmm.waveOutReset)((HWAVEOUT)pDevice->winmm.hDevicePlayback); + if (resultMM != MMSYSERR_NOERROR) { + ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WinMM] WARNING: Failed to reset playback device.", ma_result_from_MMRESULT(resultMM)); + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device_write__winmm(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten) +{ + ma_result result = MA_SUCCESS; + MMRESULT resultMM; + ma_uint32 totalFramesWritten; + WAVEHDR* pWAVEHDR; + + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pPCMFrames != NULL); + + if (pFramesWritten != NULL) { + *pFramesWritten = 0; + } + + pWAVEHDR = (WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback; + + /* Keep processing as much data as possible. 
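+
+    The loop below walks the WAVEHDR ring: client frames are copied into the current header until it is
+    full, the header is then submitted and the ring index advanced, and once every header is in flight we
+    wait on the event. The submission step amounts to the following, with i standing in for
+    iNextHeaderPlayback and the waveOut call going through the dynamically loaded MA_PFN_waveOutWrite
+    pointer rather than being called directly:
+
+        pWAVEHDR[i].dwUser  = 1;                      // locked: owned by the driver from here on
+        pWAVEHDR[i].dwFlags &= ~WHDR_DONE;
+        ResetEvent((HANDLE)pDevice->winmm.hEventPlayback);
+        waveOutWrite((HWAVEOUT)pDevice->winmm.hDevicePlayback, &pWAVEHDR[i], sizeof(WAVEHDR));
+        pDevice->winmm.iNextHeaderPlayback = (i + 1) % pDevice->playback.internalPeriods;
+        pDevice->winmm.headerFramesConsumedPlayback = 0;
+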
*/ + totalFramesWritten = 0; + while (totalFramesWritten < frameCount) { + /* If the current header has some space available we need to write part of it. */ + if (pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwUser == 0) { /* 0 = unlocked. */ + /* + This header has room in it. We copy as much of it as we can. If we end up fully consuming the buffer we need to + write it out and move on to the next iteration. + */ + ma_uint32 bpf = ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + ma_uint32 framesRemainingInHeader = (pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwBufferLength/bpf) - pDevice->winmm.headerFramesConsumedPlayback; + + ma_uint32 framesToCopy = ma_min(framesRemainingInHeader, (frameCount - totalFramesWritten)); + const void* pSrc = ma_offset_ptr(pPCMFrames, totalFramesWritten*bpf); + void* pDst = ma_offset_ptr(pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].lpData, pDevice->winmm.headerFramesConsumedPlayback*bpf); + MA_COPY_MEMORY(pDst, pSrc, framesToCopy*bpf); + + pDevice->winmm.headerFramesConsumedPlayback += framesToCopy; + totalFramesWritten += framesToCopy; + + /* If we've consumed the buffer entirely we need to write it out to the device. */ + if (pDevice->winmm.headerFramesConsumedPlayback == (pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwBufferLength/bpf)) { + pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwUser = 1; /* 1 = locked. */ + pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwFlags &= ~WHDR_DONE; /* <-- Need to make sure the WHDR_DONE flag is unset. */ + + /* Make sure the event is reset to a non-signaled state to ensure we don't prematurely return from WaitForSingleObject(). */ + ResetEvent((HANDLE)pDevice->winmm.hEventPlayback); + + /* The device will be started here. */ + resultMM = ((MA_PFN_waveOutWrite)pDevice->pContext->winmm.waveOutWrite)((HWAVEOUT)pDevice->winmm.hDevicePlayback, &pWAVEHDR[pDevice->winmm.iNextHeaderPlayback], sizeof(WAVEHDR)); + if (resultMM != MMSYSERR_NOERROR) { + result = ma_result_from_MMRESULT(resultMM); + ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WinMM] waveOutWrite() failed.", result); + break; + } + + /* Make sure we move to the next header. */ + pDevice->winmm.iNextHeaderPlayback = (pDevice->winmm.iNextHeaderPlayback + 1) % pDevice->playback.internalPeriods; + pDevice->winmm.headerFramesConsumedPlayback = 0; + } + + /* If at this point we have consumed the entire input buffer we can return. */ + MA_ASSERT(totalFramesWritten <= frameCount); + if (totalFramesWritten == frameCount) { + break; + } + + /* Getting here means there's more to process. */ + continue; + } + + /* Getting here means there isn't enough room in the buffer and we need to wait for one to become available. */ + if (WaitForSingleObject((HANDLE)pDevice->winmm.hEventPlayback, INFINITE) != WAIT_OBJECT_0) { + result = MA_ERROR; + break; + } + + /* Something happened. If the next buffer has been marked as done we need to reset a bit of state. */ + if ((pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwFlags & WHDR_DONE) != 0) { + pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwUser = 0; /* 0 = unlocked (make it available for writing). */ + pDevice->winmm.headerFramesConsumedPlayback = 0; + } + + /* If the device has been stopped we need to break. 
*/ + if (ma_device__get_state(pDevice) != MA_STATE_STARTED) { + break; + } + } + + if (pFramesWritten != NULL) { + *pFramesWritten = totalFramesWritten; + } + + return result; +} + +static ma_result ma_device_read__winmm(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead) +{ + ma_result result = MA_SUCCESS; + MMRESULT resultMM; + ma_uint32 totalFramesRead; + WAVEHDR* pWAVEHDR; + + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pPCMFrames != NULL); + + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + pWAVEHDR = (WAVEHDR*)pDevice->winmm.pWAVEHDRCapture; + + /* Keep processing as much data as possible. */ + totalFramesRead = 0; + while (totalFramesRead < frameCount) { + /* If the current header has some space available we need to write part of it. */ + if (pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwUser == 0) { /* 0 = unlocked. */ + /* The buffer is available for reading. If we fully consume it we need to add it back to the buffer. */ + ma_uint32 bpf = ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + ma_uint32 framesRemainingInHeader = (pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwBufferLength/bpf) - pDevice->winmm.headerFramesConsumedCapture; + + ma_uint32 framesToCopy = ma_min(framesRemainingInHeader, (frameCount - totalFramesRead)); + const void* pSrc = ma_offset_ptr(pWAVEHDR[pDevice->winmm.iNextHeaderCapture].lpData, pDevice->winmm.headerFramesConsumedCapture*bpf); + void* pDst = ma_offset_ptr(pPCMFrames, totalFramesRead*bpf); + MA_COPY_MEMORY(pDst, pSrc, framesToCopy*bpf); + + pDevice->winmm.headerFramesConsumedCapture += framesToCopy; + totalFramesRead += framesToCopy; + + /* If we've consumed the buffer entirely we need to add it back to the device. */ + if (pDevice->winmm.headerFramesConsumedCapture == (pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwBufferLength/bpf)) { + pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwUser = 1; /* 1 = locked. */ + pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwFlags &= ~WHDR_DONE; /* <-- Need to make sure the WHDR_DONE flag is unset. */ + + /* Make sure the event is reset to a non-signaled state to ensure we don't prematurely return from WaitForSingleObject(). */ + ResetEvent((HANDLE)pDevice->winmm.hEventCapture); + + /* The device will be started here. */ + resultMM = ((MA_PFN_waveInAddBuffer)pDevice->pContext->winmm.waveInAddBuffer)((HWAVEIN)pDevice->winmm.hDeviceCapture, &((LPWAVEHDR)pDevice->winmm.pWAVEHDRCapture)[pDevice->winmm.iNextHeaderCapture], sizeof(WAVEHDR)); + if (resultMM != MMSYSERR_NOERROR) { + result = ma_result_from_MMRESULT(resultMM); + ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WinMM] waveInAddBuffer() failed.", result); + break; + } + + /* Make sure we move to the next header. */ + pDevice->winmm.iNextHeaderCapture = (pDevice->winmm.iNextHeaderCapture + 1) % pDevice->capture.internalPeriods; + pDevice->winmm.headerFramesConsumedCapture = 0; + } + + /* If at this point we have filled the entire input buffer we can return. */ + MA_ASSERT(totalFramesRead <= frameCount); + if (totalFramesRead == frameCount) { + break; + } + + /* Getting here means there's more to process. */ + continue; + } + + /* Getting here means there isn't enough any data left to send to the client which means we need to wait for more. */ + if (WaitForSingleObject((HANDLE)pDevice->winmm.hEventCapture, INFINITE) != WAIT_OBJECT_0) { + result = MA_ERROR; + break; + } + + /* Something happened. If the next buffer has been marked as done we need to reset a bit of state. 
*/ + if ((pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwFlags & WHDR_DONE) != 0) { + pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwUser = 0; /* 0 = unlocked (make it available for reading). */ + pDevice->winmm.headerFramesConsumedCapture = 0; + } + + /* If the device has been stopped we need to break. */ + if (ma_device__get_state(pDevice) != MA_STATE_STARTED) { + break; + } + } + + if (pFramesRead != NULL) { + *pFramesRead = totalFramesRead; + } + + return result; +} + +static ma_result ma_device_main_loop__winmm(ma_device* pDevice) +{ + ma_result result = MA_SUCCESS; + ma_bool32 exitLoop = MA_FALSE; + + MA_ASSERT(pDevice != NULL); + + /* The capture device needs to be started immediately. */ + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + MMRESULT resultMM; + WAVEHDR* pWAVEHDR; + ma_uint32 iPeriod; + + pWAVEHDR = (WAVEHDR*)pDevice->winmm.pWAVEHDRCapture; + + /* Make sure the event is reset to a non-signaled state to ensure we don't prematurely return from WaitForSingleObject(). */ + ResetEvent((HANDLE)pDevice->winmm.hEventCapture); + + /* To start the device we attach all of the buffers and then start it. As the buffers are filled with data we will get notifications. */ + for (iPeriod = 0; iPeriod < pDevice->capture.internalPeriods; ++iPeriod) { + resultMM = ((MA_PFN_waveInAddBuffer)pDevice->pContext->winmm.waveInAddBuffer)((HWAVEIN)pDevice->winmm.hDeviceCapture, &((LPWAVEHDR)pDevice->winmm.pWAVEHDRCapture)[iPeriod], sizeof(WAVEHDR)); + if (resultMM != MMSYSERR_NOERROR) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WinMM] Failed to attach input buffers to capture device in preparation for capture.", ma_result_from_MMRESULT(resultMM)); + } + + /* Make sure all of the buffers start out locked. We don't want to access them until the backend tells us we can. */ + pWAVEHDR[iPeriod].dwUser = 1; /* 1 = locked. */ + } + + /* Capture devices need to be explicitly started, unlike playback devices. 
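+       The start-up sequence used by this function is therefore (illustrative recap of the code above and below,
+       not additional behaviour):
+
+           1. waveInAddBuffer() queues one WAVEHDR per period (the loop above).
+           2. Each queued header is marked locked with dwUser = 1 until the driver raises WHDR_DONE.
+           3. waveInStart() below actually begins recording.
+
+       Playback has no equivalent call because the device starts implicitly on the first waveOutWrite().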
*/ + resultMM = ((MA_PFN_waveInStart)pDevice->pContext->winmm.waveInStart)((HWAVEIN)pDevice->winmm.hDeviceCapture); + if (resultMM != MMSYSERR_NOERROR) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WinMM] Failed to start backend device.", ma_result_from_MMRESULT(resultMM)); + } + } + + + while (ma_device__get_state(pDevice) == MA_STATE_STARTED && !exitLoop) { + switch (pDevice->type) + { + case ma_device_type_duplex: + { + /* The process is: device_read -> convert -> callback -> convert -> device_write */ + ma_uint32 totalCapturedDeviceFramesProcessed = 0; + ma_uint32 capturedDevicePeriodSizeInFrames = ma_min(pDevice->capture.internalPeriodSizeInFrames, pDevice->playback.internalPeriodSizeInFrames); + + while (totalCapturedDeviceFramesProcessed < capturedDevicePeriodSizeInFrames) { + ma_uint8 capturedDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint8 playbackDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 capturedDeviceDataCapInFrames = sizeof(capturedDeviceData) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + ma_uint32 playbackDeviceDataCapInFrames = sizeof(playbackDeviceData) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + ma_uint32 capturedDeviceFramesRemaining; + ma_uint32 capturedDeviceFramesProcessed; + ma_uint32 capturedDeviceFramesToProcess; + ma_uint32 capturedDeviceFramesToTryProcessing = capturedDevicePeriodSizeInFrames - totalCapturedDeviceFramesProcessed; + if (capturedDeviceFramesToTryProcessing > capturedDeviceDataCapInFrames) { + capturedDeviceFramesToTryProcessing = capturedDeviceDataCapInFrames; + } + + result = ma_device_read__winmm(pDevice, capturedDeviceData, capturedDeviceFramesToTryProcessing, &capturedDeviceFramesToProcess); + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + + capturedDeviceFramesRemaining = capturedDeviceFramesToProcess; + capturedDeviceFramesProcessed = 0; + + for (;;) { + ma_uint8 capturedClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint8 playbackClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 capturedClientDataCapInFrames = sizeof(capturedClientData) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); + ma_uint32 playbackClientDataCapInFrames = sizeof(playbackClientData) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); + ma_uint64 capturedClientFramesToProcessThisIteration = ma_min(capturedClientDataCapInFrames, playbackClientDataCapInFrames); + ma_uint64 capturedDeviceFramesToProcessThisIteration = capturedDeviceFramesRemaining; + ma_uint8* pRunningCapturedDeviceFrames = ma_offset_ptr(capturedDeviceData, capturedDeviceFramesProcessed * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels)); + + /* Convert capture data from device format to client format. */ + result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningCapturedDeviceFrames, &capturedDeviceFramesToProcessThisIteration, capturedClientData, &capturedClientFramesToProcessThisIteration); + if (result != MA_SUCCESS) { + break; + } + + /* + If we weren't able to generate any output frames it must mean we've exhaused all of our input. The only time this would not be the case is if capturedClientData was too small + which should never be the case when it's of the size MA_DATA_CONVERTER_STACK_BUFFER_SIZE. 
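+                    As a worked illustration (hypothetical numbers): converting 480 captured device frames at 48000 Hz
+                    into a 44100 Hz client stream yields roughly 441 client frames per pass, and once the converter
+                    stops producing output the check below breaks out of the loop.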
+ */ + if (capturedClientFramesToProcessThisIteration == 0) { + break; + } + + ma_device__on_data(pDevice, playbackClientData, capturedClientData, (ma_uint32)capturedClientFramesToProcessThisIteration); /* Safe cast .*/ + + capturedDeviceFramesProcessed += (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */ + capturedDeviceFramesRemaining -= (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */ + + /* At this point the playbackClientData buffer should be holding data that needs to be written to the device. */ + for (;;) { + ma_uint64 convertedClientFrameCount = capturedClientFramesToProcessThisIteration; + ma_uint64 convertedDeviceFrameCount = playbackDeviceDataCapInFrames; + result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, playbackClientData, &convertedClientFrameCount, playbackDeviceData, &convertedDeviceFrameCount); + if (result != MA_SUCCESS) { + break; + } + + result = ma_device_write__winmm(pDevice, playbackDeviceData, (ma_uint32)convertedDeviceFrameCount, NULL); /* Safe cast. */ + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + + capturedClientFramesToProcessThisIteration -= (ma_uint32)convertedClientFrameCount; /* Safe cast. */ + if (capturedClientFramesToProcessThisIteration == 0) { + break; + } + } + + /* In case an error happened from ma_device_write__winmm()... */ + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + } + + totalCapturedDeviceFramesProcessed += capturedDeviceFramesProcessed; + } + } break; + + case ma_device_type_capture: + { + /* We read in chunks of the period size, but use a stack allocated buffer for the intermediary. */ + ma_uint8 intermediaryBuffer[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + ma_uint32 periodSizeInFrames = pDevice->capture.internalPeriodSizeInFrames; + ma_uint32 framesReadThisPeriod = 0; + while (framesReadThisPeriod < periodSizeInFrames) { + ma_uint32 framesRemainingInPeriod = periodSizeInFrames - framesReadThisPeriod; + ma_uint32 framesProcessed; + ma_uint32 framesToReadThisIteration = framesRemainingInPeriod; + if (framesToReadThisIteration > intermediaryBufferSizeInFrames) { + framesToReadThisIteration = intermediaryBufferSizeInFrames; + } + + result = ma_device_read__winmm(pDevice, intermediaryBuffer, framesToReadThisIteration, &framesProcessed); + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + + ma_device__send_frames_to_client(pDevice, framesProcessed, intermediaryBuffer); + + framesReadThisPeriod += framesProcessed; + } + } break; + + case ma_device_type_playback: + { + /* We write in chunks of the period size, but use a stack allocated buffer for the intermediary. 
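+                For example (hypothetical numbers): with a 4096 byte stack buffer and an internal format of f32 stereo
+                (8 bytes per frame) the intermediary holds 512 frames, so a 1024 frame period is written over two
+                iterations of the loop below.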
 */
+                ma_uint8  intermediaryBuffer[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+                ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+                ma_uint32 periodSizeInFrames = pDevice->playback.internalPeriodSizeInFrames;
+                ma_uint32 framesWrittenThisPeriod = 0;
+                while (framesWrittenThisPeriod < periodSizeInFrames) {
+                    ma_uint32 framesRemainingInPeriod = periodSizeInFrames - framesWrittenThisPeriod;
+                    ma_uint32 framesProcessed;
+                    ma_uint32 framesToWriteThisIteration = framesRemainingInPeriod;
+                    if (framesToWriteThisIteration > intermediaryBufferSizeInFrames) {
+                        framesToWriteThisIteration = intermediaryBufferSizeInFrames;
+                    }
+
+                    ma_device__read_frames_from_client(pDevice, framesToWriteThisIteration, intermediaryBuffer);
+
+                    result = ma_device_write__winmm(pDevice, intermediaryBuffer, framesToWriteThisIteration, &framesProcessed);
+                    if (result != MA_SUCCESS) {
+                        exitLoop = MA_TRUE;
+                        break;
+                    }
+
+                    framesWrittenThisPeriod += framesProcessed;
+                }
+            } break;
+
+            /* To silence a warning. Will never hit this. */
+            case ma_device_type_loopback:
+            default: break;
+        }
+    }
+
+
+    /* Here is where the device needs to be stopped. */
+    ma_device_stop__winmm(pDevice);
+
+    return result;
+}
+
+static ma_result ma_context_uninit__winmm(ma_context* pContext)
+{
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pContext->backend == ma_backend_winmm);
+
+    ma_dlclose(pContext, pContext->winmm.hWinMM);
+    return MA_SUCCESS;
+}
+
+static ma_result ma_context_init__winmm(const ma_context_config* pConfig, ma_context* pContext)
+{
+    MA_ASSERT(pContext != NULL);
+
+    (void)pConfig;
+
+    pContext->winmm.hWinMM = ma_dlopen(pContext, "winmm.dll");
+    if (pContext->winmm.hWinMM == NULL) {
+        return MA_NO_BACKEND;
+    }
+
+    pContext->winmm.waveOutGetNumDevs = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveOutGetNumDevs");
+    pContext->winmm.waveOutGetDevCapsA = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveOutGetDevCapsA");
+    pContext->winmm.waveOutOpen = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveOutOpen");
+    pContext->winmm.waveOutClose = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveOutClose");
+    pContext->winmm.waveOutPrepareHeader = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveOutPrepareHeader");
+    pContext->winmm.waveOutUnprepareHeader = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveOutUnprepareHeader");
+    pContext->winmm.waveOutWrite = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveOutWrite");
+    pContext->winmm.waveOutReset = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveOutReset");
+    pContext->winmm.waveInGetNumDevs = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInGetNumDevs");
+    pContext->winmm.waveInGetDevCapsA = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInGetDevCapsA");
+    pContext->winmm.waveInOpen = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInOpen");
+    pContext->winmm.waveInClose = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInClose");
+    pContext->winmm.waveInPrepareHeader = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInPrepareHeader");
+    pContext->winmm.waveInUnprepareHeader = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInUnprepareHeader");
+    pContext->winmm.waveInAddBuffer = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInAddBuffer");
+    pContext->winmm.waveInStart = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInStart");
+    pContext->winmm.waveInReset = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInReset");
+
+    pContext->onUninit = ma_context_uninit__winmm;
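+    /* Note that every winmm.dll entry point above is resolved at runtime with ma_dlsym(), so this backend adds no link-time dependency on winmm.lib. */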
+    pContext->onDeviceIDEqual = ma_context_is_device_id_equal__winmm;
+    pContext->onEnumDevices = ma_context_enumerate_devices__winmm;
+    pContext->onGetDeviceInfo = ma_context_get_device_info__winmm;
+    pContext->onDeviceInit = ma_device_init__winmm;
+    pContext->onDeviceUninit = ma_device_uninit__winmm;
+    pContext->onDeviceStart = NULL; /* Not used with synchronous backends. */
+    pContext->onDeviceStop = NULL; /* Not used with synchronous backends. */
+    pContext->onDeviceMainLoop = ma_device_main_loop__winmm;
+
+    return MA_SUCCESS;
+}
+#endif
+
+
+
+
+/******************************************************************************
+
+ALSA Backend
+
+******************************************************************************/
+#ifdef MA_HAS_ALSA
+
+#ifdef MA_NO_RUNTIME_LINKING
+#include <alsa/asoundlib.h>
+typedef snd_pcm_uframes_t ma_snd_pcm_uframes_t;
+typedef snd_pcm_sframes_t ma_snd_pcm_sframes_t;
+typedef snd_pcm_stream_t ma_snd_pcm_stream_t;
+typedef snd_pcm_format_t ma_snd_pcm_format_t;
+typedef snd_pcm_access_t ma_snd_pcm_access_t;
+typedef snd_pcm_t ma_snd_pcm_t;
+typedef snd_pcm_hw_params_t ma_snd_pcm_hw_params_t;
+typedef snd_pcm_sw_params_t ma_snd_pcm_sw_params_t;
+typedef snd_pcm_format_mask_t ma_snd_pcm_format_mask_t;
+typedef snd_pcm_info_t ma_snd_pcm_info_t;
+typedef snd_pcm_channel_area_t ma_snd_pcm_channel_area_t;
+typedef snd_pcm_chmap_t ma_snd_pcm_chmap_t;
+
+/* snd_pcm_stream_t */
+#define MA_SND_PCM_STREAM_PLAYBACK SND_PCM_STREAM_PLAYBACK
+#define MA_SND_PCM_STREAM_CAPTURE SND_PCM_STREAM_CAPTURE
+
+/* snd_pcm_format_t */
+#define MA_SND_PCM_FORMAT_UNKNOWN SND_PCM_FORMAT_UNKNOWN
+#define MA_SND_PCM_FORMAT_U8 SND_PCM_FORMAT_U8
+#define MA_SND_PCM_FORMAT_S16_LE SND_PCM_FORMAT_S16_LE
+#define MA_SND_PCM_FORMAT_S16_BE SND_PCM_FORMAT_S16_BE
+#define MA_SND_PCM_FORMAT_S24_LE SND_PCM_FORMAT_S24_LE
+#define MA_SND_PCM_FORMAT_S24_BE SND_PCM_FORMAT_S24_BE
+#define MA_SND_PCM_FORMAT_S32_LE SND_PCM_FORMAT_S32_LE
+#define MA_SND_PCM_FORMAT_S32_BE SND_PCM_FORMAT_S32_BE
+#define MA_SND_PCM_FORMAT_FLOAT_LE SND_PCM_FORMAT_FLOAT_LE
+#define MA_SND_PCM_FORMAT_FLOAT_BE SND_PCM_FORMAT_FLOAT_BE
+#define MA_SND_PCM_FORMAT_FLOAT64_LE SND_PCM_FORMAT_FLOAT64_LE
+#define MA_SND_PCM_FORMAT_FLOAT64_BE SND_PCM_FORMAT_FLOAT64_BE
+#define MA_SND_PCM_FORMAT_MU_LAW SND_PCM_FORMAT_MU_LAW
+#define MA_SND_PCM_FORMAT_A_LAW SND_PCM_FORMAT_A_LAW
+#define MA_SND_PCM_FORMAT_S24_3LE SND_PCM_FORMAT_S24_3LE
+#define MA_SND_PCM_FORMAT_S24_3BE SND_PCM_FORMAT_S24_3BE
+
+/* ma_snd_pcm_access_t */
+#define MA_SND_PCM_ACCESS_MMAP_INTERLEAVED SND_PCM_ACCESS_MMAP_INTERLEAVED
+#define MA_SND_PCM_ACCESS_MMAP_NONINTERLEAVED SND_PCM_ACCESS_MMAP_NONINTERLEAVED
+#define MA_SND_PCM_ACCESS_MMAP_COMPLEX SND_PCM_ACCESS_MMAP_COMPLEX
+#define MA_SND_PCM_ACCESS_RW_INTERLEAVED SND_PCM_ACCESS_RW_INTERLEAVED
+#define MA_SND_PCM_ACCESS_RW_NONINTERLEAVED SND_PCM_ACCESS_RW_NONINTERLEAVED
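+
+/*
+The MA_SND_* typedefs and defines in this section mirror the native ALSA symbols one-to-one so that the rest of the
+backend can be written against a single set of names whether or not MA_NO_RUNTIME_LINKING is used. Calls always go
+through the function pointer table on the context, for example (illustrative sketch only):
+
+    ((ma_snd_pcm_open_proc)pContext->alsa.snd_pcm_open)(&pPCM, "default", MA_SND_PCM_STREAM_PLAYBACK, 0);
+*/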
+
+/* Channel positions. */
+#define MA_SND_CHMAP_UNKNOWN SND_CHMAP_UNKNOWN
+#define MA_SND_CHMAP_NA SND_CHMAP_NA
+#define MA_SND_CHMAP_MONO SND_CHMAP_MONO
+#define MA_SND_CHMAP_FL SND_CHMAP_FL
+#define MA_SND_CHMAP_FR SND_CHMAP_FR
+#define MA_SND_CHMAP_RL SND_CHMAP_RL
+#define MA_SND_CHMAP_RR SND_CHMAP_RR
+#define MA_SND_CHMAP_FC SND_CHMAP_FC
+#define MA_SND_CHMAP_LFE SND_CHMAP_LFE
+#define MA_SND_CHMAP_SL SND_CHMAP_SL
+#define MA_SND_CHMAP_SR SND_CHMAP_SR
+#define MA_SND_CHMAP_RC SND_CHMAP_RC
+#define MA_SND_CHMAP_FLC SND_CHMAP_FLC
+#define MA_SND_CHMAP_FRC SND_CHMAP_FRC
+#define MA_SND_CHMAP_RLC SND_CHMAP_RLC
+#define MA_SND_CHMAP_RRC SND_CHMAP_RRC
+#define MA_SND_CHMAP_FLW SND_CHMAP_FLW
+#define MA_SND_CHMAP_FRW SND_CHMAP_FRW
+#define MA_SND_CHMAP_FLH SND_CHMAP_FLH
+#define MA_SND_CHMAP_FCH SND_CHMAP_FCH
+#define MA_SND_CHMAP_FRH SND_CHMAP_FRH
+#define MA_SND_CHMAP_TC SND_CHMAP_TC
+#define MA_SND_CHMAP_TFL SND_CHMAP_TFL
+#define MA_SND_CHMAP_TFR SND_CHMAP_TFR
+#define MA_SND_CHMAP_TFC SND_CHMAP_TFC
+#define MA_SND_CHMAP_TRL SND_CHMAP_TRL
+#define MA_SND_CHMAP_TRR SND_CHMAP_TRR
+#define MA_SND_CHMAP_TRC SND_CHMAP_TRC
+#define MA_SND_CHMAP_TFLC SND_CHMAP_TFLC
+#define MA_SND_CHMAP_TFRC SND_CHMAP_TFRC
+#define MA_SND_CHMAP_TSL SND_CHMAP_TSL
+#define MA_SND_CHMAP_TSR SND_CHMAP_TSR
+#define MA_SND_CHMAP_LLFE SND_CHMAP_LLFE
+#define MA_SND_CHMAP_RLFE SND_CHMAP_RLFE
+#define MA_SND_CHMAP_BC SND_CHMAP_BC
+#define MA_SND_CHMAP_BLC SND_CHMAP_BLC
+#define MA_SND_CHMAP_BRC SND_CHMAP_BRC
+
+/* Open mode flags. */
+#define MA_SND_PCM_NO_AUTO_RESAMPLE SND_PCM_NO_AUTO_RESAMPLE
+#define MA_SND_PCM_NO_AUTO_CHANNELS SND_PCM_NO_AUTO_CHANNELS
+#define MA_SND_PCM_NO_AUTO_FORMAT SND_PCM_NO_AUTO_FORMAT
+#else
+#include <errno.h> /* For EPIPE, etc. */
+typedef unsigned long ma_snd_pcm_uframes_t;
+typedef long ma_snd_pcm_sframes_t;
+typedef int ma_snd_pcm_stream_t;
+typedef int ma_snd_pcm_format_t;
+typedef int ma_snd_pcm_access_t;
+typedef struct ma_snd_pcm_t ma_snd_pcm_t;
+typedef struct ma_snd_pcm_hw_params_t ma_snd_pcm_hw_params_t;
+typedef struct ma_snd_pcm_sw_params_t ma_snd_pcm_sw_params_t;
+typedef struct ma_snd_pcm_format_mask_t ma_snd_pcm_format_mask_t;
+typedef struct ma_snd_pcm_info_t ma_snd_pcm_info_t;
+typedef struct
+{
+    void* addr;
+    unsigned int first;
+    unsigned int step;
+} ma_snd_pcm_channel_area_t;
+typedef struct
+{
+    unsigned int channels;
+    unsigned int pos[1];
+} ma_snd_pcm_chmap_t;
+
+/* snd_pcm_state_t */
+#define MA_SND_PCM_STATE_OPEN 0
+#define MA_SND_PCM_STATE_SETUP 1
+#define MA_SND_PCM_STATE_PREPARED 2
+#define MA_SND_PCM_STATE_RUNNING 3
+#define MA_SND_PCM_STATE_XRUN 4
+#define MA_SND_PCM_STATE_DRAINING 5
+#define MA_SND_PCM_STATE_PAUSED 6
+#define MA_SND_PCM_STATE_SUSPENDED 7
+#define MA_SND_PCM_STATE_DISCONNECTED 8
+
+/* snd_pcm_stream_t */
+#define MA_SND_PCM_STREAM_PLAYBACK 0
+#define MA_SND_PCM_STREAM_CAPTURE 1
+
+/* snd_pcm_format_t */
+#define MA_SND_PCM_FORMAT_UNKNOWN -1
+#define MA_SND_PCM_FORMAT_U8 1
+#define MA_SND_PCM_FORMAT_S16_LE 2
+#define MA_SND_PCM_FORMAT_S16_BE 3
+#define MA_SND_PCM_FORMAT_S24_LE 6
+#define MA_SND_PCM_FORMAT_S24_BE 7
+#define MA_SND_PCM_FORMAT_S32_LE 10
+#define MA_SND_PCM_FORMAT_S32_BE 11
+#define MA_SND_PCM_FORMAT_FLOAT_LE 14
+#define MA_SND_PCM_FORMAT_FLOAT_BE 15
+#define MA_SND_PCM_FORMAT_FLOAT64_LE 16
+#define MA_SND_PCM_FORMAT_FLOAT64_BE 17
+#define MA_SND_PCM_FORMAT_MU_LAW 20
+#define MA_SND_PCM_FORMAT_A_LAW 21
+#define MA_SND_PCM_FORMAT_S24_3LE 32
+#define MA_SND_PCM_FORMAT_S24_3BE 33
+
+/* snd_pcm_access_t */
+#define MA_SND_PCM_ACCESS_MMAP_INTERLEAVED 0
+#define 
MA_SND_PCM_ACCESS_MMAP_NONINTERLEAVED 1 +#define MA_SND_PCM_ACCESS_MMAP_COMPLEX 2 +#define MA_SND_PCM_ACCESS_RW_INTERLEAVED 3 +#define MA_SND_PCM_ACCESS_RW_NONINTERLEAVED 4 + +/* Channel positions. */ +#define MA_SND_CHMAP_UNKNOWN 0 +#define MA_SND_CHMAP_NA 1 +#define MA_SND_CHMAP_MONO 2 +#define MA_SND_CHMAP_FL 3 +#define MA_SND_CHMAP_FR 4 +#define MA_SND_CHMAP_RL 5 +#define MA_SND_CHMAP_RR 6 +#define MA_SND_CHMAP_FC 7 +#define MA_SND_CHMAP_LFE 8 +#define MA_SND_CHMAP_SL 9 +#define MA_SND_CHMAP_SR 10 +#define MA_SND_CHMAP_RC 11 +#define MA_SND_CHMAP_FLC 12 +#define MA_SND_CHMAP_FRC 13 +#define MA_SND_CHMAP_RLC 14 +#define MA_SND_CHMAP_RRC 15 +#define MA_SND_CHMAP_FLW 16 +#define MA_SND_CHMAP_FRW 17 +#define MA_SND_CHMAP_FLH 18 +#define MA_SND_CHMAP_FCH 19 +#define MA_SND_CHMAP_FRH 20 +#define MA_SND_CHMAP_TC 21 +#define MA_SND_CHMAP_TFL 22 +#define MA_SND_CHMAP_TFR 23 +#define MA_SND_CHMAP_TFC 24 +#define MA_SND_CHMAP_TRL 25 +#define MA_SND_CHMAP_TRR 26 +#define MA_SND_CHMAP_TRC 27 +#define MA_SND_CHMAP_TFLC 28 +#define MA_SND_CHMAP_TFRC 29 +#define MA_SND_CHMAP_TSL 30 +#define MA_SND_CHMAP_TSR 31 +#define MA_SND_CHMAP_LLFE 32 +#define MA_SND_CHMAP_RLFE 33 +#define MA_SND_CHMAP_BC 34 +#define MA_SND_CHMAP_BLC 35 +#define MA_SND_CHMAP_BRC 36 + +/* Open mode flags. */ +#define MA_SND_PCM_NO_AUTO_RESAMPLE 0x00010000 +#define MA_SND_PCM_NO_AUTO_CHANNELS 0x00020000 +#define MA_SND_PCM_NO_AUTO_FORMAT 0x00040000 +#endif + +typedef int (* ma_snd_pcm_open_proc) (ma_snd_pcm_t **pcm, const char *name, ma_snd_pcm_stream_t stream, int mode); +typedef int (* ma_snd_pcm_close_proc) (ma_snd_pcm_t *pcm); +typedef size_t (* ma_snd_pcm_hw_params_sizeof_proc) (void); +typedef int (* ma_snd_pcm_hw_params_any_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params); +typedef int (* ma_snd_pcm_hw_params_set_format_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, ma_snd_pcm_format_t val); +typedef int (* ma_snd_pcm_hw_params_set_format_first_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, ma_snd_pcm_format_t *format); +typedef void (* ma_snd_pcm_hw_params_get_format_mask_proc) (ma_snd_pcm_hw_params_t *params, ma_snd_pcm_format_mask_t *mask); +typedef int (* ma_snd_pcm_hw_params_set_channels_near_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int *val); +typedef int (* ma_snd_pcm_hw_params_set_rate_resample_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int val); +typedef int (* ma_snd_pcm_hw_params_set_rate_near_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int *val, int *dir); +typedef int (* ma_snd_pcm_hw_params_set_buffer_size_near_proc)(ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, ma_snd_pcm_uframes_t *val); +typedef int (* ma_snd_pcm_hw_params_set_periods_near_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int *val, int *dir); +typedef int (* ma_snd_pcm_hw_params_set_access_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, ma_snd_pcm_access_t _access); +typedef int (* ma_snd_pcm_hw_params_get_format_proc) (const ma_snd_pcm_hw_params_t *params, ma_snd_pcm_format_t *format); +typedef int (* ma_snd_pcm_hw_params_get_channels_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *val); +typedef int (* ma_snd_pcm_hw_params_get_channels_min_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *val); +typedef int (* ma_snd_pcm_hw_params_get_channels_max_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *val); +typedef int (* ma_snd_pcm_hw_params_get_rate_proc) (const 
ma_snd_pcm_hw_params_t *params, unsigned int *rate, int *dir); +typedef int (* ma_snd_pcm_hw_params_get_rate_min_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *rate, int *dir); +typedef int (* ma_snd_pcm_hw_params_get_rate_max_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *rate, int *dir); +typedef int (* ma_snd_pcm_hw_params_get_buffer_size_proc) (const ma_snd_pcm_hw_params_t *params, ma_snd_pcm_uframes_t *val); +typedef int (* ma_snd_pcm_hw_params_get_periods_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *val, int *dir); +typedef int (* ma_snd_pcm_hw_params_get_access_proc) (const ma_snd_pcm_hw_params_t *params, ma_snd_pcm_access_t *_access); +typedef int (* ma_snd_pcm_hw_params_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params); +typedef size_t (* ma_snd_pcm_sw_params_sizeof_proc) (void); +typedef int (* ma_snd_pcm_sw_params_current_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_sw_params_t *params); +typedef int (* ma_snd_pcm_sw_params_get_boundary_proc) (ma_snd_pcm_sw_params_t *params, ma_snd_pcm_uframes_t* val); +typedef int (* ma_snd_pcm_sw_params_set_avail_min_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_sw_params_t *params, ma_snd_pcm_uframes_t val); +typedef int (* ma_snd_pcm_sw_params_set_start_threshold_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_sw_params_t *params, ma_snd_pcm_uframes_t val); +typedef int (* ma_snd_pcm_sw_params_set_stop_threshold_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_sw_params_t *params, ma_snd_pcm_uframes_t val); +typedef int (* ma_snd_pcm_sw_params_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_sw_params_t *params); +typedef size_t (* ma_snd_pcm_format_mask_sizeof_proc) (void); +typedef int (* ma_snd_pcm_format_mask_test_proc) (const ma_snd_pcm_format_mask_t *mask, ma_snd_pcm_format_t val); +typedef ma_snd_pcm_chmap_t * (* ma_snd_pcm_get_chmap_proc) (ma_snd_pcm_t *pcm); +typedef int (* ma_snd_pcm_state_proc) (ma_snd_pcm_t *pcm); +typedef int (* ma_snd_pcm_prepare_proc) (ma_snd_pcm_t *pcm); +typedef int (* ma_snd_pcm_start_proc) (ma_snd_pcm_t *pcm); +typedef int (* ma_snd_pcm_drop_proc) (ma_snd_pcm_t *pcm); +typedef int (* ma_snd_pcm_drain_proc) (ma_snd_pcm_t *pcm); +typedef int (* ma_snd_device_name_hint_proc) (int card, const char *iface, void ***hints); +typedef char * (* ma_snd_device_name_get_hint_proc) (const void *hint, const char *id); +typedef int (* ma_snd_card_get_index_proc) (const char *name); +typedef int (* ma_snd_device_name_free_hint_proc) (void **hints); +typedef int (* ma_snd_pcm_mmap_begin_proc) (ma_snd_pcm_t *pcm, const ma_snd_pcm_channel_area_t **areas, ma_snd_pcm_uframes_t *offset, ma_snd_pcm_uframes_t *frames); +typedef ma_snd_pcm_sframes_t (* ma_snd_pcm_mmap_commit_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_uframes_t offset, ma_snd_pcm_uframes_t frames); +typedef int (* ma_snd_pcm_recover_proc) (ma_snd_pcm_t *pcm, int err, int silent); +typedef ma_snd_pcm_sframes_t (* ma_snd_pcm_readi_proc) (ma_snd_pcm_t *pcm, void *buffer, ma_snd_pcm_uframes_t size); +typedef ma_snd_pcm_sframes_t (* ma_snd_pcm_writei_proc) (ma_snd_pcm_t *pcm, const void *buffer, ma_snd_pcm_uframes_t size); +typedef ma_snd_pcm_sframes_t (* ma_snd_pcm_avail_proc) (ma_snd_pcm_t *pcm); +typedef ma_snd_pcm_sframes_t (* ma_snd_pcm_avail_update_proc) (ma_snd_pcm_t *pcm); +typedef int (* ma_snd_pcm_wait_proc) (ma_snd_pcm_t *pcm, int timeout); +typedef int (* ma_snd_pcm_info_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_info_t* info); +typedef size_t (* ma_snd_pcm_info_sizeof_proc) (); +typedef const char* (* ma_snd_pcm_info_get_name_proc) (const ma_snd_pcm_info_t* info); +typedef 
int (* ma_snd_config_update_free_global_proc) (); + +/* This array specifies each of the common devices that can be used for both playback and capture. */ +static const char* g_maCommonDeviceNamesALSA[] = { + "default", + "null", + "pulse", + "jack" +}; + +/* This array allows us to blacklist specific playback devices. */ +static const char* g_maBlacklistedPlaybackDeviceNamesALSA[] = { + "" +}; + +/* This array allows us to blacklist specific capture devices. */ +static const char* g_maBlacklistedCaptureDeviceNamesALSA[] = { + "" +}; + + +/* +This array allows miniaudio to control device-specific default buffer sizes. This uses a scaling factor. Order is important. If +any part of the string is present in the device's name, the associated scale will be used. +*/ +static struct +{ + const char* name; + float scale; +} g_maDefaultBufferSizeScalesALSA[] = { + {"bcm2835 IEC958/HDMI", 2.0f}, + {"bcm2835 ALSA", 2.0f} +}; + +static float ma_find_default_buffer_size_scale__alsa(const char* deviceName) +{ + size_t i; + + if (deviceName == NULL) { + return 1; + } + + for (i = 0; i < ma_countof(g_maDefaultBufferSizeScalesALSA); ++i) { + if (strstr(g_maDefaultBufferSizeScalesALSA[i].name, deviceName) != NULL) { + return g_maDefaultBufferSizeScalesALSA[i].scale; + } + } + + return 1; +} + +static ma_snd_pcm_format_t ma_convert_ma_format_to_alsa_format(ma_format format) +{ + ma_snd_pcm_format_t ALSAFormats[] = { + MA_SND_PCM_FORMAT_UNKNOWN, /* ma_format_unknown */ + MA_SND_PCM_FORMAT_U8, /* ma_format_u8 */ + MA_SND_PCM_FORMAT_S16_LE, /* ma_format_s16 */ + MA_SND_PCM_FORMAT_S24_3LE, /* ma_format_s24 */ + MA_SND_PCM_FORMAT_S32_LE, /* ma_format_s32 */ + MA_SND_PCM_FORMAT_FLOAT_LE /* ma_format_f32 */ + }; + + if (ma_is_big_endian()) { + ALSAFormats[0] = MA_SND_PCM_FORMAT_UNKNOWN; + ALSAFormats[1] = MA_SND_PCM_FORMAT_U8; + ALSAFormats[2] = MA_SND_PCM_FORMAT_S16_BE; + ALSAFormats[3] = MA_SND_PCM_FORMAT_S24_3BE; + ALSAFormats[4] = MA_SND_PCM_FORMAT_S32_BE; + ALSAFormats[5] = MA_SND_PCM_FORMAT_FLOAT_BE; + } + + return ALSAFormats[format]; +} + +static ma_format ma_format_from_alsa(ma_snd_pcm_format_t formatALSA) +{ + if (ma_is_little_endian()) { + switch (formatALSA) { + case MA_SND_PCM_FORMAT_S16_LE: return ma_format_s16; + case MA_SND_PCM_FORMAT_S24_3LE: return ma_format_s24; + case MA_SND_PCM_FORMAT_S32_LE: return ma_format_s32; + case MA_SND_PCM_FORMAT_FLOAT_LE: return ma_format_f32; + default: break; + } + } else { + switch (formatALSA) { + case MA_SND_PCM_FORMAT_S16_BE: return ma_format_s16; + case MA_SND_PCM_FORMAT_S24_3BE: return ma_format_s24; + case MA_SND_PCM_FORMAT_S32_BE: return ma_format_s32; + case MA_SND_PCM_FORMAT_FLOAT_BE: return ma_format_f32; + default: break; + } + } + + /* Endian agnostic. 
*/ + switch (formatALSA) { + case MA_SND_PCM_FORMAT_U8: return ma_format_u8; + default: return ma_format_unknown; + } +} + +static ma_channel ma_convert_alsa_channel_position_to_ma_channel(unsigned int alsaChannelPos) +{ + switch (alsaChannelPos) + { + case MA_SND_CHMAP_MONO: return MA_CHANNEL_MONO; + case MA_SND_CHMAP_FL: return MA_CHANNEL_FRONT_LEFT; + case MA_SND_CHMAP_FR: return MA_CHANNEL_FRONT_RIGHT; + case MA_SND_CHMAP_RL: return MA_CHANNEL_BACK_LEFT; + case MA_SND_CHMAP_RR: return MA_CHANNEL_BACK_RIGHT; + case MA_SND_CHMAP_FC: return MA_CHANNEL_FRONT_CENTER; + case MA_SND_CHMAP_LFE: return MA_CHANNEL_LFE; + case MA_SND_CHMAP_SL: return MA_CHANNEL_SIDE_LEFT; + case MA_SND_CHMAP_SR: return MA_CHANNEL_SIDE_RIGHT; + case MA_SND_CHMAP_RC: return MA_CHANNEL_BACK_CENTER; + case MA_SND_CHMAP_FLC: return MA_CHANNEL_FRONT_LEFT_CENTER; + case MA_SND_CHMAP_FRC: return MA_CHANNEL_FRONT_RIGHT_CENTER; + case MA_SND_CHMAP_RLC: return 0; + case MA_SND_CHMAP_RRC: return 0; + case MA_SND_CHMAP_FLW: return 0; + case MA_SND_CHMAP_FRW: return 0; + case MA_SND_CHMAP_FLH: return 0; + case MA_SND_CHMAP_FCH: return 0; + case MA_SND_CHMAP_FRH: return 0; + case MA_SND_CHMAP_TC: return MA_CHANNEL_TOP_CENTER; + case MA_SND_CHMAP_TFL: return MA_CHANNEL_TOP_FRONT_LEFT; + case MA_SND_CHMAP_TFR: return MA_CHANNEL_TOP_FRONT_RIGHT; + case MA_SND_CHMAP_TFC: return MA_CHANNEL_TOP_FRONT_CENTER; + case MA_SND_CHMAP_TRL: return MA_CHANNEL_TOP_BACK_LEFT; + case MA_SND_CHMAP_TRR: return MA_CHANNEL_TOP_BACK_RIGHT; + case MA_SND_CHMAP_TRC: return MA_CHANNEL_TOP_BACK_CENTER; + default: break; + } + + return 0; +} + +static ma_bool32 ma_is_common_device_name__alsa(const char* name) +{ + size_t iName; + for (iName = 0; iName < ma_countof(g_maCommonDeviceNamesALSA); ++iName) { + if (ma_strcmp(name, g_maCommonDeviceNamesALSA[iName]) == 0) { + return MA_TRUE; + } + } + + return MA_FALSE; +} + + +static ma_bool32 ma_is_playback_device_blacklisted__alsa(const char* name) +{ + size_t iName; + for (iName = 0; iName < ma_countof(g_maBlacklistedPlaybackDeviceNamesALSA); ++iName) { + if (ma_strcmp(name, g_maBlacklistedPlaybackDeviceNamesALSA[iName]) == 0) { + return MA_TRUE; + } + } + + return MA_FALSE; +} + +static ma_bool32 ma_is_capture_device_blacklisted__alsa(const char* name) +{ + size_t iName; + for (iName = 0; iName < ma_countof(g_maBlacklistedCaptureDeviceNamesALSA); ++iName) { + if (ma_strcmp(name, g_maBlacklistedCaptureDeviceNamesALSA[iName]) == 0) { + return MA_TRUE; + } + } + + return MA_FALSE; +} + +static ma_bool32 ma_is_device_blacklisted__alsa(ma_device_type deviceType, const char* name) +{ + if (deviceType == ma_device_type_playback) { + return ma_is_playback_device_blacklisted__alsa(name); + } else { + return ma_is_capture_device_blacklisted__alsa(name); + } +} + + +static const char* ma_find_char(const char* str, char c, int* index) +{ + int i = 0; + for (;;) { + if (str[i] == '\0') { + if (index) *index = -1; + return NULL; + } + + if (str[i] == c) { + if (index) *index = i; + return str + i; + } + + i += 1; + } + + /* Should never get here, but treat it as though the character was not found to make me feel better inside. */ + if (index) *index = -1; + return NULL; +} + +static ma_bool32 ma_is_device_name_in_hw_format__alsa(const char* hwid) +{ + /* This function is just checking whether or not hwid is in "hw:%d,%d" format. 
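+       For example (illustrative inputs): "hw:0,0" and "hw:1,3" pass the check, whereas "hw:CARD=Intel,DEV=0",
+       "dmix:0,0" and "default" do not.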
*/ + + int commaPos; + const char* dev; + int i; + + if (hwid == NULL) { + return MA_FALSE; + } + + if (hwid[0] != 'h' || hwid[1] != 'w' || hwid[2] != ':') { + return MA_FALSE; + } + + hwid += 3; + + dev = ma_find_char(hwid, ',', &commaPos); + if (dev == NULL) { + return MA_FALSE; + } else { + dev += 1; /* Skip past the ",". */ + } + + /* Check if the part between the ":" and the "," contains only numbers. If not, return false. */ + for (i = 0; i < commaPos; ++i) { + if (hwid[i] < '0' || hwid[i] > '9') { + return MA_FALSE; + } + } + + /* Check if everything after the "," is numeric. If not, return false. */ + i = 0; + while (dev[i] != '\0') { + if (dev[i] < '0' || dev[i] > '9') { + return MA_FALSE; + } + i += 1; + } + + return MA_TRUE; +} + +static int ma_convert_device_name_to_hw_format__alsa(ma_context* pContext, char* dst, size_t dstSize, const char* src) /* Returns 0 on success, non-0 on error. */ +{ + /* src should look something like this: "hw:CARD=I82801AAICH,DEV=0" */ + + int colonPos; + int commaPos; + char card[256]; + const char* dev; + int cardIndex; + + if (dst == NULL) { + return -1; + } + if (dstSize < 7) { + return -1; /* Absolute minimum size of the output buffer is 7 bytes. */ + } + + *dst = '\0'; /* Safety. */ + if (src == NULL) { + return -1; + } + + /* If the input name is already in "hw:%d,%d" format, just return that verbatim. */ + if (ma_is_device_name_in_hw_format__alsa(src)) { + return ma_strcpy_s(dst, dstSize, src); + } + + src = ma_find_char(src, ':', &colonPos); + if (src == NULL) { + return -1; /* Couldn't find a colon */ + } + + dev = ma_find_char(src, ',', &commaPos); + if (dev == NULL) { + dev = "0"; + ma_strncpy_s(card, sizeof(card), src+6, (size_t)-1); /* +6 = ":CARD=" */ + } else { + dev = dev + 5; /* +5 = ",DEV=" */ + ma_strncpy_s(card, sizeof(card), src+6, commaPos-6); /* +6 = ":CARD=" */ + } + + cardIndex = ((ma_snd_card_get_index_proc)pContext->alsa.snd_card_get_index)(card); + if (cardIndex < 0) { + return -2; /* Failed to retrieve the card index. */ + } + + /*printf("TESTING: CARD=%s,DEV=%s\n", card, dev); */ + + + /* Construction. */ + dst[0] = 'h'; dst[1] = 'w'; dst[2] = ':'; + if (ma_itoa_s(cardIndex, dst+3, dstSize-3, 10) != 0) { + return -3; + } + if (ma_strcat_s(dst, dstSize, ",") != 0) { + return -3; + } + if (ma_strcat_s(dst, dstSize, dev) != 0) { + return -3; + } + + return 0; +} + +static ma_bool32 ma_does_id_exist_in_list__alsa(ma_device_id* pUniqueIDs, ma_uint32 count, const char* pHWID) +{ + ma_uint32 i; + + MA_ASSERT(pHWID != NULL); + + for (i = 0; i < count; ++i) { + if (ma_strcmp(pUniqueIDs[i].alsa, pHWID) == 0) { + return MA_TRUE; + } + } + + return MA_FALSE; +} + + +static ma_result ma_context_open_pcm__alsa(ma_context* pContext, ma_share_mode shareMode, ma_device_type deviceType, const ma_device_id* pDeviceID, int openMode, ma_snd_pcm_t** ppPCM) +{ + ma_snd_pcm_t* pPCM; + ma_snd_pcm_stream_t stream; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(ppPCM != NULL); + + *ppPCM = NULL; + pPCM = NULL; + + stream = (deviceType == ma_device_type_playback) ? MA_SND_PCM_STREAM_PLAYBACK : MA_SND_PCM_STREAM_CAPTURE; + + if (pDeviceID == NULL) { + ma_bool32 isDeviceOpen; + size_t i; + + /* + We're opening the default device. I don't know if trying anything other than "default" is necessary, but it makes + me feel better to try as hard as we can get to get _something_ working. 
+ */ + const char* defaultDeviceNames[] = { + "default", + NULL, + NULL, + NULL, + NULL, + NULL, + NULL + }; + + if (shareMode == ma_share_mode_exclusive) { + defaultDeviceNames[1] = "hw"; + defaultDeviceNames[2] = "hw:0"; + defaultDeviceNames[3] = "hw:0,0"; + } else { + if (deviceType == ma_device_type_playback) { + defaultDeviceNames[1] = "dmix"; + defaultDeviceNames[2] = "dmix:0"; + defaultDeviceNames[3] = "dmix:0,0"; + } else { + defaultDeviceNames[1] = "dsnoop"; + defaultDeviceNames[2] = "dsnoop:0"; + defaultDeviceNames[3] = "dsnoop:0,0"; + } + defaultDeviceNames[4] = "hw"; + defaultDeviceNames[5] = "hw:0"; + defaultDeviceNames[6] = "hw:0,0"; + } + + isDeviceOpen = MA_FALSE; + for (i = 0; i < ma_countof(defaultDeviceNames); ++i) { + if (defaultDeviceNames[i] != NULL && defaultDeviceNames[i][0] != '\0') { + if (((ma_snd_pcm_open_proc)pContext->alsa.snd_pcm_open)(&pPCM, defaultDeviceNames[i], stream, openMode) == 0) { + isDeviceOpen = MA_TRUE; + break; + } + } + } + + if (!isDeviceOpen) { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[ALSA] snd_pcm_open() failed when trying to open an appropriate default device.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); + } + } else { + /* + We're trying to open a specific device. There's a few things to consider here: + + miniaudio recongnizes a special format of device id that excludes the "hw", "dmix", etc. prefix. It looks like this: ":0,0", ":0,1", etc. When + an ID of this format is specified, it indicates to miniaudio that it can try different combinations of plugins ("hw", "dmix", etc.) until it + finds an appropriate one that works. This comes in very handy when trying to open a device in shared mode ("dmix"), vs exclusive mode ("hw"). + */ + + /* May end up needing to make small adjustments to the ID, so make a copy. */ + ma_device_id deviceID = *pDeviceID; + int resultALSA = -ENODEV; + + if (deviceID.alsa[0] != ':') { + /* The ID is not in ":0,0" format. Use the ID exactly as-is. */ + resultALSA = ((ma_snd_pcm_open_proc)pContext->alsa.snd_pcm_open)(&pPCM, deviceID.alsa, stream, openMode); + } else { + char hwid[256]; + + /* The ID is in ":0,0" format. Try different plugins depending on the shared mode. */ + if (deviceID.alsa[1] == '\0') { + deviceID.alsa[0] = '\0'; /* An ID of ":" should be converted to "". */ + } + + if (shareMode == ma_share_mode_shared) { + if (deviceType == ma_device_type_playback) { + ma_strcpy_s(hwid, sizeof(hwid), "dmix"); + } else { + ma_strcpy_s(hwid, sizeof(hwid), "dsnoop"); + } + + if (ma_strcat_s(hwid, sizeof(hwid), deviceID.alsa) == 0) { + resultALSA = ((ma_snd_pcm_open_proc)pContext->alsa.snd_pcm_open)(&pPCM, hwid, stream, openMode); + } + } + + /* If at this point we still don't have an open device it means we're either preferencing exclusive mode or opening with "dmix"/"dsnoop" failed. 
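+            As an example of the logic above (illustrative): when shared mode is requested, a device ID of ":0,0" is
+            first tried as "dmix:0,0" for playback (or "dsnoop:0,0" for capture) and then falls back to "hw:0,0" here;
+            in exclusive mode only the "hw" form is attempted.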
*/ + if (resultALSA != 0) { + ma_strcpy_s(hwid, sizeof(hwid), "hw"); + if (ma_strcat_s(hwid, sizeof(hwid), deviceID.alsa) == 0) { + resultALSA = ((ma_snd_pcm_open_proc)pContext->alsa.snd_pcm_open)(&pPCM, hwid, stream, openMode); + } + } + } + + if (resultALSA < 0) { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[ALSA] snd_pcm_open() failed.", ma_result_from_errno(-resultALSA)); + } + } + + *ppPCM = pPCM; + return MA_SUCCESS; +} + + +static ma_bool32 ma_context_is_device_id_equal__alsa(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pID0 != NULL); + MA_ASSERT(pID1 != NULL); + (void)pContext; + + return ma_strcmp(pID0->alsa, pID1->alsa) == 0; +} + +static ma_result ma_context_enumerate_devices__alsa(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + int resultALSA; + ma_bool32 cbResult = MA_TRUE; + char** ppDeviceHints; + ma_device_id* pUniqueIDs = NULL; + ma_uint32 uniqueIDCount = 0; + char** ppNextDeviceHint; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + ma_mutex_lock(&pContext->alsa.internalDeviceEnumLock); + + resultALSA = ((ma_snd_device_name_hint_proc)pContext->alsa.snd_device_name_hint)(-1, "pcm", (void***)&ppDeviceHints); + if (resultALSA < 0) { + ma_mutex_unlock(&pContext->alsa.internalDeviceEnumLock); + return ma_result_from_errno(-resultALSA); + } + + ppNextDeviceHint = ppDeviceHints; + while (*ppNextDeviceHint != NULL) { + char* NAME = ((ma_snd_device_name_get_hint_proc)pContext->alsa.snd_device_name_get_hint)(*ppNextDeviceHint, "NAME"); + char* DESC = ((ma_snd_device_name_get_hint_proc)pContext->alsa.snd_device_name_get_hint)(*ppNextDeviceHint, "DESC"); + char* IOID = ((ma_snd_device_name_get_hint_proc)pContext->alsa.snd_device_name_get_hint)(*ppNextDeviceHint, "IOID"); + ma_device_type deviceType = ma_device_type_playback; + ma_bool32 stopEnumeration = MA_FALSE; + char hwid[sizeof(pUniqueIDs->alsa)]; + ma_device_info deviceInfo; + + if ((IOID == NULL || ma_strcmp(IOID, "Output") == 0)) { + deviceType = ma_device_type_playback; + } + if ((IOID != NULL && ma_strcmp(IOID, "Input" ) == 0)) { + deviceType = ma_device_type_capture; + } + + if (NAME != NULL) { + if (pContext->alsa.useVerboseDeviceEnumeration) { + /* Verbose mode. Use the name exactly as-is. */ + ma_strncpy_s(hwid, sizeof(hwid), NAME, (size_t)-1); + } else { + /* Simplified mode. Use ":%d,%d" format. */ + if (ma_convert_device_name_to_hw_format__alsa(pContext, hwid, sizeof(hwid), NAME) == 0) { + /* + At this point, hwid looks like "hw:0,0". In simplified enumeration mode, we actually want to strip off the + plugin name so it looks like ":0,0". The reason for this is that this special format is detected at device + initialization time and is used as an indicator to try and use the most appropriate plugin depending on the + device type and sharing mode. + */ + char* dst = hwid; + char* src = hwid+2; + while ((*dst++ = *src++)); + } else { + /* Conversion to "hw:%d,%d" failed. Just use the name as-is. */ + ma_strncpy_s(hwid, sizeof(hwid), NAME, (size_t)-1); + } + + if (ma_does_id_exist_in_list__alsa(pUniqueIDs, uniqueIDCount, hwid)) { + goto next_device; /* The device has already been enumerated. Move on to the next one. */ + } else { + /* The device has not yet been enumerated. Make sure it's added to our list so that it's not enumerated again. 
*/ + size_t oldCapacity = sizeof(*pUniqueIDs) * uniqueIDCount; + size_t newCapacity = sizeof(*pUniqueIDs) * (uniqueIDCount + 1); + ma_device_id* pNewUniqueIDs = (ma_device_id*)ma__realloc_from_callbacks(pUniqueIDs, newCapacity, oldCapacity, &pContext->allocationCallbacks); + if (pNewUniqueIDs == NULL) { + goto next_device; /* Failed to allocate memory. */ + } + + pUniqueIDs = pNewUniqueIDs; + MA_COPY_MEMORY(pUniqueIDs[uniqueIDCount].alsa, hwid, sizeof(hwid)); + uniqueIDCount += 1; + } + } + } else { + MA_ZERO_MEMORY(hwid, sizeof(hwid)); + } + + MA_ZERO_OBJECT(&deviceInfo); + ma_strncpy_s(deviceInfo.id.alsa, sizeof(deviceInfo.id.alsa), hwid, (size_t)-1); + + /* + DESC is the friendly name. We treat this slightly differently depending on whether or not we are using verbose + device enumeration. In verbose mode we want to take the entire description so that the end-user can distinguish + between the subdevices of each card/dev pair. In simplified mode, however, we only want the first part of the + description. + + The value in DESC seems to be split into two lines, with the first line being the name of the device and the + second line being a description of the device. I don't like having the description be across two lines because + it makes formatting ugly and annoying. I'm therefore deciding to put it all on a single line with the second line + being put into parentheses. In simplified mode I'm just stripping the second line entirely. + */ + if (DESC != NULL) { + int lfPos; + const char* line2 = ma_find_char(DESC, '\n', &lfPos); + if (line2 != NULL) { + line2 += 1; /* Skip past the new-line character. */ + + if (pContext->alsa.useVerboseDeviceEnumeration) { + /* Verbose mode. Put the second line in brackets. */ + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), DESC, lfPos); + ma_strcat_s (deviceInfo.name, sizeof(deviceInfo.name), " ("); + ma_strcat_s (deviceInfo.name, sizeof(deviceInfo.name), line2); + ma_strcat_s (deviceInfo.name, sizeof(deviceInfo.name), ")"); + } else { + /* Simplified mode. Strip the second line entirely. */ + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), DESC, lfPos); + } + } else { + /* There's no second line. Just copy the whole description. */ + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), DESC, (size_t)-1); + } + } + + if (!ma_is_device_blacklisted__alsa(deviceType, NAME)) { + cbResult = callback(pContext, deviceType, &deviceInfo, pUserData); + } + + /* + Some devices are both playback and capture, but they are only enumerated by ALSA once. We need to fire the callback + again for the other device type in this case. We do this for known devices. + */ + if (cbResult) { + if (ma_is_common_device_name__alsa(NAME)) { + if (deviceType == ma_device_type_playback) { + if (!ma_is_capture_device_blacklisted__alsa(NAME)) { + cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + } + } else { + if (!ma_is_playback_device_blacklisted__alsa(NAME)) { + cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + } + } + } + } + + if (cbResult == MA_FALSE) { + stopEnumeration = MA_TRUE; + } + + next_device: + free(NAME); + free(DESC); + free(IOID); + ppNextDeviceHint += 1; + + /* We need to stop enumeration if the callback returned false. 
*/ + if (stopEnumeration) { + break; + } + } + + ma__free_from_callbacks(pUniqueIDs, &pContext->allocationCallbacks); + ((ma_snd_device_name_free_hint_proc)pContext->alsa.snd_device_name_free_hint)((void**)ppDeviceHints); + + ma_mutex_unlock(&pContext->alsa.internalDeviceEnumLock); + + return MA_SUCCESS; +} + + +typedef struct +{ + ma_device_type deviceType; + const ma_device_id* pDeviceID; + ma_share_mode shareMode; + ma_device_info* pDeviceInfo; + ma_bool32 foundDevice; +} ma_context_get_device_info_enum_callback_data__alsa; + +static ma_bool32 ma_context_get_device_info_enum_callback__alsa(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pDeviceInfo, void* pUserData) +{ + ma_context_get_device_info_enum_callback_data__alsa* pData = (ma_context_get_device_info_enum_callback_data__alsa*)pUserData; + MA_ASSERT(pData != NULL); + + if (pData->pDeviceID == NULL && ma_strcmp(pDeviceInfo->id.alsa, "default") == 0) { + ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), pDeviceInfo->name, (size_t)-1); + pData->foundDevice = MA_TRUE; + } else { + if (pData->deviceType == deviceType && ma_context_is_device_id_equal__alsa(pContext, pData->pDeviceID, &pDeviceInfo->id)) { + ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), pDeviceInfo->name, (size_t)-1); + pData->foundDevice = MA_TRUE; + } + } + + /* Keep enumerating until we have found the device. */ + return !pData->foundDevice; +} + +static ma_result ma_context_get_device_info__alsa(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) +{ + ma_context_get_device_info_enum_callback_data__alsa data; + ma_result result; + int resultALSA; + ma_snd_pcm_t* pPCM; + ma_snd_pcm_hw_params_t* pHWParams; + ma_snd_pcm_format_mask_t* pFormatMask; + int sampleRateDir = 0; + + MA_ASSERT(pContext != NULL); + + /* We just enumerate to find basic information about the device. */ + data.deviceType = deviceType; + data.pDeviceID = pDeviceID; + data.shareMode = shareMode; + data.pDeviceInfo = pDeviceInfo; + data.foundDevice = MA_FALSE; + result = ma_context_enumerate_devices__alsa(pContext, ma_context_get_device_info_enum_callback__alsa, &data); + if (result != MA_SUCCESS) { + return result; + } + + if (!data.foundDevice) { + return MA_NO_DEVICE; + } + + /* For detailed info we need to open the device. */ + result = ma_context_open_pcm__alsa(pContext, shareMode, deviceType, pDeviceID, 0, &pPCM); + if (result != MA_SUCCESS) { + return result; + } + + /* We need to initialize a HW parameters object in order to know what formats are supported. */ + pHWParams = (ma_snd_pcm_hw_params_t*)ma__calloc_from_callbacks(((ma_snd_pcm_hw_params_sizeof_proc)pContext->alsa.snd_pcm_hw_params_sizeof)(), &pContext->allocationCallbacks); + if (pHWParams == NULL) { + return MA_OUT_OF_MEMORY; + } + + resultALSA = ((ma_snd_pcm_hw_params_any_proc)pContext->alsa.snd_pcm_hw_params_any)(pPCM, pHWParams); + if (resultALSA < 0) { + ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks); + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to initialize hardware parameters. 
snd_pcm_hw_params_any() failed.", ma_result_from_errno(-resultALSA)); + } + + ((ma_snd_pcm_hw_params_get_channels_min_proc)pContext->alsa.snd_pcm_hw_params_get_channels_min)(pHWParams, &pDeviceInfo->minChannels); + ((ma_snd_pcm_hw_params_get_channels_max_proc)pContext->alsa.snd_pcm_hw_params_get_channels_max)(pHWParams, &pDeviceInfo->maxChannels); + ((ma_snd_pcm_hw_params_get_rate_min_proc)pContext->alsa.snd_pcm_hw_params_get_rate_min)(pHWParams, &pDeviceInfo->minSampleRate, &sampleRateDir); + ((ma_snd_pcm_hw_params_get_rate_max_proc)pContext->alsa.snd_pcm_hw_params_get_rate_max)(pHWParams, &pDeviceInfo->maxSampleRate, &sampleRateDir); + + /* Formats. */ + pFormatMask = (ma_snd_pcm_format_mask_t*)ma__calloc_from_callbacks(((ma_snd_pcm_format_mask_sizeof_proc)pContext->alsa.snd_pcm_format_mask_sizeof)(), &pContext->allocationCallbacks); + if (pFormatMask == NULL) { + ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks); + return MA_OUT_OF_MEMORY; + } + + ((ma_snd_pcm_hw_params_get_format_mask_proc)pContext->alsa.snd_pcm_hw_params_get_format_mask)(pHWParams, pFormatMask); + + pDeviceInfo->formatCount = 0; + if (((ma_snd_pcm_format_mask_test_proc)pContext->alsa.snd_pcm_format_mask_test)(pFormatMask, MA_SND_PCM_FORMAT_U8)) { + pDeviceInfo->formats[pDeviceInfo->formatCount++] = ma_format_u8; + } + if (((ma_snd_pcm_format_mask_test_proc)pContext->alsa.snd_pcm_format_mask_test)(pFormatMask, MA_SND_PCM_FORMAT_S16_LE)) { + pDeviceInfo->formats[pDeviceInfo->formatCount++] = ma_format_s16; + } + if (((ma_snd_pcm_format_mask_test_proc)pContext->alsa.snd_pcm_format_mask_test)(pFormatMask, MA_SND_PCM_FORMAT_S24_3LE)) { + pDeviceInfo->formats[pDeviceInfo->formatCount++] = ma_format_s24; + } + if (((ma_snd_pcm_format_mask_test_proc)pContext->alsa.snd_pcm_format_mask_test)(pFormatMask, MA_SND_PCM_FORMAT_S32_LE)) { + pDeviceInfo->formats[pDeviceInfo->formatCount++] = ma_format_s32; + } + if (((ma_snd_pcm_format_mask_test_proc)pContext->alsa.snd_pcm_format_mask_test)(pFormatMask, MA_SND_PCM_FORMAT_FLOAT_LE)) { + pDeviceInfo->formats[pDeviceInfo->formatCount++] = ma_format_f32; + } + + ma__free_from_callbacks(pFormatMask, &pContext->allocationCallbacks); + ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks); + + ((ma_snd_pcm_close_proc)pContext->alsa.snd_pcm_close)(pPCM); + return MA_SUCCESS; +} + + +#if 0 +/* +Waits for a number of frames to become available for either capture or playback. The return +value is the number of frames available. + +This will return early if the main loop is broken with ma_device__break_main_loop(). +*/ +static ma_uint32 ma_device__wait_for_frames__alsa(ma_device* pDevice, ma_bool32* pRequiresRestart) +{ + MA_ASSERT(pDevice != NULL); + + if (pRequiresRestart) *pRequiresRestart = MA_FALSE; + + /* I want it so that this function returns the period size in frames. We just wait until that number of frames are available and then return. */ + ma_uint32 periodSizeInFrames = pDevice->bufferSizeInFrames / pDevice->periods; + while (!pDevice->alsa.breakFromMainLoop) { + ma_snd_pcm_sframes_t framesAvailable = ((ma_snd_pcm_avail_update_proc)pDevice->pContext->alsa.snd_pcm_avail_update)((ma_snd_pcm_t*)pDevice->alsa.pPCM); + if (framesAvailable < 0) { + if (framesAvailable == -EPIPE) { + if (((ma_snd_pcm_recover_proc)pDevice->pContext->alsa.snd_pcm_recover)((ma_snd_pcm_t*)pDevice->alsa.pPCM, framesAvailable, MA_TRUE) < 0) { + return 0; + } + + /* A device recovery means a restart for mmap mode. 
*/ + if (pRequiresRestart) { + *pRequiresRestart = MA_TRUE; + } + + /* Try again, but if it fails this time just return an error. */ + framesAvailable = ((ma_snd_pcm_avail_update_proc)pDevice->pContext->alsa.snd_pcm_avail_update)((ma_snd_pcm_t*)pDevice->alsa.pPCM); + if (framesAvailable < 0) { + return 0; + } + } + } + + if (framesAvailable >= periodSizeInFrames) { + return periodSizeInFrames; + } + + if (framesAvailable < periodSizeInFrames) { + /* Less than a whole period is available so keep waiting. */ + int waitResult = ((ma_snd_pcm_wait_proc)pDevice->pContext->alsa.snd_pcm_wait)((ma_snd_pcm_t*)pDevice->alsa.pPCM, -1); + if (waitResult < 0) { + if (waitResult == -EPIPE) { + if (((ma_snd_pcm_recover_proc)pDevice->pContext->alsa.snd_pcm_recover)((ma_snd_pcm_t*)pDevice->alsa.pPCM, waitResult, MA_TRUE) < 0) { + return 0; + } + + /* A device recovery means a restart for mmap mode. */ + if (pRequiresRestart) { + *pRequiresRestart = MA_TRUE; + } + } + } + } + } + + /* We'll get here if the loop was terminated. Just return whatever's available. */ + ma_snd_pcm_sframes_t framesAvailable = ((ma_snd_pcm_avail_update_proc)pDevice->pContext->alsa.snd_pcm_avail_update)((ma_snd_pcm_t*)pDevice->alsa.pPCM); + if (framesAvailable < 0) { + return 0; + } + + return framesAvailable; +} + +static ma_bool32 ma_device_read_from_client_and_write__alsa(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + if (!ma_device_is_started(pDevice) && ma_device__get_state(pDevice) != MA_STATE_STARTING) { + return MA_FALSE; + } + if (pDevice->alsa.breakFromMainLoop) { + return MA_FALSE; + } + + if (pDevice->alsa.isUsingMMap) { + /* mmap. */ + ma_bool32 requiresRestart; + ma_uint32 framesAvailable = ma_device__wait_for_frames__alsa(pDevice, &requiresRestart); + if (framesAvailable == 0) { + return MA_FALSE; + } + + /* Don't bother asking the client for more audio data if we're just stopping the device anyway. */ + if (pDevice->alsa.breakFromMainLoop) { + return MA_FALSE; + } + + const ma_snd_pcm_channel_area_t* pAreas; + ma_snd_pcm_uframes_t mappedOffset; + ma_snd_pcm_uframes_t mappedFrames = framesAvailable; + while (framesAvailable > 0) { + int result = ((ma_snd_pcm_mmap_begin_proc)pDevice->pContext->alsa.snd_pcm_mmap_begin)((ma_snd_pcm_t*)pDevice->alsa.pPCM, &pAreas, &mappedOffset, &mappedFrames); + if (result < 0) { + return MA_FALSE; + } + + if (mappedFrames > 0) { + void* pBuffer = (ma_uint8*)pAreas[0].addr + ((pAreas[0].first + (mappedOffset * pAreas[0].step)) / 8); + ma_device__read_frames_from_client(pDevice, mappedFrames, pBuffer); + } + + result = ((ma_snd_pcm_mmap_commit_proc)pDevice->pContext->alsa.snd_pcm_mmap_commit)((ma_snd_pcm_t*)pDevice->alsa.pPCM, mappedOffset, mappedFrames); + if (result < 0 || (ma_snd_pcm_uframes_t)result != mappedFrames) { + ((ma_snd_pcm_recover_proc)pDevice->pContext->alsa.snd_pcm_recover)((ma_snd_pcm_t*)pDevice->alsa.pPCM, result, MA_TRUE); + return MA_FALSE; + } + + if (requiresRestart) { + if (((ma_snd_pcm_start_proc)pDevice->pContext->alsa.snd_pcm_start)((ma_snd_pcm_t*)pDevice->alsa.pPCM) < 0) { + return MA_FALSE; + } + } + + if (framesAvailable >= mappedFrames) { + framesAvailable -= mappedFrames; + } else { + framesAvailable = 0; + } + } + } else { + /* readi/writei. */ + while (!pDevice->alsa.breakFromMainLoop) { + ma_uint32 framesAvailable = ma_device__wait_for_frames__alsa(pDevice, NULL); + if (framesAvailable == 0) { + continue; + } + + /* Don't bother asking the client for more audio data if we're just stopping the device anyway. 
*/ + if (pDevice->alsa.breakFromMainLoop) { + return MA_FALSE; + } + + ma_device__read_frames_from_client(pDevice, framesAvailable, pDevice->alsa.pIntermediaryBuffer); + + ma_snd_pcm_sframes_t framesWritten = ((ma_snd_pcm_writei_proc)pDevice->pContext->alsa.snd_pcm_writei)((ma_snd_pcm_t*)pDevice->alsa.pPCM, pDevice->alsa.pIntermediaryBuffer, framesAvailable); + if (framesWritten < 0) { + if (framesWritten == -EAGAIN) { + continue; /* Just keep trying... */ + } else if (framesWritten == -EPIPE) { + /* Underrun. Just recover and try writing again. */ + if (((ma_snd_pcm_recover_proc)pDevice->pContext->alsa.snd_pcm_recover)((ma_snd_pcm_t*)pDevice->alsa.pPCM, framesWritten, MA_TRUE) < 0) { + ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to recover device after underrun.", MA_FAILED_TO_START_BACKEND_DEVICE); + return MA_FALSE; + } + + framesWritten = ((ma_snd_pcm_writei_proc)pDevice->pContext->alsa.snd_pcm_writei)((ma_snd_pcm_t*)pDevice->alsa.pPCM, pDevice->alsa.pIntermediaryBuffer, framesAvailable); + if (framesWritten < 0) { + ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to write data to the internal device.", ma_result_from_errno((int)-framesWritten)); + return MA_FALSE; + } + + break; /* Success. */ + } else { + ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] snd_pcm_writei() failed when writing initial data.", ma_result_from_errno((int)-framesWritten)); + return MA_FALSE; + } + } else { + break; /* Success. */ + } + } + } + + return MA_TRUE; +} + +static ma_bool32 ma_device_read_and_send_to_client__alsa(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + if (!ma_device_is_started(pDevice)) { + return MA_FALSE; + } + if (pDevice->alsa.breakFromMainLoop) { + return MA_FALSE; + } + + ma_uint32 framesToSend = 0; + void* pBuffer = NULL; + if (pDevice->alsa.pIntermediaryBuffer == NULL) { + /* mmap. */ + ma_bool32 requiresRestart; + ma_uint32 framesAvailable = ma_device__wait_for_frames__alsa(pDevice, &requiresRestart); + if (framesAvailable == 0) { + return MA_FALSE; + } + + const ma_snd_pcm_channel_area_t* pAreas; + ma_snd_pcm_uframes_t mappedOffset; + ma_snd_pcm_uframes_t mappedFrames = framesAvailable; + while (framesAvailable > 0) { + int result = ((ma_snd_pcm_mmap_begin_proc)pDevice->pContext->alsa.snd_pcm_mmap_begin)((ma_snd_pcm_t*)pDevice->alsa.pPCM, &pAreas, &mappedOffset, &mappedFrames); + if (result < 0) { + return MA_FALSE; + } + + if (mappedFrames > 0) { + void* pBuffer = (ma_uint8*)pAreas[0].addr + ((pAreas[0].first + (mappedOffset * pAreas[0].step)) / 8); + ma_device__send_frames_to_client(pDevice, mappedFrames, pBuffer); + } + + result = ((ma_snd_pcm_mmap_commit_proc)pDevice->pContext->alsa.snd_pcm_mmap_commit)((ma_snd_pcm_t*)pDevice->alsa.pPCM, mappedOffset, mappedFrames); + if (result < 0 || (ma_snd_pcm_uframes_t)result != mappedFrames) { + ((ma_snd_pcm_recover_proc)pDevice->pContext->alsa.snd_pcm_recover)((ma_snd_pcm_t*)pDevice->alsa.pPCM, result, MA_TRUE); + return MA_FALSE; + } + + if (requiresRestart) { + if (((ma_snd_pcm_start_proc)pDevice->pContext->alsa.snd_pcm_start)((ma_snd_pcm_t*)pDevice->alsa.pPCM) < 0) { + return MA_FALSE; + } + } + + if (framesAvailable >= mappedFrames) { + framesAvailable -= mappedFrames; + } else { + framesAvailable = 0; + } + } + } else { + /* readi/writei. 
*/ + ma_snd_pcm_sframes_t framesRead = 0; + while (!pDevice->alsa.breakFromMainLoop) { + ma_uint32 framesAvailable = ma_device__wait_for_frames__alsa(pDevice, NULL); + if (framesAvailable == 0) { + continue; + } + + framesRead = ((ma_snd_pcm_readi_proc)pDevice->pContext->alsa.snd_pcm_readi)((ma_snd_pcm_t*)pDevice->alsa.pPCM, pDevice->alsa.pIntermediaryBuffer, framesAvailable); + if (framesRead < 0) { + if (framesRead == -EAGAIN) { + continue; /* Just keep trying... */ + } else if (framesRead == -EPIPE) { + /* Overrun. Just recover and try reading again. */ + if (((ma_snd_pcm_recover_proc)pDevice->pContext->alsa.snd_pcm_recover)((ma_snd_pcm_t*)pDevice->alsa.pPCM, framesRead, MA_TRUE) < 0) { + ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to recover device after overrun.", MA_FAILED_TO_START_BACKEND_DEVICE); + return MA_FALSE; + } + + framesRead = ((ma_snd_pcm_readi_proc)pDevice->pContext->alsa.snd_pcm_readi)((ma_snd_pcm_t*)pDevice->alsa.pPCM, pDevice->alsa.pIntermediaryBuffer, framesAvailable); + if (framesRead < 0) { + ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to read data from the internal device.", ma_result_from_errno((int)-framesRead)); + return MA_FALSE; + } + + break; /* Success. */ + } else { + return MA_FALSE; + } + } else { + break; /* Success. */ + } + } + + framesToSend = framesRead; + pBuffer = pDevice->alsa.pIntermediaryBuffer; + } + + if (framesToSend > 0) { + ma_device__send_frames_to_client(pDevice, framesToSend, pBuffer); + } + + return MA_TRUE; +} +#endif /* 0 */ + +static void ma_device_uninit__alsa(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + if ((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture) { + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture); + } + + if ((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback) { + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback); + } +} + +static ma_result ma_device_init_by_type__alsa(ma_context* pContext, const ma_device_config* pConfig, ma_device_type deviceType, ma_device* pDevice) +{ + ma_result result; + int resultALSA; + ma_snd_pcm_t* pPCM; + ma_bool32 isUsingMMap; + ma_snd_pcm_format_t formatALSA; + ma_share_mode shareMode; + ma_device_id* pDeviceID; + ma_format internalFormat; + ma_uint32 internalChannels; + ma_uint32 internalSampleRate; + ma_channel internalChannelMap[MA_MAX_CHANNELS]; + ma_uint32 internalPeriodSizeInFrames; + ma_uint32 internalPeriods; + int openMode; + ma_snd_pcm_hw_params_t* pHWParams; + ma_snd_pcm_sw_params_t* pSWParams; + ma_snd_pcm_uframes_t bufferBoundary; + float bufferSizeScaleFactor; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pConfig != NULL); + MA_ASSERT(deviceType != ma_device_type_duplex); /* This function should only be called for playback _or_ capture, never duplex. */ + MA_ASSERT(pDevice != NULL); + + formatALSA = ma_convert_ma_format_to_alsa_format((deviceType == ma_device_type_capture) ? pConfig->capture.format : pConfig->playback.format); + shareMode = (deviceType == ma_device_type_capture) ? pConfig->capture.shareMode : pConfig->playback.shareMode; + pDeviceID = (deviceType == ma_device_type_capture) ? 
pConfig->capture.pDeviceID : pConfig->playback.pDeviceID; + + openMode = 0; + if (pConfig->alsa.noAutoResample) { + openMode |= MA_SND_PCM_NO_AUTO_RESAMPLE; + } + if (pConfig->alsa.noAutoChannels) { + openMode |= MA_SND_PCM_NO_AUTO_CHANNELS; + } + if (pConfig->alsa.noAutoFormat) { + openMode |= MA_SND_PCM_NO_AUTO_FORMAT; + } + + result = ma_context_open_pcm__alsa(pContext, shareMode, deviceType, pDeviceID, openMode, &pPCM); + if (result != MA_SUCCESS) { + return result; + } + + /* If using the default buffer size we may want to apply some device-specific scaling for known devices that have peculiar latency characteristics */ + bufferSizeScaleFactor = 1; + if (pDevice->usingDefaultBufferSize) { + ma_snd_pcm_info_t* pInfo = (ma_snd_pcm_info_t*)ma__calloc_from_callbacks(((ma_snd_pcm_info_sizeof_proc)pContext->alsa.snd_pcm_info_sizeof)(), &pContext->allocationCallbacks); + if (pInfo == NULL) { + return MA_OUT_OF_MEMORY; + } + + /* We may need to scale the size of the buffer depending on the device. */ + if (((ma_snd_pcm_info_proc)pContext->alsa.snd_pcm_info)(pPCM, pInfo) == 0) { + const char* deviceName = ((ma_snd_pcm_info_get_name_proc)pContext->alsa.snd_pcm_info_get_name)(pInfo); + if (deviceName != NULL) { + if (ma_strcmp(deviceName, "default") == 0) { + char** ppDeviceHints; + char** ppNextDeviceHint; + + /* It's the default device. We need to use DESC from snd_device_name_hint(). */ + if (((ma_snd_device_name_hint_proc)pContext->alsa.snd_device_name_hint)(-1, "pcm", (void***)&ppDeviceHints) < 0) { + ma__free_from_callbacks(pInfo, &pContext->allocationCallbacks); + return MA_NO_BACKEND; + } + + ppNextDeviceHint = ppDeviceHints; + while (*ppNextDeviceHint != NULL) { + char* NAME = ((ma_snd_device_name_get_hint_proc)pContext->alsa.snd_device_name_get_hint)(*ppNextDeviceHint, "NAME"); + char* DESC = ((ma_snd_device_name_get_hint_proc)pContext->alsa.snd_device_name_get_hint)(*ppNextDeviceHint, "DESC"); + char* IOID = ((ma_snd_device_name_get_hint_proc)pContext->alsa.snd_device_name_get_hint)(*ppNextDeviceHint, "IOID"); + + ma_bool32 foundDevice = MA_FALSE; + if ((deviceType == ma_device_type_playback && (IOID == NULL || ma_strcmp(IOID, "Output") == 0)) || + (deviceType == ma_device_type_capture && (IOID != NULL && ma_strcmp(IOID, "Input" ) == 0))) { + if (ma_strcmp(NAME, deviceName) == 0) { + bufferSizeScaleFactor = ma_find_default_buffer_size_scale__alsa(DESC); + foundDevice = MA_TRUE; + } + } + + free(NAME); + free(DESC); + free(IOID); + ppNextDeviceHint += 1; + + if (foundDevice) { + break; + } + } + + ((ma_snd_device_name_free_hint_proc)pContext->alsa.snd_device_name_free_hint)((void**)ppDeviceHints); + } else { + bufferSizeScaleFactor = ma_find_default_buffer_size_scale__alsa(deviceName); + } + } + } + + ma__free_from_callbacks(pInfo, &pContext->allocationCallbacks); + } + + + /* Hardware parameters. */ + pHWParams = (ma_snd_pcm_hw_params_t*)ma__calloc_from_callbacks(((ma_snd_pcm_hw_params_sizeof_proc)pContext->alsa.snd_pcm_hw_params_sizeof)(), &pContext->allocationCallbacks); + if (pHWParams == NULL) { + return MA_OUT_OF_MEMORY; + } + + resultALSA = ((ma_snd_pcm_hw_params_any_proc)pContext->alsa.snd_pcm_hw_params_any)(pPCM, pHWParams); + if (resultALSA < 0) { + ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks); + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to initialize hardware parameters. 
snd_pcm_hw_params_any() failed.", ma_result_from_errno(-resultALSA)); + } + + /* MMAP Mode. Try using interleaved MMAP access. If this fails, fall back to standard readi/writei. */ + isUsingMMap = MA_FALSE; +#if 0 /* NOTE: MMAP mode temporarily disabled. */ + if (deviceType != ma_device_type_capture) { /* <-- Disabling MMAP mode for capture devices because I apparently do not have a device that supports it which means I can't test it... Contributions welcome. */ + if (!pConfig->alsa.noMMap && ma_device__is_async(pDevice)) { + if (((ma_snd_pcm_hw_params_set_access_proc)pContext->alsa.snd_pcm_hw_params_set_access)(pPCM, pHWParams, MA_SND_PCM_ACCESS_MMAP_INTERLEAVED) == 0) { + pDevice->alsa.isUsingMMap = MA_TRUE; + } + } + } +#endif + + if (!isUsingMMap) { + resultALSA = ((ma_snd_pcm_hw_params_set_access_proc)pContext->alsa.snd_pcm_hw_params_set_access)(pPCM, pHWParams, MA_SND_PCM_ACCESS_RW_INTERLEAVED); + if (resultALSA < 0) { + ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks); + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set access mode to neither SND_PCM_ACCESS_MMAP_INTERLEAVED nor SND_PCM_ACCESS_RW_INTERLEAVED. snd_pcm_hw_params_set_access() failed.", ma_result_from_errno(-resultALSA)); + } + } + + /* + Most important properties first. The documentation for OSS (yes, I know this is ALSA!) recommends format, channels, then sample rate. I can't + find any documentation for ALSA specifically, so I'm going to copy the recommendation for OSS. + */ + + /* Format. */ + { + ma_snd_pcm_format_mask_t* pFormatMask; + + /* Try getting every supported format first. */ + pFormatMask = (ma_snd_pcm_format_mask_t*)ma__calloc_from_callbacks(((ma_snd_pcm_format_mask_sizeof_proc)pContext->alsa.snd_pcm_format_mask_sizeof)(), &pContext->allocationCallbacks); + if (pFormatMask == NULL) { + ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks); + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + return MA_OUT_OF_MEMORY; + } + + ((ma_snd_pcm_hw_params_get_format_mask_proc)pContext->alsa.snd_pcm_hw_params_get_format_mask)(pHWParams, pFormatMask); + + /* + At this point we should have a list of supported formats, so now we need to find the best one. We first check if the requested format is + supported, and if so, use that one. If it's not supported, we just run though a list of formats and try to find the best one. + */ + if (!((ma_snd_pcm_format_mask_test_proc)pContext->alsa.snd_pcm_format_mask_test)(pFormatMask, formatALSA)) { + size_t i; + + /* The requested format is not supported so now try running through the list of formats and return the best one. 
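+
+            (Illustrative note, nothing beyond the list below.) For example, a device whose format mask only
+            reports 32-bit samples would fall through to MA_SND_PCM_FORMAT_S32_LE (or the _BE variant chosen
+            below on big-endian systems), and miniaudio would then convert between that internal format and
+            whatever format the caller requested.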
*/ + ma_snd_pcm_format_t preferredFormatsALSA[] = { + MA_SND_PCM_FORMAT_S16_LE, /* ma_format_s16 */ + MA_SND_PCM_FORMAT_FLOAT_LE, /* ma_format_f32 */ + MA_SND_PCM_FORMAT_S32_LE, /* ma_format_s32 */ + MA_SND_PCM_FORMAT_S24_3LE, /* ma_format_s24 */ + MA_SND_PCM_FORMAT_U8 /* ma_format_u8 */ + }; + + if (ma_is_big_endian()) { + preferredFormatsALSA[0] = MA_SND_PCM_FORMAT_S16_BE; + preferredFormatsALSA[1] = MA_SND_PCM_FORMAT_FLOAT_BE; + preferredFormatsALSA[2] = MA_SND_PCM_FORMAT_S32_BE; + preferredFormatsALSA[3] = MA_SND_PCM_FORMAT_S24_3BE; + preferredFormatsALSA[4] = MA_SND_PCM_FORMAT_U8; + } + + formatALSA = MA_SND_PCM_FORMAT_UNKNOWN; + for (i = 0; i < (sizeof(preferredFormatsALSA) / sizeof(preferredFormatsALSA[0])); ++i) { + if (((ma_snd_pcm_format_mask_test_proc)pContext->alsa.snd_pcm_format_mask_test)(pFormatMask, preferredFormatsALSA[i])) { + formatALSA = preferredFormatsALSA[i]; + break; + } + } + + if (formatALSA == MA_SND_PCM_FORMAT_UNKNOWN) { + ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks); + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Format not supported. The device does not support any miniaudio formats.", MA_FORMAT_NOT_SUPPORTED); + } + } + + ma__free_from_callbacks(pFormatMask, &pContext->allocationCallbacks); + pFormatMask = NULL; + + resultALSA = ((ma_snd_pcm_hw_params_set_format_proc)pContext->alsa.snd_pcm_hw_params_set_format)(pPCM, pHWParams, formatALSA); + if (resultALSA < 0) { + ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks); + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Format not supported. snd_pcm_hw_params_set_format() failed.", ma_result_from_errno(-resultALSA)); + } + + internalFormat = ma_format_from_alsa(formatALSA); + if (internalFormat == ma_format_unknown) { + ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks); + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] The chosen format is not supported by miniaudio.", MA_FORMAT_NOT_SUPPORTED); + } + } + + /* Channels. */ + { + unsigned int channels = (deviceType == ma_device_type_capture) ? pConfig->capture.channels : pConfig->playback.channels; + resultALSA = ((ma_snd_pcm_hw_params_set_channels_near_proc)pContext->alsa.snd_pcm_hw_params_set_channels_near)(pPCM, pHWParams, &channels); + if (resultALSA < 0) { + ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks); + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set channel count. snd_pcm_hw_params_set_channels_near() failed.", ma_result_from_errno(-resultALSA)); + } + internalChannels = (ma_uint32)channels; + } + + /* Sample Rate */ + { + unsigned int sampleRate; + + /* + It appears there's either a bug in ALSA, a bug in some drivers, or I'm doing something silly; but having resampling enabled causes + problems with some device configurations when used in conjunction with MMAP access mode. To fix this problem we need to disable + resampling. + + To reproduce this problem, open the "plug:dmix" device, and set the sample rate to 44100. Internally, it looks like dmix uses a + sample rate of 48000. 
The hardware parameters will get set correctly with no errors, but it looks like the 44100 -> 48000 resampling + doesn't work properly - but only with MMAP access mode. You will notice skipping/crackling in the audio, and it'll run at a slightly + faster rate. + + miniaudio has built-in support for sample rate conversion (albeit low quality at the moment), so disabling resampling should be fine + for us. The only problem is that it won't be taking advantage of any kind of hardware-accelerated resampling and it won't be very + good quality until I get a chance to improve the quality of miniaudio's software sample rate conversion. + + I don't currently know if the dmix plugin is the only one with this error. Indeed, this is the only one I've been able to reproduce + this error with. In the future, we may want to restrict the disabling of resampling to only known bad plugins. + */ + ((ma_snd_pcm_hw_params_set_rate_resample_proc)pContext->alsa.snd_pcm_hw_params_set_rate_resample)(pPCM, pHWParams, 0); + + sampleRate = pConfig->sampleRate; + resultALSA = ((ma_snd_pcm_hw_params_set_rate_near_proc)pContext->alsa.snd_pcm_hw_params_set_rate_near)(pPCM, pHWParams, &sampleRate, 0); + if (resultALSA < 0) { + ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks); + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Sample rate not supported. snd_pcm_hw_params_set_rate_near() failed.", ma_result_from_errno(-resultALSA)); + } + internalSampleRate = (ma_uint32)sampleRate; + } + + /* Periods. */ + { + ma_uint32 periods = pConfig->periods; + resultALSA = ((ma_snd_pcm_hw_params_set_periods_near_proc)pContext->alsa.snd_pcm_hw_params_set_periods_near)(pPCM, pHWParams, &periods, NULL); + if (resultALSA < 0) { + ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks); + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set period count. snd_pcm_hw_params_set_periods_near() failed.", ma_result_from_errno(-resultALSA)); + } + internalPeriods = periods; + } + + /* Buffer Size */ + { + ma_snd_pcm_uframes_t actualBufferSizeInFrames = pConfig->periodSizeInFrames * internalPeriods; + if (actualBufferSizeInFrames == 0) { + actualBufferSizeInFrames = ma_scale_buffer_size(ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->periodSizeInMilliseconds, internalSampleRate), bufferSizeScaleFactor) * internalPeriods; + } + + resultALSA = ((ma_snd_pcm_hw_params_set_buffer_size_near_proc)pContext->alsa.snd_pcm_hw_params_set_buffer_size_near)(pPCM, pHWParams, &actualBufferSizeInFrames); + if (resultALSA < 0) { + ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks); + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set buffer size for device. snd_pcm_hw_params_set_buffer_size() failed.", ma_result_from_errno(-resultALSA)); + } + internalPeriodSizeInFrames = actualBufferSizeInFrames / internalPeriods; + } + + /* Apply hardware parameters. */ + resultALSA = ((ma_snd_pcm_hw_params_proc)pContext->alsa.snd_pcm_hw_params)(pPCM, pHWParams); + if (resultALSA < 0) { + ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks); + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set hardware parameters. 
snd_pcm_hw_params() failed.", ma_result_from_errno(-resultALSA)); + } + + ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks); + pHWParams = NULL; + + + /* Software parameters. */ + pSWParams = (ma_snd_pcm_sw_params_t*)ma__calloc_from_callbacks(((ma_snd_pcm_sw_params_sizeof_proc)pContext->alsa.snd_pcm_sw_params_sizeof)(), &pContext->allocationCallbacks); + if (pSWParams == NULL) { + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + return MA_OUT_OF_MEMORY; + } + + resultALSA = ((ma_snd_pcm_sw_params_current_proc)pContext->alsa.snd_pcm_sw_params_current)(pPCM, pSWParams); + if (resultALSA < 0) { + ma__free_from_callbacks(pSWParams, &pContext->allocationCallbacks); + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to initialize software parameters. snd_pcm_sw_params_current() failed.", ma_result_from_errno(-resultALSA)); + } + + resultALSA = ((ma_snd_pcm_sw_params_set_avail_min_proc)pContext->alsa.snd_pcm_sw_params_set_avail_min)(pPCM, pSWParams, ma_prev_power_of_2(internalPeriodSizeInFrames)); + if (resultALSA < 0) { + ma__free_from_callbacks(pSWParams, &pContext->allocationCallbacks); + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] snd_pcm_sw_params_set_avail_min() failed.", ma_result_from_errno(-resultALSA)); + } + + resultALSA = ((ma_snd_pcm_sw_params_get_boundary_proc)pContext->alsa.snd_pcm_sw_params_get_boundary)(pSWParams, &bufferBoundary); + if (resultALSA < 0) { + bufferBoundary = internalPeriodSizeInFrames * internalPeriods; + } + + /*printf("TRACE: bufferBoundary=%ld\n", bufferBoundary);*/ + + if (deviceType == ma_device_type_playback && !isUsingMMap) { /* Only playback devices in writei/readi mode need a start threshold. */ + /* + Subtle detail here with the start threshold. When in playback-only mode (no full-duplex) we can set the start threshold to + the size of a period. But for full-duplex we need to set it such that it is at least two periods. + */ + resultALSA = ((ma_snd_pcm_sw_params_set_start_threshold_proc)pContext->alsa.snd_pcm_sw_params_set_start_threshold)(pPCM, pSWParams, internalPeriodSizeInFrames*2); + if (resultALSA < 0) { + ma__free_from_callbacks(pSWParams, &pContext->allocationCallbacks); + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set start threshold for playback device. snd_pcm_sw_params_set_start_threshold() failed.", ma_result_from_errno(-resultALSA)); + } + + resultALSA = ((ma_snd_pcm_sw_params_set_stop_threshold_proc)pContext->alsa.snd_pcm_sw_params_set_stop_threshold)(pPCM, pSWParams, bufferBoundary); + if (resultALSA < 0) { /* Set to boundary to loop instead of stop in the event of an xrun. */ + ma__free_from_callbacks(pSWParams, &pContext->allocationCallbacks); + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set stop threshold for playback device. 
snd_pcm_sw_params_set_stop_threshold() failed.", ma_result_from_errno(-resultALSA)); + } + } + + resultALSA = ((ma_snd_pcm_sw_params_proc)pContext->alsa.snd_pcm_sw_params)(pPCM, pSWParams); + if (resultALSA < 0) { + ma__free_from_callbacks(pSWParams, &pContext->allocationCallbacks); + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set software parameters. snd_pcm_sw_params() failed.", ma_result_from_errno(-resultALSA)); + } + + ma__free_from_callbacks(pSWParams, &pContext->allocationCallbacks); + pSWParams = NULL; + + + /* Grab the internal channel map. For now we're not going to bother trying to change the channel map and instead just do it ourselves. */ + { + ma_snd_pcm_chmap_t* pChmap = ((ma_snd_pcm_get_chmap_proc)pContext->alsa.snd_pcm_get_chmap)(pPCM); + if (pChmap != NULL) { + ma_uint32 iChannel; + + /* There are cases where the returned channel map can have a different channel count than was returned by snd_pcm_hw_params_set_channels_near(). */ + if (pChmap->channels >= internalChannels) { + /* Drop excess channels. */ + for (iChannel = 0; iChannel < internalChannels; ++iChannel) { + internalChannelMap[iChannel] = ma_convert_alsa_channel_position_to_ma_channel(pChmap->pos[iChannel]); + } + } else { + ma_uint32 i; + + /* + Excess channels use defaults. Do an initial fill with defaults, overwrite the first pChmap->channels, validate to ensure there are no duplicate + channels. If validation fails, fall back to defaults. + */ + ma_bool32 isValid = MA_TRUE; + + /* Fill with defaults. */ + ma_get_standard_channel_map(ma_standard_channel_map_alsa, internalChannels, internalChannelMap); + + /* Overwrite first pChmap->channels channels. */ + for (iChannel = 0; iChannel < pChmap->channels; ++iChannel) { + internalChannelMap[iChannel] = ma_convert_alsa_channel_position_to_ma_channel(pChmap->pos[iChannel]); + } + + /* Validate. */ + for (i = 0; i < internalChannels && isValid; ++i) { + ma_uint32 j; + for (j = i+1; j < internalChannels; ++j) { + if (internalChannelMap[i] == internalChannelMap[j]) { + isValid = MA_FALSE; + break; + } + } + } + + /* If our channel map is invalid, fall back to defaults. */ + if (!isValid) { + ma_get_standard_channel_map(ma_standard_channel_map_alsa, internalChannels, internalChannelMap); + } + } + + free(pChmap); + pChmap = NULL; + } else { + /* Could not retrieve the channel map. Fall back to a hard-coded assumption. */ + ma_get_standard_channel_map(ma_standard_channel_map_alsa, internalChannels, internalChannelMap); + } + } + + + /* We're done. Prepare the device. 
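+
+    In summary, the configuration sequence above was: snd_pcm_open() -> snd_pcm_hw_params_any() -> set
+    access/format/channels/rate/periods/buffer size -> snd_pcm_hw_params() -> snd_pcm_sw_params_current() ->
+    set avail_min and the start/stop thresholds -> snd_pcm_sw_params(). snd_pcm_prepare() below then moves the
+    PCM into the PREPARED state so it can be started from the main loop.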
*/ + resultALSA = ((ma_snd_pcm_prepare_proc)pDevice->pContext->alsa.snd_pcm_prepare)(pPCM); + if (resultALSA < 0) { + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to prepare device.", ma_result_from_errno(-resultALSA)); + } + + + if (deviceType == ma_device_type_capture) { + pDevice->alsa.pPCMCapture = (ma_ptr)pPCM; + pDevice->alsa.isUsingMMapCapture = isUsingMMap; + pDevice->capture.internalFormat = internalFormat; + pDevice->capture.internalChannels = internalChannels; + pDevice->capture.internalSampleRate = internalSampleRate; + ma_channel_map_copy(pDevice->capture.internalChannelMap, internalChannelMap, internalChannels); + pDevice->capture.internalPeriodSizeInFrames = internalPeriodSizeInFrames; + pDevice->capture.internalPeriods = internalPeriods; + } else { + pDevice->alsa.pPCMPlayback = (ma_ptr)pPCM; + pDevice->alsa.isUsingMMapPlayback = isUsingMMap; + pDevice->playback.internalFormat = internalFormat; + pDevice->playback.internalChannels = internalChannels; + pDevice->playback.internalSampleRate = internalSampleRate; + ma_channel_map_copy(pDevice->playback.internalChannelMap, internalChannelMap, internalChannels); + pDevice->playback.internalPeriodSizeInFrames = internalPeriodSizeInFrames; + pDevice->playback.internalPeriods = internalPeriods; + } + + return MA_SUCCESS; +} + +static ma_result ma_device_init__alsa(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + MA_ZERO_OBJECT(&pDevice->alsa); + + if (pConfig->deviceType == ma_device_type_loopback) { + return MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + ma_result result = ma_device_init_by_type__alsa(pContext, pConfig, ma_device_type_capture, pDevice); + if (result != MA_SUCCESS) { + return result; + } + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + ma_result result = ma_device_init_by_type__alsa(pContext, pConfig, ma_device_type_playback, pDevice); + if (result != MA_SUCCESS) { + return result; + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device_read__alsa(ma_device* pDevice, void* pFramesOut, ma_uint32 frameCount, ma_uint32* pFramesRead) +{ + ma_snd_pcm_sframes_t resultALSA; + + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pFramesOut != NULL); + + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + for (;;) { + resultALSA = ((ma_snd_pcm_readi_proc)pDevice->pContext->alsa.snd_pcm_readi)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture, pFramesOut, frameCount); + if (resultALSA >= 0) { + break; /* Success. */ + } else { + if (resultALSA == -EAGAIN) { + /*printf("TRACE: EGAIN (read)\n");*/ + continue; /* Try again. */ + } else if (resultALSA == -EPIPE) { + #if defined(MA_DEBUG_OUTPUT) + printf("TRACE: EPIPE (read)\n"); + #endif + + /* Overrun. Recover and try again. If this fails we need to return an error. 
*/ + resultALSA = ((ma_snd_pcm_recover_proc)pDevice->pContext->alsa.snd_pcm_recover)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture, resultALSA, MA_TRUE); + if (resultALSA < 0) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to recover device after overrun.", ma_result_from_errno((int)-resultALSA)); + } + + resultALSA = ((ma_snd_pcm_start_proc)pDevice->pContext->alsa.snd_pcm_start)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture); + if (resultALSA < 0) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to start device after underrun.", ma_result_from_errno((int)-resultALSA)); + } + + resultALSA = ((ma_snd_pcm_readi_proc)pDevice->pContext->alsa.snd_pcm_readi)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture, pFramesOut, frameCount); + if (resultALSA < 0) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to read data from the internal device.", ma_result_from_errno((int)-resultALSA)); + } + } + } + } + + if (pFramesRead != NULL) { + *pFramesRead = resultALSA; + } + + return MA_SUCCESS; +} + +static ma_result ma_device_write__alsa(ma_device* pDevice, const void* pFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten) +{ + ma_snd_pcm_sframes_t resultALSA; + + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pFrames != NULL); + + if (pFramesWritten != NULL) { + *pFramesWritten = 0; + } + + for (;;) { + resultALSA = ((ma_snd_pcm_writei_proc)pDevice->pContext->alsa.snd_pcm_writei)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback, pFrames, frameCount); + if (resultALSA >= 0) { + break; /* Success. */ + } else { + if (resultALSA == -EAGAIN) { + /*printf("TRACE: EGAIN (write)\n");*/ + continue; /* Try again. */ + } else if (resultALSA == -EPIPE) { + #if defined(MA_DEBUG_OUTPUT) + printf("TRACE: EPIPE (write)\n"); + #endif + + /* Underrun. Recover and try again. If this fails we need to return an error. */ + resultALSA = ((ma_snd_pcm_recover_proc)pDevice->pContext->alsa.snd_pcm_recover)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback, resultALSA, MA_TRUE); + if (resultALSA < 0) { /* MA_TRUE=silent (don't print anything on error). */ + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to recover device after underrun.", ma_result_from_errno((int)-resultALSA)); + } + + /* + In my testing I have had a situation where writei() does not automatically restart the device even though I've set it + up as such in the software parameters. What will happen is writei() will block indefinitely even though the number of + frames is well beyond the auto-start threshold. To work around this I've needed to add an explicit start here. Not sure + if this is me just being stupid and not recovering the device properly, but this definitely feels like something isn't + quite right here. 
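+
+                    In short, the recovery path here is the equivalent of the following raw ALSA calls
+                    (the "pcm" variable name is illustrative only):
+
+                        snd_pcm_recover(pcm, -EPIPE, 1);          <- reset the stream after the underrun
+                        snd_pcm_start(pcm);                       <- explicit restart, per the note above
+                        snd_pcm_writei(pcm, pFrames, frameCount); <- retry the write that failed
+
+                    which mirrors the three calls made immediately after this comment.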
+ */ + resultALSA = ((ma_snd_pcm_start_proc)pDevice->pContext->alsa.snd_pcm_start)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback); + if (resultALSA < 0) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to start device after underrun.", ma_result_from_errno((int)-resultALSA)); + } + + resultALSA = ((ma_snd_pcm_writei_proc)pDevice->pContext->alsa.snd_pcm_writei)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback, pFrames, frameCount); + if (resultALSA < 0) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to write data to device after underrun.", ma_result_from_errno((int)-resultALSA)); + } + } + } + } + + if (pFramesWritten != NULL) { + *pFramesWritten = resultALSA; + } + + return MA_SUCCESS; +} + +static ma_result ma_device_main_loop__alsa(ma_device* pDevice) +{ + ma_result result = MA_SUCCESS; + int resultALSA; + ma_bool32 exitLoop = MA_FALSE; + + MA_ASSERT(pDevice != NULL); + + /* Capture devices need to be started immediately. */ + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + resultALSA = ((ma_snd_pcm_start_proc)pDevice->pContext->alsa.snd_pcm_start)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture); + if (resultALSA < 0) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to start device in preparation for reading.", ma_result_from_errno(-resultALSA)); + } + } + + while (ma_device__get_state(pDevice) == MA_STATE_STARTED && !exitLoop) { + switch (pDevice->type) + { + case ma_device_type_duplex: + { + if (pDevice->alsa.isUsingMMapCapture || pDevice->alsa.isUsingMMapPlayback) { + /* MMAP */ + return MA_INVALID_OPERATION; /* Not yet implemented. */ + } else { + /* readi() and writei() */ + + /* The process is: device_read -> convert -> callback -> convert -> device_write */ + ma_uint32 totalCapturedDeviceFramesProcessed = 0; + ma_uint32 capturedDevicePeriodSizeInFrames = ma_min(pDevice->capture.internalPeriodSizeInFrames, pDevice->playback.internalPeriodSizeInFrames); + + while (totalCapturedDeviceFramesProcessed < capturedDevicePeriodSizeInFrames) { + ma_uint8 capturedDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint8 playbackDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 capturedDeviceDataCapInFrames = sizeof(capturedDeviceData) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + ma_uint32 playbackDeviceDataCapInFrames = sizeof(playbackDeviceData) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + ma_uint32 capturedDeviceFramesRemaining; + ma_uint32 capturedDeviceFramesProcessed; + ma_uint32 capturedDeviceFramesToProcess; + ma_uint32 capturedDeviceFramesToTryProcessing = capturedDevicePeriodSizeInFrames - totalCapturedDeviceFramesProcessed; + if (capturedDeviceFramesToTryProcessing > capturedDeviceDataCapInFrames) { + capturedDeviceFramesToTryProcessing = capturedDeviceDataCapInFrames; + } + + result = ma_device_read__alsa(pDevice, capturedDeviceData, capturedDeviceFramesToTryProcessing, &capturedDeviceFramesToProcess); + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + + capturedDeviceFramesRemaining = capturedDeviceFramesToProcess; + capturedDeviceFramesProcessed = 0; + + for (;;) { + ma_uint8 capturedClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint8 playbackClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 capturedClientDataCapInFrames = sizeof(capturedClientData) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); + ma_uint32 
playbackClientDataCapInFrames = sizeof(playbackClientData) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); + ma_uint64 capturedClientFramesToProcessThisIteration = ma_min(capturedClientDataCapInFrames, playbackClientDataCapInFrames); + ma_uint64 capturedDeviceFramesToProcessThisIteration = capturedDeviceFramesRemaining; + ma_uint8* pRunningCapturedDeviceFrames = ma_offset_ptr(capturedDeviceData, capturedDeviceFramesProcessed * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels)); + + /* Convert capture data from device format to client format. */ + result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningCapturedDeviceFrames, &capturedDeviceFramesToProcessThisIteration, capturedClientData, &capturedClientFramesToProcessThisIteration); + if (result != MA_SUCCESS) { + break; + } + + /* + If we weren't able to generate any output frames it must mean we've exhaused all of our input. The only time this would not be the case is if capturedClientData was too small + which should never be the case when it's of the size MA_DATA_CONVERTER_STACK_BUFFER_SIZE. + */ + if (capturedClientFramesToProcessThisIteration == 0) { + break; + } + + ma_device__on_data(pDevice, playbackClientData, capturedClientData, (ma_uint32)capturedClientFramesToProcessThisIteration); /* Safe cast .*/ + + capturedDeviceFramesProcessed += (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */ + capturedDeviceFramesRemaining -= (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */ + + /* At this point the playbackClientData buffer should be holding data that needs to be written to the device. */ + for (;;) { + ma_uint64 convertedClientFrameCount = capturedClientFramesToProcessThisIteration; + ma_uint64 convertedDeviceFrameCount = playbackDeviceDataCapInFrames; + result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, playbackClientData, &convertedClientFrameCount, playbackDeviceData, &convertedDeviceFrameCount); + if (result != MA_SUCCESS) { + break; + } + + result = ma_device_write__alsa(pDevice, playbackDeviceData, (ma_uint32)convertedDeviceFrameCount, NULL); /* Safe cast. */ + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + + capturedClientFramesToProcessThisIteration -= (ma_uint32)convertedClientFrameCount; /* Safe cast. */ + if (capturedClientFramesToProcessThisIteration == 0) { + break; + } + } + + /* In case an error happened from ma_device_write__alsa()... */ + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + } + + totalCapturedDeviceFramesProcessed += capturedDeviceFramesProcessed; + } + } + } break; + + case ma_device_type_capture: + { + if (pDevice->alsa.isUsingMMapCapture) { + /* MMAP */ + return MA_INVALID_OPERATION; /* Not yet implemented. */ + } else { + /* readi() */ + + /* We read in chunks of the period size, but use a stack allocated buffer for the intermediary. 
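+
+                    For a sense of scale (assuming MA_DATA_CONVERTER_STACK_BUFFER_SIZE is 4096 bytes): a stereo
+                    s16 internal format gives 4096 / (2 bytes * 2 channels) = 1024 frames per read, so a 10ms
+                    period at 48000Hz (480 frames) is serviced by a single iteration of the loop below.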
*/ + ma_uint8 intermediaryBuffer[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + ma_uint32 periodSizeInFrames = pDevice->capture.internalPeriodSizeInFrames; + ma_uint32 framesReadThisPeriod = 0; + while (framesReadThisPeriod < periodSizeInFrames) { + ma_uint32 framesRemainingInPeriod = periodSizeInFrames - framesReadThisPeriod; + ma_uint32 framesProcessed; + ma_uint32 framesToReadThisIteration = framesRemainingInPeriod; + if (framesToReadThisIteration > intermediaryBufferSizeInFrames) { + framesToReadThisIteration = intermediaryBufferSizeInFrames; + } + + result = ma_device_read__alsa(pDevice, intermediaryBuffer, framesToReadThisIteration, &framesProcessed); + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + + ma_device__send_frames_to_client(pDevice, framesProcessed, intermediaryBuffer); + + framesReadThisPeriod += framesProcessed; + } + } + } break; + + case ma_device_type_playback: + { + if (pDevice->alsa.isUsingMMapPlayback) { + /* MMAP */ + return MA_INVALID_OPERATION; /* Not yet implemented. */ + } else { + /* writei() */ + + /* We write in chunks of the period size, but use a stack allocated buffer for the intermediary. */ + ma_uint8 intermediaryBuffer[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + ma_uint32 periodSizeInFrames = pDevice->playback.internalPeriodSizeInFrames; + ma_uint32 framesWrittenThisPeriod = 0; + while (framesWrittenThisPeriod < periodSizeInFrames) { + ma_uint32 framesRemainingInPeriod = periodSizeInFrames - framesWrittenThisPeriod; + ma_uint32 framesProcessed; + ma_uint32 framesToWriteThisIteration = framesRemainingInPeriod; + if (framesToWriteThisIteration > intermediaryBufferSizeInFrames) { + framesToWriteThisIteration = intermediaryBufferSizeInFrames; + } + + ma_device__read_frames_from_client(pDevice, framesToWriteThisIteration, intermediaryBuffer); + + result = ma_device_write__alsa(pDevice, intermediaryBuffer, framesToWriteThisIteration, &framesProcessed); + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + + framesWrittenThisPeriod += framesProcessed; + } + } + } break; + + /* To silence a warning. Will never hit this. */ + case ma_device_type_loopback: + default: break; + } + } + + /* Here is where the device needs to be stopped. */ + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + ((ma_snd_pcm_drain_proc)pDevice->pContext->alsa.snd_pcm_drain)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture); + + /* We need to prepare the device again, otherwise we won't be able to restart the device. */ + if (((ma_snd_pcm_prepare_proc)pDevice->pContext->alsa.snd_pcm_prepare)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture) < 0) { + #ifdef MA_DEBUG_OUTPUT + printf("[ALSA] Failed to prepare capture device after stopping.\n"); + #endif + } + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ((ma_snd_pcm_drain_proc)pDevice->pContext->alsa.snd_pcm_drain)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback); + + /* We need to prepare the device again, otherwise we won't be able to restart the device. 
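+        After snd_pcm_drain() the PCM is left in the SETUP state, so snd_pcm_prepare() is required to bring it
+        back to PREPARED before a later restart can succeed.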
*/ + if (((ma_snd_pcm_prepare_proc)pDevice->pContext->alsa.snd_pcm_prepare)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback) < 0) { + #ifdef MA_DEBUG_OUTPUT + printf("[ALSA] Failed to prepare playback device after stopping.\n"); + #endif + } + } + + return result; +} + +static ma_result ma_context_uninit__alsa(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_alsa); + + /* Clean up memory for memory leak checkers. */ + ((ma_snd_config_update_free_global_proc)pContext->alsa.snd_config_update_free_global)(); + +#ifndef MA_NO_RUNTIME_LINKING + ma_dlclose(pContext, pContext->alsa.asoundSO); +#endif + + ma_mutex_uninit(&pContext->alsa.internalDeviceEnumLock); + + return MA_SUCCESS; +} + +static ma_result ma_context_init__alsa(const ma_context_config* pConfig, ma_context* pContext) +{ +#ifndef MA_NO_RUNTIME_LINKING + const char* libasoundNames[] = { + "libasound.so.2", + "libasound.so" + }; + size_t i; + + for (i = 0; i < ma_countof(libasoundNames); ++i) { + pContext->alsa.asoundSO = ma_dlopen(pContext, libasoundNames[i]); + if (pContext->alsa.asoundSO != NULL) { + break; + } + } + + if (pContext->alsa.asoundSO == NULL) { +#ifdef MA_DEBUG_OUTPUT + printf("[ALSA] Failed to open shared object.\n"); +#endif + return MA_NO_BACKEND; + } + + pContext->alsa.snd_pcm_open = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_open"); + pContext->alsa.snd_pcm_close = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_close"); + pContext->alsa.snd_pcm_hw_params_sizeof = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_sizeof"); + pContext->alsa.snd_pcm_hw_params_any = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_any"); + pContext->alsa.snd_pcm_hw_params_set_format = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_format"); + pContext->alsa.snd_pcm_hw_params_set_format_first = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_format_first"); + pContext->alsa.snd_pcm_hw_params_get_format_mask = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_format_mask"); + pContext->alsa.snd_pcm_hw_params_set_channels_near = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_channels_near"); + pContext->alsa.snd_pcm_hw_params_set_rate_resample = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_rate_resample"); + pContext->alsa.snd_pcm_hw_params_set_rate_near = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_rate_near"); + pContext->alsa.snd_pcm_hw_params_set_buffer_size_near = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_buffer_size_near"); + pContext->alsa.snd_pcm_hw_params_set_periods_near = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_periods_near"); + pContext->alsa.snd_pcm_hw_params_set_access = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_access"); + pContext->alsa.snd_pcm_hw_params_get_format = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_format"); + pContext->alsa.snd_pcm_hw_params_get_channels = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_channels"); + pContext->alsa.snd_pcm_hw_params_get_channels_min = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_channels_min"); + pContext->alsa.snd_pcm_hw_params_get_channels_max = (ma_proc)ma_dlsym(pContext, 
pContext->alsa.asoundSO, "snd_pcm_hw_params_get_channels_max"); + pContext->alsa.snd_pcm_hw_params_get_rate = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_rate"); + pContext->alsa.snd_pcm_hw_params_get_rate_min = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_rate_min"); + pContext->alsa.snd_pcm_hw_params_get_rate_max = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_rate_max"); + pContext->alsa.snd_pcm_hw_params_get_buffer_size = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_buffer_size"); + pContext->alsa.snd_pcm_hw_params_get_periods = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_periods"); + pContext->alsa.snd_pcm_hw_params_get_access = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_access"); + pContext->alsa.snd_pcm_hw_params = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params"); + pContext->alsa.snd_pcm_sw_params_sizeof = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_sw_params_sizeof"); + pContext->alsa.snd_pcm_sw_params_current = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_sw_params_current"); + pContext->alsa.snd_pcm_sw_params_get_boundary = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_sw_params_get_boundary"); + pContext->alsa.snd_pcm_sw_params_set_avail_min = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_sw_params_set_avail_min"); + pContext->alsa.snd_pcm_sw_params_set_start_threshold = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_sw_params_set_start_threshold"); + pContext->alsa.snd_pcm_sw_params_set_stop_threshold = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_sw_params_set_stop_threshold"); + pContext->alsa.snd_pcm_sw_params = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_sw_params"); + pContext->alsa.snd_pcm_format_mask_sizeof = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_format_mask_sizeof"); + pContext->alsa.snd_pcm_format_mask_test = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_format_mask_test"); + pContext->alsa.snd_pcm_get_chmap = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_get_chmap"); + pContext->alsa.snd_pcm_state = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_state"); + pContext->alsa.snd_pcm_prepare = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_prepare"); + pContext->alsa.snd_pcm_start = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_start"); + pContext->alsa.snd_pcm_drop = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_drop"); + pContext->alsa.snd_pcm_drain = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_drain"); + pContext->alsa.snd_device_name_hint = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_device_name_hint"); + pContext->alsa.snd_device_name_get_hint = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_device_name_get_hint"); + pContext->alsa.snd_card_get_index = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_card_get_index"); + pContext->alsa.snd_device_name_free_hint = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_device_name_free_hint"); + pContext->alsa.snd_pcm_mmap_begin = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_mmap_begin"); + pContext->alsa.snd_pcm_mmap_commit = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_mmap_commit"); + 
pContext->alsa.snd_pcm_recover = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_recover"); + pContext->alsa.snd_pcm_readi = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_readi"); + pContext->alsa.snd_pcm_writei = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_writei"); + pContext->alsa.snd_pcm_avail = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_avail"); + pContext->alsa.snd_pcm_avail_update = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_avail_update"); + pContext->alsa.snd_pcm_wait = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_wait"); + pContext->alsa.snd_pcm_info = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_info"); + pContext->alsa.snd_pcm_info_sizeof = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_info_sizeof"); + pContext->alsa.snd_pcm_info_get_name = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_info_get_name"); + pContext->alsa.snd_config_update_free_global = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_config_update_free_global"); +#else + /* The system below is just for type safety. */ + ma_snd_pcm_open_proc _snd_pcm_open = snd_pcm_open; + ma_snd_pcm_close_proc _snd_pcm_close = snd_pcm_close; + ma_snd_pcm_hw_params_sizeof_proc _snd_pcm_hw_params_sizeof = snd_pcm_hw_params_sizeof; + ma_snd_pcm_hw_params_any_proc _snd_pcm_hw_params_any = snd_pcm_hw_params_any; + ma_snd_pcm_hw_params_set_format_proc _snd_pcm_hw_params_set_format = snd_pcm_hw_params_set_format; + ma_snd_pcm_hw_params_set_format_first_proc _snd_pcm_hw_params_set_format_first = snd_pcm_hw_params_set_format_first; + ma_snd_pcm_hw_params_get_format_mask_proc _snd_pcm_hw_params_get_format_mask = snd_pcm_hw_params_get_format_mask; + ma_snd_pcm_hw_params_set_channels_near_proc _snd_pcm_hw_params_set_channels_near = snd_pcm_hw_params_set_channels_near; + ma_snd_pcm_hw_params_set_rate_resample_proc _snd_pcm_hw_params_set_rate_resample = snd_pcm_hw_params_set_rate_resample; + ma_snd_pcm_hw_params_set_rate_near_proc _snd_pcm_hw_params_set_rate_near = snd_pcm_hw_params_set_rate_near; + ma_snd_pcm_hw_params_set_buffer_size_near_proc _snd_pcm_hw_params_set_buffer_size_near = snd_pcm_hw_params_set_buffer_size_near; + ma_snd_pcm_hw_params_set_periods_near_proc _snd_pcm_hw_params_set_periods_near = snd_pcm_hw_params_set_periods_near; + ma_snd_pcm_hw_params_set_access_proc _snd_pcm_hw_params_set_access = snd_pcm_hw_params_set_access; + ma_snd_pcm_hw_params_get_format_proc _snd_pcm_hw_params_get_format = snd_pcm_hw_params_get_format; + ma_snd_pcm_hw_params_get_channels_proc _snd_pcm_hw_params_get_channels = snd_pcm_hw_params_get_channels; + ma_snd_pcm_hw_params_get_channels_min_proc _snd_pcm_hw_params_get_channels_min = snd_pcm_hw_params_get_channels_min; + ma_snd_pcm_hw_params_get_channels_max_proc _snd_pcm_hw_params_get_channels_max = snd_pcm_hw_params_get_channels_max; + ma_snd_pcm_hw_params_get_rate_proc _snd_pcm_hw_params_get_rate = snd_pcm_hw_params_get_rate; + ma_snd_pcm_hw_params_get_rate_min_proc _snd_pcm_hw_params_get_rate_min = snd_pcm_hw_params_get_rate_min; + ma_snd_pcm_hw_params_get_rate_max_proc _snd_pcm_hw_params_get_rate_max = snd_pcm_hw_params_get_rate_max; + ma_snd_pcm_hw_params_get_buffer_size_proc _snd_pcm_hw_params_get_buffer_size = snd_pcm_hw_params_get_buffer_size; + ma_snd_pcm_hw_params_get_periods_proc _snd_pcm_hw_params_get_periods = snd_pcm_hw_params_get_periods; + ma_snd_pcm_hw_params_get_access_proc _snd_pcm_hw_params_get_access = 
snd_pcm_hw_params_get_access; + ma_snd_pcm_hw_params_proc _snd_pcm_hw_params = snd_pcm_hw_params; + ma_snd_pcm_sw_params_sizeof_proc _snd_pcm_sw_params_sizeof = snd_pcm_sw_params_sizeof; + ma_snd_pcm_sw_params_current_proc _snd_pcm_sw_params_current = snd_pcm_sw_params_current; + ma_snd_pcm_sw_params_get_boundary_proc _snd_pcm_sw_params_get_boundary = snd_pcm_sw_params_get_boundary; + ma_snd_pcm_sw_params_set_avail_min_proc _snd_pcm_sw_params_set_avail_min = snd_pcm_sw_params_set_avail_min; + ma_snd_pcm_sw_params_set_start_threshold_proc _snd_pcm_sw_params_set_start_threshold = snd_pcm_sw_params_set_start_threshold; + ma_snd_pcm_sw_params_set_stop_threshold_proc _snd_pcm_sw_params_set_stop_threshold = snd_pcm_sw_params_set_stop_threshold; + ma_snd_pcm_sw_params_proc _snd_pcm_sw_params = snd_pcm_sw_params; + ma_snd_pcm_format_mask_sizeof_proc _snd_pcm_format_mask_sizeof = snd_pcm_format_mask_sizeof; + ma_snd_pcm_format_mask_test_proc _snd_pcm_format_mask_test = snd_pcm_format_mask_test; + ma_snd_pcm_get_chmap_proc _snd_pcm_get_chmap = snd_pcm_get_chmap; + ma_snd_pcm_state_proc _snd_pcm_state = snd_pcm_state; + ma_snd_pcm_prepare_proc _snd_pcm_prepare = snd_pcm_prepare; + ma_snd_pcm_start_proc _snd_pcm_start = snd_pcm_start; + ma_snd_pcm_drop_proc _snd_pcm_drop = snd_pcm_drop; + ma_snd_pcm_drain_proc _snd_pcm_drain = snd_pcm_drain; + ma_snd_device_name_hint_proc _snd_device_name_hint = snd_device_name_hint; + ma_snd_device_name_get_hint_proc _snd_device_name_get_hint = snd_device_name_get_hint; + ma_snd_card_get_index_proc _snd_card_get_index = snd_card_get_index; + ma_snd_device_name_free_hint_proc _snd_device_name_free_hint = snd_device_name_free_hint; + ma_snd_pcm_mmap_begin_proc _snd_pcm_mmap_begin = snd_pcm_mmap_begin; + ma_snd_pcm_mmap_commit_proc _snd_pcm_mmap_commit = snd_pcm_mmap_commit; + ma_snd_pcm_recover_proc _snd_pcm_recover = snd_pcm_recover; + ma_snd_pcm_readi_proc _snd_pcm_readi = snd_pcm_readi; + ma_snd_pcm_writei_proc _snd_pcm_writei = snd_pcm_writei; + ma_snd_pcm_avail_proc _snd_pcm_avail = snd_pcm_avail; + ma_snd_pcm_avail_update_proc _snd_pcm_avail_update = snd_pcm_avail_update; + ma_snd_pcm_wait_proc _snd_pcm_wait = snd_pcm_wait; + ma_snd_pcm_info_proc _snd_pcm_info = snd_pcm_info; + ma_snd_pcm_info_sizeof_proc _snd_pcm_info_sizeof = snd_pcm_info_sizeof; + ma_snd_pcm_info_get_name_proc _snd_pcm_info_get_name = snd_pcm_info_get_name; + ma_snd_config_update_free_global_proc _snd_config_update_free_global = snd_config_update_free_global; + + pContext->alsa.snd_pcm_open = (ma_proc)_snd_pcm_open; + pContext->alsa.snd_pcm_close = (ma_proc)_snd_pcm_close; + pContext->alsa.snd_pcm_hw_params_sizeof = (ma_proc)_snd_pcm_hw_params_sizeof; + pContext->alsa.snd_pcm_hw_params_any = (ma_proc)_snd_pcm_hw_params_any; + pContext->alsa.snd_pcm_hw_params_set_format = (ma_proc)_snd_pcm_hw_params_set_format; + pContext->alsa.snd_pcm_hw_params_set_format_first = (ma_proc)_snd_pcm_hw_params_set_format_first; + pContext->alsa.snd_pcm_hw_params_get_format_mask = (ma_proc)_snd_pcm_hw_params_get_format_mask; + pContext->alsa.snd_pcm_hw_params_set_channels_near = (ma_proc)_snd_pcm_hw_params_set_channels_near; + pContext->alsa.snd_pcm_hw_params_set_rate_resample = (ma_proc)_snd_pcm_hw_params_set_rate_resample; + pContext->alsa.snd_pcm_hw_params_set_rate_near = (ma_proc)_snd_pcm_hw_params_set_rate_near; + pContext->alsa.snd_pcm_hw_params_set_buffer_size_near = (ma_proc)_snd_pcm_hw_params_set_buffer_size_near; + pContext->alsa.snd_pcm_hw_params_set_periods_near = 
(ma_proc)_snd_pcm_hw_params_set_periods_near; + pContext->alsa.snd_pcm_hw_params_set_access = (ma_proc)_snd_pcm_hw_params_set_access; + pContext->alsa.snd_pcm_hw_params_get_format = (ma_proc)_snd_pcm_hw_params_get_format; + pContext->alsa.snd_pcm_hw_params_get_channels = (ma_proc)_snd_pcm_hw_params_get_channels; + pContext->alsa.snd_pcm_hw_params_get_channels_min = (ma_proc)_snd_pcm_hw_params_get_channels_min; + pContext->alsa.snd_pcm_hw_params_get_channels_max = (ma_proc)_snd_pcm_hw_params_get_channels_max; + pContext->alsa.snd_pcm_hw_params_get_rate = (ma_proc)_snd_pcm_hw_params_get_rate; + pContext->alsa.snd_pcm_hw_params_get_buffer_size = (ma_proc)_snd_pcm_hw_params_get_buffer_size; + pContext->alsa.snd_pcm_hw_params_get_periods = (ma_proc)_snd_pcm_hw_params_get_periods; + pContext->alsa.snd_pcm_hw_params_get_access = (ma_proc)_snd_pcm_hw_params_get_access; + pContext->alsa.snd_pcm_hw_params = (ma_proc)_snd_pcm_hw_params; + pContext->alsa.snd_pcm_sw_params_sizeof = (ma_proc)_snd_pcm_sw_params_sizeof; + pContext->alsa.snd_pcm_sw_params_current = (ma_proc)_snd_pcm_sw_params_current; + pContext->alsa.snd_pcm_sw_params_get_boundary = (ma_proc)_snd_pcm_sw_params_get_boundary; + pContext->alsa.snd_pcm_sw_params_set_avail_min = (ma_proc)_snd_pcm_sw_params_set_avail_min; + pContext->alsa.snd_pcm_sw_params_set_start_threshold = (ma_proc)_snd_pcm_sw_params_set_start_threshold; + pContext->alsa.snd_pcm_sw_params_set_stop_threshold = (ma_proc)_snd_pcm_sw_params_set_stop_threshold; + pContext->alsa.snd_pcm_sw_params = (ma_proc)_snd_pcm_sw_params; + pContext->alsa.snd_pcm_format_mask_sizeof = (ma_proc)_snd_pcm_format_mask_sizeof; + pContext->alsa.snd_pcm_format_mask_test = (ma_proc)_snd_pcm_format_mask_test; + pContext->alsa.snd_pcm_get_chmap = (ma_proc)_snd_pcm_get_chmap; + pContext->alsa.snd_pcm_state = (ma_proc)_snd_pcm_state; + pContext->alsa.snd_pcm_prepare = (ma_proc)_snd_pcm_prepare; + pContext->alsa.snd_pcm_start = (ma_proc)_snd_pcm_start; + pContext->alsa.snd_pcm_drop = (ma_proc)_snd_pcm_drop; + pContext->alsa.snd_pcm_drain = (ma_proc)_snd_pcm_drain; + pContext->alsa.snd_device_name_hint = (ma_proc)_snd_device_name_hint; + pContext->alsa.snd_device_name_get_hint = (ma_proc)_snd_device_name_get_hint; + pContext->alsa.snd_card_get_index = (ma_proc)_snd_card_get_index; + pContext->alsa.snd_device_name_free_hint = (ma_proc)_snd_device_name_free_hint; + pContext->alsa.snd_pcm_mmap_begin = (ma_proc)_snd_pcm_mmap_begin; + pContext->alsa.snd_pcm_mmap_commit = (ma_proc)_snd_pcm_mmap_commit; + pContext->alsa.snd_pcm_recover = (ma_proc)_snd_pcm_recover; + pContext->alsa.snd_pcm_readi = (ma_proc)_snd_pcm_readi; + pContext->alsa.snd_pcm_writei = (ma_proc)_snd_pcm_writei; + pContext->alsa.snd_pcm_avail = (ma_proc)_snd_pcm_avail; + pContext->alsa.snd_pcm_avail_update = (ma_proc)_snd_pcm_avail_update; + pContext->alsa.snd_pcm_wait = (ma_proc)_snd_pcm_wait; + pContext->alsa.snd_pcm_info = (ma_proc)_snd_pcm_info; + pContext->alsa.snd_pcm_info_sizeof = (ma_proc)_snd_pcm_info_sizeof; + pContext->alsa.snd_pcm_info_get_name = (ma_proc)_snd_pcm_info_get_name; + pContext->alsa.snd_config_update_free_global = (ma_proc)_snd_config_update_free_global; +#endif + + pContext->alsa.useVerboseDeviceEnumeration = pConfig->alsa.useVerboseDeviceEnumeration; + + if (ma_mutex_init(pContext, &pContext->alsa.internalDeviceEnumLock) != MA_SUCCESS) { + ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[ALSA] WARNING: Failed to initialize mutex for internal device enumeration.", MA_ERROR); + } + + pContext->onUninit = 
ma_context_uninit__alsa;
+    pContext->onDeviceIDEqual = ma_context_is_device_id_equal__alsa;
+    pContext->onEnumDevices = ma_context_enumerate_devices__alsa;
+    pContext->onGetDeviceInfo = ma_context_get_device_info__alsa;
+    pContext->onDeviceInit = ma_device_init__alsa;
+    pContext->onDeviceUninit = ma_device_uninit__alsa;
+    pContext->onDeviceStart = NULL; /* Not used. Started in the main loop. */
+    pContext->onDeviceStop = NULL; /* Not used. Started in the main loop. */
+    pContext->onDeviceMainLoop = ma_device_main_loop__alsa;
+
+    return MA_SUCCESS;
+}
+#endif  /* ALSA */
+
+
+
+/******************************************************************************
+
+PulseAudio Backend
+
+******************************************************************************/
+#ifdef MA_HAS_PULSEAUDIO
+/*
+It is assumed pulseaudio.h is available when compile-time linking is being used. We use this for type safety when using
+compile time linking (we don't have this luxury when using runtime linking without headers).
+
+When using compile time linking, each of our ma_* equivalents should use the same types as defined by the header. The
+reason for this is that it allows us to take advantage of proper type safety.
+*/
+#ifdef MA_NO_RUNTIME_LINKING
+#include <pulse/pulseaudio.h>
+
+#define MA_PA_OK PA_OK
+#define MA_PA_ERR_ACCESS PA_ERR_ACCESS
+#define MA_PA_ERR_INVALID PA_ERR_INVALID
+#define MA_PA_ERR_NOENTITY PA_ERR_NOENTITY
+
+#define MA_PA_CHANNELS_MAX PA_CHANNELS_MAX
+#define MA_PA_RATE_MAX PA_RATE_MAX
+
+typedef pa_context_flags_t ma_pa_context_flags_t;
+#define MA_PA_CONTEXT_NOFLAGS PA_CONTEXT_NOFLAGS
+#define MA_PA_CONTEXT_NOAUTOSPAWN PA_CONTEXT_NOAUTOSPAWN
+#define MA_PA_CONTEXT_NOFAIL PA_CONTEXT_NOFAIL
+
+typedef pa_stream_flags_t ma_pa_stream_flags_t;
+#define MA_PA_STREAM_NOFLAGS PA_STREAM_NOFLAGS
+#define MA_PA_STREAM_START_CORKED PA_STREAM_START_CORKED
+#define MA_PA_STREAM_INTERPOLATE_TIMING PA_STREAM_INTERPOLATE_TIMING
+#define MA_PA_STREAM_NOT_MONOTONIC PA_STREAM_NOT_MONOTONIC
+#define MA_PA_STREAM_AUTO_TIMING_UPDATE PA_STREAM_AUTO_TIMING_UPDATE
+#define MA_PA_STREAM_NO_REMAP_CHANNELS PA_STREAM_NO_REMAP_CHANNELS
+#define MA_PA_STREAM_NO_REMIX_CHANNELS PA_STREAM_NO_REMIX_CHANNELS
+#define MA_PA_STREAM_FIX_FORMAT PA_STREAM_FIX_FORMAT
+#define MA_PA_STREAM_FIX_RATE PA_STREAM_FIX_RATE
+#define MA_PA_STREAM_FIX_CHANNELS PA_STREAM_FIX_CHANNELS
+#define MA_PA_STREAM_DONT_MOVE PA_STREAM_DONT_MOVE
+#define MA_PA_STREAM_VARIABLE_RATE PA_STREAM_VARIABLE_RATE
+#define MA_PA_STREAM_PEAK_DETECT PA_STREAM_PEAK_DETECT
+#define MA_PA_STREAM_START_MUTED PA_STREAM_START_MUTED
+#define MA_PA_STREAM_ADJUST_LATENCY PA_STREAM_ADJUST_LATENCY
+#define MA_PA_STREAM_EARLY_REQUESTS PA_STREAM_EARLY_REQUESTS
+#define MA_PA_STREAM_DONT_INHIBIT_AUTO_SUSPEND PA_STREAM_DONT_INHIBIT_AUTO_SUSPEND
+#define MA_PA_STREAM_START_UNMUTED PA_STREAM_START_UNMUTED
+#define MA_PA_STREAM_FAIL_ON_SUSPEND PA_STREAM_FAIL_ON_SUSPEND
+#define MA_PA_STREAM_RELATIVE_VOLUME PA_STREAM_RELATIVE_VOLUME
+#define MA_PA_STREAM_PASSTHROUGH PA_STREAM_PASSTHROUGH
+
+typedef pa_sink_flags_t ma_pa_sink_flags_t;
+#define MA_PA_SINK_NOFLAGS PA_SINK_NOFLAGS
+#define MA_PA_SINK_HW_VOLUME_CTRL PA_SINK_HW_VOLUME_CTRL
+#define MA_PA_SINK_LATENCY PA_SINK_LATENCY
+#define MA_PA_SINK_HARDWARE PA_SINK_HARDWARE
+#define MA_PA_SINK_NETWORK PA_SINK_NETWORK
+#define MA_PA_SINK_HW_MUTE_CTRL PA_SINK_HW_MUTE_CTRL
+#define MA_PA_SINK_DECIBEL_VOLUME PA_SINK_DECIBEL_VOLUME
+#define MA_PA_SINK_FLAT_VOLUME PA_SINK_FLAT_VOLUME
+#define MA_PA_SINK_DYNAMIC_LATENCY PA_SINK_DYNAMIC_LATENCY
+#define 
MA_PA_SINK_SET_FORMATS PA_SINK_SET_FORMATS + +typedef pa_source_flags_t ma_pa_source_flags_t; +#define MA_PA_SOURCE_NOFLAGS PA_SOURCE_NOFLAGS +#define MA_PA_SOURCE_HW_VOLUME_CTRL PA_SOURCE_HW_VOLUME_CTRL +#define MA_PA_SOURCE_LATENCY PA_SOURCE_LATENCY +#define MA_PA_SOURCE_HARDWARE PA_SOURCE_HARDWARE +#define MA_PA_SOURCE_NETWORK PA_SOURCE_NETWORK +#define MA_PA_SOURCE_HW_MUTE_CTRL PA_SOURCE_HW_MUTE_CTRL +#define MA_PA_SOURCE_DECIBEL_VOLUME PA_SOURCE_DECIBEL_VOLUME +#define MA_PA_SOURCE_DYNAMIC_LATENCY PA_SOURCE_DYNAMIC_LATENCY +#define MA_PA_SOURCE_FLAT_VOLUME PA_SOURCE_FLAT_VOLUME + +typedef pa_context_state_t ma_pa_context_state_t; +#define MA_PA_CONTEXT_UNCONNECTED PA_CONTEXT_UNCONNECTED +#define MA_PA_CONTEXT_CONNECTING PA_CONTEXT_CONNECTING +#define MA_PA_CONTEXT_AUTHORIZING PA_CONTEXT_AUTHORIZING +#define MA_PA_CONTEXT_SETTING_NAME PA_CONTEXT_SETTING_NAME +#define MA_PA_CONTEXT_READY PA_CONTEXT_READY +#define MA_PA_CONTEXT_FAILED PA_CONTEXT_FAILED +#define MA_PA_CONTEXT_TERMINATED PA_CONTEXT_TERMINATED + +typedef pa_stream_state_t ma_pa_stream_state_t; +#define MA_PA_STREAM_UNCONNECTED PA_STREAM_UNCONNECTED +#define MA_PA_STREAM_CREATING PA_STREAM_CREATING +#define MA_PA_STREAM_READY PA_STREAM_READY +#define MA_PA_STREAM_FAILED PA_STREAM_FAILED +#define MA_PA_STREAM_TERMINATED PA_STREAM_TERMINATED + +typedef pa_operation_state_t ma_pa_operation_state_t; +#define MA_PA_OPERATION_RUNNING PA_OPERATION_RUNNING +#define MA_PA_OPERATION_DONE PA_OPERATION_DONE +#define MA_PA_OPERATION_CANCELLED PA_OPERATION_CANCELLED + +typedef pa_sink_state_t ma_pa_sink_state_t; +#define MA_PA_SINK_INVALID_STATE PA_SINK_INVALID_STATE +#define MA_PA_SINK_RUNNING PA_SINK_RUNNING +#define MA_PA_SINK_IDLE PA_SINK_IDLE +#define MA_PA_SINK_SUSPENDED PA_SINK_SUSPENDED + +typedef pa_source_state_t ma_pa_source_state_t; +#define MA_PA_SOURCE_INVALID_STATE PA_SOURCE_INVALID_STATE +#define MA_PA_SOURCE_RUNNING PA_SOURCE_RUNNING +#define MA_PA_SOURCE_IDLE PA_SOURCE_IDLE +#define MA_PA_SOURCE_SUSPENDED PA_SOURCE_SUSPENDED + +typedef pa_seek_mode_t ma_pa_seek_mode_t; +#define MA_PA_SEEK_RELATIVE PA_SEEK_RELATIVE +#define MA_PA_SEEK_ABSOLUTE PA_SEEK_ABSOLUTE +#define MA_PA_SEEK_RELATIVE_ON_READ PA_SEEK_RELATIVE_ON_READ +#define MA_PA_SEEK_RELATIVE_END PA_SEEK_RELATIVE_END + +typedef pa_channel_position_t ma_pa_channel_position_t; +#define MA_PA_CHANNEL_POSITION_INVALID PA_CHANNEL_POSITION_INVALID +#define MA_PA_CHANNEL_POSITION_MONO PA_CHANNEL_POSITION_MONO +#define MA_PA_CHANNEL_POSITION_FRONT_LEFT PA_CHANNEL_POSITION_FRONT_LEFT +#define MA_PA_CHANNEL_POSITION_FRONT_RIGHT PA_CHANNEL_POSITION_FRONT_RIGHT +#define MA_PA_CHANNEL_POSITION_FRONT_CENTER PA_CHANNEL_POSITION_FRONT_CENTER +#define MA_PA_CHANNEL_POSITION_REAR_CENTER PA_CHANNEL_POSITION_REAR_CENTER +#define MA_PA_CHANNEL_POSITION_REAR_LEFT PA_CHANNEL_POSITION_REAR_LEFT +#define MA_PA_CHANNEL_POSITION_REAR_RIGHT PA_CHANNEL_POSITION_REAR_RIGHT +#define MA_PA_CHANNEL_POSITION_LFE PA_CHANNEL_POSITION_LFE +#define MA_PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER +#define MA_PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER +#define MA_PA_CHANNEL_POSITION_SIDE_LEFT PA_CHANNEL_POSITION_SIDE_LEFT +#define MA_PA_CHANNEL_POSITION_SIDE_RIGHT PA_CHANNEL_POSITION_SIDE_RIGHT +#define MA_PA_CHANNEL_POSITION_AUX0 PA_CHANNEL_POSITION_AUX0 +#define MA_PA_CHANNEL_POSITION_AUX1 PA_CHANNEL_POSITION_AUX1 +#define MA_PA_CHANNEL_POSITION_AUX2 PA_CHANNEL_POSITION_AUX2 +#define MA_PA_CHANNEL_POSITION_AUX3 
PA_CHANNEL_POSITION_AUX3 +#define MA_PA_CHANNEL_POSITION_AUX4 PA_CHANNEL_POSITION_AUX4 +#define MA_PA_CHANNEL_POSITION_AUX5 PA_CHANNEL_POSITION_AUX5 +#define MA_PA_CHANNEL_POSITION_AUX6 PA_CHANNEL_POSITION_AUX6 +#define MA_PA_CHANNEL_POSITION_AUX7 PA_CHANNEL_POSITION_AUX7 +#define MA_PA_CHANNEL_POSITION_AUX8 PA_CHANNEL_POSITION_AUX8 +#define MA_PA_CHANNEL_POSITION_AUX9 PA_CHANNEL_POSITION_AUX9 +#define MA_PA_CHANNEL_POSITION_AUX10 PA_CHANNEL_POSITION_AUX10 +#define MA_PA_CHANNEL_POSITION_AUX11 PA_CHANNEL_POSITION_AUX11 +#define MA_PA_CHANNEL_POSITION_AUX12 PA_CHANNEL_POSITION_AUX12 +#define MA_PA_CHANNEL_POSITION_AUX13 PA_CHANNEL_POSITION_AUX13 +#define MA_PA_CHANNEL_POSITION_AUX14 PA_CHANNEL_POSITION_AUX14 +#define MA_PA_CHANNEL_POSITION_AUX15 PA_CHANNEL_POSITION_AUX15 +#define MA_PA_CHANNEL_POSITION_AUX16 PA_CHANNEL_POSITION_AUX16 +#define MA_PA_CHANNEL_POSITION_AUX17 PA_CHANNEL_POSITION_AUX17 +#define MA_PA_CHANNEL_POSITION_AUX18 PA_CHANNEL_POSITION_AUX18 +#define MA_PA_CHANNEL_POSITION_AUX19 PA_CHANNEL_POSITION_AUX19 +#define MA_PA_CHANNEL_POSITION_AUX20 PA_CHANNEL_POSITION_AUX20 +#define MA_PA_CHANNEL_POSITION_AUX21 PA_CHANNEL_POSITION_AUX21 +#define MA_PA_CHANNEL_POSITION_AUX22 PA_CHANNEL_POSITION_AUX22 +#define MA_PA_CHANNEL_POSITION_AUX23 PA_CHANNEL_POSITION_AUX23 +#define MA_PA_CHANNEL_POSITION_AUX24 PA_CHANNEL_POSITION_AUX24 +#define MA_PA_CHANNEL_POSITION_AUX25 PA_CHANNEL_POSITION_AUX25 +#define MA_PA_CHANNEL_POSITION_AUX26 PA_CHANNEL_POSITION_AUX26 +#define MA_PA_CHANNEL_POSITION_AUX27 PA_CHANNEL_POSITION_AUX27 +#define MA_PA_CHANNEL_POSITION_AUX28 PA_CHANNEL_POSITION_AUX28 +#define MA_PA_CHANNEL_POSITION_AUX29 PA_CHANNEL_POSITION_AUX29 +#define MA_PA_CHANNEL_POSITION_AUX30 PA_CHANNEL_POSITION_AUX30 +#define MA_PA_CHANNEL_POSITION_AUX31 PA_CHANNEL_POSITION_AUX31 +#define MA_PA_CHANNEL_POSITION_TOP_CENTER PA_CHANNEL_POSITION_TOP_CENTER +#define MA_PA_CHANNEL_POSITION_TOP_FRONT_LEFT PA_CHANNEL_POSITION_TOP_FRONT_LEFT +#define MA_PA_CHANNEL_POSITION_TOP_FRONT_RIGHT PA_CHANNEL_POSITION_TOP_FRONT_RIGHT +#define MA_PA_CHANNEL_POSITION_TOP_FRONT_CENTER PA_CHANNEL_POSITION_TOP_FRONT_CENTER +#define MA_PA_CHANNEL_POSITION_TOP_REAR_LEFT PA_CHANNEL_POSITION_TOP_REAR_LEFT +#define MA_PA_CHANNEL_POSITION_TOP_REAR_RIGHT PA_CHANNEL_POSITION_TOP_REAR_RIGHT +#define MA_PA_CHANNEL_POSITION_TOP_REAR_CENTER PA_CHANNEL_POSITION_TOP_REAR_CENTER +#define MA_PA_CHANNEL_POSITION_LEFT PA_CHANNEL_POSITION_LEFT +#define MA_PA_CHANNEL_POSITION_RIGHT PA_CHANNEL_POSITION_RIGHT +#define MA_PA_CHANNEL_POSITION_CENTER PA_CHANNEL_POSITION_CENTER +#define MA_PA_CHANNEL_POSITION_SUBWOOFER PA_CHANNEL_POSITION_SUBWOOFER + +typedef pa_channel_map_def_t ma_pa_channel_map_def_t; +#define MA_PA_CHANNEL_MAP_AIFF PA_CHANNEL_MAP_AIFF +#define MA_PA_CHANNEL_MAP_ALSA PA_CHANNEL_MAP_ALSA +#define MA_PA_CHANNEL_MAP_AUX PA_CHANNEL_MAP_AUX +#define MA_PA_CHANNEL_MAP_WAVEEX PA_CHANNEL_MAP_WAVEEX +#define MA_PA_CHANNEL_MAP_OSS PA_CHANNEL_MAP_OSS +#define MA_PA_CHANNEL_MAP_DEFAULT PA_CHANNEL_MAP_DEFAULT + +typedef pa_sample_format_t ma_pa_sample_format_t; +#define MA_PA_SAMPLE_INVALID PA_SAMPLE_INVALID +#define MA_PA_SAMPLE_U8 PA_SAMPLE_U8 +#define MA_PA_SAMPLE_ALAW PA_SAMPLE_ALAW +#define MA_PA_SAMPLE_ULAW PA_SAMPLE_ULAW +#define MA_PA_SAMPLE_S16LE PA_SAMPLE_S16LE +#define MA_PA_SAMPLE_S16BE PA_SAMPLE_S16BE +#define MA_PA_SAMPLE_FLOAT32LE PA_SAMPLE_FLOAT32LE +#define MA_PA_SAMPLE_FLOAT32BE PA_SAMPLE_FLOAT32BE +#define MA_PA_SAMPLE_S32LE PA_SAMPLE_S32LE +#define MA_PA_SAMPLE_S32BE PA_SAMPLE_S32BE +#define MA_PA_SAMPLE_S24LE 
PA_SAMPLE_S24LE +#define MA_PA_SAMPLE_S24BE PA_SAMPLE_S24BE +#define MA_PA_SAMPLE_S24_32LE PA_SAMPLE_S24_32LE +#define MA_PA_SAMPLE_S24_32BE PA_SAMPLE_S24_32BE + +typedef pa_mainloop ma_pa_mainloop; +typedef pa_mainloop_api ma_pa_mainloop_api; +typedef pa_context ma_pa_context; +typedef pa_operation ma_pa_operation; +typedef pa_stream ma_pa_stream; +typedef pa_spawn_api ma_pa_spawn_api; +typedef pa_buffer_attr ma_pa_buffer_attr; +typedef pa_channel_map ma_pa_channel_map; +typedef pa_cvolume ma_pa_cvolume; +typedef pa_sample_spec ma_pa_sample_spec; +typedef pa_sink_info ma_pa_sink_info; +typedef pa_source_info ma_pa_source_info; + +typedef pa_context_notify_cb_t ma_pa_context_notify_cb_t; +typedef pa_sink_info_cb_t ma_pa_sink_info_cb_t; +typedef pa_source_info_cb_t ma_pa_source_info_cb_t; +typedef pa_stream_success_cb_t ma_pa_stream_success_cb_t; +typedef pa_stream_request_cb_t ma_pa_stream_request_cb_t; +typedef pa_free_cb_t ma_pa_free_cb_t; +#else +#define MA_PA_OK 0 +#define MA_PA_ERR_ACCESS 1 +#define MA_PA_ERR_INVALID 2 +#define MA_PA_ERR_NOENTITY 5 + +#define MA_PA_CHANNELS_MAX 32 +#define MA_PA_RATE_MAX 384000 + +typedef int ma_pa_context_flags_t; +#define MA_PA_CONTEXT_NOFLAGS 0x00000000 +#define MA_PA_CONTEXT_NOAUTOSPAWN 0x00000001 +#define MA_PA_CONTEXT_NOFAIL 0x00000002 + +typedef int ma_pa_stream_flags_t; +#define MA_PA_STREAM_NOFLAGS 0x00000000 +#define MA_PA_STREAM_START_CORKED 0x00000001 +#define MA_PA_STREAM_INTERPOLATE_TIMING 0x00000002 +#define MA_PA_STREAM_NOT_MONOTONIC 0x00000004 +#define MA_PA_STREAM_AUTO_TIMING_UPDATE 0x00000008 +#define MA_PA_STREAM_NO_REMAP_CHANNELS 0x00000010 +#define MA_PA_STREAM_NO_REMIX_CHANNELS 0x00000020 +#define MA_PA_STREAM_FIX_FORMAT 0x00000040 +#define MA_PA_STREAM_FIX_RATE 0x00000080 +#define MA_PA_STREAM_FIX_CHANNELS 0x00000100 +#define MA_PA_STREAM_DONT_MOVE 0x00000200 +#define MA_PA_STREAM_VARIABLE_RATE 0x00000400 +#define MA_PA_STREAM_PEAK_DETECT 0x00000800 +#define MA_PA_STREAM_START_MUTED 0x00001000 +#define MA_PA_STREAM_ADJUST_LATENCY 0x00002000 +#define MA_PA_STREAM_EARLY_REQUESTS 0x00004000 +#define MA_PA_STREAM_DONT_INHIBIT_AUTO_SUSPEND 0x00008000 +#define MA_PA_STREAM_START_UNMUTED 0x00010000 +#define MA_PA_STREAM_FAIL_ON_SUSPEND 0x00020000 +#define MA_PA_STREAM_RELATIVE_VOLUME 0x00040000 +#define MA_PA_STREAM_PASSTHROUGH 0x00080000 + +typedef int ma_pa_sink_flags_t; +#define MA_PA_SINK_NOFLAGS 0x00000000 +#define MA_PA_SINK_HW_VOLUME_CTRL 0x00000001 +#define MA_PA_SINK_LATENCY 0x00000002 +#define MA_PA_SINK_HARDWARE 0x00000004 +#define MA_PA_SINK_NETWORK 0x00000008 +#define MA_PA_SINK_HW_MUTE_CTRL 0x00000010 +#define MA_PA_SINK_DECIBEL_VOLUME 0x00000020 +#define MA_PA_SINK_FLAT_VOLUME 0x00000040 +#define MA_PA_SINK_DYNAMIC_LATENCY 0x00000080 +#define MA_PA_SINK_SET_FORMATS 0x00000100 + +typedef int ma_pa_source_flags_t; +#define MA_PA_SOURCE_NOFLAGS 0x00000000 +#define MA_PA_SOURCE_HW_VOLUME_CTRL 0x00000001 +#define MA_PA_SOURCE_LATENCY 0x00000002 +#define MA_PA_SOURCE_HARDWARE 0x00000004 +#define MA_PA_SOURCE_NETWORK 0x00000008 +#define MA_PA_SOURCE_HW_MUTE_CTRL 0x00000010 +#define MA_PA_SOURCE_DECIBEL_VOLUME 0x00000020 +#define MA_PA_SOURCE_DYNAMIC_LATENCY 0x00000040 +#define MA_PA_SOURCE_FLAT_VOLUME 0x00000080 + +typedef int ma_pa_context_state_t; +#define MA_PA_CONTEXT_UNCONNECTED 0 +#define MA_PA_CONTEXT_CONNECTING 1 +#define MA_PA_CONTEXT_AUTHORIZING 2 +#define MA_PA_CONTEXT_SETTING_NAME 3 +#define MA_PA_CONTEXT_READY 4 +#define MA_PA_CONTEXT_FAILED 5 +#define MA_PA_CONTEXT_TERMINATED 6 + +typedef int 
ma_pa_stream_state_t; +#define MA_PA_STREAM_UNCONNECTED 0 +#define MA_PA_STREAM_CREATING 1 +#define MA_PA_STREAM_READY 2 +#define MA_PA_STREAM_FAILED 3 +#define MA_PA_STREAM_TERMINATED 4 + +typedef int ma_pa_operation_state_t; +#define MA_PA_OPERATION_RUNNING 0 +#define MA_PA_OPERATION_DONE 1 +#define MA_PA_OPERATION_CANCELLED 2 + +typedef int ma_pa_sink_state_t; +#define MA_PA_SINK_INVALID_STATE -1 +#define MA_PA_SINK_RUNNING 0 +#define MA_PA_SINK_IDLE 1 +#define MA_PA_SINK_SUSPENDED 2 + +typedef int ma_pa_source_state_t; +#define MA_PA_SOURCE_INVALID_STATE -1 +#define MA_PA_SOURCE_RUNNING 0 +#define MA_PA_SOURCE_IDLE 1 +#define MA_PA_SOURCE_SUSPENDED 2 + +typedef int ma_pa_seek_mode_t; +#define MA_PA_SEEK_RELATIVE 0 +#define MA_PA_SEEK_ABSOLUTE 1 +#define MA_PA_SEEK_RELATIVE_ON_READ 2 +#define MA_PA_SEEK_RELATIVE_END 3 + +typedef int ma_pa_channel_position_t; +#define MA_PA_CHANNEL_POSITION_INVALID -1 +#define MA_PA_CHANNEL_POSITION_MONO 0 +#define MA_PA_CHANNEL_POSITION_FRONT_LEFT 1 +#define MA_PA_CHANNEL_POSITION_FRONT_RIGHT 2 +#define MA_PA_CHANNEL_POSITION_FRONT_CENTER 3 +#define MA_PA_CHANNEL_POSITION_REAR_CENTER 4 +#define MA_PA_CHANNEL_POSITION_REAR_LEFT 5 +#define MA_PA_CHANNEL_POSITION_REAR_RIGHT 6 +#define MA_PA_CHANNEL_POSITION_LFE 7 +#define MA_PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER 8 +#define MA_PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER 9 +#define MA_PA_CHANNEL_POSITION_SIDE_LEFT 10 +#define MA_PA_CHANNEL_POSITION_SIDE_RIGHT 11 +#define MA_PA_CHANNEL_POSITION_AUX0 12 +#define MA_PA_CHANNEL_POSITION_AUX1 13 +#define MA_PA_CHANNEL_POSITION_AUX2 14 +#define MA_PA_CHANNEL_POSITION_AUX3 15 +#define MA_PA_CHANNEL_POSITION_AUX4 16 +#define MA_PA_CHANNEL_POSITION_AUX5 17 +#define MA_PA_CHANNEL_POSITION_AUX6 18 +#define MA_PA_CHANNEL_POSITION_AUX7 19 +#define MA_PA_CHANNEL_POSITION_AUX8 20 +#define MA_PA_CHANNEL_POSITION_AUX9 21 +#define MA_PA_CHANNEL_POSITION_AUX10 22 +#define MA_PA_CHANNEL_POSITION_AUX11 23 +#define MA_PA_CHANNEL_POSITION_AUX12 24 +#define MA_PA_CHANNEL_POSITION_AUX13 25 +#define MA_PA_CHANNEL_POSITION_AUX14 26 +#define MA_PA_CHANNEL_POSITION_AUX15 27 +#define MA_PA_CHANNEL_POSITION_AUX16 28 +#define MA_PA_CHANNEL_POSITION_AUX17 29 +#define MA_PA_CHANNEL_POSITION_AUX18 30 +#define MA_PA_CHANNEL_POSITION_AUX19 31 +#define MA_PA_CHANNEL_POSITION_AUX20 32 +#define MA_PA_CHANNEL_POSITION_AUX21 33 +#define MA_PA_CHANNEL_POSITION_AUX22 34 +#define MA_PA_CHANNEL_POSITION_AUX23 35 +#define MA_PA_CHANNEL_POSITION_AUX24 36 +#define MA_PA_CHANNEL_POSITION_AUX25 37 +#define MA_PA_CHANNEL_POSITION_AUX26 38 +#define MA_PA_CHANNEL_POSITION_AUX27 39 +#define MA_PA_CHANNEL_POSITION_AUX28 40 +#define MA_PA_CHANNEL_POSITION_AUX29 41 +#define MA_PA_CHANNEL_POSITION_AUX30 42 +#define MA_PA_CHANNEL_POSITION_AUX31 43 +#define MA_PA_CHANNEL_POSITION_TOP_CENTER 44 +#define MA_PA_CHANNEL_POSITION_TOP_FRONT_LEFT 45 +#define MA_PA_CHANNEL_POSITION_TOP_FRONT_RIGHT 46 +#define MA_PA_CHANNEL_POSITION_TOP_FRONT_CENTER 47 +#define MA_PA_CHANNEL_POSITION_TOP_REAR_LEFT 48 +#define MA_PA_CHANNEL_POSITION_TOP_REAR_RIGHT 49 +#define MA_PA_CHANNEL_POSITION_TOP_REAR_CENTER 50 +#define MA_PA_CHANNEL_POSITION_LEFT MA_PA_CHANNEL_POSITION_FRONT_LEFT +#define MA_PA_CHANNEL_POSITION_RIGHT MA_PA_CHANNEL_POSITION_FRONT_RIGHT +#define MA_PA_CHANNEL_POSITION_CENTER MA_PA_CHANNEL_POSITION_FRONT_CENTER +#define MA_PA_CHANNEL_POSITION_SUBWOOFER MA_PA_CHANNEL_POSITION_LFE + +typedef int ma_pa_channel_map_def_t; +#define MA_PA_CHANNEL_MAP_AIFF 0 +#define MA_PA_CHANNEL_MAP_ALSA 1 +#define MA_PA_CHANNEL_MAP_AUX 2 
+#define MA_PA_CHANNEL_MAP_WAVEEX 3 +#define MA_PA_CHANNEL_MAP_OSS 4 +#define MA_PA_CHANNEL_MAP_DEFAULT MA_PA_CHANNEL_MAP_AIFF + +typedef int ma_pa_sample_format_t; +#define MA_PA_SAMPLE_INVALID -1 +#define MA_PA_SAMPLE_U8 0 +#define MA_PA_SAMPLE_ALAW 1 +#define MA_PA_SAMPLE_ULAW 2 +#define MA_PA_SAMPLE_S16LE 3 +#define MA_PA_SAMPLE_S16BE 4 +#define MA_PA_SAMPLE_FLOAT32LE 5 +#define MA_PA_SAMPLE_FLOAT32BE 6 +#define MA_PA_SAMPLE_S32LE 7 +#define MA_PA_SAMPLE_S32BE 8 +#define MA_PA_SAMPLE_S24LE 9 +#define MA_PA_SAMPLE_S24BE 10 +#define MA_PA_SAMPLE_S24_32LE 11 +#define MA_PA_SAMPLE_S24_32BE 12 + +typedef struct ma_pa_mainloop ma_pa_mainloop; +typedef struct ma_pa_mainloop_api ma_pa_mainloop_api; +typedef struct ma_pa_context ma_pa_context; +typedef struct ma_pa_operation ma_pa_operation; +typedef struct ma_pa_stream ma_pa_stream; +typedef struct ma_pa_spawn_api ma_pa_spawn_api; + +typedef struct +{ + ma_uint32 maxlength; + ma_uint32 tlength; + ma_uint32 prebuf; + ma_uint32 minreq; + ma_uint32 fragsize; +} ma_pa_buffer_attr; + +typedef struct +{ + ma_uint8 channels; + ma_pa_channel_position_t map[MA_PA_CHANNELS_MAX]; +} ma_pa_channel_map; + +typedef struct +{ + ma_uint8 channels; + ma_uint32 values[MA_PA_CHANNELS_MAX]; +} ma_pa_cvolume; + +typedef struct +{ + ma_pa_sample_format_t format; + ma_uint32 rate; + ma_uint8 channels; +} ma_pa_sample_spec; + +typedef struct +{ + const char* name; + ma_uint32 index; + const char* description; + ma_pa_sample_spec sample_spec; + ma_pa_channel_map channel_map; + ma_uint32 owner_module; + ma_pa_cvolume volume; + int mute; + ma_uint32 monitor_source; + const char* monitor_source_name; + ma_uint64 latency; + const char* driver; + ma_pa_sink_flags_t flags; + void* proplist; + ma_uint64 configured_latency; + ma_uint32 base_volume; + ma_pa_sink_state_t state; + ma_uint32 n_volume_steps; + ma_uint32 card; + ma_uint32 n_ports; + void** ports; + void* active_port; + ma_uint8 n_formats; + void** formats; +} ma_pa_sink_info; + +typedef struct +{ + const char *name; + ma_uint32 index; + const char *description; + ma_pa_sample_spec sample_spec; + ma_pa_channel_map channel_map; + ma_uint32 owner_module; + ma_pa_cvolume volume; + int mute; + ma_uint32 monitor_of_sink; + const char *monitor_of_sink_name; + ma_uint64 latency; + const char *driver; + ma_pa_source_flags_t flags; + void* proplist; + ma_uint64 configured_latency; + ma_uint32 base_volume; + ma_pa_source_state_t state; + ma_uint32 n_volume_steps; + ma_uint32 card; + ma_uint32 n_ports; + void** ports; + void* active_port; + ma_uint8 n_formats; + void** formats; +} ma_pa_source_info; + +typedef void (* ma_pa_context_notify_cb_t)(ma_pa_context* c, void* userdata); +typedef void (* ma_pa_sink_info_cb_t) (ma_pa_context* c, const ma_pa_sink_info* i, int eol, void* userdata); +typedef void (* ma_pa_source_info_cb_t) (ma_pa_context* c, const ma_pa_source_info* i, int eol, void* userdata); +typedef void (* ma_pa_stream_success_cb_t)(ma_pa_stream* s, int success, void* userdata); +typedef void (* ma_pa_stream_request_cb_t)(ma_pa_stream* s, size_t nbytes, void* userdata); +typedef void (* ma_pa_free_cb_t) (void* p); +#endif + + +typedef ma_pa_mainloop* (* ma_pa_mainloop_new_proc) (); +typedef void (* ma_pa_mainloop_free_proc) (ma_pa_mainloop* m); +typedef ma_pa_mainloop_api* (* ma_pa_mainloop_get_api_proc) (ma_pa_mainloop* m); +typedef int (* ma_pa_mainloop_iterate_proc) (ma_pa_mainloop* m, int block, int* retval); +typedef void (* ma_pa_mainloop_wakeup_proc) (ma_pa_mainloop* m); +typedef ma_pa_context* (* 
ma_pa_context_new_proc) (ma_pa_mainloop_api* mainloop, const char* name); +typedef void (* ma_pa_context_unref_proc) (ma_pa_context* c); +typedef int (* ma_pa_context_connect_proc) (ma_pa_context* c, const char* server, ma_pa_context_flags_t flags, const ma_pa_spawn_api* api); +typedef void (* ma_pa_context_disconnect_proc) (ma_pa_context* c); +typedef void (* ma_pa_context_set_state_callback_proc) (ma_pa_context* c, ma_pa_context_notify_cb_t cb, void* userdata); +typedef ma_pa_context_state_t (* ma_pa_context_get_state_proc) (ma_pa_context* c); +typedef ma_pa_operation* (* ma_pa_context_get_sink_info_list_proc) (ma_pa_context* c, ma_pa_sink_info_cb_t cb, void* userdata); +typedef ma_pa_operation* (* ma_pa_context_get_source_info_list_proc) (ma_pa_context* c, ma_pa_source_info_cb_t cb, void* userdata); +typedef ma_pa_operation* (* ma_pa_context_get_sink_info_by_name_proc) (ma_pa_context* c, const char* name, ma_pa_sink_info_cb_t cb, void* userdata); +typedef ma_pa_operation* (* ma_pa_context_get_source_info_by_name_proc)(ma_pa_context* c, const char* name, ma_pa_source_info_cb_t cb, void* userdata); +typedef void (* ma_pa_operation_unref_proc) (ma_pa_operation* o); +typedef ma_pa_operation_state_t (* ma_pa_operation_get_state_proc) (ma_pa_operation* o); +typedef ma_pa_channel_map* (* ma_pa_channel_map_init_extend_proc) (ma_pa_channel_map* m, unsigned channels, ma_pa_channel_map_def_t def); +typedef int (* ma_pa_channel_map_valid_proc) (const ma_pa_channel_map* m); +typedef int (* ma_pa_channel_map_compatible_proc) (const ma_pa_channel_map* m, const ma_pa_sample_spec* ss); +typedef ma_pa_stream* (* ma_pa_stream_new_proc) (ma_pa_context* c, const char* name, const ma_pa_sample_spec* ss, const ma_pa_channel_map* map); +typedef void (* ma_pa_stream_unref_proc) (ma_pa_stream* s); +typedef int (* ma_pa_stream_connect_playback_proc) (ma_pa_stream* s, const char* dev, const ma_pa_buffer_attr* attr, ma_pa_stream_flags_t flags, const ma_pa_cvolume* volume, ma_pa_stream* sync_stream); +typedef int (* ma_pa_stream_connect_record_proc) (ma_pa_stream* s, const char* dev, const ma_pa_buffer_attr* attr, ma_pa_stream_flags_t flags); +typedef int (* ma_pa_stream_disconnect_proc) (ma_pa_stream* s); +typedef ma_pa_stream_state_t (* ma_pa_stream_get_state_proc) (ma_pa_stream* s); +typedef const ma_pa_sample_spec* (* ma_pa_stream_get_sample_spec_proc) (ma_pa_stream* s); +typedef const ma_pa_channel_map* (* ma_pa_stream_get_channel_map_proc) (ma_pa_stream* s); +typedef const ma_pa_buffer_attr* (* ma_pa_stream_get_buffer_attr_proc) (ma_pa_stream* s); +typedef ma_pa_operation* (* ma_pa_stream_set_buffer_attr_proc) (ma_pa_stream* s, const ma_pa_buffer_attr* attr, ma_pa_stream_success_cb_t cb, void* userdata); +typedef const char* (* ma_pa_stream_get_device_name_proc) (ma_pa_stream* s); +typedef void (* ma_pa_stream_set_write_callback_proc) (ma_pa_stream* s, ma_pa_stream_request_cb_t cb, void* userdata); +typedef void (* ma_pa_stream_set_read_callback_proc) (ma_pa_stream* s, ma_pa_stream_request_cb_t cb, void* userdata); +typedef ma_pa_operation* (* ma_pa_stream_flush_proc) (ma_pa_stream* s, ma_pa_stream_success_cb_t cb, void* userdata); +typedef ma_pa_operation* (* ma_pa_stream_drain_proc) (ma_pa_stream* s, ma_pa_stream_success_cb_t cb, void* userdata); +typedef int (* ma_pa_stream_is_corked_proc) (ma_pa_stream* s); +typedef ma_pa_operation* (* ma_pa_stream_cork_proc) (ma_pa_stream* s, int b, ma_pa_stream_success_cb_t cb, void* userdata); +typedef ma_pa_operation* (* ma_pa_stream_trigger_proc) (ma_pa_stream* s, 
ma_pa_stream_success_cb_t cb, void* userdata); +typedef int (* ma_pa_stream_begin_write_proc) (ma_pa_stream* s, void** data, size_t* nbytes); +typedef int (* ma_pa_stream_write_proc) (ma_pa_stream* s, const void* data, size_t nbytes, ma_pa_free_cb_t free_cb, int64_t offset, ma_pa_seek_mode_t seek); +typedef int (* ma_pa_stream_peek_proc) (ma_pa_stream* s, const void** data, size_t* nbytes); +typedef int (* ma_pa_stream_drop_proc) (ma_pa_stream* s); +typedef size_t (* ma_pa_stream_writable_size_proc) (ma_pa_stream* s); +typedef size_t (* ma_pa_stream_readable_size_proc) (ma_pa_stream* s); + +typedef struct +{ + ma_uint32 count; + ma_uint32 capacity; + ma_device_info* pInfo; +} ma_pulse_device_enum_data; + +static ma_result ma_result_from_pulse(int result) +{ + switch (result) { + case MA_PA_OK: return MA_SUCCESS; + case MA_PA_ERR_ACCESS: return MA_ACCESS_DENIED; + case MA_PA_ERR_INVALID: return MA_INVALID_ARGS; + case MA_PA_ERR_NOENTITY: return MA_NO_DEVICE; + default: return MA_ERROR; + } +} + +#if 0 +static ma_pa_sample_format_t ma_format_to_pulse(ma_format format) +{ + if (ma_is_little_endian()) { + switch (format) { + case ma_format_s16: return MA_PA_SAMPLE_S16LE; + case ma_format_s24: return MA_PA_SAMPLE_S24LE; + case ma_format_s32: return MA_PA_SAMPLE_S32LE; + case ma_format_f32: return MA_PA_SAMPLE_FLOAT32LE; + default: break; + } + } else { + switch (format) { + case ma_format_s16: return MA_PA_SAMPLE_S16BE; + case ma_format_s24: return MA_PA_SAMPLE_S24BE; + case ma_format_s32: return MA_PA_SAMPLE_S32BE; + case ma_format_f32: return MA_PA_SAMPLE_FLOAT32BE; + default: break; + } + } + + /* Endian agnostic. */ + switch (format) { + case ma_format_u8: return MA_PA_SAMPLE_U8; + default: return MA_PA_SAMPLE_INVALID; + } +} +#endif + +static ma_format ma_format_from_pulse(ma_pa_sample_format_t format) +{ + if (ma_is_little_endian()) { + switch (format) { + case MA_PA_SAMPLE_S16LE: return ma_format_s16; + case MA_PA_SAMPLE_S24LE: return ma_format_s24; + case MA_PA_SAMPLE_S32LE: return ma_format_s32; + case MA_PA_SAMPLE_FLOAT32LE: return ma_format_f32; + default: break; + } + } else { + switch (format) { + case MA_PA_SAMPLE_S16BE: return ma_format_s16; + case MA_PA_SAMPLE_S24BE: return ma_format_s24; + case MA_PA_SAMPLE_S32BE: return ma_format_s32; + case MA_PA_SAMPLE_FLOAT32BE: return ma_format_f32; + default: break; + } + } + + /* Endian agnostic. 
*/ + switch (format) { + case MA_PA_SAMPLE_U8: return ma_format_u8; + default: return ma_format_unknown; + } +} + +static ma_channel ma_channel_position_from_pulse(ma_pa_channel_position_t position) +{ + switch (position) + { + case MA_PA_CHANNEL_POSITION_INVALID: return MA_CHANNEL_NONE; + case MA_PA_CHANNEL_POSITION_MONO: return MA_CHANNEL_MONO; + case MA_PA_CHANNEL_POSITION_FRONT_LEFT: return MA_CHANNEL_FRONT_LEFT; + case MA_PA_CHANNEL_POSITION_FRONT_RIGHT: return MA_CHANNEL_FRONT_RIGHT; + case MA_PA_CHANNEL_POSITION_FRONT_CENTER: return MA_CHANNEL_FRONT_CENTER; + case MA_PA_CHANNEL_POSITION_REAR_CENTER: return MA_CHANNEL_BACK_CENTER; + case MA_PA_CHANNEL_POSITION_REAR_LEFT: return MA_CHANNEL_BACK_LEFT; + case MA_PA_CHANNEL_POSITION_REAR_RIGHT: return MA_CHANNEL_BACK_RIGHT; + case MA_PA_CHANNEL_POSITION_LFE: return MA_CHANNEL_LFE; + case MA_PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER: return MA_CHANNEL_FRONT_LEFT_CENTER; + case MA_PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER: return MA_CHANNEL_FRONT_RIGHT_CENTER; + case MA_PA_CHANNEL_POSITION_SIDE_LEFT: return MA_CHANNEL_SIDE_LEFT; + case MA_PA_CHANNEL_POSITION_SIDE_RIGHT: return MA_CHANNEL_SIDE_RIGHT; + case MA_PA_CHANNEL_POSITION_AUX0: return MA_CHANNEL_AUX_0; + case MA_PA_CHANNEL_POSITION_AUX1: return MA_CHANNEL_AUX_1; + case MA_PA_CHANNEL_POSITION_AUX2: return MA_CHANNEL_AUX_2; + case MA_PA_CHANNEL_POSITION_AUX3: return MA_CHANNEL_AUX_3; + case MA_PA_CHANNEL_POSITION_AUX4: return MA_CHANNEL_AUX_4; + case MA_PA_CHANNEL_POSITION_AUX5: return MA_CHANNEL_AUX_5; + case MA_PA_CHANNEL_POSITION_AUX6: return MA_CHANNEL_AUX_6; + case MA_PA_CHANNEL_POSITION_AUX7: return MA_CHANNEL_AUX_7; + case MA_PA_CHANNEL_POSITION_AUX8: return MA_CHANNEL_AUX_8; + case MA_PA_CHANNEL_POSITION_AUX9: return MA_CHANNEL_AUX_9; + case MA_PA_CHANNEL_POSITION_AUX10: return MA_CHANNEL_AUX_10; + case MA_PA_CHANNEL_POSITION_AUX11: return MA_CHANNEL_AUX_11; + case MA_PA_CHANNEL_POSITION_AUX12: return MA_CHANNEL_AUX_12; + case MA_PA_CHANNEL_POSITION_AUX13: return MA_CHANNEL_AUX_13; + case MA_PA_CHANNEL_POSITION_AUX14: return MA_CHANNEL_AUX_14; + case MA_PA_CHANNEL_POSITION_AUX15: return MA_CHANNEL_AUX_15; + case MA_PA_CHANNEL_POSITION_AUX16: return MA_CHANNEL_AUX_16; + case MA_PA_CHANNEL_POSITION_AUX17: return MA_CHANNEL_AUX_17; + case MA_PA_CHANNEL_POSITION_AUX18: return MA_CHANNEL_AUX_18; + case MA_PA_CHANNEL_POSITION_AUX19: return MA_CHANNEL_AUX_19; + case MA_PA_CHANNEL_POSITION_AUX20: return MA_CHANNEL_AUX_20; + case MA_PA_CHANNEL_POSITION_AUX21: return MA_CHANNEL_AUX_21; + case MA_PA_CHANNEL_POSITION_AUX22: return MA_CHANNEL_AUX_22; + case MA_PA_CHANNEL_POSITION_AUX23: return MA_CHANNEL_AUX_23; + case MA_PA_CHANNEL_POSITION_AUX24: return MA_CHANNEL_AUX_24; + case MA_PA_CHANNEL_POSITION_AUX25: return MA_CHANNEL_AUX_25; + case MA_PA_CHANNEL_POSITION_AUX26: return MA_CHANNEL_AUX_26; + case MA_PA_CHANNEL_POSITION_AUX27: return MA_CHANNEL_AUX_27; + case MA_PA_CHANNEL_POSITION_AUX28: return MA_CHANNEL_AUX_28; + case MA_PA_CHANNEL_POSITION_AUX29: return MA_CHANNEL_AUX_29; + case MA_PA_CHANNEL_POSITION_AUX30: return MA_CHANNEL_AUX_30; + case MA_PA_CHANNEL_POSITION_AUX31: return MA_CHANNEL_AUX_31; + case MA_PA_CHANNEL_POSITION_TOP_CENTER: return MA_CHANNEL_TOP_CENTER; + case MA_PA_CHANNEL_POSITION_TOP_FRONT_LEFT: return MA_CHANNEL_TOP_FRONT_LEFT; + case MA_PA_CHANNEL_POSITION_TOP_FRONT_RIGHT: return MA_CHANNEL_TOP_FRONT_RIGHT; + case MA_PA_CHANNEL_POSITION_TOP_FRONT_CENTER: return MA_CHANNEL_TOP_FRONT_CENTER; + case MA_PA_CHANNEL_POSITION_TOP_REAR_LEFT: return 
MA_CHANNEL_TOP_BACK_LEFT; + case MA_PA_CHANNEL_POSITION_TOP_REAR_RIGHT: return MA_CHANNEL_TOP_BACK_RIGHT; + case MA_PA_CHANNEL_POSITION_TOP_REAR_CENTER: return MA_CHANNEL_TOP_BACK_CENTER; + default: return MA_CHANNEL_NONE; + } +} + +#if 0 +static ma_pa_channel_position_t ma_channel_position_to_pulse(ma_channel position) +{ + switch (position) + { + case MA_CHANNEL_NONE: return MA_PA_CHANNEL_POSITION_INVALID; + case MA_CHANNEL_FRONT_LEFT: return MA_PA_CHANNEL_POSITION_FRONT_LEFT; + case MA_CHANNEL_FRONT_RIGHT: return MA_PA_CHANNEL_POSITION_FRONT_RIGHT; + case MA_CHANNEL_FRONT_CENTER: return MA_PA_CHANNEL_POSITION_FRONT_CENTER; + case MA_CHANNEL_LFE: return MA_PA_CHANNEL_POSITION_LFE; + case MA_CHANNEL_BACK_LEFT: return MA_PA_CHANNEL_POSITION_REAR_LEFT; + case MA_CHANNEL_BACK_RIGHT: return MA_PA_CHANNEL_POSITION_REAR_RIGHT; + case MA_CHANNEL_FRONT_LEFT_CENTER: return MA_PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER; + case MA_CHANNEL_FRONT_RIGHT_CENTER: return MA_PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER; + case MA_CHANNEL_BACK_CENTER: return MA_PA_CHANNEL_POSITION_REAR_CENTER; + case MA_CHANNEL_SIDE_LEFT: return MA_PA_CHANNEL_POSITION_SIDE_LEFT; + case MA_CHANNEL_SIDE_RIGHT: return MA_PA_CHANNEL_POSITION_SIDE_RIGHT; + case MA_CHANNEL_TOP_CENTER: return MA_PA_CHANNEL_POSITION_TOP_CENTER; + case MA_CHANNEL_TOP_FRONT_LEFT: return MA_PA_CHANNEL_POSITION_TOP_FRONT_LEFT; + case MA_CHANNEL_TOP_FRONT_CENTER: return MA_PA_CHANNEL_POSITION_TOP_FRONT_CENTER; + case MA_CHANNEL_TOP_FRONT_RIGHT: return MA_PA_CHANNEL_POSITION_TOP_FRONT_RIGHT; + case MA_CHANNEL_TOP_BACK_LEFT: return MA_PA_CHANNEL_POSITION_TOP_REAR_LEFT; + case MA_CHANNEL_TOP_BACK_CENTER: return MA_PA_CHANNEL_POSITION_TOP_REAR_CENTER; + case MA_CHANNEL_TOP_BACK_RIGHT: return MA_PA_CHANNEL_POSITION_TOP_REAR_RIGHT; + case MA_CHANNEL_19: return MA_PA_CHANNEL_POSITION_AUX18; + case MA_CHANNEL_20: return MA_PA_CHANNEL_POSITION_AUX19; + case MA_CHANNEL_21: return MA_PA_CHANNEL_POSITION_AUX20; + case MA_CHANNEL_22: return MA_PA_CHANNEL_POSITION_AUX21; + case MA_CHANNEL_23: return MA_PA_CHANNEL_POSITION_AUX22; + case MA_CHANNEL_24: return MA_PA_CHANNEL_POSITION_AUX23; + case MA_CHANNEL_25: return MA_PA_CHANNEL_POSITION_AUX24; + case MA_CHANNEL_26: return MA_PA_CHANNEL_POSITION_AUX25; + case MA_CHANNEL_27: return MA_PA_CHANNEL_POSITION_AUX26; + case MA_CHANNEL_28: return MA_PA_CHANNEL_POSITION_AUX27; + case MA_CHANNEL_29: return MA_PA_CHANNEL_POSITION_AUX28; + case MA_CHANNEL_30: return MA_PA_CHANNEL_POSITION_AUX29; + case MA_CHANNEL_31: return MA_PA_CHANNEL_POSITION_AUX30; + case MA_CHANNEL_32: return MA_PA_CHANNEL_POSITION_AUX31; + default: return (ma_pa_channel_position_t)position; + } +} +#endif + +static ma_result ma_wait_for_operation__pulse(ma_context* pContext, ma_pa_mainloop* pMainLoop, ma_pa_operation* pOP) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pMainLoop != NULL); + MA_ASSERT(pOP != NULL); + + while (((ma_pa_operation_get_state_proc)pContext->pulse.pa_operation_get_state)(pOP) == MA_PA_OPERATION_RUNNING) { + int error = ((ma_pa_mainloop_iterate_proc)pContext->pulse.pa_mainloop_iterate)(pMainLoop, 1, NULL); + if (error < 0) { + return ma_result_from_pulse(error); + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device__wait_for_operation__pulse(ma_device* pDevice, ma_pa_operation* pOP) +{ + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pOP != NULL); + + return ma_wait_for_operation__pulse(pDevice->pContext, (ma_pa_mainloop*)pDevice->pulse.pMainLoop, pOP); +} + + +static ma_bool32 ma_context_is_device_id_equal__pulse(ma_context* 
pContext, const ma_device_id* pID0, const ma_device_id* pID1) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pID0 != NULL); + MA_ASSERT(pID1 != NULL); + (void)pContext; + + return ma_strcmp(pID0->pulse, pID1->pulse) == 0; +} + + +typedef struct +{ + ma_context* pContext; + ma_enum_devices_callback_proc callback; + void* pUserData; + ma_bool32 isTerminated; +} ma_context_enumerate_devices_callback_data__pulse; + +static void ma_context_enumerate_devices_sink_callback__pulse(ma_pa_context* pPulseContext, const ma_pa_sink_info* pSinkInfo, int endOfList, void* pUserData) +{ + ma_context_enumerate_devices_callback_data__pulse* pData = (ma_context_enumerate_devices_callback_data__pulse*)pUserData; + ma_device_info deviceInfo; + + MA_ASSERT(pData != NULL); + + if (endOfList || pData->isTerminated) { + return; + } + + MA_ZERO_OBJECT(&deviceInfo); + + /* The name from PulseAudio is the ID for miniaudio. */ + if (pSinkInfo->name != NULL) { + ma_strncpy_s(deviceInfo.id.pulse, sizeof(deviceInfo.id.pulse), pSinkInfo->name, (size_t)-1); + } + + /* The description from PulseAudio is the name for miniaudio. */ + if (pSinkInfo->description != NULL) { + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), pSinkInfo->description, (size_t)-1); + } + + pData->isTerminated = !pData->callback(pData->pContext, ma_device_type_playback, &deviceInfo, pData->pUserData); + + (void)pPulseContext; /* Unused. */ +} + +static void ma_context_enumerate_devices_source_callback__pulse(ma_pa_context* pPulseContext, const ma_pa_source_info* pSinkInfo, int endOfList, void* pUserData) +{ + ma_context_enumerate_devices_callback_data__pulse* pData = (ma_context_enumerate_devices_callback_data__pulse*)pUserData; + ma_device_info deviceInfo; + + MA_ASSERT(pData != NULL); + + if (endOfList || pData->isTerminated) { + return; + } + + MA_ZERO_OBJECT(&deviceInfo); + + /* The name from PulseAudio is the ID for miniaudio. */ + if (pSinkInfo->name != NULL) { + ma_strncpy_s(deviceInfo.id.pulse, sizeof(deviceInfo.id.pulse), pSinkInfo->name, (size_t)-1); + } + + /* The description from PulseAudio is the name for miniaudio. */ + if (pSinkInfo->description != NULL) { + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), pSinkInfo->description, (size_t)-1); + } + + pData->isTerminated = !pData->callback(pData->pContext, ma_device_type_capture, &deviceInfo, pData->pUserData); + + (void)pPulseContext; /* Unused. 
*/ +} + +static ma_result ma_context_enumerate_devices__pulse(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + ma_result result = MA_SUCCESS; + ma_context_enumerate_devices_callback_data__pulse callbackData; + ma_pa_operation* pOP = NULL; + ma_pa_mainloop* pMainLoop; + ma_pa_mainloop_api* pAPI; + ma_pa_context* pPulseContext; + int error; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + callbackData.pContext = pContext; + callbackData.callback = callback; + callbackData.pUserData = pUserData; + callbackData.isTerminated = MA_FALSE; + + pMainLoop = ((ma_pa_mainloop_new_proc)pContext->pulse.pa_mainloop_new)(); + if (pMainLoop == NULL) { + return MA_FAILED_TO_INIT_BACKEND; + } + + pAPI = ((ma_pa_mainloop_get_api_proc)pContext->pulse.pa_mainloop_get_api)(pMainLoop); + if (pAPI == NULL) { + ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop); + return MA_FAILED_TO_INIT_BACKEND; + } + + pPulseContext = ((ma_pa_context_new_proc)pContext->pulse.pa_context_new)(pAPI, pContext->pulse.pApplicationName); + if (pPulseContext == NULL) { + ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop); + return MA_FAILED_TO_INIT_BACKEND; + } + + error = ((ma_pa_context_connect_proc)pContext->pulse.pa_context_connect)(pPulseContext, pContext->pulse.pServerName, (pContext->pulse.tryAutoSpawn) ? 0 : MA_PA_CONTEXT_NOAUTOSPAWN, NULL); + if (error != MA_PA_OK) { + ((ma_pa_context_unref_proc)pContext->pulse.pa_context_unref)(pPulseContext); + ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop); + return ma_result_from_pulse(error); + } + + for (;;) { + ma_pa_context_state_t state = ((ma_pa_context_get_state_proc)pContext->pulse.pa_context_get_state)(pPulseContext); + if (state == MA_PA_CONTEXT_READY) { + break; /* Success. */ + } + if (state == MA_PA_CONTEXT_CONNECTING || state == MA_PA_CONTEXT_AUTHORIZING || state == MA_PA_CONTEXT_SETTING_NAME) { + error = ((ma_pa_mainloop_iterate_proc)pContext->pulse.pa_mainloop_iterate)(pMainLoop, 1, NULL); + if (error < 0) { + result = ma_result_from_pulse(error); + goto done; + } + +#ifdef MA_DEBUG_OUTPUT + printf("[PulseAudio] pa_context_get_state() returned %d. Waiting.\n", state); +#endif + continue; /* Keep trying. */ + } + if (state == MA_PA_CONTEXT_UNCONNECTED || state == MA_PA_CONTEXT_FAILED || state == MA_PA_CONTEXT_TERMINATED) { +#ifdef MA_DEBUG_OUTPUT + printf("[PulseAudio] pa_context_get_state() returned %d. Failed.\n", state); +#endif + goto done; /* Failed. */ + } + } + + + /* Playback. */ + if (!callbackData.isTerminated) { + pOP = ((ma_pa_context_get_sink_info_list_proc)pContext->pulse.pa_context_get_sink_info_list)(pPulseContext, ma_context_enumerate_devices_sink_callback__pulse, &callbackData); + if (pOP == NULL) { + result = MA_ERROR; + goto done; + } + + result = ma_wait_for_operation__pulse(pContext, pMainLoop, pOP); + ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP); + if (result != MA_SUCCESS) { + goto done; + } + } + + + /* Capture. 
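+
+    Capture (source) devices are enumerated below with the same pattern used for the playback (sink)
+    devices above: request the info list from the server, then iterate the main loop until the
+    operation completes. From the application's side all of this is driven through the public
+    ma_context_enumerate_devices() API. The sketch below shows that usage; the callback name and the
+    printf() call are illustrative only, and returning MA_FALSE from the callback stops enumeration
+    early:
+
+        ma_bool32 my_enum_callback(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pInfo, void* pUserData)
+        {
+            (void)pContext;
+            (void)pUserData;
+            printf("%s: %s\n", (deviceType == ma_device_type_playback) ? "Playback" : "Capture", pInfo->name);
+            return MA_TRUE;
+        }
+
+        ma_context context;
+        if (ma_context_init(NULL, 0, NULL, &context) == MA_SUCCESS) {
+            ma_context_enumerate_devices(&context, my_enum_callback, NULL);
+            ma_context_uninit(&context);
+        }
+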
*/ + if (!callbackData.isTerminated) { + pOP = ((ma_pa_context_get_source_info_list_proc)pContext->pulse.pa_context_get_source_info_list)(pPulseContext, ma_context_enumerate_devices_source_callback__pulse, &callbackData); + if (pOP == NULL) { + result = MA_ERROR; + goto done; + } + + result = ma_wait_for_operation__pulse(pContext, pMainLoop, pOP); + ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP); + if (result != MA_SUCCESS) { + goto done; + } + } + +done: + ((ma_pa_context_disconnect_proc)pContext->pulse.pa_context_disconnect)(pPulseContext); + ((ma_pa_context_unref_proc)pContext->pulse.pa_context_unref)(pPulseContext); + ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop); + return result; +} + + +typedef struct +{ + ma_device_info* pDeviceInfo; + ma_bool32 foundDevice; +} ma_context_get_device_info_callback_data__pulse; + +static void ma_context_get_device_info_sink_callback__pulse(ma_pa_context* pPulseContext, const ma_pa_sink_info* pInfo, int endOfList, void* pUserData) +{ + ma_context_get_device_info_callback_data__pulse* pData = (ma_context_get_device_info_callback_data__pulse*)pUserData; + + if (endOfList > 0) { + return; + } + + MA_ASSERT(pData != NULL); + pData->foundDevice = MA_TRUE; + + if (pInfo->name != NULL) { + ma_strncpy_s(pData->pDeviceInfo->id.pulse, sizeof(pData->pDeviceInfo->id.pulse), pInfo->name, (size_t)-1); + } + + if (pInfo->description != NULL) { + ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), pInfo->description, (size_t)-1); + } + + pData->pDeviceInfo->minChannels = pInfo->sample_spec.channels; + pData->pDeviceInfo->maxChannels = pInfo->sample_spec.channels; + pData->pDeviceInfo->minSampleRate = pInfo->sample_spec.rate; + pData->pDeviceInfo->maxSampleRate = pInfo->sample_spec.rate; + pData->pDeviceInfo->formatCount = 1; + pData->pDeviceInfo->formats[0] = ma_format_from_pulse(pInfo->sample_spec.format); + + (void)pPulseContext; /* Unused. */ +} + +static void ma_context_get_device_info_source_callback__pulse(ma_pa_context* pPulseContext, const ma_pa_source_info* pInfo, int endOfList, void* pUserData) +{ + ma_context_get_device_info_callback_data__pulse* pData = (ma_context_get_device_info_callback_data__pulse*)pUserData; + + if (endOfList > 0) { + return; + } + + MA_ASSERT(pData != NULL); + pData->foundDevice = MA_TRUE; + + if (pInfo->name != NULL) { + ma_strncpy_s(pData->pDeviceInfo->id.pulse, sizeof(pData->pDeviceInfo->id.pulse), pInfo->name, (size_t)-1); + } + + if (pInfo->description != NULL) { + ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), pInfo->description, (size_t)-1); + } + + pData->pDeviceInfo->minChannels = pInfo->sample_spec.channels; + pData->pDeviceInfo->maxChannels = pInfo->sample_spec.channels; + pData->pDeviceInfo->minSampleRate = pInfo->sample_spec.rate; + pData->pDeviceInfo->maxSampleRate = pInfo->sample_spec.rate; + pData->pDeviceInfo->formatCount = 1; + pData->pDeviceInfo->formats[0] = ma_format_from_pulse(pInfo->sample_spec.format); + + (void)pPulseContext; /* Unused. 
*/ +} + +static ma_result ma_context_get_device_info__pulse(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) +{ + ma_result result = MA_SUCCESS; + ma_context_get_device_info_callback_data__pulse callbackData; + ma_pa_operation* pOP = NULL; + ma_pa_mainloop* pMainLoop; + ma_pa_mainloop_api* pAPI; + ma_pa_context* pPulseContext; + int error; + + MA_ASSERT(pContext != NULL); + + /* No exclusive mode with the PulseAudio backend. */ + if (shareMode == ma_share_mode_exclusive) { + return MA_SHARE_MODE_NOT_SUPPORTED; + } + + callbackData.pDeviceInfo = pDeviceInfo; + callbackData.foundDevice = MA_FALSE; + + pMainLoop = ((ma_pa_mainloop_new_proc)pContext->pulse.pa_mainloop_new)(); + if (pMainLoop == NULL) { + return MA_FAILED_TO_INIT_BACKEND; + } + + pAPI = ((ma_pa_mainloop_get_api_proc)pContext->pulse.pa_mainloop_get_api)(pMainLoop); + if (pAPI == NULL) { + ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop); + return MA_FAILED_TO_INIT_BACKEND; + } + + pPulseContext = ((ma_pa_context_new_proc)pContext->pulse.pa_context_new)(pAPI, pContext->pulse.pApplicationName); + if (pPulseContext == NULL) { + ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop); + return MA_FAILED_TO_INIT_BACKEND; + } + + error = ((ma_pa_context_connect_proc)pContext->pulse.pa_context_connect)(pPulseContext, pContext->pulse.pServerName, 0, NULL); + if (error != MA_PA_OK) { + ((ma_pa_context_unref_proc)pContext->pulse.pa_context_unref)(pPulseContext); + ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop); + return ma_result_from_pulse(error); + } + + for (;;) { + ma_pa_context_state_t state = ((ma_pa_context_get_state_proc)pContext->pulse.pa_context_get_state)(pPulseContext); + if (state == MA_PA_CONTEXT_READY) { + break; /* Success. */ + } + if (state == MA_PA_CONTEXT_CONNECTING || state == MA_PA_CONTEXT_AUTHORIZING || state == MA_PA_CONTEXT_SETTING_NAME) { + error = ((ma_pa_mainloop_iterate_proc)pContext->pulse.pa_mainloop_iterate)(pMainLoop, 1, NULL); + if (error < 0) { + result = ma_result_from_pulse(error); + goto done; + } + +#ifdef MA_DEBUG_OUTPUT + printf("[PulseAudio] pa_context_get_state() returned %d. Waiting.\n", state); +#endif + continue; /* Keep trying. */ + } + if (state == MA_PA_CONTEXT_UNCONNECTED || state == MA_PA_CONTEXT_FAILED || state == MA_PA_CONTEXT_TERMINATED) { +#ifdef MA_DEBUG_OUTPUT + printf("[PulseAudio] pa_context_get_state() returned %d. Failed.\n", state); +#endif + goto done; /* Failed. 
*/ + } + } + + if (deviceType == ma_device_type_playback) { + pOP = ((ma_pa_context_get_sink_info_by_name_proc)pContext->pulse.pa_context_get_sink_info_by_name)(pPulseContext, pDeviceID->pulse, ma_context_get_device_info_sink_callback__pulse, &callbackData); + } else { + pOP = ((ma_pa_context_get_source_info_by_name_proc)pContext->pulse.pa_context_get_source_info_by_name)(pPulseContext, pDeviceID->pulse, ma_context_get_device_info_source_callback__pulse, &callbackData); + } + + if (pOP != NULL) { + ma_wait_for_operation__pulse(pContext, pMainLoop, pOP); + ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP); + } else { + result = MA_ERROR; + goto done; + } + + if (!callbackData.foundDevice) { + result = MA_NO_DEVICE; + goto done; + } + + +done: + ((ma_pa_context_disconnect_proc)pContext->pulse.pa_context_disconnect)(pPulseContext); + ((ma_pa_context_unref_proc)pContext->pulse.pa_context_unref)(pPulseContext); + ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop); + return result; +} + + +static void ma_pulse_device_state_callback(ma_pa_context* pPulseContext, void* pUserData) +{ + ma_device* pDevice; + ma_context* pContext; + + pDevice = (ma_device*)pUserData; + MA_ASSERT(pDevice != NULL); + + pContext = pDevice->pContext; + MA_ASSERT(pContext != NULL); + + pDevice->pulse.pulseContextState = ((ma_pa_context_get_state_proc)pContext->pulse.pa_context_get_state)(pPulseContext); +} + +void ma_device_sink_info_callback(ma_pa_context* pPulseContext, const ma_pa_sink_info* pInfo, int endOfList, void* pUserData) +{ + ma_pa_sink_info* pInfoOut; + + if (endOfList > 0) { + return; + } + + pInfoOut = (ma_pa_sink_info*)pUserData; + MA_ASSERT(pInfoOut != NULL); + + *pInfoOut = *pInfo; + + (void)pPulseContext; /* Unused. */ +} + +static void ma_device_source_info_callback(ma_pa_context* pPulseContext, const ma_pa_source_info* pInfo, int endOfList, void* pUserData) +{ + ma_pa_source_info* pInfoOut; + + if (endOfList > 0) { + return; + } + + pInfoOut = (ma_pa_source_info*)pUserData; + MA_ASSERT(pInfoOut != NULL); + + *pInfoOut = *pInfo; + + (void)pPulseContext; /* Unused. */ +} + +static void ma_device_sink_name_callback(ma_pa_context* pPulseContext, const ma_pa_sink_info* pInfo, int endOfList, void* pUserData) +{ + ma_device* pDevice; + + if (endOfList > 0) { + return; + } + + pDevice = (ma_device*)pUserData; + MA_ASSERT(pDevice != NULL); + + ma_strncpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), pInfo->description, (size_t)-1); + + (void)pPulseContext; /* Unused. */ +} + +static void ma_device_source_name_callback(ma_pa_context* pPulseContext, const ma_pa_source_info* pInfo, int endOfList, void* pUserData) +{ + ma_device* pDevice; + + if (endOfList > 0) { + return; + } + + pDevice = (ma_device*)pUserData; + MA_ASSERT(pDevice != NULL); + + ma_strncpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), pInfo->description, (size_t)-1); + + (void)pPulseContext; /* Unused. 
*/
+}
+
+static void ma_device_uninit__pulse(ma_device* pDevice)
+{
+    ma_context* pContext;
+
+    MA_ASSERT(pDevice != NULL);
+
+    pContext = pDevice->pContext;
+    MA_ASSERT(pContext != NULL);
+
+    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+        ((ma_pa_stream_disconnect_proc)pContext->pulse.pa_stream_disconnect)((ma_pa_stream*)pDevice->pulse.pStreamCapture);
+        ((ma_pa_stream_unref_proc)pContext->pulse.pa_stream_unref)((ma_pa_stream*)pDevice->pulse.pStreamCapture);
+    }
+    if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+        ((ma_pa_stream_disconnect_proc)pContext->pulse.pa_stream_disconnect)((ma_pa_stream*)pDevice->pulse.pStreamPlayback);
+        ((ma_pa_stream_unref_proc)pContext->pulse.pa_stream_unref)((ma_pa_stream*)pDevice->pulse.pStreamPlayback);
+    }
+
+    ((ma_pa_context_disconnect_proc)pContext->pulse.pa_context_disconnect)((ma_pa_context*)pDevice->pulse.pPulseContext);
+    ((ma_pa_context_unref_proc)pContext->pulse.pa_context_unref)((ma_pa_context*)pDevice->pulse.pPulseContext);
+    ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)((ma_pa_mainloop*)pDevice->pulse.pMainLoop);
+}
+
+static ma_pa_buffer_attr ma_device__pa_buffer_attr_new(ma_uint32 periodSizeInFrames, ma_uint32 periods, const ma_pa_sample_spec* ss)
+{
+    ma_pa_buffer_attr attr;
+    attr.maxlength = periodSizeInFrames * periods * ma_get_bytes_per_frame(ma_format_from_pulse(ss->format), ss->channels);
+    attr.tlength = attr.maxlength / periods;
+    attr.prebuf = (ma_uint32)-1;
+    attr.minreq = (ma_uint32)-1;
+    attr.fragsize = attr.maxlength / periods;
+
+    return attr;
+}
+
+static ma_pa_stream* ma_device__pa_stream_new__pulse(ma_device* pDevice, const char* pStreamName, const ma_pa_sample_spec* ss, const ma_pa_channel_map* cmap)
+{
+    static int g_StreamCounter = 0;
+    char actualStreamName[256];
+
+    if (pStreamName != NULL) {
+        ma_strncpy_s(actualStreamName, sizeof(actualStreamName), pStreamName, (size_t)-1);
+    } else {
+        ma_strcpy_s(actualStreamName, sizeof(actualStreamName), "miniaudio:");
+        ma_itoa_s(g_StreamCounter, actualStreamName + 10, sizeof(actualStreamName)-10, 10); /* 10 = strlen("miniaudio:") */
+    }
+    g_StreamCounter += 1;
+
+    return ((ma_pa_stream_new_proc)pDevice->pContext->pulse.pa_stream_new)((ma_pa_context*)pDevice->pulse.pPulseContext, actualStreamName, ss, cmap);
+}
+
+static ma_result ma_device_init__pulse(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice)
+{
+    ma_result result = MA_SUCCESS;
+    int error = 0;
+    const char* devPlayback = NULL;
+    const char* devCapture = NULL;
+    ma_uint32 periodSizeInMilliseconds;
+    ma_pa_sink_info sinkInfo;
+    ma_pa_source_info sourceInfo;
+    ma_pa_operation* pOP = NULL;
+    ma_pa_sample_spec ss;
+    ma_pa_channel_map cmap;
+    ma_pa_buffer_attr attr;
+    const ma_pa_sample_spec* pActualSS = NULL;
+    const ma_pa_channel_map* pActualCMap = NULL;
+    const ma_pa_buffer_attr* pActualAttr = NULL;
+    ma_uint32 iChannel;
+    ma_pa_stream_flags_t streamFlags;
+
+    MA_ASSERT(pDevice != NULL);
+    MA_ZERO_OBJECT(&pDevice->pulse);
+
+    if (pConfig->deviceType == ma_device_type_loopback) {
+        return MA_DEVICE_TYPE_NOT_SUPPORTED;
+    }
+
+    /* No exclusive mode with the PulseAudio backend. 
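+
+    PulseAudio is a shared-mode-only backend, so a request for exclusive mode on either the playback
+    or the capture side is rejected just below with MA_SHARE_MODE_NOT_SUPPORTED. For reference, a
+    rough sketch of the kind of configuration that reaches this path (the data callback name is
+    hypothetical; the rest is the standard miniaudio device config API):
+
+        ma_device device;
+        ma_device_config config = ma_device_config_init(ma_device_type_playback);
+        config.playback.format    = ma_format_f32;
+        config.playback.channels  = 2;
+        config.sampleRate         = 48000;
+        config.playback.shareMode = ma_share_mode_shared;
+        config.dataCallback       = my_data_callback;
+        ma_device_init(NULL, &config, &device);
+
+    Setting shareMode to ma_share_mode_exclusive instead would cause this function to fail with
+    MA_SHARE_MODE_NOT_SUPPORTED.
+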
*/ + if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.shareMode == ma_share_mode_exclusive) || + ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.shareMode == ma_share_mode_exclusive)) { + return MA_SHARE_MODE_NOT_SUPPORTED; + } + + if ((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.pDeviceID != NULL) { + devPlayback = pConfig->playback.pDeviceID->pulse; + } + if ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.pDeviceID != NULL) { + devCapture = pConfig->capture.pDeviceID->pulse; + } + + periodSizeInMilliseconds = pConfig->periodSizeInMilliseconds; + if (periodSizeInMilliseconds == 0) { + periodSizeInMilliseconds = ma_calculate_buffer_size_in_milliseconds_from_frames(pConfig->periodSizeInFrames, pConfig->sampleRate); + } + + pDevice->pulse.pMainLoop = ((ma_pa_mainloop_new_proc)pContext->pulse.pa_mainloop_new)(); + if (pDevice->pulse.pMainLoop == NULL) { + result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to create main loop for device.", MA_FAILED_TO_INIT_BACKEND); + goto on_error0; + } + + pDevice->pulse.pAPI = ((ma_pa_mainloop_get_api_proc)pContext->pulse.pa_mainloop_get_api)((ma_pa_mainloop*)pDevice->pulse.pMainLoop); + if (pDevice->pulse.pAPI == NULL) { + result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to retrieve PulseAudio main loop.", MA_FAILED_TO_INIT_BACKEND); + goto on_error1; + } + + pDevice->pulse.pPulseContext = ((ma_pa_context_new_proc)pContext->pulse.pa_context_new)((ma_pa_mainloop_api*)pDevice->pulse.pAPI, pContext->pulse.pApplicationName); + if (pDevice->pulse.pPulseContext == NULL) { + result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to create PulseAudio context for device.", MA_FAILED_TO_INIT_BACKEND); + goto on_error1; + } + + error = ((ma_pa_context_connect_proc)pContext->pulse.pa_context_connect)((ma_pa_context*)pDevice->pulse.pPulseContext, pContext->pulse.pServerName, (pContext->pulse.tryAutoSpawn) ? 0 : MA_PA_CONTEXT_NOAUTOSPAWN, NULL); + if (error != MA_PA_OK) { + result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to connect PulseAudio context.", ma_result_from_pulse(error)); + goto on_error2; + } + + + pDevice->pulse.pulseContextState = MA_PA_CONTEXT_UNCONNECTED; + ((ma_pa_context_set_state_callback_proc)pContext->pulse.pa_context_set_state_callback)((ma_pa_context*)pDevice->pulse.pPulseContext, ma_pulse_device_state_callback, pDevice); + + /* Wait for PulseAudio to get itself ready before returning. */ + for (;;) { + if (pDevice->pulse.pulseContextState == MA_PA_CONTEXT_READY) { + break; + } + + /* An error may have occurred. 
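+
+        pulseContextState is updated asynchronously by ma_pulse_device_state_callback, registered a few
+        lines above. A FAILED or TERMINATED context will never transition to READY, so those states are
+        treated as fatal here rather than iterating forever. With compile-time linking this wait is
+        roughly equivalent to the following sketch against the raw PulseAudio API (hypothetical
+        ctx/mainloop variables, error handling elided):
+
+            for (;;) {
+                pa_context_state_t state = pa_context_get_state(ctx);
+                if (state == PA_CONTEXT_READY || state == PA_CONTEXT_FAILED || state == PA_CONTEXT_TERMINATED) {
+                    break;
+                }
+                pa_mainloop_iterate(mainloop, 1, NULL);
+            }
+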
*/ + if (pDevice->pulse.pulseContextState == MA_PA_CONTEXT_FAILED || pDevice->pulse.pulseContextState == MA_PA_CONTEXT_TERMINATED) { + result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] An error occurred while connecting the PulseAudio context.", MA_ERROR); + goto on_error3; + } + + error = ((ma_pa_mainloop_iterate_proc)pContext->pulse.pa_mainloop_iterate)((ma_pa_mainloop*)pDevice->pulse.pMainLoop, 1, NULL); + if (error < 0) { + result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] The PulseAudio main loop returned an error while connecting the PulseAudio context.", ma_result_from_pulse(error)); + goto on_error3; + } + } + + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + pOP = ((ma_pa_context_get_source_info_by_name_proc)pContext->pulse.pa_context_get_source_info_by_name)((ma_pa_context*)pDevice->pulse.pPulseContext, devCapture, ma_device_source_info_callback, &sourceInfo); + if (pOP != NULL) { + ma_device__wait_for_operation__pulse(pDevice, pOP); + ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP); + } else { + result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to retrieve source info for capture device.", ma_result_from_pulse(error)); + goto on_error3; + } + + ss = sourceInfo.sample_spec; + cmap = sourceInfo.channel_map; + + pDevice->capture.internalPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(periodSizeInMilliseconds, ss.rate); + pDevice->capture.internalPeriods = pConfig->periods; + + attr = ma_device__pa_buffer_attr_new(pDevice->capture.internalPeriodSizeInFrames, pConfig->periods, &ss); + #ifdef MA_DEBUG_OUTPUT + printf("[PulseAudio] Capture attr: maxlength=%d, tlength=%d, prebuf=%d, minreq=%d, fragsize=%d; internalPeriodSizeInFrames=%d\n", attr.maxlength, attr.tlength, attr.prebuf, attr.minreq, attr.fragsize, pDevice->capture.internalPeriodSizeInFrames); + #endif + + pDevice->pulse.pStreamCapture = ma_device__pa_stream_new__pulse(pDevice, pConfig->pulse.pStreamNameCapture, &ss, &cmap); + if (pDevice->pulse.pStreamCapture == NULL) { + result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to create PulseAudio capture stream.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); + goto on_error3; + } + + streamFlags = MA_PA_STREAM_START_CORKED | MA_PA_STREAM_FIX_FORMAT | MA_PA_STREAM_FIX_RATE | MA_PA_STREAM_FIX_CHANNELS; + if (devCapture != NULL) { + streamFlags |= MA_PA_STREAM_DONT_MOVE; + } + + error = ((ma_pa_stream_connect_record_proc)pContext->pulse.pa_stream_connect_record)((ma_pa_stream*)pDevice->pulse.pStreamCapture, devCapture, &attr, streamFlags); + if (error != MA_PA_OK) { + result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to connect PulseAudio capture stream.", ma_result_from_pulse(error)); + goto on_error4; + } + + while (((ma_pa_stream_get_state_proc)pContext->pulse.pa_stream_get_state)((ma_pa_stream*)pDevice->pulse.pStreamCapture) != MA_PA_STREAM_READY) { + error = ((ma_pa_mainloop_iterate_proc)pContext->pulse.pa_mainloop_iterate)((ma_pa_mainloop*)pDevice->pulse.pMainLoop, 1, NULL); + if (error < 0) { + result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] The PulseAudio main loop returned an error while connecting the PulseAudio capture stream.", ma_result_from_pulse(error)); + goto on_error5; + } + } + + /* Internal format. 
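+
+        Because the stream was connected with the MA_PA_STREAM_FIX_FORMAT/RATE/CHANNELS flags, the
+        server may have substituted its own sample spec, so the actual spec is read back here and the
+        buffer attributes are recomputed when anything changed. As a worked example of the arithmetic
+        in ma_device__pa_buffer_attr_new() (the numbers are illustrative, not taken from a real
+        device): f32 stereo at 48000 Hz with a 480-frame period (10 ms) and 3 periods gives
+
+            bytes per frame = 4 bytes x 2 channels           = 8
+            maxlength       = 480 * 3 * 8                    = 11520 bytes
+            tlength         = fragsize = maxlength / periods = 3840 bytes
+
+        prebuf and minreq are left at (ma_uint32)-1 so the server picks sensible defaults.
+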
*/ + pActualSS = ((ma_pa_stream_get_sample_spec_proc)pContext->pulse.pa_stream_get_sample_spec)((ma_pa_stream*)pDevice->pulse.pStreamCapture); + if (pActualSS != NULL) { + /* If anything has changed between the requested and the actual sample spec, we need to update the buffer. */ + if (ss.format != pActualSS->format || ss.channels != pActualSS->channels || ss.rate != pActualSS->rate) { + attr = ma_device__pa_buffer_attr_new(pDevice->capture.internalPeriodSizeInFrames, pConfig->periods, pActualSS); + + pOP = ((ma_pa_stream_set_buffer_attr_proc)pContext->pulse.pa_stream_set_buffer_attr)((ma_pa_stream*)pDevice->pulse.pStreamCapture, &attr, NULL, NULL); + if (pOP != NULL) { + ma_device__wait_for_operation__pulse(pDevice, pOP); + ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP); + } + } + + ss = *pActualSS; + } + + pDevice->capture.internalFormat = ma_format_from_pulse(ss.format); + pDevice->capture.internalChannels = ss.channels; + pDevice->capture.internalSampleRate = ss.rate; + + /* Internal channel map. */ + pActualCMap = ((ma_pa_stream_get_channel_map_proc)pContext->pulse.pa_stream_get_channel_map)((ma_pa_stream*)pDevice->pulse.pStreamCapture); + if (pActualCMap != NULL) { + cmap = *pActualCMap; + } + for (iChannel = 0; iChannel < pDevice->capture.internalChannels; ++iChannel) { + pDevice->capture.internalChannelMap[iChannel] = ma_channel_position_from_pulse(cmap.map[iChannel]); + } + + /* Buffer. */ + pActualAttr = ((ma_pa_stream_get_buffer_attr_proc)pContext->pulse.pa_stream_get_buffer_attr)((ma_pa_stream*)pDevice->pulse.pStreamCapture); + if (pActualAttr != NULL) { + attr = *pActualAttr; + } + pDevice->capture.internalPeriods = attr.maxlength / attr.fragsize; + pDevice->capture.internalPeriodSizeInFrames = attr.maxlength / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels) / pDevice->capture.internalPeriods; + #ifdef MA_DEBUG_OUTPUT + printf("[PulseAudio] Capture actual attr: maxlength=%d, tlength=%d, prebuf=%d, minreq=%d, fragsize=%d; internalPeriodSizeInFrames=%d\n", attr.maxlength, attr.tlength, attr.prebuf, attr.minreq, attr.fragsize, pDevice->capture.internalPeriodSizeInFrames); + #endif + + /* Name. 
*/ + devCapture = ((ma_pa_stream_get_device_name_proc)pContext->pulse.pa_stream_get_device_name)((ma_pa_stream*)pDevice->pulse.pStreamCapture); + if (devCapture != NULL) { + ma_pa_operation* pOP = ((ma_pa_context_get_source_info_by_name_proc)pContext->pulse.pa_context_get_source_info_by_name)((ma_pa_context*)pDevice->pulse.pPulseContext, devCapture, ma_device_source_name_callback, pDevice); + if (pOP != NULL) { + ma_device__wait_for_operation__pulse(pDevice, pOP); + ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP); + } + } + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + pOP = ((ma_pa_context_get_sink_info_by_name_proc)pContext->pulse.pa_context_get_sink_info_by_name)((ma_pa_context*)pDevice->pulse.pPulseContext, devPlayback, ma_device_sink_info_callback, &sinkInfo); + if (pOP != NULL) { + ma_device__wait_for_operation__pulse(pDevice, pOP); + ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP); + } else { + result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to retrieve sink info for playback device.", ma_result_from_pulse(error)); + goto on_error3; + } + + ss = sinkInfo.sample_spec; + cmap = sinkInfo.channel_map; + + pDevice->playback.internalPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(periodSizeInMilliseconds, ss.rate); + pDevice->playback.internalPeriods = pConfig->periods; + + attr = ma_device__pa_buffer_attr_new(pDevice->playback.internalPeriodSizeInFrames, pConfig->periods, &ss); + #ifdef MA_DEBUG_OUTPUT + printf("[PulseAudio] Playback attr: maxlength=%d, tlength=%d, prebuf=%d, minreq=%d, fragsize=%d; internalPeriodSizeInFrames=%d\n", attr.maxlength, attr.tlength, attr.prebuf, attr.minreq, attr.fragsize, pDevice->playback.internalPeriodSizeInFrames); + #endif + + pDevice->pulse.pStreamPlayback = ma_device__pa_stream_new__pulse(pDevice, pConfig->pulse.pStreamNamePlayback, &ss, &cmap); + if (pDevice->pulse.pStreamPlayback == NULL) { + result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to create PulseAudio playback stream.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); + goto on_error3; + } + + streamFlags = MA_PA_STREAM_START_CORKED | MA_PA_STREAM_FIX_FORMAT | MA_PA_STREAM_FIX_RATE | MA_PA_STREAM_FIX_CHANNELS; + if (devPlayback != NULL) { + streamFlags |= MA_PA_STREAM_DONT_MOVE; + } + + error = ((ma_pa_stream_connect_playback_proc)pContext->pulse.pa_stream_connect_playback)((ma_pa_stream*)pDevice->pulse.pStreamPlayback, devPlayback, &attr, streamFlags, NULL, NULL); + if (error != MA_PA_OK) { + result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to connect PulseAudio playback stream.", ma_result_from_pulse(error)); + goto on_error6; + } + + while (((ma_pa_stream_get_state_proc)pContext->pulse.pa_stream_get_state)((ma_pa_stream*)pDevice->pulse.pStreamPlayback) != MA_PA_STREAM_READY) { + error = ((ma_pa_mainloop_iterate_proc)pContext->pulse.pa_mainloop_iterate)((ma_pa_mainloop*)pDevice->pulse.pMainLoop, 1, NULL); + if (error < 0) { + result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] The PulseAudio main loop returned an error while connecting the PulseAudio playback stream.", ma_result_from_pulse(error)); + goto on_error7; + } + } + + /* Internal format. 
*/ + pActualSS = ((ma_pa_stream_get_sample_spec_proc)pContext->pulse.pa_stream_get_sample_spec)((ma_pa_stream*)pDevice->pulse.pStreamPlayback); + if (pActualSS != NULL) { + /* If anything has changed between the requested and the actual sample spec, we need to update the buffer. */ + if (ss.format != pActualSS->format || ss.channels != pActualSS->channels || ss.rate != pActualSS->rate) { + attr = ma_device__pa_buffer_attr_new(pDevice->playback.internalPeriodSizeInFrames, pConfig->periods, pActualSS); + + pOP = ((ma_pa_stream_set_buffer_attr_proc)pContext->pulse.pa_stream_set_buffer_attr)((ma_pa_stream*)pDevice->pulse.pStreamPlayback, &attr, NULL, NULL); + if (pOP != NULL) { + ma_device__wait_for_operation__pulse(pDevice, pOP); + ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP); + } + } + + ss = *pActualSS; + } + + pDevice->playback.internalFormat = ma_format_from_pulse(ss.format); + pDevice->playback.internalChannels = ss.channels; + pDevice->playback.internalSampleRate = ss.rate; + + /* Internal channel map. */ + pActualCMap = ((ma_pa_stream_get_channel_map_proc)pContext->pulse.pa_stream_get_channel_map)((ma_pa_stream*)pDevice->pulse.pStreamPlayback); + if (pActualCMap != NULL) { + cmap = *pActualCMap; + } + for (iChannel = 0; iChannel < pDevice->playback.internalChannels; ++iChannel) { + pDevice->playback.internalChannelMap[iChannel] = ma_channel_position_from_pulse(cmap.map[iChannel]); + } + + /* Buffer. */ + pActualAttr = ((ma_pa_stream_get_buffer_attr_proc)pContext->pulse.pa_stream_get_buffer_attr)((ma_pa_stream*)pDevice->pulse.pStreamPlayback); + if (pActualAttr != NULL) { + attr = *pActualAttr; + } + pDevice->playback.internalPeriods = attr.maxlength / attr.tlength; + pDevice->playback.internalPeriodSizeInFrames = attr.maxlength / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels) / pDevice->playback.internalPeriods; + #ifdef MA_DEBUG_OUTPUT + printf("[PulseAudio] Playback actual attr: maxlength=%d, tlength=%d, prebuf=%d, minreq=%d, fragsize=%d; internalPeriodSizeInFrames=%d\n", attr.maxlength, attr.tlength, attr.prebuf, attr.minreq, attr.fragsize, pDevice->playback.internalPeriodSizeInFrames); + #endif + + /* Name. 
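+        As with capture, the playback stream's actual device name is queried and passed to pa_context_get_sink_info_by_name() with ma_device_sink_name_callback.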
*/ + devPlayback = ((ma_pa_stream_get_device_name_proc)pContext->pulse.pa_stream_get_device_name)((ma_pa_stream*)pDevice->pulse.pStreamPlayback); + if (devPlayback != NULL) { + ma_pa_operation* pOP = ((ma_pa_context_get_sink_info_by_name_proc)pContext->pulse.pa_context_get_sink_info_by_name)((ma_pa_context*)pDevice->pulse.pPulseContext, devPlayback, ma_device_sink_name_callback, pDevice); + if (pOP != NULL) { + ma_device__wait_for_operation__pulse(pDevice, pOP); + ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP); + } + } + } + + return MA_SUCCESS; + + +on_error7: + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + ((ma_pa_stream_disconnect_proc)pContext->pulse.pa_stream_disconnect)((ma_pa_stream*)pDevice->pulse.pStreamPlayback); + } +on_error6: + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + ((ma_pa_stream_unref_proc)pContext->pulse.pa_stream_unref)((ma_pa_stream*)pDevice->pulse.pStreamPlayback); + } +on_error5: + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + ((ma_pa_stream_disconnect_proc)pContext->pulse.pa_stream_disconnect)((ma_pa_stream*)pDevice->pulse.pStreamCapture); + } +on_error4: + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + ((ma_pa_stream_unref_proc)pContext->pulse.pa_stream_unref)((ma_pa_stream*)pDevice->pulse.pStreamCapture); + } +on_error3: ((ma_pa_context_disconnect_proc)pContext->pulse.pa_context_disconnect)((ma_pa_context*)pDevice->pulse.pPulseContext); +on_error2: ((ma_pa_context_unref_proc)pContext->pulse.pa_context_unref)((ma_pa_context*)pDevice->pulse.pPulseContext); +on_error1: ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)((ma_pa_mainloop*)pDevice->pulse.pMainLoop); +on_error0: + return result; +} + + +static void ma_pulse_operation_complete_callback(ma_pa_stream* pStream, int success, void* pUserData) +{ + ma_bool32* pIsSuccessful = (ma_bool32*)pUserData; + MA_ASSERT(pIsSuccessful != NULL); + + *pIsSuccessful = (ma_bool32)success; + + (void)pStream; /* Unused. */ +} + +static ma_result ma_device__cork_stream__pulse(ma_device* pDevice, ma_device_type deviceType, int cork) +{ + ma_context* pContext = pDevice->pContext; + ma_bool32 wasSuccessful; + ma_pa_stream* pStream; + ma_pa_operation* pOP; + ma_result result; + + /* This should not be called with a duplex device type. */ + if (deviceType == ma_device_type_duplex) { + return MA_INVALID_ARGS; + } + + wasSuccessful = MA_FALSE; + + pStream = (ma_pa_stream*)((deviceType == ma_device_type_capture) ? pDevice->pulse.pStreamCapture : pDevice->pulse.pStreamPlayback); + MA_ASSERT(pStream != NULL); + + pOP = ((ma_pa_stream_cork_proc)pContext->pulse.pa_stream_cork)(pStream, cork, ma_pulse_operation_complete_callback, &wasSuccessful); + if (pOP == NULL) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to cork PulseAudio stream.", (cork == 0) ? 
MA_FAILED_TO_START_BACKEND_DEVICE : MA_FAILED_TO_STOP_BACKEND_DEVICE); + } + + result = ma_device__wait_for_operation__pulse(pDevice, pOP); + ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP); + + if (result != MA_SUCCESS) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] An error occurred while waiting for the PulseAudio stream to cork.", result); + } + + if (!wasSuccessful) { + if (cork) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to stop PulseAudio stream.", MA_FAILED_TO_STOP_BACKEND_DEVICE); + } else { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to start PulseAudio stream.", MA_FAILED_TO_START_BACKEND_DEVICE); + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device_stop__pulse(ma_device* pDevice) +{ + ma_result result; + ma_bool32 wasSuccessful; + ma_pa_operation* pOP; + + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + result = ma_device__cork_stream__pulse(pDevice, ma_device_type_capture, 1); + if (result != MA_SUCCESS) { + return result; + } + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + /* The stream needs to be drained if it's a playback device. */ + pOP = ((ma_pa_stream_drain_proc)pDevice->pContext->pulse.pa_stream_drain)((ma_pa_stream*)pDevice->pulse.pStreamPlayback, ma_pulse_operation_complete_callback, &wasSuccessful); + if (pOP != NULL) { + ma_device__wait_for_operation__pulse(pDevice, pOP); + ((ma_pa_operation_unref_proc)pDevice->pContext->pulse.pa_operation_unref)(pOP); + } + + result = ma_device__cork_stream__pulse(pDevice, ma_device_type_playback, 1); + if (result != MA_SUCCESS) { + return result; + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device_write__pulse(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten) +{ + ma_uint32 totalFramesWritten; + + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pPCMFrames != NULL); + MA_ASSERT(frameCount > 0); + + if (pFramesWritten != NULL) { + *pFramesWritten = 0; + } + + totalFramesWritten = 0; + while (totalFramesWritten < frameCount) { + if (ma_device__get_state(pDevice) != MA_STATE_STARTED) { + return MA_DEVICE_NOT_STARTED; + } + + /* Place the data into the mapped buffer if we have one. */ + if (pDevice->pulse.pMappedBufferPlayback != NULL && pDevice->pulse.mappedBufferFramesRemainingPlayback > 0) { + ma_uint32 bpf = ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + ma_uint32 mappedBufferFramesConsumed = pDevice->pulse.mappedBufferFramesCapacityPlayback - pDevice->pulse.mappedBufferFramesRemainingPlayback; + + void* pDst = (ma_uint8*)pDevice->pulse.pMappedBufferPlayback + (mappedBufferFramesConsumed * bpf); + const void* pSrc = (const ma_uint8*)pPCMFrames + (totalFramesWritten * bpf); + ma_uint32 framesToCopy = ma_min(pDevice->pulse.mappedBufferFramesRemainingPlayback, (frameCount - totalFramesWritten)); + MA_COPY_MEMORY(pDst, pSrc, framesToCopy * bpf); + + pDevice->pulse.mappedBufferFramesRemainingPlayback -= framesToCopy; + totalFramesWritten += framesToCopy; + } + + /* + Getting here means we've run out of data in the currently mapped chunk. We need to write this to the device and then try + mapping another chunk. If this fails we need to wait for space to become available. 
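+        The flushed chunk is committed with pa_stream_write(), after which pa_stream_begin_write() maps a fresh chunk once pa_stream_writable_size() reports space; until then pa_mainloop_iterate() is pumped.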
+ */ + if (pDevice->pulse.mappedBufferFramesCapacityPlayback > 0 && pDevice->pulse.mappedBufferFramesRemainingPlayback == 0) { + size_t nbytes = pDevice->pulse.mappedBufferFramesCapacityPlayback * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + + int error = ((ma_pa_stream_write_proc)pDevice->pContext->pulse.pa_stream_write)((ma_pa_stream*)pDevice->pulse.pStreamPlayback, pDevice->pulse.pMappedBufferPlayback, nbytes, NULL, 0, MA_PA_SEEK_RELATIVE); + if (error < 0) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to write data to the PulseAudio stream.", ma_result_from_pulse(error)); + } + + pDevice->pulse.pMappedBufferPlayback = NULL; + pDevice->pulse.mappedBufferFramesRemainingPlayback = 0; + pDevice->pulse.mappedBufferFramesCapacityPlayback = 0; + } + + MA_ASSERT(totalFramesWritten <= frameCount); + if (totalFramesWritten == frameCount) { + break; + } + + /* Getting here means we need to map a new buffer. If we don't have enough space we need to wait for more. */ + for (;;) { + size_t writableSizeInBytes; + + /* If the device has been corked, don't try to continue. */ + if (((ma_pa_stream_is_corked_proc)pDevice->pContext->pulse.pa_stream_is_corked)((ma_pa_stream*)pDevice->pulse.pStreamPlayback)) { + break; + } + + writableSizeInBytes = ((ma_pa_stream_writable_size_proc)pDevice->pContext->pulse.pa_stream_writable_size)((ma_pa_stream*)pDevice->pulse.pStreamPlayback); + if (writableSizeInBytes != (size_t)-1) { + if (writableSizeInBytes > 0) { + /* Data is avaialable. */ + size_t bytesToMap = writableSizeInBytes; + int error = ((ma_pa_stream_begin_write_proc)pDevice->pContext->pulse.pa_stream_begin_write)((ma_pa_stream*)pDevice->pulse.pStreamPlayback, &pDevice->pulse.pMappedBufferPlayback, &bytesToMap); + if (error < 0) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to map write buffer.", ma_result_from_pulse(error)); + } + + pDevice->pulse.mappedBufferFramesCapacityPlayback = bytesToMap / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + pDevice->pulse.mappedBufferFramesRemainingPlayback = pDevice->pulse.mappedBufferFramesCapacityPlayback; + + break; + } else { + /* No data available. Need to wait for more. */ + int error = ((ma_pa_mainloop_iterate_proc)pDevice->pContext->pulse.pa_mainloop_iterate)((ma_pa_mainloop*)pDevice->pulse.pMainLoop, 1, NULL); + if (error < 0) { + return ma_result_from_pulse(error); + } + + continue; + } + } else { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to query the stream's writable size.", MA_ERROR); + } + } + } + + if (pFramesWritten != NULL) { + *pFramesWritten = totalFramesWritten; + } + + return MA_SUCCESS; +} + +static ma_result ma_device_read__pulse(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead) +{ + ma_uint32 totalFramesRead; + + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pPCMFrames != NULL); + MA_ASSERT(frameCount > 0); + + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + totalFramesRead = 0; + while (totalFramesRead < frameCount) { + if (ma_device__get_state(pDevice) != MA_STATE_STARTED) { + return MA_DEVICE_NOT_STARTED; + } + + /* + If a buffer is mapped we need to read from that first. Once it's consumed we need to drop it. Note that pDevice->pulse.pMappedBufferCapture can be null in which + case it could be a hole. In this case we just write zeros into the output buffer. 
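+        PulseAudio signals a hole by returning a NULL buffer pointer from pa_stream_peek() together with a non-zero size; those frames are filled with silence and then dropped like any other fragment.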
+ */ + if (pDevice->pulse.mappedBufferFramesRemainingCapture > 0) { + ma_uint32 bpf = ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + ma_uint32 mappedBufferFramesConsumed = pDevice->pulse.mappedBufferFramesCapacityCapture - pDevice->pulse.mappedBufferFramesRemainingCapture; + + ma_uint32 framesToCopy = ma_min(pDevice->pulse.mappedBufferFramesRemainingCapture, (frameCount - totalFramesRead)); + void* pDst = (ma_uint8*)pPCMFrames + (totalFramesRead * bpf); + + /* + This little bit of logic here is specifically for PulseAudio and it's hole management. The buffer pointer will be set to NULL + when the current fragment is a hole. For a hole we just output silence. + */ + if (pDevice->pulse.pMappedBufferCapture != NULL) { + const void* pSrc = (const ma_uint8*)pDevice->pulse.pMappedBufferCapture + (mappedBufferFramesConsumed * bpf); + MA_COPY_MEMORY(pDst, pSrc, framesToCopy * bpf); + } else { + MA_ZERO_MEMORY(pDst, framesToCopy * bpf); + #if defined(MA_DEBUG_OUTPUT) + printf("[PulseAudio] ma_device_read__pulse: Filling hole with silence.\n"); + #endif + } + + pDevice->pulse.mappedBufferFramesRemainingCapture -= framesToCopy; + totalFramesRead += framesToCopy; + } + + /* + Getting here means we've run out of data in the currently mapped chunk. We need to drop this from the device and then try + mapping another chunk. If this fails we need to wait for data to become available. + */ + if (pDevice->pulse.mappedBufferFramesCapacityCapture > 0 && pDevice->pulse.mappedBufferFramesRemainingCapture == 0) { + int error; + + #if defined(MA_DEBUG_OUTPUT) + printf("[PulseAudio] ma_device_read__pulse: Call pa_stream_drop()\n"); + #endif + + error = ((ma_pa_stream_drop_proc)pDevice->pContext->pulse.pa_stream_drop)((ma_pa_stream*)pDevice->pulse.pStreamCapture); + if (error != 0) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to drop fragment.", ma_result_from_pulse(error)); + } + + pDevice->pulse.pMappedBufferCapture = NULL; + pDevice->pulse.mappedBufferFramesRemainingCapture = 0; + pDevice->pulse.mappedBufferFramesCapacityCapture = 0; + } + + MA_ASSERT(totalFramesRead <= frameCount); + if (totalFramesRead == frameCount) { + break; + } + + /* Getting here means we need to map a new buffer. If we don't have enough data we wait for more. */ + for (;;) { + int error; + size_t bytesMapped; + + if (ma_device__get_state(pDevice) != MA_STATE_STARTED) { + break; + } + + /* If the device has been corked, don't try to continue. */ + if (((ma_pa_stream_is_corked_proc)pDevice->pContext->pulse.pa_stream_is_corked)((ma_pa_stream*)pDevice->pulse.pStreamCapture)) { + #if defined(MA_DEBUG_OUTPUT) + printf("[PulseAudio] ma_device_read__pulse: Corked.\n"); + #endif + break; + } + + MA_ASSERT(pDevice->pulse.pMappedBufferCapture == NULL); /* <-- We're about to map a buffer which means we shouldn't have an existing mapping. 
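+            pa_stream_peek() below establishes the next mapping (or reports a hole via a NULL buffer pointer).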
*/ + + error = ((ma_pa_stream_peek_proc)pDevice->pContext->pulse.pa_stream_peek)((ma_pa_stream*)pDevice->pulse.pStreamCapture, &pDevice->pulse.pMappedBufferCapture, &bytesMapped); + if (error < 0) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to peek capture buffer.", ma_result_from_pulse(error)); + } + + if (bytesMapped > 0) { + pDevice->pulse.mappedBufferFramesCapacityCapture = bytesMapped / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + pDevice->pulse.mappedBufferFramesRemainingCapture = pDevice->pulse.mappedBufferFramesCapacityCapture; + + #if defined(MA_DEBUG_OUTPUT) + printf("[PulseAudio] ma_device_read__pulse: Mapped. mappedBufferFramesCapacityCapture=%d, mappedBufferFramesRemainingCapture=%d\n", pDevice->pulse.mappedBufferFramesCapacityCapture, pDevice->pulse.mappedBufferFramesRemainingCapture); + #endif + + if (pDevice->pulse.pMappedBufferCapture == NULL) { + /* It's a hole. */ + #if defined(MA_DEBUG_OUTPUT) + printf("[PulseAudio] ma_device_read__pulse: Call pa_stream_peek(). Hole.\n"); + #endif + } + + break; + } else { + if (pDevice->pulse.pMappedBufferCapture == NULL) { + /* Nothing available yet. Need to wait for more. */ + + /* + I have had reports of a deadlock in this part of the code. I have reproduced this when using the "Built-in Audio Analogue Stereo" device without + an actual microphone connected. I'm experimenting here by not blocking in pa_mainloop_iterate() and instead sleep for a bit when there are no + dispatches. + */ + error = ((ma_pa_mainloop_iterate_proc)pDevice->pContext->pulse.pa_mainloop_iterate)((ma_pa_mainloop*)pDevice->pulse.pMainLoop, 0, NULL); + if (error < 0) { + return ma_result_from_pulse(error); + } + + /* Sleep for a bit if nothing was dispatched. */ + if (error == 0) { + ma_sleep(1); + } + + #if defined(MA_DEBUG_OUTPUT) + printf("[PulseAudio] ma_device_read__pulse: No data available. Waiting. mappedBufferFramesCapacityCapture=%d, mappedBufferFramesRemainingCapture=%d\n", pDevice->pulse.mappedBufferFramesCapacityCapture, pDevice->pulse.mappedBufferFramesRemainingCapture); + #endif + } else { + /* Getting here means we mapped 0 bytes, but have a non-NULL buffer. I don't think this should ever happen. */ + MA_ASSERT(MA_FALSE); + } + } + } + } + + if (pFramesRead != NULL) { + *pFramesRead = totalFramesRead; + } + + return MA_SUCCESS; +} + +static ma_result ma_device_main_loop__pulse(ma_device* pDevice) +{ + ma_result result = MA_SUCCESS; + ma_bool32 exitLoop = MA_FALSE; + + MA_ASSERT(pDevice != NULL); + + /* The stream needs to be uncorked first. We do this at the top for both capture and playback for PulseAudio. 
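+    Uncorking is done through ma_device__cork_stream__pulse() with cork set to 0; ma_device_stop__pulse() later drains the playback stream and corks the streams again.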
*/ + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + result = ma_device__cork_stream__pulse(pDevice, ma_device_type_capture, 0); + if (result != MA_SUCCESS) { + return result; + } + } + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + result = ma_device__cork_stream__pulse(pDevice, ma_device_type_playback, 0); + if (result != MA_SUCCESS) { + return result; + } + } + + + while (ma_device__get_state(pDevice) == MA_STATE_STARTED && !exitLoop) { + switch (pDevice->type) + { + case ma_device_type_duplex: + { + /* The process is: device_read -> convert -> callback -> convert -> device_write */ + ma_uint32 totalCapturedDeviceFramesProcessed = 0; + ma_uint32 capturedDevicePeriodSizeInFrames = ma_min(pDevice->capture.internalPeriodSizeInFrames, pDevice->playback.internalPeriodSizeInFrames); + + while (totalCapturedDeviceFramesProcessed < capturedDevicePeriodSizeInFrames) { + ma_uint8 capturedDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint8 playbackDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 capturedDeviceDataCapInFrames = sizeof(capturedDeviceData) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + ma_uint32 playbackDeviceDataCapInFrames = sizeof(playbackDeviceData) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + ma_uint32 capturedDeviceFramesRemaining; + ma_uint32 capturedDeviceFramesProcessed; + ma_uint32 capturedDeviceFramesToProcess; + ma_uint32 capturedDeviceFramesToTryProcessing = capturedDevicePeriodSizeInFrames - totalCapturedDeviceFramesProcessed; + if (capturedDeviceFramesToTryProcessing > capturedDeviceDataCapInFrames) { + capturedDeviceFramesToTryProcessing = capturedDeviceDataCapInFrames; + } + + result = ma_device_read__pulse(pDevice, capturedDeviceData, capturedDeviceFramesToTryProcessing, &capturedDeviceFramesToProcess); + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + + capturedDeviceFramesRemaining = capturedDeviceFramesToProcess; + capturedDeviceFramesProcessed = 0; + + for (;;) { + ma_uint8 capturedClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint8 playbackClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 capturedClientDataCapInFrames = sizeof(capturedClientData) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); + ma_uint32 playbackClientDataCapInFrames = sizeof(playbackClientData) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); + ma_uint64 capturedClientFramesToProcessThisIteration = ma_min(capturedClientDataCapInFrames, playbackClientDataCapInFrames); + ma_uint64 capturedDeviceFramesToProcessThisIteration = capturedDeviceFramesRemaining; + ma_uint8* pRunningCapturedDeviceFrames = ma_offset_ptr(capturedDeviceData, capturedDeviceFramesProcessed * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels)); + + /* Convert capture data from device format to client format. */ + result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningCapturedDeviceFrames, &capturedDeviceFramesToProcessThisIteration, capturedClientData, &capturedClientFramesToProcessThisIteration); + if (result != MA_SUCCESS) { + break; + } + + /* + If we weren't able to generate any output frames it must mean we've exhaused all of our input. 
The only time this would not be the case is if capturedClientData was too small + which should never be the case when it's of the size MA_DATA_CONVERTER_STACK_BUFFER_SIZE. + */ + if (capturedClientFramesToProcessThisIteration == 0) { + break; + } + + ma_device__on_data(pDevice, playbackClientData, capturedClientData, (ma_uint32)capturedClientFramesToProcessThisIteration); /* Safe cast .*/ + + capturedDeviceFramesProcessed += (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */ + capturedDeviceFramesRemaining -= (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */ + + /* At this point the playbackClientData buffer should be holding data that needs to be written to the device. */ + for (;;) { + ma_uint64 convertedClientFrameCount = capturedClientFramesToProcessThisIteration; + ma_uint64 convertedDeviceFrameCount = playbackDeviceDataCapInFrames; + result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, playbackClientData, &convertedClientFrameCount, playbackDeviceData, &convertedDeviceFrameCount); + if (result != MA_SUCCESS) { + break; + } + + result = ma_device_write__pulse(pDevice, playbackDeviceData, (ma_uint32)convertedDeviceFrameCount, NULL); /* Safe cast. */ + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + + capturedClientFramesToProcessThisIteration -= (ma_uint32)convertedClientFrameCount; /* Safe cast. */ + if (capturedClientFramesToProcessThisIteration == 0) { + break; + } + } + + /* In case an error happened from ma_device_write__pulse()... */ + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + } + + totalCapturedDeviceFramesProcessed += capturedDeviceFramesProcessed; + } + } break; + + case ma_device_type_capture: + { + ma_uint8 intermediaryBuffer[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + ma_uint32 periodSizeInFrames = pDevice->capture.internalPeriodSizeInFrames; + ma_uint32 framesReadThisPeriod = 0; + while (framesReadThisPeriod < periodSizeInFrames) { + ma_uint32 framesRemainingInPeriod = periodSizeInFrames - framesReadThisPeriod; + ma_uint32 framesProcessed; + ma_uint32 framesToReadThisIteration = framesRemainingInPeriod; + if (framesToReadThisIteration > intermediaryBufferSizeInFrames) { + framesToReadThisIteration = intermediaryBufferSizeInFrames; + } + + result = ma_device_read__pulse(pDevice, intermediaryBuffer, framesToReadThisIteration, &framesProcessed); + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + + ma_device__send_frames_to_client(pDevice, framesProcessed, intermediaryBuffer); + + framesReadThisPeriod += framesProcessed; + } + } break; + + case ma_device_type_playback: + { + ma_uint8 intermediaryBuffer[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + ma_uint32 periodSizeInFrames = pDevice->playback.internalPeriodSizeInFrames; + ma_uint32 framesWrittenThisPeriod = 0; + while (framesWrittenThisPeriod < periodSizeInFrames) { + ma_uint32 framesRemainingInPeriod = periodSizeInFrames - framesWrittenThisPeriod; + ma_uint32 framesProcessed; + ma_uint32 framesToWriteThisIteration = framesRemainingInPeriod; + if (framesToWriteThisIteration > intermediaryBufferSizeInFrames) { + framesToWriteThisIteration = intermediaryBufferSizeInFrames; + } + + 
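+                    /* Pull frames from the client via the data callback, then write them to the device below. */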
ma_device__read_frames_from_client(pDevice, framesToWriteThisIteration, intermediaryBuffer); + + result = ma_device_write__pulse(pDevice, intermediaryBuffer, framesToWriteThisIteration, &framesProcessed); + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + + framesWrittenThisPeriod += framesProcessed; + } + } break; + + /* To silence a warning. Will never hit this. */ + case ma_device_type_loopback: + default: break; + } + } + + /* Here is where the device needs to be stopped. */ + ma_device_stop__pulse(pDevice); + + return result; +} + + +static ma_result ma_context_uninit__pulse(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_pulseaudio); + + ma_free(pContext->pulse.pServerName, &pContext->allocationCallbacks); + pContext->pulse.pServerName = NULL; + + ma_free(pContext->pulse.pApplicationName, &pContext->allocationCallbacks); + pContext->pulse.pApplicationName = NULL; + +#ifndef MA_NO_RUNTIME_LINKING + ma_dlclose(pContext, pContext->pulse.pulseSO); +#endif + + return MA_SUCCESS; +} + +static ma_result ma_context_init__pulse(const ma_context_config* pConfig, ma_context* pContext) +{ +#ifndef MA_NO_RUNTIME_LINKING + const char* libpulseNames[] = { + "libpulse.so", + "libpulse.so.0" + }; + size_t i; + + for (i = 0; i < ma_countof(libpulseNames); ++i) { + pContext->pulse.pulseSO = ma_dlopen(pContext, libpulseNames[i]); + if (pContext->pulse.pulseSO != NULL) { + break; + } + } + + if (pContext->pulse.pulseSO == NULL) { + return MA_NO_BACKEND; + } + + pContext->pulse.pa_mainloop_new = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_mainloop_new"); + pContext->pulse.pa_mainloop_free = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_mainloop_free"); + pContext->pulse.pa_mainloop_get_api = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_mainloop_get_api"); + pContext->pulse.pa_mainloop_iterate = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_mainloop_iterate"); + pContext->pulse.pa_mainloop_wakeup = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_mainloop_wakeup"); + pContext->pulse.pa_context_new = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_new"); + pContext->pulse.pa_context_unref = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_unref"); + pContext->pulse.pa_context_connect = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_connect"); + pContext->pulse.pa_context_disconnect = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_disconnect"); + pContext->pulse.pa_context_set_state_callback = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_set_state_callback"); + pContext->pulse.pa_context_get_state = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_get_state"); + pContext->pulse.pa_context_get_sink_info_list = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_get_sink_info_list"); + pContext->pulse.pa_context_get_source_info_list = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_get_source_info_list"); + pContext->pulse.pa_context_get_sink_info_by_name = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_get_sink_info_by_name"); + pContext->pulse.pa_context_get_source_info_by_name = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_get_source_info_by_name"); + pContext->pulse.pa_operation_unref = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_operation_unref"); + pContext->pulse.pa_operation_get_state = 
(ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_operation_get_state"); + pContext->pulse.pa_channel_map_init_extend = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_channel_map_init_extend"); + pContext->pulse.pa_channel_map_valid = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_channel_map_valid"); + pContext->pulse.pa_channel_map_compatible = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_channel_map_compatible"); + pContext->pulse.pa_stream_new = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_new"); + pContext->pulse.pa_stream_unref = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_unref"); + pContext->pulse.pa_stream_connect_playback = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_connect_playback"); + pContext->pulse.pa_stream_connect_record = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_connect_record"); + pContext->pulse.pa_stream_disconnect = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_disconnect"); + pContext->pulse.pa_stream_get_state = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_get_state"); + pContext->pulse.pa_stream_get_sample_spec = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_get_sample_spec"); + pContext->pulse.pa_stream_get_channel_map = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_get_channel_map"); + pContext->pulse.pa_stream_get_buffer_attr = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_get_buffer_attr"); + pContext->pulse.pa_stream_set_buffer_attr = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_set_buffer_attr"); + pContext->pulse.pa_stream_get_device_name = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_get_device_name"); + pContext->pulse.pa_stream_set_write_callback = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_set_write_callback"); + pContext->pulse.pa_stream_set_read_callback = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_set_read_callback"); + pContext->pulse.pa_stream_flush = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_flush"); + pContext->pulse.pa_stream_drain = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_drain"); + pContext->pulse.pa_stream_is_corked = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_is_corked"); + pContext->pulse.pa_stream_cork = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_cork"); + pContext->pulse.pa_stream_trigger = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_trigger"); + pContext->pulse.pa_stream_begin_write = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_begin_write"); + pContext->pulse.pa_stream_write = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_write"); + pContext->pulse.pa_stream_peek = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_peek"); + pContext->pulse.pa_stream_drop = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_drop"); + pContext->pulse.pa_stream_writable_size = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_writable_size"); + pContext->pulse.pa_stream_readable_size = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_readable_size"); +#else + /* This strange assignment system is just for type safety. 
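+    Each pa_* function is first assigned to a local with the correct typedef so the compiler verifies the signature before it is stored as a generic ma_proc.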
*/ + ma_pa_mainloop_new_proc _pa_mainloop_new = pa_mainloop_new; + ma_pa_mainloop_free_proc _pa_mainloop_free = pa_mainloop_free; + ma_pa_mainloop_get_api_proc _pa_mainloop_get_api = pa_mainloop_get_api; + ma_pa_mainloop_iterate_proc _pa_mainloop_iterate = pa_mainloop_iterate; + ma_pa_mainloop_wakeup_proc _pa_mainloop_wakeup = pa_mainloop_wakeup; + ma_pa_context_new_proc _pa_context_new = pa_context_new; + ma_pa_context_unref_proc _pa_context_unref = pa_context_unref; + ma_pa_context_connect_proc _pa_context_connect = pa_context_connect; + ma_pa_context_disconnect_proc _pa_context_disconnect = pa_context_disconnect; + ma_pa_context_set_state_callback_proc _pa_context_set_state_callback = pa_context_set_state_callback; + ma_pa_context_get_state_proc _pa_context_get_state = pa_context_get_state; + ma_pa_context_get_sink_info_list_proc _pa_context_get_sink_info_list = pa_context_get_sink_info_list; + ma_pa_context_get_source_info_list_proc _pa_context_get_source_info_list = pa_context_get_source_info_list; + ma_pa_context_get_sink_info_by_name_proc _pa_context_get_sink_info_by_name = pa_context_get_sink_info_by_name; + ma_pa_context_get_source_info_by_name_proc _pa_context_get_source_info_by_name= pa_context_get_source_info_by_name; + ma_pa_operation_unref_proc _pa_operation_unref = pa_operation_unref; + ma_pa_operation_get_state_proc _pa_operation_get_state = pa_operation_get_state; + ma_pa_channel_map_init_extend_proc _pa_channel_map_init_extend = pa_channel_map_init_extend; + ma_pa_channel_map_valid_proc _pa_channel_map_valid = pa_channel_map_valid; + ma_pa_channel_map_compatible_proc _pa_channel_map_compatible = pa_channel_map_compatible; + ma_pa_stream_new_proc _pa_stream_new = pa_stream_new; + ma_pa_stream_unref_proc _pa_stream_unref = pa_stream_unref; + ma_pa_stream_connect_playback_proc _pa_stream_connect_playback = pa_stream_connect_playback; + ma_pa_stream_connect_record_proc _pa_stream_connect_record = pa_stream_connect_record; + ma_pa_stream_disconnect_proc _pa_stream_disconnect = pa_stream_disconnect; + ma_pa_stream_get_state_proc _pa_stream_get_state = pa_stream_get_state; + ma_pa_stream_get_sample_spec_proc _pa_stream_get_sample_spec = pa_stream_get_sample_spec; + ma_pa_stream_get_channel_map_proc _pa_stream_get_channel_map = pa_stream_get_channel_map; + ma_pa_stream_get_buffer_attr_proc _pa_stream_get_buffer_attr = pa_stream_get_buffer_attr; + ma_pa_stream_set_buffer_attr_proc _pa_stream_set_buffer_attr = pa_stream_set_buffer_attr; + ma_pa_stream_get_device_name_proc _pa_stream_get_device_name = pa_stream_get_device_name; + ma_pa_stream_set_write_callback_proc _pa_stream_set_write_callback = pa_stream_set_write_callback; + ma_pa_stream_set_read_callback_proc _pa_stream_set_read_callback = pa_stream_set_read_callback; + ma_pa_stream_flush_proc _pa_stream_flush = pa_stream_flush; + ma_pa_stream_drain_proc _pa_stream_drain = pa_stream_drain; + ma_pa_stream_is_corked_proc _pa_stream_is_corked = pa_stream_is_corked; + ma_pa_stream_cork_proc _pa_stream_cork = pa_stream_cork; + ma_pa_stream_trigger_proc _pa_stream_trigger = pa_stream_trigger; + ma_pa_stream_begin_write_proc _pa_stream_begin_write = pa_stream_begin_write; + ma_pa_stream_write_proc _pa_stream_write = pa_stream_write; + ma_pa_stream_peek_proc _pa_stream_peek = pa_stream_peek; + ma_pa_stream_drop_proc _pa_stream_drop = pa_stream_drop; + ma_pa_stream_writable_size_proc _pa_stream_writable_size = pa_stream_writable_size; + ma_pa_stream_readable_size_proc _pa_stream_readable_size = pa_stream_readable_size; + + 
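+    /* Store the type-checked pointers on the context as generic ma_proc handles. */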
pContext->pulse.pa_mainloop_new = (ma_proc)_pa_mainloop_new; + pContext->pulse.pa_mainloop_free = (ma_proc)_pa_mainloop_free; + pContext->pulse.pa_mainloop_get_api = (ma_proc)_pa_mainloop_get_api; + pContext->pulse.pa_mainloop_iterate = (ma_proc)_pa_mainloop_iterate; + pContext->pulse.pa_mainloop_wakeup = (ma_proc)_pa_mainloop_wakeup; + pContext->pulse.pa_context_new = (ma_proc)_pa_context_new; + pContext->pulse.pa_context_unref = (ma_proc)_pa_context_unref; + pContext->pulse.pa_context_connect = (ma_proc)_pa_context_connect; + pContext->pulse.pa_context_disconnect = (ma_proc)_pa_context_disconnect; + pContext->pulse.pa_context_set_state_callback = (ma_proc)_pa_context_set_state_callback; + pContext->pulse.pa_context_get_state = (ma_proc)_pa_context_get_state; + pContext->pulse.pa_context_get_sink_info_list = (ma_proc)_pa_context_get_sink_info_list; + pContext->pulse.pa_context_get_source_info_list = (ma_proc)_pa_context_get_source_info_list; + pContext->pulse.pa_context_get_sink_info_by_name = (ma_proc)_pa_context_get_sink_info_by_name; + pContext->pulse.pa_context_get_source_info_by_name = (ma_proc)_pa_context_get_source_info_by_name; + pContext->pulse.pa_operation_unref = (ma_proc)_pa_operation_unref; + pContext->pulse.pa_operation_get_state = (ma_proc)_pa_operation_get_state; + pContext->pulse.pa_channel_map_init_extend = (ma_proc)_pa_channel_map_init_extend; + pContext->pulse.pa_channel_map_valid = (ma_proc)_pa_channel_map_valid; + pContext->pulse.pa_channel_map_compatible = (ma_proc)_pa_channel_map_compatible; + pContext->pulse.pa_stream_new = (ma_proc)_pa_stream_new; + pContext->pulse.pa_stream_unref = (ma_proc)_pa_stream_unref; + pContext->pulse.pa_stream_connect_playback = (ma_proc)_pa_stream_connect_playback; + pContext->pulse.pa_stream_connect_record = (ma_proc)_pa_stream_connect_record; + pContext->pulse.pa_stream_disconnect = (ma_proc)_pa_stream_disconnect; + pContext->pulse.pa_stream_get_state = (ma_proc)_pa_stream_get_state; + pContext->pulse.pa_stream_get_sample_spec = (ma_proc)_pa_stream_get_sample_spec; + pContext->pulse.pa_stream_get_channel_map = (ma_proc)_pa_stream_get_channel_map; + pContext->pulse.pa_stream_get_buffer_attr = (ma_proc)_pa_stream_get_buffer_attr; + pContext->pulse.pa_stream_set_buffer_attr = (ma_proc)_pa_stream_set_buffer_attr; + pContext->pulse.pa_stream_get_device_name = (ma_proc)_pa_stream_get_device_name; + pContext->pulse.pa_stream_set_write_callback = (ma_proc)_pa_stream_set_write_callback; + pContext->pulse.pa_stream_set_read_callback = (ma_proc)_pa_stream_set_read_callback; + pContext->pulse.pa_stream_flush = (ma_proc)_pa_stream_flush; + pContext->pulse.pa_stream_drain = (ma_proc)_pa_stream_drain; + pContext->pulse.pa_stream_is_corked = (ma_proc)_pa_stream_is_corked; + pContext->pulse.pa_stream_cork = (ma_proc)_pa_stream_cork; + pContext->pulse.pa_stream_trigger = (ma_proc)_pa_stream_trigger; + pContext->pulse.pa_stream_begin_write = (ma_proc)_pa_stream_begin_write; + pContext->pulse.pa_stream_write = (ma_proc)_pa_stream_write; + pContext->pulse.pa_stream_peek = (ma_proc)_pa_stream_peek; + pContext->pulse.pa_stream_drop = (ma_proc)_pa_stream_drop; + pContext->pulse.pa_stream_writable_size = (ma_proc)_pa_stream_writable_size; + pContext->pulse.pa_stream_readable_size = (ma_proc)_pa_stream_readable_size; +#endif + + pContext->onUninit = ma_context_uninit__pulse; + pContext->onDeviceIDEqual = ma_context_is_device_id_equal__pulse; + pContext->onEnumDevices = ma_context_enumerate_devices__pulse; + pContext->onGetDeviceInfo = 
ma_context_get_device_info__pulse; + pContext->onDeviceInit = ma_device_init__pulse; + pContext->onDeviceUninit = ma_device_uninit__pulse; + pContext->onDeviceStart = NULL; + pContext->onDeviceStop = NULL; + pContext->onDeviceMainLoop = ma_device_main_loop__pulse; + + if (pConfig->pulse.pApplicationName) { + pContext->pulse.pApplicationName = ma_copy_string(pConfig->pulse.pApplicationName, &pContext->allocationCallbacks); + } + if (pConfig->pulse.pServerName) { + pContext->pulse.pServerName = ma_copy_string(pConfig->pulse.pServerName, &pContext->allocationCallbacks); + } + pContext->pulse.tryAutoSpawn = pConfig->pulse.tryAutoSpawn; + + /* + Although we have found the libpulse library, it doesn't necessarily mean PulseAudio is useable. We need to initialize + and connect a dummy PulseAudio context to test PulseAudio's usability. + */ + { + ma_pa_mainloop* pMainLoop; + ma_pa_mainloop_api* pAPI; + ma_pa_context* pPulseContext; + int error; + + pMainLoop = ((ma_pa_mainloop_new_proc)pContext->pulse.pa_mainloop_new)(); + if (pMainLoop == NULL) { + ma_free(pContext->pulse.pServerName, &pContext->allocationCallbacks); + ma_free(pContext->pulse.pApplicationName, &pContext->allocationCallbacks); + #ifndef MA_NO_RUNTIME_LINKING + ma_dlclose(pContext, pContext->pulse.pulseSO); + #endif + return MA_NO_BACKEND; + } + + pAPI = ((ma_pa_mainloop_get_api_proc)pContext->pulse.pa_mainloop_get_api)(pMainLoop); + if (pAPI == NULL) { + ma_free(pContext->pulse.pServerName, &pContext->allocationCallbacks); + ma_free(pContext->pulse.pApplicationName, &pContext->allocationCallbacks); + ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop); + #ifndef MA_NO_RUNTIME_LINKING + ma_dlclose(pContext, pContext->pulse.pulseSO); + #endif + return MA_NO_BACKEND; + } + + pPulseContext = ((ma_pa_context_new_proc)pContext->pulse.pa_context_new)(pAPI, pContext->pulse.pApplicationName); + if (pPulseContext == NULL) { + ma_free(pContext->pulse.pServerName, &pContext->allocationCallbacks); + ma_free(pContext->pulse.pApplicationName, &pContext->allocationCallbacks); + ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop); + #ifndef MA_NO_RUNTIME_LINKING + ma_dlclose(pContext, pContext->pulse.pulseSO); + #endif + return MA_NO_BACKEND; + } + + error = ((ma_pa_context_connect_proc)pContext->pulse.pa_context_connect)(pPulseContext, pContext->pulse.pServerName, 0, NULL); + if (error != MA_PA_OK) { + ma_free(pContext->pulse.pServerName, &pContext->allocationCallbacks); + ma_free(pContext->pulse.pApplicationName, &pContext->allocationCallbacks); + ((ma_pa_context_unref_proc)pContext->pulse.pa_context_unref)(pPulseContext); + ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop); + #ifndef MA_NO_RUNTIME_LINKING + ma_dlclose(pContext, pContext->pulse.pulseSO); + #endif + return MA_NO_BACKEND; + } + + ((ma_pa_context_disconnect_proc)pContext->pulse.pa_context_disconnect)(pPulseContext); + ((ma_pa_context_unref_proc)pContext->pulse.pa_context_unref)(pPulseContext); + ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop); + } + + return MA_SUCCESS; +} +#endif + + +/****************************************************************************** + +JACK Backend + +******************************************************************************/ +#ifdef MA_HAS_JACK + +/* It is assumed jack.h is available when compile-time linking is being used. 
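+   Compile-time linking therefore requires building with MA_NO_RUNTIME_LINKING defined and linking against JACK; otherwise the minimal declarations below stand in for the real headers and the symbols are resolved at runtime.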
 */
+#ifdef MA_NO_RUNTIME_LINKING
+#include <jack/jack.h>
+
+typedef jack_nframes_t ma_jack_nframes_t;
+typedef jack_options_t ma_jack_options_t;
+typedef jack_status_t ma_jack_status_t;
+typedef jack_client_t ma_jack_client_t;
+typedef jack_port_t ma_jack_port_t;
+typedef JackProcessCallback ma_JackProcessCallback;
+typedef JackBufferSizeCallback ma_JackBufferSizeCallback;
+typedef JackShutdownCallback ma_JackShutdownCallback;
+#define MA_JACK_DEFAULT_AUDIO_TYPE JACK_DEFAULT_AUDIO_TYPE
+#define ma_JackNoStartServer JackNoStartServer
+#define ma_JackPortIsInput JackPortIsInput
+#define ma_JackPortIsOutput JackPortIsOutput
+#define ma_JackPortIsPhysical JackPortIsPhysical
+#else
+typedef ma_uint32 ma_jack_nframes_t;
+typedef int ma_jack_options_t;
+typedef int ma_jack_status_t;
+typedef struct ma_jack_client_t ma_jack_client_t;
+typedef struct ma_jack_port_t ma_jack_port_t;
+typedef int (* ma_JackProcessCallback) (ma_jack_nframes_t nframes, void* arg);
+typedef int (* ma_JackBufferSizeCallback)(ma_jack_nframes_t nframes, void* arg);
+typedef void (* ma_JackShutdownCallback) (void* arg);
+#define MA_JACK_DEFAULT_AUDIO_TYPE "32 bit float mono audio"
+#define ma_JackNoStartServer 1
+#define ma_JackPortIsInput 1
+#define ma_JackPortIsOutput 2
+#define ma_JackPortIsPhysical 4
+#endif
+
+typedef ma_jack_client_t* (* ma_jack_client_open_proc) (const char* client_name, ma_jack_options_t options, ma_jack_status_t* status, ...);
+typedef int (* ma_jack_client_close_proc) (ma_jack_client_t* client);
+typedef int (* ma_jack_client_name_size_proc) ();
+typedef int (* ma_jack_set_process_callback_proc) (ma_jack_client_t* client, ma_JackProcessCallback process_callback, void* arg);
+typedef int (* ma_jack_set_buffer_size_callback_proc)(ma_jack_client_t* client, ma_JackBufferSizeCallback bufsize_callback, void* arg);
+typedef void (* ma_jack_on_shutdown_proc) (ma_jack_client_t* client, ma_JackShutdownCallback function, void* arg);
+typedef ma_jack_nframes_t (* ma_jack_get_sample_rate_proc) (ma_jack_client_t* client);
+typedef ma_jack_nframes_t (* ma_jack_get_buffer_size_proc) (ma_jack_client_t* client);
+typedef const char** (* ma_jack_get_ports_proc) (ma_jack_client_t* client, const char* port_name_pattern, const char* type_name_pattern, unsigned long flags);
+typedef int (* ma_jack_activate_proc) (ma_jack_client_t* client);
+typedef int (* ma_jack_deactivate_proc) (ma_jack_client_t* client);
+typedef int (* ma_jack_connect_proc) (ma_jack_client_t* client, const char* source_port, const char* destination_port);
+typedef ma_jack_port_t* (* ma_jack_port_register_proc) (ma_jack_client_t* client, const char* port_name, const char* port_type, unsigned long flags, unsigned long buffer_size);
+typedef const char* (* ma_jack_port_name_proc) (const ma_jack_port_t* port);
+typedef void* (* ma_jack_port_get_buffer_proc) (ma_jack_port_t* port, ma_jack_nframes_t nframes);
+typedef void (* ma_jack_free_proc) (void* ptr);
+
+static ma_result ma_context_open_client__jack(ma_context* pContext, ma_jack_client_t** ppClient)
+{
+    size_t maxClientNameSize;
+    char clientName[256];
+    ma_jack_status_t status;
+    ma_jack_client_t* pClient;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(ppClient != NULL);
+
+    if (ppClient) {
+        *ppClient = NULL;
+    }
+
+    maxClientNameSize = ((ma_jack_client_name_size_proc)pContext->jack.jack_client_name_size)(); /* Includes null terminator. */
+    ma_strncpy_s(clientName, ma_min(sizeof(clientName), maxClientNameSize), (pContext->jack.pClientName != NULL) ?
pContext->jack.pClientName : "miniaudio", (size_t)-1); + + pClient = ((ma_jack_client_open_proc)pContext->jack.jack_client_open)(clientName, (pContext->jack.tryStartServer) ? 0 : ma_JackNoStartServer, &status, NULL); + if (pClient == NULL) { + return MA_FAILED_TO_OPEN_BACKEND_DEVICE; + } + + if (ppClient) { + *ppClient = pClient; + } + + return MA_SUCCESS; +} + +static ma_bool32 ma_context_is_device_id_equal__jack(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pID0 != NULL); + MA_ASSERT(pID1 != NULL); + (void)pContext; + + return pID0->jack == pID1->jack; +} + +static ma_result ma_context_enumerate_devices__jack(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + ma_bool32 cbResult = MA_TRUE; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + /* Playback. */ + if (cbResult) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + } + + /* Capture. */ + if (cbResult) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + } + + return MA_SUCCESS; +} + +static ma_result ma_context_get_device_info__jack(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) +{ + ma_jack_client_t* pClient; + ma_result result; + const char** ppPorts; + + MA_ASSERT(pContext != NULL); + + /* No exclusive mode with the JACK backend. */ + if (shareMode == ma_share_mode_exclusive) { + return MA_SHARE_MODE_NOT_SUPPORTED; + } + + if (pDeviceID != NULL && pDeviceID->jack != 0) { + return MA_NO_DEVICE; /* Don't know the device. */ + } + + /* Name / Description */ + if (deviceType == ma_device_type_playback) { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + } else { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + } + + /* Jack only supports f32 and has a specific channel count and sample rate. */ + pDeviceInfo->formatCount = 1; + pDeviceInfo->formats[0] = ma_format_f32; + + /* The channel count and sample rate can only be determined by opening the device. */ + result = ma_context_open_client__jack(pContext, &pClient); + if (result != MA_SUCCESS) { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[JACK] Failed to open client.", result); + } + + pDeviceInfo->minSampleRate = ((ma_jack_get_sample_rate_proc)pContext->jack.jack_get_sample_rate)((ma_jack_client_t*)pClient); + pDeviceInfo->maxSampleRate = pDeviceInfo->minSampleRate; + + pDeviceInfo->minChannels = 0; + pDeviceInfo->maxChannels = 0; + + ppPorts = ((ma_jack_get_ports_proc)pContext->jack.jack_get_ports)((ma_jack_client_t*)pClient, NULL, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsPhysical | ((deviceType == ma_device_type_playback) ? 
ma_JackPortIsInput : ma_JackPortIsOutput)); + if (ppPorts == NULL) { + ((ma_jack_client_close_proc)pContext->jack.jack_client_close)((ma_jack_client_t*)pClient); + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[JACK] Failed to query physical ports.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); + } + + while (ppPorts[pDeviceInfo->minChannels] != NULL) { + pDeviceInfo->minChannels += 1; + pDeviceInfo->maxChannels += 1; + } + + ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppPorts); + ((ma_jack_client_close_proc)pContext->jack.jack_client_close)((ma_jack_client_t*)pClient); + + (void)pContext; + return MA_SUCCESS; +} + + +static void ma_device_uninit__jack(ma_device* pDevice) +{ + ma_context* pContext; + + MA_ASSERT(pDevice != NULL); + + pContext = pDevice->pContext; + MA_ASSERT(pContext != NULL); + + if (pDevice->jack.pClient != NULL) { + ((ma_jack_client_close_proc)pContext->jack.jack_client_close)((ma_jack_client_t*)pDevice->jack.pClient); + } + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + ma__free_from_callbacks(pDevice->jack.pIntermediaryBufferCapture, &pDevice->pContext->allocationCallbacks); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ma__free_from_callbacks(pDevice->jack.pIntermediaryBufferPlayback, &pDevice->pContext->allocationCallbacks); + } + + if (pDevice->type == ma_device_type_duplex) { + ma_pcm_rb_uninit(&pDevice->jack.duplexRB); + } +} + +static void ma_device__jack_shutdown_callback(void* pUserData) +{ + /* JACK died. Stop the device. */ + ma_device* pDevice = (ma_device*)pUserData; + MA_ASSERT(pDevice != NULL); + + ma_device_stop(pDevice); +} + +static int ma_device__jack_buffer_size_callback(ma_jack_nframes_t frameCount, void* pUserData) +{ + ma_device* pDevice = (ma_device*)pUserData; + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + size_t newBufferSize = frameCount * (pDevice->capture.internalChannels * ma_get_bytes_per_sample(pDevice->capture.internalFormat)); + float* pNewBuffer = (float*)ma__calloc_from_callbacks(newBufferSize, &pDevice->pContext->allocationCallbacks); + if (pNewBuffer == NULL) { + return MA_OUT_OF_MEMORY; + } + + ma__free_from_callbacks(pDevice->jack.pIntermediaryBufferCapture, &pDevice->pContext->allocationCallbacks); + + pDevice->jack.pIntermediaryBufferCapture = pNewBuffer; + pDevice->playback.internalPeriodSizeInFrames = frameCount; + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + size_t newBufferSize = frameCount * (pDevice->playback.internalChannels * ma_get_bytes_per_sample(pDevice->playback.internalFormat)); + float* pNewBuffer = (float*)ma__calloc_from_callbacks(newBufferSize, &pDevice->pContext->allocationCallbacks); + if (pNewBuffer == NULL) { + return MA_OUT_OF_MEMORY; + } + + ma__free_from_callbacks(pDevice->jack.pIntermediaryBufferPlayback, &pDevice->pContext->allocationCallbacks); + + pDevice->jack.pIntermediaryBufferPlayback = pNewBuffer; + pDevice->playback.internalPeriodSizeInFrames = frameCount; + } + + return 0; +} + +static int ma_device__jack_process_callback(ma_jack_nframes_t frameCount, void* pUserData) +{ + ma_device* pDevice; + ma_context* pContext; + ma_uint32 iChannel; + + pDevice = (ma_device*)pUserData; + MA_ASSERT(pDevice != NULL); + + pContext = pDevice->pContext; + MA_ASSERT(pContext != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == 
ma_device_type_duplex) { + /* Channels need to be interleaved. */ + for (iChannel = 0; iChannel < pDevice->capture.internalChannels; ++iChannel) { + const float* pSrc = (const float*)((ma_jack_port_get_buffer_proc)pContext->jack.jack_port_get_buffer)((ma_jack_port_t*)pDevice->jack.pPortsCapture[iChannel], frameCount); + if (pSrc != NULL) { + float* pDst = pDevice->jack.pIntermediaryBufferCapture + iChannel; + ma_jack_nframes_t iFrame; + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + *pDst = *pSrc; + + pDst += pDevice->capture.internalChannels; + pSrc += 1; + } + } + } + + if (pDevice->type == ma_device_type_duplex) { + ma_device__handle_duplex_callback_capture(pDevice, frameCount, pDevice->jack.pIntermediaryBufferCapture, &pDevice->jack.duplexRB); + } else { + ma_device__send_frames_to_client(pDevice, frameCount, pDevice->jack.pIntermediaryBufferCapture); + } + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + if (pDevice->type == ma_device_type_duplex) { + ma_device__handle_duplex_callback_playback(pDevice, frameCount, pDevice->jack.pIntermediaryBufferPlayback, &pDevice->jack.duplexRB); + } else { + ma_device__read_frames_from_client(pDevice, frameCount, pDevice->jack.pIntermediaryBufferPlayback); + } + + /* Channels need to be deinterleaved. */ + for (iChannel = 0; iChannel < pDevice->playback.internalChannels; ++iChannel) { + float* pDst = (float*)((ma_jack_port_get_buffer_proc)pContext->jack.jack_port_get_buffer)((ma_jack_port_t*)pDevice->jack.pPortsPlayback[iChannel], frameCount); + if (pDst != NULL) { + const float* pSrc = pDevice->jack.pIntermediaryBufferPlayback + iChannel; + ma_jack_nframes_t iFrame; + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + *pDst = *pSrc; + + pDst += 1; + pSrc += pDevice->playback.internalChannels; + } + } + } + } + + return 0; +} + +static ma_result ma_device_init__jack(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) +{ + ma_result result; + ma_uint32 periods; + ma_uint32 periodSizeInFrames; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDevice != NULL); + + if (pConfig->deviceType == ma_device_type_loopback) { + return MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + /* Only supporting default devices with JACK. */ + if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.pDeviceID != NULL && pConfig->playback.pDeviceID->jack != 0) || + ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.pDeviceID != NULL && pConfig->capture.pDeviceID->jack != 0)) { + return MA_NO_DEVICE; + } + + /* No exclusive mode with the JACK backend. */ + if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.shareMode == ma_share_mode_exclusive) || + ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.shareMode == ma_share_mode_exclusive)) { + return MA_SHARE_MODE_NOT_SUPPORTED; + } + + /* Open the client. */ + result = ma_context_open_client__jack(pContext, (ma_jack_client_t**)&pDevice->jack.pClient); + if (result != MA_SUCCESS) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to open client.", result); + } + + /* Callbacks. 
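+    The process and buffer-size callbacks are registered on the client, along with a shutdown handler that stops the device if the JACK server goes away.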
*/ + if (((ma_jack_set_process_callback_proc)pContext->jack.jack_set_process_callback)((ma_jack_client_t*)pDevice->jack.pClient, ma_device__jack_process_callback, pDevice) != 0) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to set process callback.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); + } + if (((ma_jack_set_buffer_size_callback_proc)pContext->jack.jack_set_buffer_size_callback)((ma_jack_client_t*)pDevice->jack.pClient, ma_device__jack_buffer_size_callback, pDevice) != 0) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to set buffer size callback.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); + } + + ((ma_jack_on_shutdown_proc)pContext->jack.jack_on_shutdown)((ma_jack_client_t*)pDevice->jack.pClient, ma_device__jack_shutdown_callback, pDevice); + + + /* The buffer size in frames can change. */ + periods = pConfig->periods; + periodSizeInFrames = ((ma_jack_get_buffer_size_proc)pContext->jack.jack_get_buffer_size)((ma_jack_client_t*)pDevice->jack.pClient); + + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + const char** ppPorts; + + pDevice->capture.internalFormat = ma_format_f32; + pDevice->capture.internalChannels = 0; + pDevice->capture.internalSampleRate = ((ma_jack_get_sample_rate_proc)pContext->jack.jack_get_sample_rate)((ma_jack_client_t*)pDevice->jack.pClient); + ma_get_standard_channel_map(ma_standard_channel_map_alsa, pDevice->capture.internalChannels, pDevice->capture.internalChannelMap); + + ppPorts = ((ma_jack_get_ports_proc)pContext->jack.jack_get_ports)((ma_jack_client_t*)pDevice->jack.pClient, NULL, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsPhysical | ma_JackPortIsOutput); + if (ppPorts == NULL) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to query physical ports.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); + } + + while (ppPorts[pDevice->capture.internalChannels] != NULL) { + char name[64]; + ma_strcpy_s(name, sizeof(name), "capture"); + ma_itoa_s((int)pDevice->capture.internalChannels, name+7, sizeof(name)-7, 10); /* 7 = length of "capture" */ + + pDevice->jack.pPortsCapture[pDevice->capture.internalChannels] = ((ma_jack_port_register_proc)pContext->jack.jack_port_register)((ma_jack_client_t*)pDevice->jack.pClient, name, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsInput, 0); + if (pDevice->jack.pPortsCapture[pDevice->capture.internalChannels] == NULL) { + ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppPorts); + ma_device_uninit__jack(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to register ports.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); + } + + pDevice->capture.internalChannels += 1; + } + + ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppPorts); + + pDevice->capture.internalPeriodSizeInFrames = periodSizeInFrames; + pDevice->capture.internalPeriods = periods; + + pDevice->jack.pIntermediaryBufferCapture = (float*)ma__calloc_from_callbacks(pDevice->capture.internalPeriodSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels), &pContext->allocationCallbacks); + if (pDevice->jack.pIntermediaryBufferCapture == NULL) { + ma_device_uninit__jack(pDevice); + return MA_OUT_OF_MEMORY; + } + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + const char** ppPorts; + + pDevice->playback.internalFormat = ma_format_f32; + pDevice->playback.internalChannels = 0; + pDevice->playback.internalSampleRate = 
((ma_jack_get_sample_rate_proc)pContext->jack.jack_get_sample_rate)((ma_jack_client_t*)pDevice->jack.pClient);
+        ma_get_standard_channel_map(ma_standard_channel_map_alsa, pDevice->playback.internalChannels, pDevice->playback.internalChannelMap);
+
+        ppPorts = ((ma_jack_get_ports_proc)pContext->jack.jack_get_ports)((ma_jack_client_t*)pDevice->jack.pClient, NULL, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsPhysical | ma_JackPortIsInput);
+        if (ppPorts == NULL) {
+            return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to query physical ports.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
+        }
+
+        while (ppPorts[pDevice->playback.internalChannels] != NULL) {
+            char name[64];
+            ma_strcpy_s(name, sizeof(name), "playback");
+            ma_itoa_s((int)pDevice->playback.internalChannels, name+8, sizeof(name)-8, 10); /* 8 = length of "playback" */
+
+            pDevice->jack.pPortsPlayback[pDevice->playback.internalChannels] = ((ma_jack_port_register_proc)pContext->jack.jack_port_register)((ma_jack_client_t*)pDevice->jack.pClient, name, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsOutput, 0);
+            if (pDevice->jack.pPortsPlayback[pDevice->playback.internalChannels] == NULL) {
+                ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppPorts);
+                ma_device_uninit__jack(pDevice);
+                return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to register ports.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
+            }
+
+            pDevice->playback.internalChannels += 1;
+        }
+
+        ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppPorts);
+
+        pDevice->playback.internalPeriodSizeInFrames = periodSizeInFrames;
+        pDevice->playback.internalPeriods = periods;
+
+        pDevice->jack.pIntermediaryBufferPlayback = (float*)ma__calloc_from_callbacks(pDevice->playback.internalPeriodSizeInFrames * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels), &pContext->allocationCallbacks);
+        if (pDevice->jack.pIntermediaryBufferPlayback == NULL) {
+            ma_device_uninit__jack(pDevice);
+            return MA_OUT_OF_MEMORY;
+        }
+    }
+
+    if (pDevice->type == ma_device_type_duplex) {
+        ma_uint32 rbSizeInFrames = (ma_uint32)ma_calculate_frame_count_after_resampling(pDevice->sampleRate, pDevice->capture.internalSampleRate, pDevice->capture.internalPeriodSizeInFrames * pDevice->capture.internalPeriods);
+        result = ma_pcm_rb_init(pDevice->capture.format, pDevice->capture.channels, rbSizeInFrames, NULL, &pDevice->pContext->allocationCallbacks, &pDevice->jack.duplexRB);
+        if (result != MA_SUCCESS) {
+            ma_device_uninit__jack(pDevice);
+            return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to initialize ring buffer.", result);
+        }
+
+        /* We need a period to act as a buffer for cases where the playback and capture devices end up desyncing. 
*/ + { + ma_uint32 marginSizeInFrames = rbSizeInFrames / pDevice->capture.internalPeriods; + void* pMarginData; + ma_pcm_rb_acquire_write(&pDevice->jack.duplexRB, &marginSizeInFrames, &pMarginData); + { + MA_ZERO_MEMORY(pMarginData, marginSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels)); + } + ma_pcm_rb_commit_write(&pDevice->jack.duplexRB, marginSizeInFrames, pMarginData); + } + } + + return MA_SUCCESS; +} + + +static ma_result ma_device_start__jack(ma_device* pDevice) +{ + ma_context* pContext = pDevice->pContext; + int resultJACK; + size_t i; + + resultJACK = ((ma_jack_activate_proc)pContext->jack.jack_activate)((ma_jack_client_t*)pDevice->jack.pClient); + if (resultJACK != 0) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to activate the JACK client.", MA_FAILED_TO_START_BACKEND_DEVICE); + } + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + const char** ppServerPorts = ((ma_jack_get_ports_proc)pContext->jack.jack_get_ports)((ma_jack_client_t*)pDevice->jack.pClient, NULL, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsPhysical | ma_JackPortIsOutput); + if (ppServerPorts == NULL) { + ((ma_jack_deactivate_proc)pContext->jack.jack_deactivate)((ma_jack_client_t*)pDevice->jack.pClient); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to retrieve physical ports.", MA_ERROR); + } + + for (i = 0; ppServerPorts[i] != NULL; ++i) { + const char* pServerPort = ppServerPorts[i]; + const char* pClientPort = ((ma_jack_port_name_proc)pContext->jack.jack_port_name)((ma_jack_port_t*)pDevice->jack.pPortsCapture[i]); + + resultJACK = ((ma_jack_connect_proc)pContext->jack.jack_connect)((ma_jack_client_t*)pDevice->jack.pClient, pServerPort, pClientPort); + if (resultJACK != 0) { + ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppServerPorts); + ((ma_jack_deactivate_proc)pContext->jack.jack_deactivate)((ma_jack_client_t*)pDevice->jack.pClient); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to connect ports.", MA_ERROR); + } + } + + ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppServerPorts); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + const char** ppServerPorts = ((ma_jack_get_ports_proc)pContext->jack.jack_get_ports)((ma_jack_client_t*)pDevice->jack.pClient, NULL, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsPhysical | ma_JackPortIsInput); + if (ppServerPorts == NULL) { + ((ma_jack_deactivate_proc)pContext->jack.jack_deactivate)((ma_jack_client_t*)pDevice->jack.pClient); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to retrieve physical ports.", MA_ERROR); + } + + for (i = 0; ppServerPorts[i] != NULL; ++i) { + const char* pServerPort = ppServerPorts[i]; + const char* pClientPort = ((ma_jack_port_name_proc)pContext->jack.jack_port_name)((ma_jack_port_t*)pDevice->jack.pPortsPlayback[i]); + + resultJACK = ((ma_jack_connect_proc)pContext->jack.jack_connect)((ma_jack_client_t*)pDevice->jack.pClient, pClientPort, pServerPort); + if (resultJACK != 0) { + ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppServerPorts); + ((ma_jack_deactivate_proc)pContext->jack.jack_deactivate)((ma_jack_client_t*)pDevice->jack.pClient); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to connect ports.", MA_ERROR); + } + } + + ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppServerPorts); + } + + return MA_SUCCESS; +} + +static ma_result ma_device_stop__jack(ma_device* pDevice) 
+{ + ma_context* pContext = pDevice->pContext; + ma_stop_proc onStop; + + if (((ma_jack_deactivate_proc)pContext->jack.jack_deactivate)((ma_jack_client_t*)pDevice->jack.pClient) != 0) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] An error occurred when deactivating the JACK client.", MA_ERROR); + } + + onStop = pDevice->onStop; + if (onStop) { + onStop(pDevice); + } + + return MA_SUCCESS; +} + + +static ma_result ma_context_uninit__jack(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_jack); + + ma_free(pContext->jack.pClientName, &pContext->allocationCallbacks); + pContext->jack.pClientName = NULL; + +#ifndef MA_NO_RUNTIME_LINKING + ma_dlclose(pContext, pContext->jack.jackSO); +#endif + + return MA_SUCCESS; +} + +static ma_result ma_context_init__jack(const ma_context_config* pConfig, ma_context* pContext) +{ +#ifndef MA_NO_RUNTIME_LINKING + const char* libjackNames[] = { +#ifdef MA_WIN32 + "libjack.dll" +#else + "libjack.so", + "libjack.so.0" +#endif + }; + size_t i; + + for (i = 0; i < ma_countof(libjackNames); ++i) { + pContext->jack.jackSO = ma_dlopen(pContext, libjackNames[i]); + if (pContext->jack.jackSO != NULL) { + break; + } + } + + if (pContext->jack.jackSO == NULL) { + return MA_NO_BACKEND; + } + + pContext->jack.jack_client_open = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_client_open"); + pContext->jack.jack_client_close = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_client_close"); + pContext->jack.jack_client_name_size = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_client_name_size"); + pContext->jack.jack_set_process_callback = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_set_process_callback"); + pContext->jack.jack_set_buffer_size_callback = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_set_buffer_size_callback"); + pContext->jack.jack_on_shutdown = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_on_shutdown"); + pContext->jack.jack_get_sample_rate = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_get_sample_rate"); + pContext->jack.jack_get_buffer_size = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_get_buffer_size"); + pContext->jack.jack_get_ports = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_get_ports"); + pContext->jack.jack_activate = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_activate"); + pContext->jack.jack_deactivate = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_deactivate"); + pContext->jack.jack_connect = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_connect"); + pContext->jack.jack_port_register = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_port_register"); + pContext->jack.jack_port_name = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_port_name"); + pContext->jack.jack_port_get_buffer = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_port_get_buffer"); + pContext->jack.jack_free = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_free"); +#else + /* + This strange assignment system is here just to ensure type safety of miniaudio's function pointer + types. If anything differs slightly the compiler should throw a warning. 
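+    For example, if a future version of the JACK headers were to change the signature of jack_activate(), the line
+    "ma_jack_activate_proc _jack_activate = jack_activate;" below would become an incompatible pointer assignment and the
+    compiler would flag it at build time, rather than the mismatch surfacing as undefined behaviour when the pointer is
+    called at run time.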
+ */ + ma_jack_client_open_proc _jack_client_open = jack_client_open; + ma_jack_client_close_proc _jack_client_close = jack_client_close; + ma_jack_client_name_size_proc _jack_client_name_size = jack_client_name_size; + ma_jack_set_process_callback_proc _jack_set_process_callback = jack_set_process_callback; + ma_jack_set_buffer_size_callback_proc _jack_set_buffer_size_callback = jack_set_buffer_size_callback; + ma_jack_on_shutdown_proc _jack_on_shutdown = jack_on_shutdown; + ma_jack_get_sample_rate_proc _jack_get_sample_rate = jack_get_sample_rate; + ma_jack_get_buffer_size_proc _jack_get_buffer_size = jack_get_buffer_size; + ma_jack_get_ports_proc _jack_get_ports = jack_get_ports; + ma_jack_activate_proc _jack_activate = jack_activate; + ma_jack_deactivate_proc _jack_deactivate = jack_deactivate; + ma_jack_connect_proc _jack_connect = jack_connect; + ma_jack_port_register_proc _jack_port_register = jack_port_register; + ma_jack_port_name_proc _jack_port_name = jack_port_name; + ma_jack_port_get_buffer_proc _jack_port_get_buffer = jack_port_get_buffer; + ma_jack_free_proc _jack_free = jack_free; + + pContext->jack.jack_client_open = (ma_proc)_jack_client_open; + pContext->jack.jack_client_close = (ma_proc)_jack_client_close; + pContext->jack.jack_client_name_size = (ma_proc)_jack_client_name_size; + pContext->jack.jack_set_process_callback = (ma_proc)_jack_set_process_callback; + pContext->jack.jack_set_buffer_size_callback = (ma_proc)_jack_set_buffer_size_callback; + pContext->jack.jack_on_shutdown = (ma_proc)_jack_on_shutdown; + pContext->jack.jack_get_sample_rate = (ma_proc)_jack_get_sample_rate; + pContext->jack.jack_get_buffer_size = (ma_proc)_jack_get_buffer_size; + pContext->jack.jack_get_ports = (ma_proc)_jack_get_ports; + pContext->jack.jack_activate = (ma_proc)_jack_activate; + pContext->jack.jack_deactivate = (ma_proc)_jack_deactivate; + pContext->jack.jack_connect = (ma_proc)_jack_connect; + pContext->jack.jack_port_register = (ma_proc)_jack_port_register; + pContext->jack.jack_port_name = (ma_proc)_jack_port_name; + pContext->jack.jack_port_get_buffer = (ma_proc)_jack_port_get_buffer; + pContext->jack.jack_free = (ma_proc)_jack_free; +#endif + + pContext->isBackendAsynchronous = MA_TRUE; + + pContext->onUninit = ma_context_uninit__jack; + pContext->onDeviceIDEqual = ma_context_is_device_id_equal__jack; + pContext->onEnumDevices = ma_context_enumerate_devices__jack; + pContext->onGetDeviceInfo = ma_context_get_device_info__jack; + pContext->onDeviceInit = ma_device_init__jack; + pContext->onDeviceUninit = ma_device_uninit__jack; + pContext->onDeviceStart = ma_device_start__jack; + pContext->onDeviceStop = ma_device_stop__jack; + + if (pConfig->jack.pClientName != NULL) { + pContext->jack.pClientName = ma_copy_string(pConfig->jack.pClientName, &pContext->allocationCallbacks); + } + pContext->jack.tryStartServer = pConfig->jack.tryStartServer; + + /* + Getting here means the JACK library is installed, but it doesn't necessarily mean it's usable. We need to quickly test this by connecting + a temporary client. 
+    */
+    {
+        ma_jack_client_t* pDummyClient;
+        ma_result result = ma_context_open_client__jack(pContext, &pDummyClient);
+        if (result != MA_SUCCESS) {
+            ma_free(pContext->jack.pClientName, &pContext->allocationCallbacks);
+        #ifndef MA_NO_RUNTIME_LINKING
+            ma_dlclose(pContext, pContext->jack.jackSO);
+        #endif
+            return MA_NO_BACKEND;
+        }
+
+        ((ma_jack_client_close_proc)pContext->jack.jack_client_close)((ma_jack_client_t*)pDummyClient);
+    }
+
+    return MA_SUCCESS;
+}
+#endif /* JACK */
+
+
+
+/******************************************************************************
+
+Core Audio Backend
+
+******************************************************************************/
+#ifdef MA_HAS_COREAUDIO
+#include <TargetConditionals.h>
+
+#if defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE == 1
+    #define MA_APPLE_MOBILE
+    #if defined(TARGET_OS_TV) && TARGET_OS_TV == 1
+        #define MA_APPLE_TV
+    #endif
+    #if defined(TARGET_OS_WATCH) && TARGET_OS_WATCH == 1
+        #define MA_APPLE_WATCH
+    #endif
+#else
+    #define MA_APPLE_DESKTOP
+#endif
+
+#if defined(MA_APPLE_DESKTOP)
+#include <CoreAudio/CoreAudio.h>
+#else
+#include <AVFoundation/AVFoundation.h>
+#endif
+
+#include <AudioToolbox/AudioToolbox.h>
+
+/* CoreFoundation */
+typedef Boolean (* ma_CFStringGetCString_proc)(CFStringRef theString, char* buffer, CFIndex bufferSize, CFStringEncoding encoding);
+typedef void (* ma_CFRelease_proc)(CFTypeRef cf);
+
+/* CoreAudio */
+#if defined(MA_APPLE_DESKTOP)
+typedef OSStatus (* ma_AudioObjectGetPropertyData_proc)(AudioObjectID inObjectID, const AudioObjectPropertyAddress* inAddress, UInt32 inQualifierDataSize, const void* inQualifierData, UInt32* ioDataSize, void* outData);
+typedef OSStatus (* ma_AudioObjectGetPropertyDataSize_proc)(AudioObjectID inObjectID, const AudioObjectPropertyAddress* inAddress, UInt32 inQualifierDataSize, const void* inQualifierData, UInt32* outDataSize);
+typedef OSStatus (* ma_AudioObjectSetPropertyData_proc)(AudioObjectID inObjectID, const AudioObjectPropertyAddress* inAddress, UInt32 inQualifierDataSize, const void* inQualifierData, UInt32 inDataSize, const void* inData);
+typedef OSStatus (* ma_AudioObjectAddPropertyListener_proc)(AudioObjectID inObjectID, const AudioObjectPropertyAddress* inAddress, AudioObjectPropertyListenerProc inListener, void* inClientData);
+typedef OSStatus (* ma_AudioObjectRemovePropertyListener_proc)(AudioObjectID inObjectID, const AudioObjectPropertyAddress* inAddress, AudioObjectPropertyListenerProc inListener, void* inClientData);
+#endif
+
+/* AudioToolbox */
+typedef AudioComponent (* ma_AudioComponentFindNext_proc)(AudioComponent inComponent, const AudioComponentDescription* inDesc);
+typedef OSStatus (* ma_AudioComponentInstanceDispose_proc)(AudioComponentInstance inInstance);
+typedef OSStatus (* ma_AudioComponentInstanceNew_proc)(AudioComponent inComponent, AudioComponentInstance* outInstance);
+typedef OSStatus (* ma_AudioOutputUnitStart_proc)(AudioUnit inUnit);
+typedef OSStatus (* ma_AudioOutputUnitStop_proc)(AudioUnit inUnit);
+typedef OSStatus (* ma_AudioUnitAddPropertyListener_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitPropertyListenerProc inProc, void* inProcUserData);
+typedef OSStatus (* ma_AudioUnitGetPropertyInfo_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitScope inScope, AudioUnitElement inElement, UInt32* outDataSize, Boolean* outWriteable);
+typedef OSStatus (* ma_AudioUnitGetProperty_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitScope inScope, AudioUnitElement inElement, void* outData, UInt32* ioDataSize);
+typedef OSStatus (* ma_AudioUnitSetProperty_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, 
AudioUnitScope inScope, AudioUnitElement inElement, const void* inData, UInt32 inDataSize);
+typedef OSStatus (* ma_AudioUnitInitialize_proc)(AudioUnit inUnit);
+typedef OSStatus (* ma_AudioUnitRender_proc)(AudioUnit inUnit, AudioUnitRenderActionFlags* ioActionFlags, const AudioTimeStamp* inTimeStamp, UInt32 inOutputBusNumber, UInt32 inNumberFrames, AudioBufferList* ioData);
+
+
+#define MA_COREAUDIO_OUTPUT_BUS    0
+#define MA_COREAUDIO_INPUT_BUS     1
+
+#if defined(MA_APPLE_DESKTOP)
+static ma_result ma_device_reinit_internal__coreaudio(ma_device* pDevice, ma_device_type deviceType, ma_bool32 disposePreviousAudioUnit);
+#endif
+
+/*
+Core Audio
+
+So far, Core Audio has been the worst backend to work with due to being both unintuitive and having almost no documentation
+apart from comments in the headers (which admittedly are quite good). For my own purposes, and for anybody out there who needs
+to figure out how this darn thing works, I'm going to outline a few things here.
+
+Since miniaudio is a fairly low-level API, one of the things it needs is control over specific devices, and it needs to be
+able to identify whether or not a given device can be used for playback and/or capture. The AudioObject API is the only one I've
+seen that supports this level of detail. There was some public domain sample code I stumbled across that used the AudioComponent
+and AudioUnit APIs, but I couldn't see anything that gave low-level control over device selection and capabilities (the
+distinction between playback and capture in particular). Therefore, miniaudio is using the AudioObject API.
+
+Most (all?) functions in the AudioObject API take an AudioObjectID as their input. This is the device identifier. When
+retrieving global information, such as the device list, you use kAudioObjectSystemObject. When retrieving device-specific
+data, you pass in the ID for that device. In order to retrieve device-specific IDs you need to enumerate over each of the
+devices. This is done using the AudioObjectGetPropertyDataSize() and AudioObjectGetPropertyData() APIs which seem to be
+the central APIs for retrieving information about the system and specific devices.
+
+To use the AudioObjectGetPropertyData() API you need to use the notion of a property address. A property address is a
+structure with three variables and is used to identify which property you are getting or setting. The first is the "selector",
+which is the specific property that you want to retrieve or set. The second is the "scope", which is typically set to
+kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyScopeInput for input-specific properties and
+kAudioObjectPropertyScopeOutput for output-specific properties. The last is the "element", which is always set to
+kAudioObjectPropertyElementMaster in miniaudio's case. I don't know of any cases where this would be set to anything different.
+
+Back to the earlier issue of device retrieval, you first use the AudioObjectGetPropertyDataSize() API to retrieve the size
+of the raw data, which is just a list of AudioDeviceID's. You use the kAudioObjectSystemObject AudioObjectID, and a property
+address with the kAudioHardwarePropertyDevices selector and the kAudioObjectPropertyScopeGlobal scope. Once you have the
+size, allocate a block of memory of that size and then call AudioObjectGetPropertyData(). The data is just a list of
+AudioDeviceID's, so "dataSize/sizeof(AudioDeviceID)" gives the device count. 
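+
+For illustration, the raw sequence of calls for that enumeration looks something like the following (a bare sketch with
+error checking omitted):
+
+    AudioObjectPropertyAddress propAddress;
+    propAddress.mSelector = kAudioHardwarePropertyDevices;
+    propAddress.mScope    = kAudioObjectPropertyScopeGlobal;
+    propAddress.mElement  = kAudioObjectPropertyElementMaster;
+
+    UInt32 dataSize;
+    AudioObjectGetPropertyDataSize(kAudioObjectSystemObject, &propAddress, 0, NULL, &dataSize);
+
+    AudioObjectID* pDeviceObjectIDs = (AudioObjectID*)malloc(dataSize);
+    AudioObjectGetPropertyData(kAudioObjectSystemObject, &propAddress, 0, NULL, &dataSize, pDeviceObjectIDs);
+
+    UInt32 deviceCount = dataSize / sizeof(AudioObjectID);
+
+ma_get_device_object_ids__coreaudio() below implements this same pattern, except it goes through miniaudio's dynamically
+loaded function pointers and allocation callbacks rather than calling the CoreAudio functions and malloc() directly.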
+*/ + +static ma_result ma_result_from_OSStatus(OSStatus status) +{ + switch (status) + { + case noErr: return MA_SUCCESS; + #if defined(MA_APPLE_DESKTOP) + case kAudioHardwareNotRunningError: return MA_DEVICE_NOT_STARTED; + case kAudioHardwareUnspecifiedError: return MA_ERROR; + case kAudioHardwareUnknownPropertyError: return MA_INVALID_ARGS; + case kAudioHardwareBadPropertySizeError: return MA_INVALID_OPERATION; + case kAudioHardwareIllegalOperationError: return MA_INVALID_OPERATION; + case kAudioHardwareBadObjectError: return MA_INVALID_ARGS; + case kAudioHardwareBadDeviceError: return MA_INVALID_ARGS; + case kAudioHardwareBadStreamError: return MA_INVALID_ARGS; + case kAudioHardwareUnsupportedOperationError: return MA_INVALID_OPERATION; + case kAudioDeviceUnsupportedFormatError: return MA_FORMAT_NOT_SUPPORTED; + case kAudioDevicePermissionsError: return MA_ACCESS_DENIED; + #endif + default: return MA_ERROR; + } +} + +#if 0 +static ma_channel ma_channel_from_AudioChannelBitmap(AudioChannelBitmap bit) +{ + switch (bit) + { + case kAudioChannelBit_Left: return MA_CHANNEL_LEFT; + case kAudioChannelBit_Right: return MA_CHANNEL_RIGHT; + case kAudioChannelBit_Center: return MA_CHANNEL_FRONT_CENTER; + case kAudioChannelBit_LFEScreen: return MA_CHANNEL_LFE; + case kAudioChannelBit_LeftSurround: return MA_CHANNEL_BACK_LEFT; + case kAudioChannelBit_RightSurround: return MA_CHANNEL_BACK_RIGHT; + case kAudioChannelBit_LeftCenter: return MA_CHANNEL_FRONT_LEFT_CENTER; + case kAudioChannelBit_RightCenter: return MA_CHANNEL_FRONT_RIGHT_CENTER; + case kAudioChannelBit_CenterSurround: return MA_CHANNEL_BACK_CENTER; + case kAudioChannelBit_LeftSurroundDirect: return MA_CHANNEL_SIDE_LEFT; + case kAudioChannelBit_RightSurroundDirect: return MA_CHANNEL_SIDE_RIGHT; + case kAudioChannelBit_TopCenterSurround: return MA_CHANNEL_TOP_CENTER; + case kAudioChannelBit_VerticalHeightLeft: return MA_CHANNEL_TOP_FRONT_LEFT; + case kAudioChannelBit_VerticalHeightCenter: return MA_CHANNEL_TOP_FRONT_CENTER; + case kAudioChannelBit_VerticalHeightRight: return MA_CHANNEL_TOP_FRONT_RIGHT; + case kAudioChannelBit_TopBackLeft: return MA_CHANNEL_TOP_BACK_LEFT; + case kAudioChannelBit_TopBackCenter: return MA_CHANNEL_TOP_BACK_CENTER; + case kAudioChannelBit_TopBackRight: return MA_CHANNEL_TOP_BACK_RIGHT; + default: return MA_CHANNEL_NONE; + } +} +#endif + +static ma_result ma_format_from_AudioStreamBasicDescription(const AudioStreamBasicDescription* pDescription, ma_format* pFormatOut) +{ + MA_ASSERT(pDescription != NULL); + MA_ASSERT(pFormatOut != NULL); + + *pFormatOut = ma_format_unknown; /* Safety. */ + + /* There's a few things miniaudio doesn't support. */ + if (pDescription->mFormatID != kAudioFormatLinearPCM) { + return MA_FORMAT_NOT_SUPPORTED; + } + + /* We don't support any non-packed formats that are aligned high. */ + if ((pDescription->mFormatFlags & kLinearPCMFormatFlagIsAlignedHigh) != 0) { + return MA_FORMAT_NOT_SUPPORTED; + } + + /* Only supporting native-endian. */ + if ((ma_is_little_endian() && (pDescription->mFormatFlags & kAudioFormatFlagIsBigEndian) != 0) || (ma_is_big_endian() && (pDescription->mFormatFlags & kAudioFormatFlagIsBigEndian) == 0)) { + return MA_FORMAT_NOT_SUPPORTED; + } + + /* We are not currently supporting non-interleaved formats (this will be added in a future version of miniaudio). 
*/ + /*if ((pDescription->mFormatFlags & kAudioFormatFlagIsNonInterleaved) != 0) { + return MA_FORMAT_NOT_SUPPORTED; + }*/ + + if ((pDescription->mFormatFlags & kLinearPCMFormatFlagIsFloat) != 0) { + if (pDescription->mBitsPerChannel == 32) { + *pFormatOut = ma_format_f32; + return MA_SUCCESS; + } + } else { + if ((pDescription->mFormatFlags & kLinearPCMFormatFlagIsSignedInteger) != 0) { + if (pDescription->mBitsPerChannel == 16) { + *pFormatOut = ma_format_s16; + return MA_SUCCESS; + } else if (pDescription->mBitsPerChannel == 24) { + if (pDescription->mBytesPerFrame == (pDescription->mBitsPerChannel/8 * pDescription->mChannelsPerFrame)) { + *pFormatOut = ma_format_s24; + return MA_SUCCESS; + } else { + if (pDescription->mBytesPerFrame/pDescription->mChannelsPerFrame == sizeof(ma_int32)) { + /* TODO: Implement ma_format_s24_32. */ + /**pFormatOut = ma_format_s24_32;*/ + /*return MA_SUCCESS;*/ + return MA_FORMAT_NOT_SUPPORTED; + } + } + } else if (pDescription->mBitsPerChannel == 32) { + *pFormatOut = ma_format_s32; + return MA_SUCCESS; + } + } else { + if (pDescription->mBitsPerChannel == 8) { + *pFormatOut = ma_format_u8; + return MA_SUCCESS; + } + } + } + + /* Getting here means the format is not supported. */ + return MA_FORMAT_NOT_SUPPORTED; +} + +#if defined(MA_APPLE_DESKTOP) +static ma_channel ma_channel_from_AudioChannelLabel(AudioChannelLabel label) +{ + switch (label) + { + case kAudioChannelLabel_Unknown: return MA_CHANNEL_NONE; + case kAudioChannelLabel_Unused: return MA_CHANNEL_NONE; + case kAudioChannelLabel_UseCoordinates: return MA_CHANNEL_NONE; + case kAudioChannelLabel_Left: return MA_CHANNEL_LEFT; + case kAudioChannelLabel_Right: return MA_CHANNEL_RIGHT; + case kAudioChannelLabel_Center: return MA_CHANNEL_FRONT_CENTER; + case kAudioChannelLabel_LFEScreen: return MA_CHANNEL_LFE; + case kAudioChannelLabel_LeftSurround: return MA_CHANNEL_BACK_LEFT; + case kAudioChannelLabel_RightSurround: return MA_CHANNEL_BACK_RIGHT; + case kAudioChannelLabel_LeftCenter: return MA_CHANNEL_FRONT_LEFT_CENTER; + case kAudioChannelLabel_RightCenter: return MA_CHANNEL_FRONT_RIGHT_CENTER; + case kAudioChannelLabel_CenterSurround: return MA_CHANNEL_BACK_CENTER; + case kAudioChannelLabel_LeftSurroundDirect: return MA_CHANNEL_SIDE_LEFT; + case kAudioChannelLabel_RightSurroundDirect: return MA_CHANNEL_SIDE_RIGHT; + case kAudioChannelLabel_TopCenterSurround: return MA_CHANNEL_TOP_CENTER; + case kAudioChannelLabel_VerticalHeightLeft: return MA_CHANNEL_TOP_FRONT_LEFT; + case kAudioChannelLabel_VerticalHeightCenter: return MA_CHANNEL_TOP_FRONT_CENTER; + case kAudioChannelLabel_VerticalHeightRight: return MA_CHANNEL_TOP_FRONT_RIGHT; + case kAudioChannelLabel_TopBackLeft: return MA_CHANNEL_TOP_BACK_LEFT; + case kAudioChannelLabel_TopBackCenter: return MA_CHANNEL_TOP_BACK_CENTER; + case kAudioChannelLabel_TopBackRight: return MA_CHANNEL_TOP_BACK_RIGHT; + case kAudioChannelLabel_RearSurroundLeft: return MA_CHANNEL_BACK_LEFT; + case kAudioChannelLabel_RearSurroundRight: return MA_CHANNEL_BACK_RIGHT; + case kAudioChannelLabel_LeftWide: return MA_CHANNEL_SIDE_LEFT; + case kAudioChannelLabel_RightWide: return MA_CHANNEL_SIDE_RIGHT; + case kAudioChannelLabel_LFE2: return MA_CHANNEL_LFE; + case kAudioChannelLabel_LeftTotal: return MA_CHANNEL_LEFT; + case kAudioChannelLabel_RightTotal: return MA_CHANNEL_RIGHT; + case kAudioChannelLabel_HearingImpaired: return MA_CHANNEL_NONE; + case kAudioChannelLabel_Narration: return MA_CHANNEL_MONO; + case kAudioChannelLabel_Mono: return MA_CHANNEL_MONO; + case 
kAudioChannelLabel_DialogCentricMix: return MA_CHANNEL_MONO; + case kAudioChannelLabel_CenterSurroundDirect: return MA_CHANNEL_BACK_CENTER; + case kAudioChannelLabel_Haptic: return MA_CHANNEL_NONE; + case kAudioChannelLabel_Ambisonic_W: return MA_CHANNEL_NONE; + case kAudioChannelLabel_Ambisonic_X: return MA_CHANNEL_NONE; + case kAudioChannelLabel_Ambisonic_Y: return MA_CHANNEL_NONE; + case kAudioChannelLabel_Ambisonic_Z: return MA_CHANNEL_NONE; + case kAudioChannelLabel_MS_Mid: return MA_CHANNEL_LEFT; + case kAudioChannelLabel_MS_Side: return MA_CHANNEL_RIGHT; + case kAudioChannelLabel_XY_X: return MA_CHANNEL_LEFT; + case kAudioChannelLabel_XY_Y: return MA_CHANNEL_RIGHT; + case kAudioChannelLabel_HeadphonesLeft: return MA_CHANNEL_LEFT; + case kAudioChannelLabel_HeadphonesRight: return MA_CHANNEL_RIGHT; + case kAudioChannelLabel_ClickTrack: return MA_CHANNEL_NONE; + case kAudioChannelLabel_ForeignLanguage: return MA_CHANNEL_NONE; + case kAudioChannelLabel_Discrete: return MA_CHANNEL_NONE; + case kAudioChannelLabel_Discrete_0: return MA_CHANNEL_AUX_0; + case kAudioChannelLabel_Discrete_1: return MA_CHANNEL_AUX_1; + case kAudioChannelLabel_Discrete_2: return MA_CHANNEL_AUX_2; + case kAudioChannelLabel_Discrete_3: return MA_CHANNEL_AUX_3; + case kAudioChannelLabel_Discrete_4: return MA_CHANNEL_AUX_4; + case kAudioChannelLabel_Discrete_5: return MA_CHANNEL_AUX_5; + case kAudioChannelLabel_Discrete_6: return MA_CHANNEL_AUX_6; + case kAudioChannelLabel_Discrete_7: return MA_CHANNEL_AUX_7; + case kAudioChannelLabel_Discrete_8: return MA_CHANNEL_AUX_8; + case kAudioChannelLabel_Discrete_9: return MA_CHANNEL_AUX_9; + case kAudioChannelLabel_Discrete_10: return MA_CHANNEL_AUX_10; + case kAudioChannelLabel_Discrete_11: return MA_CHANNEL_AUX_11; + case kAudioChannelLabel_Discrete_12: return MA_CHANNEL_AUX_12; + case kAudioChannelLabel_Discrete_13: return MA_CHANNEL_AUX_13; + case kAudioChannelLabel_Discrete_14: return MA_CHANNEL_AUX_14; + case kAudioChannelLabel_Discrete_15: return MA_CHANNEL_AUX_15; + case kAudioChannelLabel_Discrete_65535: return MA_CHANNEL_NONE; + + #if 0 /* Introduced in a later version of macOS. 
*/ + case kAudioChannelLabel_HOA_ACN: return MA_CHANNEL_NONE; + case kAudioChannelLabel_HOA_ACN_0: return MA_CHANNEL_AUX_0; + case kAudioChannelLabel_HOA_ACN_1: return MA_CHANNEL_AUX_1; + case kAudioChannelLabel_HOA_ACN_2: return MA_CHANNEL_AUX_2; + case kAudioChannelLabel_HOA_ACN_3: return MA_CHANNEL_AUX_3; + case kAudioChannelLabel_HOA_ACN_4: return MA_CHANNEL_AUX_4; + case kAudioChannelLabel_HOA_ACN_5: return MA_CHANNEL_AUX_5; + case kAudioChannelLabel_HOA_ACN_6: return MA_CHANNEL_AUX_6; + case kAudioChannelLabel_HOA_ACN_7: return MA_CHANNEL_AUX_7; + case kAudioChannelLabel_HOA_ACN_8: return MA_CHANNEL_AUX_8; + case kAudioChannelLabel_HOA_ACN_9: return MA_CHANNEL_AUX_9; + case kAudioChannelLabel_HOA_ACN_10: return MA_CHANNEL_AUX_10; + case kAudioChannelLabel_HOA_ACN_11: return MA_CHANNEL_AUX_11; + case kAudioChannelLabel_HOA_ACN_12: return MA_CHANNEL_AUX_12; + case kAudioChannelLabel_HOA_ACN_13: return MA_CHANNEL_AUX_13; + case kAudioChannelLabel_HOA_ACN_14: return MA_CHANNEL_AUX_14; + case kAudioChannelLabel_HOA_ACN_15: return MA_CHANNEL_AUX_15; + case kAudioChannelLabel_HOA_ACN_65024: return MA_CHANNEL_NONE; + #endif + + default: return MA_CHANNEL_NONE; + } +} + +static ma_result ma_get_channel_map_from_AudioChannelLayout(AudioChannelLayout* pChannelLayout, ma_channel channelMap[MA_MAX_CHANNELS]) +{ + MA_ASSERT(pChannelLayout != NULL); + + if (pChannelLayout->mChannelLayoutTag == kAudioChannelLayoutTag_UseChannelDescriptions) { + UInt32 iChannel; + for (iChannel = 0; iChannel < pChannelLayout->mNumberChannelDescriptions; ++iChannel) { + channelMap[iChannel] = ma_channel_from_AudioChannelLabel(pChannelLayout->mChannelDescriptions[iChannel].mChannelLabel); + } + } else +#if 0 + if (pChannelLayout->mChannelLayoutTag == kAudioChannelLayoutTag_UseChannelBitmap) { + /* This is the same kind of system that's used by Windows audio APIs. */ + UInt32 iChannel = 0; + UInt32 iBit; + AudioChannelBitmap bitmap = pChannelLayout->mChannelBitmap; + for (iBit = 0; iBit < 32; ++iBit) { + AudioChannelBitmap bit = bitmap & (1 << iBit); + if (bit != 0) { + channelMap[iChannel++] = ma_channel_from_AudioChannelBit(bit); + } + } + } else +#endif + { + /* + Need to use the tag to determine the channel map. For now I'm just assuming a default channel map, but later on this should + be updated to determine the mapping based on the tag. + */ + UInt32 channelCount = AudioChannelLayoutTag_GetNumberOfChannels(pChannelLayout->mChannelLayoutTag); + switch (pChannelLayout->mChannelLayoutTag) + { + case kAudioChannelLayoutTag_Mono: + case kAudioChannelLayoutTag_Stereo: + case kAudioChannelLayoutTag_StereoHeadphones: + case kAudioChannelLayoutTag_MatrixStereo: + case kAudioChannelLayoutTag_MidSide: + case kAudioChannelLayoutTag_XY: + case kAudioChannelLayoutTag_Binaural: + case kAudioChannelLayoutTag_Ambisonic_B_Format: + { + ma_get_standard_channel_map(ma_standard_channel_map_default, channelCount, channelMap); + } break; + + case kAudioChannelLayoutTag_Octagonal: + { + channelMap[7] = MA_CHANNEL_SIDE_RIGHT; + channelMap[6] = MA_CHANNEL_SIDE_LEFT; + } /* Intentional fallthrough. */ + case kAudioChannelLayoutTag_Hexagonal: + { + channelMap[5] = MA_CHANNEL_BACK_CENTER; + } /* Intentional fallthrough. */ + case kAudioChannelLayoutTag_Pentagonal: + { + channelMap[4] = MA_CHANNEL_FRONT_CENTER; + } /* Intentional fallghrough. 
*/ + case kAudioChannelLayoutTag_Quadraphonic: + { + channelMap[3] = MA_CHANNEL_BACK_RIGHT; + channelMap[2] = MA_CHANNEL_BACK_LEFT; + channelMap[1] = MA_CHANNEL_RIGHT; + channelMap[0] = MA_CHANNEL_LEFT; + } break; + + /* TODO: Add support for more tags here. */ + + default: + { + ma_get_standard_channel_map(ma_standard_channel_map_default, channelCount, channelMap); + } break; + } + } + + return MA_SUCCESS; +} + +static ma_result ma_get_device_object_ids__coreaudio(ma_context* pContext, UInt32* pDeviceCount, AudioObjectID** ppDeviceObjectIDs) /* NOTE: Free the returned buffer with ma_free(). */ +{ + AudioObjectPropertyAddress propAddressDevices; + UInt32 deviceObjectsDataSize; + OSStatus status; + AudioObjectID* pDeviceObjectIDs; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pDeviceCount != NULL); + MA_ASSERT(ppDeviceObjectIDs != NULL); + + /* Safety. */ + *pDeviceCount = 0; + *ppDeviceObjectIDs = NULL; + + propAddressDevices.mSelector = kAudioHardwarePropertyDevices; + propAddressDevices.mScope = kAudioObjectPropertyScopeGlobal; + propAddressDevices.mElement = kAudioObjectPropertyElementMaster; + + status = ((ma_AudioObjectGetPropertyDataSize_proc)pContext->coreaudio.AudioObjectGetPropertyDataSize)(kAudioObjectSystemObject, &propAddressDevices, 0, NULL, &deviceObjectsDataSize); + if (status != noErr) { + return ma_result_from_OSStatus(status); + } + + pDeviceObjectIDs = (AudioObjectID*)ma_malloc(deviceObjectsDataSize, &pContext->allocationCallbacks); + if (pDeviceObjectIDs == NULL) { + return MA_OUT_OF_MEMORY; + } + + status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(kAudioObjectSystemObject, &propAddressDevices, 0, NULL, &deviceObjectsDataSize, pDeviceObjectIDs); + if (status != noErr) { + ma_free(pDeviceObjectIDs, &pContext->allocationCallbacks); + return ma_result_from_OSStatus(status); + } + + *pDeviceCount = deviceObjectsDataSize / sizeof(AudioObjectID); + *ppDeviceObjectIDs = pDeviceObjectIDs; + + return MA_SUCCESS; +} + +static ma_result ma_get_AudioObject_uid_as_CFStringRef(ma_context* pContext, AudioObjectID objectID, CFStringRef* pUID) +{ + AudioObjectPropertyAddress propAddress; + UInt32 dataSize; + OSStatus status; + + MA_ASSERT(pContext != NULL); + + propAddress.mSelector = kAudioDevicePropertyDeviceUID; + propAddress.mScope = kAudioObjectPropertyScopeGlobal; + propAddress.mElement = kAudioObjectPropertyElementMaster; + + dataSize = sizeof(*pUID); + status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(objectID, &propAddress, 0, NULL, &dataSize, pUID); + if (status != noErr) { + return ma_result_from_OSStatus(status); + } + + return MA_SUCCESS; +} + +static ma_result ma_get_AudioObject_uid(ma_context* pContext, AudioObjectID objectID, size_t bufferSize, char* bufferOut) +{ + CFStringRef uid; + ma_result result; + + MA_ASSERT(pContext != NULL); + + result = ma_get_AudioObject_uid_as_CFStringRef(pContext, objectID, &uid); + if (result != MA_SUCCESS) { + return result; + } + + if (!((ma_CFStringGetCString_proc)pContext->coreaudio.CFStringGetCString)(uid, bufferOut, bufferSize, kCFStringEncodingUTF8)) { + return MA_ERROR; + } + + ((ma_CFRelease_proc)pContext->coreaudio.CFRelease)(uid); + return MA_SUCCESS; +} + +static ma_result ma_get_AudioObject_name(ma_context* pContext, AudioObjectID objectID, size_t bufferSize, char* bufferOut) +{ + AudioObjectPropertyAddress propAddress; + CFStringRef deviceName = NULL; + UInt32 dataSize; + OSStatus status; + + MA_ASSERT(pContext != NULL); + + 
propAddress.mSelector = kAudioDevicePropertyDeviceNameCFString; + propAddress.mScope = kAudioObjectPropertyScopeGlobal; + propAddress.mElement = kAudioObjectPropertyElementMaster; + + dataSize = sizeof(deviceName); + status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(objectID, &propAddress, 0, NULL, &dataSize, &deviceName); + if (status != noErr) { + return ma_result_from_OSStatus(status); + } + + if (!((ma_CFStringGetCString_proc)pContext->coreaudio.CFStringGetCString)(deviceName, bufferOut, bufferSize, kCFStringEncodingUTF8)) { + return MA_ERROR; + } + + ((ma_CFRelease_proc)pContext->coreaudio.CFRelease)(deviceName); + return MA_SUCCESS; +} + +static ma_bool32 ma_does_AudioObject_support_scope(ma_context* pContext, AudioObjectID deviceObjectID, AudioObjectPropertyScope scope) +{ + AudioObjectPropertyAddress propAddress; + UInt32 dataSize; + OSStatus status; + AudioBufferList* pBufferList; + ma_bool32 isSupported; + + MA_ASSERT(pContext != NULL); + + /* To know whether or not a device is an input device we need ot look at the stream configuration. If it has an output channel it's a playback device. */ + propAddress.mSelector = kAudioDevicePropertyStreamConfiguration; + propAddress.mScope = scope; + propAddress.mElement = kAudioObjectPropertyElementMaster; + + status = ((ma_AudioObjectGetPropertyDataSize_proc)pContext->coreaudio.AudioObjectGetPropertyDataSize)(deviceObjectID, &propAddress, 0, NULL, &dataSize); + if (status != noErr) { + return MA_FALSE; + } + + pBufferList = (AudioBufferList*)ma__malloc_from_callbacks(dataSize, &pContext->allocationCallbacks); + if (pBufferList == NULL) { + return MA_FALSE; /* Out of memory. */ + } + + status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, pBufferList); + if (status != noErr) { + ma__free_from_callbacks(pBufferList, &pContext->allocationCallbacks); + return MA_FALSE; + } + + isSupported = MA_FALSE; + if (pBufferList->mNumberBuffers > 0) { + isSupported = MA_TRUE; + } + + ma__free_from_callbacks(pBufferList, &pContext->allocationCallbacks); + return isSupported; +} + +static ma_bool32 ma_does_AudioObject_support_playback(ma_context* pContext, AudioObjectID deviceObjectID) +{ + return ma_does_AudioObject_support_scope(pContext, deviceObjectID, kAudioObjectPropertyScopeOutput); +} + +static ma_bool32 ma_does_AudioObject_support_capture(ma_context* pContext, AudioObjectID deviceObjectID) +{ + return ma_does_AudioObject_support_scope(pContext, deviceObjectID, kAudioObjectPropertyScopeInput); +} + + +static ma_result ma_get_AudioObject_stream_descriptions(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, UInt32* pDescriptionCount, AudioStreamRangedDescription** ppDescriptions) /* NOTE: Free the returned pointer with ma_free(). */ +{ + AudioObjectPropertyAddress propAddress; + UInt32 dataSize; + OSStatus status; + AudioStreamRangedDescription* pDescriptions; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pDescriptionCount != NULL); + MA_ASSERT(ppDescriptions != NULL); + + /* + TODO: Experiment with kAudioStreamPropertyAvailablePhysicalFormats instead of (or in addition to) kAudioStreamPropertyAvailableVirtualFormats. My + MacBook Pro uses s24/32 format, however, which miniaudio does not currently support. 
+ */ + propAddress.mSelector = kAudioStreamPropertyAvailableVirtualFormats; /*kAudioStreamPropertyAvailablePhysicalFormats;*/ + propAddress.mScope = (deviceType == ma_device_type_playback) ? kAudioObjectPropertyScopeOutput : kAudioObjectPropertyScopeInput; + propAddress.mElement = kAudioObjectPropertyElementMaster; + + status = ((ma_AudioObjectGetPropertyDataSize_proc)pContext->coreaudio.AudioObjectGetPropertyDataSize)(deviceObjectID, &propAddress, 0, NULL, &dataSize); + if (status != noErr) { + return ma_result_from_OSStatus(status); + } + + pDescriptions = (AudioStreamRangedDescription*)ma_malloc(dataSize, &pContext->allocationCallbacks); + if (pDescriptions == NULL) { + return MA_OUT_OF_MEMORY; + } + + status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, pDescriptions); + if (status != noErr) { + ma_free(pDescriptions, &pContext->allocationCallbacks); + return ma_result_from_OSStatus(status); + } + + *pDescriptionCount = dataSize / sizeof(*pDescriptions); + *ppDescriptions = pDescriptions; + return MA_SUCCESS; +} + + +static ma_result ma_get_AudioObject_channel_layout(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, AudioChannelLayout** ppChannelLayout) /* NOTE: Free the returned pointer with ma_free(). */ +{ + AudioObjectPropertyAddress propAddress; + UInt32 dataSize; + OSStatus status; + AudioChannelLayout* pChannelLayout; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(ppChannelLayout != NULL); + + *ppChannelLayout = NULL; /* Safety. */ + + propAddress.mSelector = kAudioDevicePropertyPreferredChannelLayout; + propAddress.mScope = (deviceType == ma_device_type_playback) ? kAudioObjectPropertyScopeOutput : kAudioObjectPropertyScopeInput; + propAddress.mElement = kAudioObjectPropertyElementMaster; + + status = ((ma_AudioObjectGetPropertyDataSize_proc)pContext->coreaudio.AudioObjectGetPropertyDataSize)(deviceObjectID, &propAddress, 0, NULL, &dataSize); + if (status != noErr) { + return ma_result_from_OSStatus(status); + } + + pChannelLayout = (AudioChannelLayout*)ma_malloc(dataSize, &pContext->allocationCallbacks); + if (pChannelLayout == NULL) { + return MA_OUT_OF_MEMORY; + } + + status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, pChannelLayout); + if (status != noErr) { + ma_free(pChannelLayout, &pContext->allocationCallbacks); + return ma_result_from_OSStatus(status); + } + + *ppChannelLayout = pChannelLayout; + return MA_SUCCESS; +} + +static ma_result ma_get_AudioObject_channel_count(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_uint32* pChannelCount) +{ + AudioChannelLayout* pChannelLayout; + ma_result result; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pChannelCount != NULL); + + *pChannelCount = 0; /* Safety. 
*/ + + result = ma_get_AudioObject_channel_layout(pContext, deviceObjectID, deviceType, &pChannelLayout); + if (result != MA_SUCCESS) { + return result; + } + + if (pChannelLayout->mChannelLayoutTag == kAudioChannelLayoutTag_UseChannelDescriptions) { + *pChannelCount = pChannelLayout->mNumberChannelDescriptions; + } else if (pChannelLayout->mChannelLayoutTag == kAudioChannelLayoutTag_UseChannelBitmap) { + *pChannelCount = ma_count_set_bits(pChannelLayout->mChannelBitmap); + } else { + *pChannelCount = AudioChannelLayoutTag_GetNumberOfChannels(pChannelLayout->mChannelLayoutTag); + } + + ma_free(pChannelLayout, &pContext->allocationCallbacks); + return MA_SUCCESS; +} + +#if 0 +static ma_result ma_get_AudioObject_channel_map(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_channel channelMap[MA_MAX_CHANNELS]) +{ + AudioChannelLayout* pChannelLayout; + ma_result result; + + MA_ASSERT(pContext != NULL); + + result = ma_get_AudioObject_channel_layout(pContext, deviceObjectID, deviceType, &pChannelLayout); + if (result != MA_SUCCESS) { + return result; /* Rather than always failing here, would it be more robust to simply assume a default? */ + } + + result = ma_get_channel_map_from_AudioChannelLayout(pChannelLayout, channelMap); + if (result != MA_SUCCESS) { + ma_free(pChannelLayout, &pContext->allocationCallbacks); + return result; + } + + ma_free(pChannelLayout, &pContext->allocationCallbacks); + return result; +} +#endif + +static ma_result ma_get_AudioObject_sample_rates(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, UInt32* pSampleRateRangesCount, AudioValueRange** ppSampleRateRanges) /* NOTE: Free the returned pointer with ma_free(). */ +{ + AudioObjectPropertyAddress propAddress; + UInt32 dataSize; + OSStatus status; + AudioValueRange* pSampleRateRanges; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pSampleRateRangesCount != NULL); + MA_ASSERT(ppSampleRateRanges != NULL); + + /* Safety. */ + *pSampleRateRangesCount = 0; + *ppSampleRateRanges = NULL; + + propAddress.mSelector = kAudioDevicePropertyAvailableNominalSampleRates; + propAddress.mScope = (deviceType == ma_device_type_playback) ? kAudioObjectPropertyScopeOutput : kAudioObjectPropertyScopeInput; + propAddress.mElement = kAudioObjectPropertyElementMaster; + + status = ((ma_AudioObjectGetPropertyDataSize_proc)pContext->coreaudio.AudioObjectGetPropertyDataSize)(deviceObjectID, &propAddress, 0, NULL, &dataSize); + if (status != noErr) { + return ma_result_from_OSStatus(status); + } + + pSampleRateRanges = (AudioValueRange*)ma_malloc(dataSize, &pContext->allocationCallbacks); + if (pSampleRateRanges == NULL) { + return MA_OUT_OF_MEMORY; + } + + status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, pSampleRateRanges); + if (status != noErr) { + ma_free(pSampleRateRanges, &pContext->allocationCallbacks); + return ma_result_from_OSStatus(status); + } + + *pSampleRateRangesCount = dataSize / sizeof(*pSampleRateRanges); + *ppSampleRateRanges = pSampleRateRanges; + return MA_SUCCESS; +} + +#if 0 +static ma_result ma_get_AudioObject_get_closest_sample_rate(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_uint32 sampleRateIn, ma_uint32* pSampleRateOut) +{ + UInt32 sampleRateRangeCount; + AudioValueRange* pSampleRateRanges; + ma_result result; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pSampleRateOut != NULL); + + *pSampleRateOut = 0; /* Safety. 
*/ + + result = ma_get_AudioObject_sample_rates(pContext, deviceObjectID, deviceType, &sampleRateRangeCount, &pSampleRateRanges); + if (result != MA_SUCCESS) { + return result; + } + + if (sampleRateRangeCount == 0) { + ma_free(pSampleRateRanges, &pContext->allocationCallbacks); + return MA_ERROR; /* Should never hit this case should we? */ + } + + if (sampleRateIn == 0) { + /* Search in order of miniaudio's preferred priority. */ + UInt32 iMALSampleRate; + for (iMALSampleRate = 0; iMALSampleRate < ma_countof(g_maStandardSampleRatePriorities); ++iMALSampleRate) { + ma_uint32 malSampleRate = g_maStandardSampleRatePriorities[iMALSampleRate]; + UInt32 iCASampleRate; + for (iCASampleRate = 0; iCASampleRate < sampleRateRangeCount; ++iCASampleRate) { + AudioValueRange caSampleRate = pSampleRateRanges[iCASampleRate]; + if (caSampleRate.mMinimum <= malSampleRate && caSampleRate.mMaximum >= malSampleRate) { + *pSampleRateOut = malSampleRate; + ma_free(pSampleRateRanges, &pContext->allocationCallbacks); + return MA_SUCCESS; + } + } + } + + /* + If we get here it means none of miniaudio's standard sample rates matched any of the supported sample rates from the device. In this + case we just fall back to the first one reported by Core Audio. + */ + MA_ASSERT(sampleRateRangeCount > 0); + + *pSampleRateOut = pSampleRateRanges[0].mMinimum; + ma_free(pSampleRateRanges, &pContext->allocationCallbacks); + return MA_SUCCESS; + } else { + /* Find the closest match to this sample rate. */ + UInt32 currentAbsoluteDifference = INT32_MAX; + UInt32 iCurrentClosestRange = (UInt32)-1; + UInt32 iRange; + for (iRange = 0; iRange < sampleRateRangeCount; ++iRange) { + if (pSampleRateRanges[iRange].mMinimum <= sampleRateIn && pSampleRateRanges[iRange].mMaximum >= sampleRateIn) { + *pSampleRateOut = sampleRateIn; + ma_free(pSampleRateRanges, &pContext->allocationCallbacks); + return MA_SUCCESS; + } else { + UInt32 absoluteDifference; + if (pSampleRateRanges[iRange].mMinimum > sampleRateIn) { + absoluteDifference = pSampleRateRanges[iRange].mMinimum - sampleRateIn; + } else { + absoluteDifference = sampleRateIn - pSampleRateRanges[iRange].mMaximum; + } + + if (currentAbsoluteDifference > absoluteDifference) { + currentAbsoluteDifference = absoluteDifference; + iCurrentClosestRange = iRange; + } + } + } + + MA_ASSERT(iCurrentClosestRange != (UInt32)-1); + + *pSampleRateOut = pSampleRateRanges[iCurrentClosestRange].mMinimum; + ma_free(pSampleRateRanges, &pContext->allocationCallbacks); + return MA_SUCCESS; + } + + /* Should never get here, but it would mean we weren't able to find any suitable sample rates. */ + /*ma_free(pSampleRateRanges, &pContext->allocationCallbacks);*/ + /*return MA_ERROR;*/ +} +#endif + +static ma_result ma_get_AudioObject_closest_buffer_size_in_frames(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_uint32 bufferSizeInFramesIn, ma_uint32* pBufferSizeInFramesOut) +{ + AudioObjectPropertyAddress propAddress; + AudioValueRange bufferSizeRange; + UInt32 dataSize; + OSStatus status; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pBufferSizeInFramesOut != NULL); + + *pBufferSizeInFramesOut = 0; /* Safety. */ + + propAddress.mSelector = kAudioDevicePropertyBufferFrameSizeRange; + propAddress.mScope = (deviceType == ma_device_type_playback) ? 
kAudioObjectPropertyScopeOutput : kAudioObjectPropertyScopeInput; + propAddress.mElement = kAudioObjectPropertyElementMaster; + + dataSize = sizeof(bufferSizeRange); + status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, &bufferSizeRange); + if (status != noErr) { + return ma_result_from_OSStatus(status); + } + + /* This is just a clamp. */ + if (bufferSizeInFramesIn < bufferSizeRange.mMinimum) { + *pBufferSizeInFramesOut = (ma_uint32)bufferSizeRange.mMinimum; + } else if (bufferSizeInFramesIn > bufferSizeRange.mMaximum) { + *pBufferSizeInFramesOut = (ma_uint32)bufferSizeRange.mMaximum; + } else { + *pBufferSizeInFramesOut = bufferSizeInFramesIn; + } + + return MA_SUCCESS; +} + +static ma_result ma_set_AudioObject_buffer_size_in_frames(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_uint32* pPeriodSizeInOut) +{ + ma_result result; + ma_uint32 chosenBufferSizeInFrames; + AudioObjectPropertyAddress propAddress; + UInt32 dataSize; + OSStatus status; + + MA_ASSERT(pContext != NULL); + + result = ma_get_AudioObject_closest_buffer_size_in_frames(pContext, deviceObjectID, deviceType, *pPeriodSizeInOut, &chosenBufferSizeInFrames); + if (result != MA_SUCCESS) { + return result; + } + + /* Try setting the size of the buffer... If this fails we just use whatever is currently set. */ + propAddress.mSelector = kAudioDevicePropertyBufferFrameSize; + propAddress.mScope = (deviceType == ma_device_type_playback) ? kAudioObjectPropertyScopeOutput : kAudioObjectPropertyScopeInput; + propAddress.mElement = kAudioObjectPropertyElementMaster; + + ((ma_AudioObjectSetPropertyData_proc)pContext->coreaudio.AudioObjectSetPropertyData)(deviceObjectID, &propAddress, 0, NULL, sizeof(chosenBufferSizeInFrames), &chosenBufferSizeInFrames); + + /* Get the actual size of the buffer. */ + dataSize = sizeof(*pPeriodSizeInOut); + status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, &chosenBufferSizeInFrames); + if (status != noErr) { + return ma_result_from_OSStatus(status); + } + + *pPeriodSizeInOut = chosenBufferSizeInFrames; + return MA_SUCCESS; +} + + +static ma_result ma_find_AudioObjectID(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, AudioObjectID* pDeviceObjectID) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pDeviceObjectID != NULL); + + /* Safety. */ + *pDeviceObjectID = 0; + + if (pDeviceID == NULL) { + /* Default device. */ + AudioObjectPropertyAddress propAddressDefaultDevice; + UInt32 defaultDeviceObjectIDSize = sizeof(AudioObjectID); + AudioObjectID defaultDeviceObjectID; + OSStatus status; + + propAddressDefaultDevice.mScope = kAudioObjectPropertyScopeGlobal; + propAddressDefaultDevice.mElement = kAudioObjectPropertyElementMaster; + if (deviceType == ma_device_type_playback) { + propAddressDefaultDevice.mSelector = kAudioHardwarePropertyDefaultOutputDevice; + } else { + propAddressDefaultDevice.mSelector = kAudioHardwarePropertyDefaultInputDevice; + } + + defaultDeviceObjectIDSize = sizeof(AudioObjectID); + status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(kAudioObjectSystemObject, &propAddressDefaultDevice, 0, NULL, &defaultDeviceObjectIDSize, &defaultDeviceObjectID); + if (status == noErr) { + *pDeviceObjectID = defaultDeviceObjectID; + return MA_SUCCESS; + } + } else { + /* Explicit device. 
*/ + UInt32 deviceCount; + AudioObjectID* pDeviceObjectIDs; + ma_result result; + UInt32 iDevice; + + result = ma_get_device_object_ids__coreaudio(pContext, &deviceCount, &pDeviceObjectIDs); + if (result != MA_SUCCESS) { + return result; + } + + for (iDevice = 0; iDevice < deviceCount; ++iDevice) { + AudioObjectID deviceObjectID = pDeviceObjectIDs[iDevice]; + + char uid[256]; + if (ma_get_AudioObject_uid(pContext, deviceObjectID, sizeof(uid), uid) != MA_SUCCESS) { + continue; + } + + if (deviceType == ma_device_type_playback) { + if (ma_does_AudioObject_support_playback(pContext, deviceObjectID)) { + if (strcmp(uid, pDeviceID->coreaudio) == 0) { + *pDeviceObjectID = deviceObjectID; + ma_free(pDeviceObjectIDs, &pContext->allocationCallbacks); + return MA_SUCCESS; + } + } + } else { + if (ma_does_AudioObject_support_capture(pContext, deviceObjectID)) { + if (strcmp(uid, pDeviceID->coreaudio) == 0) { + *pDeviceObjectID = deviceObjectID; + ma_free(pDeviceObjectIDs, &pContext->allocationCallbacks); + return MA_SUCCESS; + } + } + } + } + + ma_free(pDeviceObjectIDs, &pContext->allocationCallbacks); + } + + /* If we get here it means we couldn't find the device. */ + return MA_NO_DEVICE; +} + + +static ma_result ma_find_best_format__coreaudio(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_format format, ma_uint32 channels, ma_uint32 sampleRate, ma_bool32 usingDefaultFormat, ma_bool32 usingDefaultChannels, ma_bool32 usingDefaultSampleRate, AudioStreamBasicDescription* pFormat) +{ + UInt32 deviceFormatDescriptionCount; + AudioStreamRangedDescription* pDeviceFormatDescriptions; + ma_result result; + ma_uint32 desiredSampleRate; + ma_uint32 desiredChannelCount; + ma_format desiredFormat; + AudioStreamBasicDescription bestDeviceFormatSoFar; + ma_bool32 hasSupportedFormat; + UInt32 iFormat; + + result = ma_get_AudioObject_stream_descriptions(pContext, deviceObjectID, deviceType, &deviceFormatDescriptionCount, &pDeviceFormatDescriptions); + if (result != MA_SUCCESS) { + return result; + } + + desiredSampleRate = sampleRate; + if (usingDefaultSampleRate) { + /* + When using the device's default sample rate, we get the highest priority standard rate supported by the device. Otherwise + we just use the pre-set rate. + */ + ma_uint32 iStandardRate; + for (iStandardRate = 0; iStandardRate < ma_countof(g_maStandardSampleRatePriorities); ++iStandardRate) { + ma_uint32 standardRate = g_maStandardSampleRatePriorities[iStandardRate]; + ma_bool32 foundRate = MA_FALSE; + UInt32 iDeviceRate; + + for (iDeviceRate = 0; iDeviceRate < deviceFormatDescriptionCount; ++iDeviceRate) { + ma_uint32 deviceRate = (ma_uint32)pDeviceFormatDescriptions[iDeviceRate].mFormat.mSampleRate; + + if (deviceRate == standardRate) { + desiredSampleRate = standardRate; + foundRate = MA_TRUE; + break; + } + } + + if (foundRate) { + break; + } + } + } + + desiredChannelCount = channels; + if (usingDefaultChannels) { + ma_get_AudioObject_channel_count(pContext, deviceObjectID, deviceType, &desiredChannelCount); /* <-- Not critical if this fails. */ + } + + desiredFormat = format; + if (usingDefaultFormat) { + desiredFormat = g_maFormatPriorities[0]; + } + + /* + If we get here it means we don't have an exact match to what the client is asking for. We'll need to find the closest one. The next + loop will check for formats that have the same sample rate to what we're asking for. If there is, we prefer that one in all cases. 
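+    For example, if the client asks for f32/stereo/48000 and the device only exposes s16/stereo/44100 and s16/stereo/48000,
+    the s16/stereo/48000 format wins, because a matching sample rate outranks both the channel count and the sample format in
+    this search.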
+ */ + MA_ZERO_OBJECT(&bestDeviceFormatSoFar); + + hasSupportedFormat = MA_FALSE; + for (iFormat = 0; iFormat < deviceFormatDescriptionCount; ++iFormat) { + ma_format format; + ma_result formatResult = ma_format_from_AudioStreamBasicDescription(&pDeviceFormatDescriptions[iFormat].mFormat, &format); + if (formatResult == MA_SUCCESS && format != ma_format_unknown) { + hasSupportedFormat = MA_TRUE; + bestDeviceFormatSoFar = pDeviceFormatDescriptions[iFormat].mFormat; + break; + } + } + + if (!hasSupportedFormat) { + ma_free(pDeviceFormatDescriptions, &pContext->allocationCallbacks); + return MA_FORMAT_NOT_SUPPORTED; + } + + + for (iFormat = 0; iFormat < deviceFormatDescriptionCount; ++iFormat) { + AudioStreamBasicDescription thisDeviceFormat = pDeviceFormatDescriptions[iFormat].mFormat; + ma_format thisSampleFormat; + ma_result formatResult; + ma_format bestSampleFormatSoFar; + + /* If the format is not supported by miniaudio we need to skip this one entirely. */ + formatResult = ma_format_from_AudioStreamBasicDescription(&pDeviceFormatDescriptions[iFormat].mFormat, &thisSampleFormat); + if (formatResult != MA_SUCCESS || thisSampleFormat == ma_format_unknown) { + continue; /* The format is not supported by miniaudio. Skip. */ + } + + ma_format_from_AudioStreamBasicDescription(&bestDeviceFormatSoFar, &bestSampleFormatSoFar); + + /* Getting here means the format is supported by miniaudio which makes this format a candidate. */ + if (thisDeviceFormat.mSampleRate != desiredSampleRate) { + /* + The sample rate does not match, but this format could still be usable, although it's a very low priority. If the best format + so far has an equal sample rate we can just ignore this one. + */ + if (bestDeviceFormatSoFar.mSampleRate == desiredSampleRate) { + continue; /* The best sample rate so far has the same sample rate as what we requested which means it's still the best so far. Skip this format. */ + } else { + /* In this case, neither the best format so far nor this one have the same sample rate. Check the channel count next. */ + if (thisDeviceFormat.mChannelsPerFrame != desiredChannelCount) { + /* This format has a different sample rate _and_ a different channel count. */ + if (bestDeviceFormatSoFar.mChannelsPerFrame == desiredChannelCount) { + continue; /* No change to the best format. */ + } else { + /* + Both this format and the best so far have different sample rates and different channel counts. Whichever has the + best format is the new best. + */ + if (ma_get_format_priority_index(thisSampleFormat) < ma_get_format_priority_index(bestSampleFormatSoFar)) { + bestDeviceFormatSoFar = thisDeviceFormat; + continue; + } else { + continue; /* No change to the best format. */ + } + } + } else { + /* This format has a different sample rate but the desired channel count. */ + if (bestDeviceFormatSoFar.mChannelsPerFrame == desiredChannelCount) { + /* Both this format and the best so far have the desired channel count. Whichever has the best format is the new best. */ + if (ma_get_format_priority_index(thisSampleFormat) < ma_get_format_priority_index(bestSampleFormatSoFar)) { + bestDeviceFormatSoFar = thisDeviceFormat; + continue; + } else { + continue; /* No change to the best format for now. */ + } + } else { + /* This format has the desired channel count, but the best so far does not. We have a new best. */ + bestDeviceFormatSoFar = thisDeviceFormat; + continue; + } + } + } + } else { + /* + The sample rates match which makes this format a very high priority contender. 
If the best format so far has a different + sample rate it needs to be replaced with this one. + */ + if (bestDeviceFormatSoFar.mSampleRate != desiredSampleRate) { + bestDeviceFormatSoFar = thisDeviceFormat; + continue; + } else { + /* In this case both this format and the best format so far have the same sample rate. Check the channel count next. */ + if (thisDeviceFormat.mChannelsPerFrame == desiredChannelCount) { + /* + In this case this format has the same channel count as what the client is requesting. If the best format so far has + a different count, this one becomes the new best. + */ + if (bestDeviceFormatSoFar.mChannelsPerFrame != desiredChannelCount) { + bestDeviceFormatSoFar = thisDeviceFormat; + continue; + } else { + /* In this case both this format and the best so far have the ideal sample rate and channel count. Check the format. */ + if (thisSampleFormat == desiredFormat) { + bestDeviceFormatSoFar = thisDeviceFormat; + break; /* Found the exact match. */ + } else { + /* The formats are different. The new best format is the one with the highest priority format according to miniaudio. */ + if (ma_get_format_priority_index(thisSampleFormat) < ma_get_format_priority_index(bestSampleFormatSoFar)) { + bestDeviceFormatSoFar = thisDeviceFormat; + continue; + } else { + continue; /* No change to the best format for now. */ + } + } + } + } else { + /* + In this case the channel count is different to what the client has requested. If the best so far has the same channel + count as the requested count then it remains the best. + */ + if (bestDeviceFormatSoFar.mChannelsPerFrame == desiredChannelCount) { + continue; + } else { + /* + This is the case where both have the same sample rate (good) but different channel counts. Right now both have about + the same priority, but we need to compare the format now. + */ + if (thisSampleFormat == bestSampleFormatSoFar) { + if (ma_get_format_priority_index(thisSampleFormat) < ma_get_format_priority_index(bestSampleFormatSoFar)) { + bestDeviceFormatSoFar = thisDeviceFormat; + continue; + } else { + continue; /* No change to the best format for now. 
*/ + } + } + } + } + } + } + } + + *pFormat = bestDeviceFormatSoFar; + + ma_free(pDeviceFormatDescriptions, &pContext->allocationCallbacks); + return MA_SUCCESS; +} + +static ma_result ma_get_AudioUnit_channel_map(ma_context* pContext, AudioUnit audioUnit, ma_device_type deviceType, ma_channel channelMap[MA_MAX_CHANNELS]) +{ + AudioUnitScope deviceScope; + AudioUnitElement deviceBus; + UInt32 channelLayoutSize; + OSStatus status; + AudioChannelLayout* pChannelLayout; + ma_result result; + + MA_ASSERT(pContext != NULL); + + if (deviceType == ma_device_type_playback) { + deviceScope = kAudioUnitScope_Output; + deviceBus = MA_COREAUDIO_OUTPUT_BUS; + } else { + deviceScope = kAudioUnitScope_Input; + deviceBus = MA_COREAUDIO_INPUT_BUS; + } + + status = ((ma_AudioUnitGetPropertyInfo_proc)pContext->coreaudio.AudioUnitGetPropertyInfo)(audioUnit, kAudioUnitProperty_AudioChannelLayout, deviceScope, deviceBus, &channelLayoutSize, NULL); + if (status != noErr) { + return ma_result_from_OSStatus(status); + } + + pChannelLayout = (AudioChannelLayout*)ma__malloc_from_callbacks(channelLayoutSize, &pContext->allocationCallbacks); + if (pChannelLayout == NULL) { + return MA_OUT_OF_MEMORY; + } + + status = ((ma_AudioUnitGetProperty_proc)pContext->coreaudio.AudioUnitGetProperty)(audioUnit, kAudioUnitProperty_AudioChannelLayout, deviceScope, deviceBus, pChannelLayout, &channelLayoutSize); + if (status != noErr) { + ma__free_from_callbacks(pChannelLayout, &pContext->allocationCallbacks); + return ma_result_from_OSStatus(status); + } + + result = ma_get_channel_map_from_AudioChannelLayout(pChannelLayout, channelMap); + if (result != MA_SUCCESS) { + ma__free_from_callbacks(pChannelLayout, &pContext->allocationCallbacks); + return result; + } + + ma__free_from_callbacks(pChannelLayout, &pContext->allocationCallbacks); + return MA_SUCCESS; +} +#endif /* MA_APPLE_DESKTOP */ + +static ma_bool32 ma_context_is_device_id_equal__coreaudio(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pID0 != NULL); + MA_ASSERT(pID1 != NULL); + (void)pContext; + + return strcmp(pID0->coreaudio, pID1->coreaudio) == 0; +} + +static ma_result ma_context_enumerate_devices__coreaudio(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ +#if defined(MA_APPLE_DESKTOP) + UInt32 deviceCount; + AudioObjectID* pDeviceObjectIDs; + ma_result result; + UInt32 iDevice; + + result = ma_get_device_object_ids__coreaudio(pContext, &deviceCount, &pDeviceObjectIDs); + if (result != MA_SUCCESS) { + return result; + } + + for (iDevice = 0; iDevice < deviceCount; ++iDevice) { + AudioObjectID deviceObjectID = pDeviceObjectIDs[iDevice]; + ma_device_info info; + + MA_ZERO_OBJECT(&info); + if (ma_get_AudioObject_uid(pContext, deviceObjectID, sizeof(info.id.coreaudio), info.id.coreaudio) != MA_SUCCESS) { + continue; + } + if (ma_get_AudioObject_name(pContext, deviceObjectID, sizeof(info.name), info.name) != MA_SUCCESS) { + continue; + } + + if (ma_does_AudioObject_support_playback(pContext, deviceObjectID)) { + if (!callback(pContext, ma_device_type_playback, &info, pUserData)) { + break; + } + } + if (ma_does_AudioObject_support_capture(pContext, deviceObjectID)) { + if (!callback(pContext, ma_device_type_capture, &info, pUserData)) { + break; + } + } + } + + ma_free(pDeviceObjectIDs, &pContext->allocationCallbacks); +#else + /* Only supporting default devices on non-Desktop platforms. 
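+
+ (For context, a minimal sketch of the kind of callback this enumeration ends up driving through the public
+ ma_context_enumerate_devices() API; my_enum_callback, the context variable and the printf format are hypothetical,
+ while the types and the return-value convention are miniaudio's own:
+
+     static ma_bool32 my_enum_callback(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pInfo, void* pUserData)
+     {
+         printf("%s: %s\n", (deviceType == ma_device_type_playback) ? "Playback" : "Capture", pInfo->name);
+         (void)pContext; (void)pUserData;
+         return MA_TRUE;   // returning MA_FALSE stops enumeration early, which the loops above honour
+     }
+
+ followed by ma_context_enumerate_devices(&context, my_enum_callback, NULL);)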
*/ + ma_device_info info; + + MA_ZERO_OBJECT(&info); + ma_strncpy_s(info.name, sizeof(info.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + if (!callback(pContext, ma_device_type_playback, &info, pUserData)) { + return MA_SUCCESS; + } + + MA_ZERO_OBJECT(&info); + ma_strncpy_s(info.name, sizeof(info.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + if (!callback(pContext, ma_device_type_capture, &info, pUserData)) { + return MA_SUCCESS; + } +#endif + + return MA_SUCCESS; +} + +static ma_result ma_context_get_device_info__coreaudio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) +{ + ma_result result; + + MA_ASSERT(pContext != NULL); + + /* No exclusive mode with the Core Audio backend for now. */ + if (shareMode == ma_share_mode_exclusive) { + return MA_SHARE_MODE_NOT_SUPPORTED; + } + +#if defined(MA_APPLE_DESKTOP) + /* Desktop */ + { + AudioObjectID deviceObjectID; + UInt32 streamDescriptionCount; + AudioStreamRangedDescription* pStreamDescriptions; + UInt32 iStreamDescription; + UInt32 sampleRateRangeCount; + AudioValueRange* pSampleRateRanges; + + result = ma_find_AudioObjectID(pContext, deviceType, pDeviceID, &deviceObjectID); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_get_AudioObject_uid(pContext, deviceObjectID, sizeof(pDeviceInfo->id.coreaudio), pDeviceInfo->id.coreaudio); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_get_AudioObject_name(pContext, deviceObjectID, sizeof(pDeviceInfo->name), pDeviceInfo->name); + if (result != MA_SUCCESS) { + return result; + } + + /* Formats. */ + result = ma_get_AudioObject_stream_descriptions(pContext, deviceObjectID, deviceType, &streamDescriptionCount, &pStreamDescriptions); + if (result != MA_SUCCESS) { + return result; + } + + for (iStreamDescription = 0; iStreamDescription < streamDescriptionCount; ++iStreamDescription) { + ma_format format; + ma_bool32 formatExists = MA_FALSE; + ma_uint32 iOutputFormat; + + result = ma_format_from_AudioStreamBasicDescription(&pStreamDescriptions[iStreamDescription].mFormat, &format); + if (result != MA_SUCCESS) { + continue; + } + + MA_ASSERT(format != ma_format_unknown); + + /* Make sure the format isn't already in the output list. */ + for (iOutputFormat = 0; iOutputFormat < pDeviceInfo->formatCount; ++iOutputFormat) { + if (pDeviceInfo->formats[iOutputFormat] == format) { + formatExists = MA_TRUE; + break; + } + } + + if (!formatExists) { + pDeviceInfo->formats[pDeviceInfo->formatCount++] = format; + } + } + + ma_free(pStreamDescriptions, &pContext->allocationCallbacks); + + + /* Channels. */ + result = ma_get_AudioObject_channel_count(pContext, deviceObjectID, deviceType, &pDeviceInfo->minChannels); + if (result != MA_SUCCESS) { + return result; + } + pDeviceInfo->maxChannels = pDeviceInfo->minChannels; + + + /* Sample rates. 
*/ + result = ma_get_AudioObject_sample_rates(pContext, deviceObjectID, deviceType, &sampleRateRangeCount, &pSampleRateRanges); + if (result != MA_SUCCESS) { + return result; + } + + if (sampleRateRangeCount > 0) { + UInt32 iSampleRate; + pDeviceInfo->minSampleRate = UINT32_MAX; + pDeviceInfo->maxSampleRate = 0; + for (iSampleRate = 0; iSampleRate < sampleRateRangeCount; ++iSampleRate) { + if (pDeviceInfo->minSampleRate > pSampleRateRanges[iSampleRate].mMinimum) { + pDeviceInfo->minSampleRate = pSampleRateRanges[iSampleRate].mMinimum; + } + if (pDeviceInfo->maxSampleRate < pSampleRateRanges[iSampleRate].mMaximum) { + pDeviceInfo->maxSampleRate = pSampleRateRanges[iSampleRate].mMaximum; + } + } + } + } +#else + /* Mobile */ + { + AudioComponentDescription desc; + AudioComponent component; + AudioUnit audioUnit; + OSStatus status; + AudioUnitScope formatScope; + AudioUnitElement formatElement; + AudioStreamBasicDescription bestFormat; + UInt32 propSize; + + if (deviceType == ma_device_type_playback) { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + } else { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + } + + /* + Retrieving device information is more annoying on mobile than desktop. For simplicity I'm locking this down to whatever format is + reported on a temporary I/O unit. The problem, however, is that this doesn't return a value for the sample rate which we need to + retrieve from the AVAudioSession shared instance. + */ + desc.componentType = kAudioUnitType_Output; + desc.componentSubType = kAudioUnitSubType_RemoteIO; + desc.componentManufacturer = kAudioUnitManufacturer_Apple; + desc.componentFlags = 0; + desc.componentFlagsMask = 0; + + component = ((ma_AudioComponentFindNext_proc)pContext->coreaudio.AudioComponentFindNext)(NULL, &desc); + if (component == NULL) { + return MA_FAILED_TO_INIT_BACKEND; + } + + status = ((ma_AudioComponentInstanceNew_proc)pContext->coreaudio.AudioComponentInstanceNew)(component, &audioUnit); + if (status != noErr) { + return ma_result_from_OSStatus(status); + } + + formatScope = (deviceType == ma_device_type_playback) ? kAudioUnitScope_Input : kAudioUnitScope_Output; + formatElement = (deviceType == ma_device_type_playback) ? MA_COREAUDIO_OUTPUT_BUS : MA_COREAUDIO_INPUT_BUS; + + propSize = sizeof(bestFormat); + status = ((ma_AudioUnitGetProperty_proc)pContext->coreaudio.AudioUnitGetProperty)(audioUnit, kAudioUnitProperty_StreamFormat, formatScope, formatElement, &bestFormat, &propSize); + if (status != noErr) { + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(audioUnit); + return ma_result_from_OSStatus(status); + } + + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(audioUnit); + audioUnit = NULL; + + + pDeviceInfo->minChannels = bestFormat.mChannelsPerFrame; + pDeviceInfo->maxChannels = bestFormat.mChannelsPerFrame; + + pDeviceInfo->formatCount = 1; + result = ma_format_from_AudioStreamBasicDescription(&bestFormat, &pDeviceInfo->formats[0]); + if (result != MA_SUCCESS) { + return result; + } + + /* + It looks like Apple are wanting to push the whole AVAudioSession thing. Thus, we need to use that to determine device settings. To do + this we just get the shared instance and inspect. 
+ */ + @autoreleasepool { + AVAudioSession* pAudioSession = [AVAudioSession sharedInstance]; + MA_ASSERT(pAudioSession != NULL); + + pDeviceInfo->minSampleRate = (ma_uint32)pAudioSession.sampleRate; + pDeviceInfo->maxSampleRate = pDeviceInfo->minSampleRate; + } + } +#endif + + (void)pDeviceInfo; /* Unused. */ + return MA_SUCCESS; +} + + +static OSStatus ma_on_output__coreaudio(void* pUserData, AudioUnitRenderActionFlags* pActionFlags, const AudioTimeStamp* pTimeStamp, UInt32 busNumber, UInt32 frameCount, AudioBufferList* pBufferList) +{ + ma_device* pDevice = (ma_device*)pUserData; + ma_stream_layout layout; + + MA_ASSERT(pDevice != NULL); + +#if defined(MA_DEBUG_OUTPUT) + printf("INFO: Output Callback: busNumber=%d, frameCount=%d, mNumberBuffers=%d\n", busNumber, frameCount, pBufferList->mNumberBuffers); +#endif + + /* We need to check whether or not we are outputting interleaved or non-interleaved samples. The way we do this is slightly different for each type. */ + layout = ma_stream_layout_interleaved; + if (pBufferList->mBuffers[0].mNumberChannels != pDevice->playback.internalChannels) { + layout = ma_stream_layout_deinterleaved; + } + + if (layout == ma_stream_layout_interleaved) { + /* For now we can assume everything is interleaved. */ + UInt32 iBuffer; + for (iBuffer = 0; iBuffer < pBufferList->mNumberBuffers; ++iBuffer) { + if (pBufferList->mBuffers[iBuffer].mNumberChannels == pDevice->playback.internalChannels) { + ma_uint32 frameCountForThisBuffer = pBufferList->mBuffers[iBuffer].mDataByteSize / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + if (frameCountForThisBuffer > 0) { + if (pDevice->type == ma_device_type_duplex) { + ma_device__handle_duplex_callback_playback(pDevice, frameCountForThisBuffer, pBufferList->mBuffers[iBuffer].mData, &pDevice->coreaudio.duplexRB); + } else { + ma_device__read_frames_from_client(pDevice, frameCountForThisBuffer, pBufferList->mBuffers[iBuffer].mData); + } + } + + #if defined(MA_DEBUG_OUTPUT) + printf(" frameCount=%d, mNumberChannels=%d, mDataByteSize=%d\n", frameCount, pBufferList->mBuffers[iBuffer].mNumberChannels, pBufferList->mBuffers[iBuffer].mDataByteSize); + #endif + } else { + /* + This case is where the number of channels in the output buffer do not match our internal channels. It could mean that it's + not interleaved, in which case we can't handle right now since miniaudio does not yet support non-interleaved streams. We just + output silence here. + */ + MA_ZERO_MEMORY(pBufferList->mBuffers[iBuffer].mData, pBufferList->mBuffers[iBuffer].mDataByteSize); + + #if defined(MA_DEBUG_OUTPUT) + printf(" WARNING: Outputting silence. frameCount=%d, mNumberChannels=%d, mDataByteSize=%d\n", frameCount, pBufferList->mBuffers[iBuffer].mNumberChannels, pBufferList->mBuffers[iBuffer].mDataByteSize); + #endif + } + } + } else { + /* This is the deinterleaved case. We need to update each buffer in groups of internalChannels. This assumes each buffer is the same size. */ + + /* + For safety we'll check that the internal channels is a multiple of the buffer count. If it's not it means something + very strange has happened and we're not going to support it. 
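+
+ A small worked illustration of the grouping below (the channel and buffer counts are hypothetical): with
+ pDevice->playback.internalChannels == 2 and pBufferList->mNumberBuffers == 2 the check passes and the loop walks
+ a single group (iBuffer == 0 only); with 6 buffers it would walk three groups (iBuffer == 0, 2, 4). Each pass
+ reads at most sizeof(tempBuffer) / ma_get_bytes_per_frame() frames from the client, e.g. 4096 / 4 = 1024 frames
+ per pass for s16 stereo, and ma_deinterleave_pcm_frames() then scatters those frames into the per-channel buffers.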
+ */ + if ((pBufferList->mNumberBuffers % pDevice->playback.internalChannels) == 0) { + ma_uint8 tempBuffer[4096]; + UInt32 iBuffer; + + for (iBuffer = 0; iBuffer < pBufferList->mNumberBuffers; iBuffer += pDevice->playback.internalChannels) { + ma_uint32 frameCountPerBuffer = pBufferList->mBuffers[iBuffer].mDataByteSize / ma_get_bytes_per_sample(pDevice->playback.internalFormat); + ma_uint32 framesRemaining = frameCountPerBuffer; + + while (framesRemaining > 0) { + void* ppDeinterleavedBuffers[MA_MAX_CHANNELS]; + ma_uint32 iChannel; + ma_uint32 framesToRead = sizeof(tempBuffer) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + if (framesToRead > framesRemaining) { + framesToRead = framesRemaining; + } + + if (pDevice->type == ma_device_type_duplex) { + ma_device__handle_duplex_callback_playback(pDevice, framesToRead, tempBuffer, &pDevice->coreaudio.duplexRB); + } else { + ma_device__read_frames_from_client(pDevice, framesToRead, tempBuffer); + } + + for (iChannel = 0; iChannel < pDevice->playback.internalChannels; ++iChannel) { + ppDeinterleavedBuffers[iChannel] = (void*)ma_offset_ptr(pBufferList->mBuffers[iBuffer+iChannel].mData, (frameCountPerBuffer - framesRemaining) * ma_get_bytes_per_sample(pDevice->playback.internalFormat)); + } + + ma_deinterleave_pcm_frames(pDevice->playback.internalFormat, pDevice->playback.internalChannels, framesToRead, tempBuffer, ppDeinterleavedBuffers); + + framesRemaining -= framesToRead; + } + } + } + } + + (void)pActionFlags; + (void)pTimeStamp; + (void)busNumber; + (void)frameCount; + + return noErr; +} + +static OSStatus ma_on_input__coreaudio(void* pUserData, AudioUnitRenderActionFlags* pActionFlags, const AudioTimeStamp* pTimeStamp, UInt32 busNumber, UInt32 frameCount, AudioBufferList* pUnusedBufferList) +{ + ma_device* pDevice = (ma_device*)pUserData; + AudioBufferList* pRenderedBufferList; + ma_stream_layout layout; + OSStatus status; + + MA_ASSERT(pDevice != NULL); + + pRenderedBufferList = (AudioBufferList*)pDevice->coreaudio.pAudioBufferList; + MA_ASSERT(pRenderedBufferList); + + /* We need to check whether or not we are outputting interleaved or non-interleaved samples. The way we do this is slightly different for each type. 
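+
+ (Illustrative, with hypothetical numbers: a stereo capture device delivering interleaved data presents a single
+ AudioBuffer with mNumberChannels == 2, which matches pDevice->capture.internalChannels, so the layout is treated
+ as interleaved; the same device in non-interleaved mode presents two AudioBuffers with mNumberChannels == 1 each,
+ the first buffer no longer matches the internal channel count, and the layout is treated as deinterleaved.)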
*/ + layout = ma_stream_layout_interleaved; + if (pRenderedBufferList->mBuffers[0].mNumberChannels != pDevice->capture.internalChannels) { + layout = ma_stream_layout_deinterleaved; + } + +#if defined(MA_DEBUG_OUTPUT) + printf("INFO: Input Callback: busNumber=%d, frameCount=%d, mNumberBuffers=%d\n", busNumber, frameCount, pRenderedBufferList->mNumberBuffers); +#endif + + status = ((ma_AudioUnitRender_proc)pDevice->pContext->coreaudio.AudioUnitRender)((AudioUnit)pDevice->coreaudio.audioUnitCapture, pActionFlags, pTimeStamp, busNumber, frameCount, pRenderedBufferList); + if (status != noErr) { + #if defined(MA_DEBUG_OUTPUT) + printf(" ERROR: AudioUnitRender() failed with %d\n", status); + #endif + return status; + } + + if (layout == ma_stream_layout_interleaved) { + UInt32 iBuffer; + for (iBuffer = 0; iBuffer < pRenderedBufferList->mNumberBuffers; ++iBuffer) { + if (pRenderedBufferList->mBuffers[iBuffer].mNumberChannels == pDevice->capture.internalChannels) { + if (pDevice->type == ma_device_type_duplex) { + ma_device__handle_duplex_callback_capture(pDevice, frameCount, pRenderedBufferList->mBuffers[iBuffer].mData, &pDevice->coreaudio.duplexRB); + } else { + ma_device__send_frames_to_client(pDevice, frameCount, pRenderedBufferList->mBuffers[iBuffer].mData); + } + #if defined(MA_DEBUG_OUTPUT) + printf(" mDataByteSize=%d\n", pRenderedBufferList->mBuffers[iBuffer].mDataByteSize); + #endif + } else { + /* + This case is where the number of channels in the output buffer do not match our internal channels. It could mean that it's + not interleaved, in which case we can't handle right now since miniaudio does not yet support non-interleaved streams. + */ + ma_uint8 silentBuffer[4096]; + ma_uint32 framesRemaining; + + MA_ZERO_MEMORY(silentBuffer, sizeof(silentBuffer)); + + framesRemaining = frameCount; + while (framesRemaining > 0) { + ma_uint32 framesToSend = sizeof(silentBuffer) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + if (framesToSend > framesRemaining) { + framesToSend = framesRemaining; + } + + if (pDevice->type == ma_device_type_duplex) { + ma_device__handle_duplex_callback_capture(pDevice, framesToSend, silentBuffer, &pDevice->coreaudio.duplexRB); + } else { + ma_device__send_frames_to_client(pDevice, framesToSend, silentBuffer); + } + + framesRemaining -= framesToSend; + } + + #if defined(MA_DEBUG_OUTPUT) + printf(" WARNING: Outputting silence. frameCount=%d, mNumberChannels=%d, mDataByteSize=%d\n", frameCount, pRenderedBufferList->mBuffers[iBuffer].mNumberChannels, pRenderedBufferList->mBuffers[iBuffer].mDataByteSize); + #endif + } + } + } else { + /* This is the deinterleaved case. We need to interleave the audio data before sending it to the client. This assumes each buffer is the same size. */ + + /* + For safety we'll check that the internal channels is a multiple of the buffer count. If it's not it means something + very strange has happened and we're not going to support it. 
+ */ + if ((pRenderedBufferList->mNumberBuffers % pDevice->capture.internalChannels) == 0) { + ma_uint8 tempBuffer[4096]; + UInt32 iBuffer; + for (iBuffer = 0; iBuffer < pRenderedBufferList->mNumberBuffers; iBuffer += pDevice->capture.internalChannels) { + ma_uint32 framesRemaining = frameCount; + while (framesRemaining > 0) { + void* ppDeinterleavedBuffers[MA_MAX_CHANNELS]; + ma_uint32 iChannel; + ma_uint32 framesToSend = sizeof(tempBuffer) / ma_get_bytes_per_sample(pDevice->capture.internalFormat); + if (framesToSend > framesRemaining) { + framesToSend = framesRemaining; + } + + for (iChannel = 0; iChannel < pDevice->capture.internalChannels; ++iChannel) { + ppDeinterleavedBuffers[iChannel] = (void*)ma_offset_ptr(pRenderedBufferList->mBuffers[iBuffer+iChannel].mData, (frameCount - framesRemaining) * ma_get_bytes_per_sample(pDevice->capture.internalFormat)); + } + + ma_interleave_pcm_frames(pDevice->capture.internalFormat, pDevice->capture.internalChannels, framesToSend, (const void**)ppDeinterleavedBuffers, tempBuffer); + + if (pDevice->type == ma_device_type_duplex) { + ma_device__handle_duplex_callback_capture(pDevice, framesToSend, tempBuffer, &pDevice->coreaudio.duplexRB); + } else { + ma_device__send_frames_to_client(pDevice, framesToSend, tempBuffer); + } + + framesRemaining -= framesToSend; + } + } + } + } + + (void)pActionFlags; + (void)pTimeStamp; + (void)busNumber; + (void)frameCount; + (void)pUnusedBufferList; + + return noErr; +} + +static void on_start_stop__coreaudio(void* pUserData, AudioUnit audioUnit, AudioUnitPropertyID propertyID, AudioUnitScope scope, AudioUnitElement element) +{ + ma_device* pDevice = (ma_device*)pUserData; + MA_ASSERT(pDevice != NULL); + + /* + There's been a report of a deadlock here when triggered by ma_device_uninit(). It looks like + AudioUnitGetProprty (called below) and AudioComponentInstanceDispose (called in ma_device_uninit) + can try waiting on the same lock. I'm going to try working around this by not calling any Core + Audio APIs in the callback when the device has been stopped or uninitialized. + */ + if (ma_device__get_state(pDevice) == MA_STATE_UNINITIALIZED || ma_device__get_state(pDevice) == MA_STATE_STOPPING || ma_device__get_state(pDevice) == MA_STATE_STOPPED) { + ma_stop_proc onStop = pDevice->onStop; + if (onStop) { + onStop(pDevice); + } + + ma_event_signal(&pDevice->coreaudio.stopEvent); + } else { + UInt32 isRunning; + UInt32 isRunningSize = sizeof(isRunning); + OSStatus status = ((ma_AudioUnitGetProperty_proc)pDevice->pContext->coreaudio.AudioUnitGetProperty)(audioUnit, kAudioOutputUnitProperty_IsRunning, scope, element, &isRunning, &isRunningSize); + if (status != noErr) { + return; /* Don't really know what to do in this case... just ignore it, I suppose... */ + } + + if (!isRunning) { + ma_stop_proc onStop; + + /* + The stop event is a bit annoying in Core Audio because it will be called when we automatically switch the default device. Some scenarios to consider: + + 1) When the device is unplugged, this will be called _before_ the default device change notification. + 2) When the device is changed via the default device change notification, this will be called _after_ the switch. + + For case #1, we just check if there's a new default device available. If so, we just ignore the stop event. For case #2 we check a flag. 
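+
+ (Roughly, the flag protocol looks like this; a simplified sketch of what ma_default_device_changed__coreaudio()
+ further below already does for the playback side, not additional behaviour:
+
+     pDevice->coreaudio.isSwitchingPlaybackDevice = MA_TRUE;
+     ma_device_reinit_internal__coreaudio(pDevice, ma_device_type_playback, MA_TRUE);
+     pDevice->coreaudio.isSwitchingPlaybackDevice = MA_FALSE;
+
+ so when this callback fires in the middle of such a switch it sees the flag and returns without posting onStop.)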
+ */ + if (((audioUnit == pDevice->coreaudio.audioUnitPlayback) && pDevice->coreaudio.isDefaultPlaybackDevice) || + ((audioUnit == pDevice->coreaudio.audioUnitCapture) && pDevice->coreaudio.isDefaultCaptureDevice)) { + /* + It looks like the device is switching through an external event, such as the user unplugging the device or changing the default device + via the operating system's sound settings. If we're re-initializing the device, we just terminate because we want the stopping of the + device to be seamless to the client (we don't want them receiving the onStop event and thinking that the device has stopped when it + hasn't!). + */ + if (((audioUnit == pDevice->coreaudio.audioUnitPlayback) && pDevice->coreaudio.isSwitchingPlaybackDevice) || + ((audioUnit == pDevice->coreaudio.audioUnitCapture) && pDevice->coreaudio.isSwitchingCaptureDevice)) { + return; + } + + /* + Getting here means the device is not reinitializing which means it may have been unplugged. From what I can see, it looks like Core Audio + will try switching to the new default device seamlessly. We need to somehow find a way to determine whether or not Core Audio will most + likely be successful in switching to the new device. + + TODO: Try to predict if Core Audio will switch devices. If not, the onStop callback needs to be posted. + */ + return; + } + + /* Getting here means we need to stop the device. */ + onStop = pDevice->onStop; + if (onStop) { + onStop(pDevice); + } + } + } + + (void)propertyID; /* Unused. */ +} + +#if defined(MA_APPLE_DESKTOP) +static ma_uint32 g_DeviceTrackingInitCounter_CoreAudio = 0; +static ma_mutex g_DeviceTrackingMutex_CoreAudio; +static ma_device** g_ppTrackedDevices_CoreAudio = NULL; +static ma_uint32 g_TrackedDeviceCap_CoreAudio = 0; +static ma_uint32 g_TrackedDeviceCount_CoreAudio = 0; + +static OSStatus ma_default_device_changed__coreaudio(AudioObjectID objectID, UInt32 addressCount, const AudioObjectPropertyAddress* pAddresses, void* pUserData) +{ + ma_device_type deviceType; + + /* Not sure if I really need to check this, but it makes me feel better. */ + if (addressCount == 0) { + return noErr; + } + + if (pAddresses[0].mSelector == kAudioHardwarePropertyDefaultOutputDevice) { + deviceType = ma_device_type_playback; + } else if (pAddresses[0].mSelector == kAudioHardwarePropertyDefaultInputDevice) { + deviceType = ma_device_type_capture; + } else { + return noErr; /* Should never hit this. */ + } + + ma_mutex_lock(&g_DeviceTrackingMutex_CoreAudio); + { + ma_uint32 iDevice; + for (iDevice = 0; iDevice < g_TrackedDeviceCount_CoreAudio; iDevice += 1) { + ma_result reinitResult; + ma_device* pDevice; + + pDevice = g_ppTrackedDevices_CoreAudio[iDevice]; + if (pDevice->type == deviceType || pDevice->type == ma_device_type_duplex) { + if (deviceType == ma_device_type_playback) { + pDevice->coreaudio.isSwitchingPlaybackDevice = MA_TRUE; + reinitResult = ma_device_reinit_internal__coreaudio(pDevice, deviceType, MA_TRUE); + pDevice->coreaudio.isSwitchingPlaybackDevice = MA_FALSE; + } else { + pDevice->coreaudio.isSwitchingCaptureDevice = MA_TRUE; + reinitResult = ma_device_reinit_internal__coreaudio(pDevice, deviceType, MA_TRUE); + pDevice->coreaudio.isSwitchingCaptureDevice = MA_FALSE; + } + + if (reinitResult == MA_SUCCESS) { + ma_device__post_init_setup(pDevice, deviceType); + + /* Restart the device if required. If this fails we need to stop the device entirely. 
*/ + if (ma_device__get_state(pDevice) == MA_STATE_STARTED) { + OSStatus status; + if (deviceType == ma_device_type_playback) { + status = ((ma_AudioOutputUnitStart_proc)pDevice->pContext->coreaudio.AudioOutputUnitStart)((AudioUnit)pDevice->coreaudio.audioUnitPlayback); + if (status != noErr) { + if (pDevice->type == ma_device_type_duplex) { + ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitCapture); + } + ma_device__set_state(pDevice, MA_STATE_STOPPED); + } + } else if (deviceType == ma_device_type_capture) { + status = ((ma_AudioOutputUnitStart_proc)pDevice->pContext->coreaudio.AudioOutputUnitStart)((AudioUnit)pDevice->coreaudio.audioUnitCapture); + if (status != noErr) { + if (pDevice->type == ma_device_type_duplex) { + ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitPlayback); + } + ma_device__set_state(pDevice, MA_STATE_STOPPED); + } + } + } + } + } + } + } + ma_mutex_unlock(&g_DeviceTrackingMutex_CoreAudio); + + /* Unused parameters. */ + (void)objectID; + (void)pUserData; + + return noErr; +} + +static ma_result ma_context__init_device_tracking__coreaudio(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + + if (ma_atomic_increment_32(&g_DeviceTrackingInitCounter_CoreAudio) == 1) { + AudioObjectPropertyAddress propAddress; + propAddress.mScope = kAudioObjectPropertyScopeGlobal; + propAddress.mElement = kAudioObjectPropertyElementMaster; + + ma_mutex_init(pContext, &g_DeviceTrackingMutex_CoreAudio); + + propAddress.mSelector = kAudioHardwarePropertyDefaultInputDevice; + ((ma_AudioObjectAddPropertyListener_proc)pContext->coreaudio.AudioObjectAddPropertyListener)(kAudioObjectSystemObject, &propAddress, &ma_default_device_changed__coreaudio, NULL); + + propAddress.mSelector = kAudioHardwarePropertyDefaultOutputDevice; + ((ma_AudioObjectAddPropertyListener_proc)pContext->coreaudio.AudioObjectAddPropertyListener)(kAudioObjectSystemObject, &propAddress, &ma_default_device_changed__coreaudio, NULL); + } + + return MA_SUCCESS; +} + +static ma_result ma_context__uninit_device_tracking__coreaudio(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + + if (ma_atomic_decrement_32(&g_DeviceTrackingInitCounter_CoreAudio) == 0) { + AudioObjectPropertyAddress propAddress; + propAddress.mScope = kAudioObjectPropertyScopeGlobal; + propAddress.mElement = kAudioObjectPropertyElementMaster; + + propAddress.mSelector = kAudioHardwarePropertyDefaultInputDevice; + ((ma_AudioObjectRemovePropertyListener_proc)pContext->coreaudio.AudioObjectRemovePropertyListener)(kAudioObjectSystemObject, &propAddress, &ma_default_device_changed__coreaudio, NULL); + + propAddress.mSelector = kAudioHardwarePropertyDefaultOutputDevice; + ((ma_AudioObjectRemovePropertyListener_proc)pContext->coreaudio.AudioObjectRemovePropertyListener)(kAudioObjectSystemObject, &propAddress, &ma_default_device_changed__coreaudio, NULL); + + /* At this point there should be no tracked devices. If so there's an error somewhere. 
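+
+ (Worked example with hypothetical counts: if two default-device-backed devices are initialised, the first
+ ma_context__init_device_tracking__coreaudio() call sees the counter go 0 -> 1 and registers the listeners and
+ the mutex, while the second only bumps it to 2. Uninitialisation mirrors that: 2 -> 1 does nothing here, and only
+ the final 1 -> 0 transition removes the listeners, checks the assertions below and uninitialises the mutex.)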
*/ + MA_ASSERT(g_ppTrackedDevices_CoreAudio == NULL); + MA_ASSERT(g_TrackedDeviceCount_CoreAudio == 0); + + ma_mutex_uninit(&g_DeviceTrackingMutex_CoreAudio); + } + + return MA_SUCCESS; +} + +static ma_result ma_device__track__coreaudio(ma_device* pDevice) +{ + ma_result result; + + MA_ASSERT(pDevice != NULL); + + result = ma_context__init_device_tracking__coreaudio(pDevice->pContext); + if (result != MA_SUCCESS) { + return result; + } + + ma_mutex_lock(&g_DeviceTrackingMutex_CoreAudio); + { + /* Allocate memory if required. */ + if (g_TrackedDeviceCap_CoreAudio <= g_TrackedDeviceCount_CoreAudio) { + ma_uint32 oldCap; + ma_uint32 newCap; + ma_device** ppNewDevices; + + oldCap = g_TrackedDeviceCap_CoreAudio; + newCap = g_TrackedDeviceCap_CoreAudio * 2; + if (newCap == 0) { + newCap = 1; + } + + ppNewDevices = (ma_device**)ma__realloc_from_callbacks(g_ppTrackedDevices_CoreAudio, sizeof(*g_ppTrackedDevices_CoreAudio)*newCap, sizeof(*g_ppTrackedDevices_CoreAudio)*oldCap, &pDevice->pContext->allocationCallbacks); + if (ppNewDevices == NULL) { + ma_mutex_unlock(&g_DeviceTrackingMutex_CoreAudio); + return MA_OUT_OF_MEMORY; + } + + g_ppTrackedDevices_CoreAudio = ppNewDevices; + g_TrackedDeviceCap_CoreAudio = newCap; + } + + g_ppTrackedDevices_CoreAudio[g_TrackedDeviceCount_CoreAudio] = pDevice; + g_TrackedDeviceCount_CoreAudio += 1; + } + ma_mutex_unlock(&g_DeviceTrackingMutex_CoreAudio); + + return MA_SUCCESS; +} + +static ma_result ma_device__untrack__coreaudio(ma_device* pDevice) +{ + ma_result result; + + MA_ASSERT(pDevice != NULL); + + ma_mutex_lock(&g_DeviceTrackingMutex_CoreAudio); + { + ma_uint32 iDevice; + for (iDevice = 0; iDevice < g_TrackedDeviceCount_CoreAudio; iDevice += 1) { + if (g_ppTrackedDevices_CoreAudio[iDevice] == pDevice) { + /* We've found the device. We now need to remove it from the list. */ + ma_uint32 jDevice; + for (jDevice = iDevice; jDevice < g_TrackedDeviceCount_CoreAudio-1; jDevice += 1) { + g_ppTrackedDevices_CoreAudio[jDevice] = g_ppTrackedDevices_CoreAudio[jDevice+1]; + } + + g_TrackedDeviceCount_CoreAudio -= 1; + + /* If there's nothing else in the list we need to free memory. 
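+
+ (Illustrative sequence with hypothetical devices A, B and C: tracking them grows the array capacity 0 -> 1 -> 2 -> 4
+ via the doubling realloc above; untracking B shifts C down so the list stays packed as [A, C] with the count at 2;
+ only when the last device is untracked does the branch below free the array and reset the capacity to 0.)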
*/ + if (g_TrackedDeviceCount_CoreAudio == 0) { + ma__free_from_callbacks(g_ppTrackedDevices_CoreAudio, &pDevice->pContext->allocationCallbacks); + g_ppTrackedDevices_CoreAudio = NULL; + g_TrackedDeviceCap_CoreAudio = 0; + } + + break; + } + } + } + ma_mutex_unlock(&g_DeviceTrackingMutex_CoreAudio); + + result = ma_context__uninit_device_tracking__coreaudio(pDevice->pContext); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} +#endif + +#if defined(MA_APPLE_MOBILE) +@interface ma_router_change_handler:NSObject { + ma_device* m_pDevice; +} +@end + +@implementation ma_router_change_handler +-(id)init:(ma_device*)pDevice +{ + self = [super init]; + m_pDevice = pDevice; + + [[NSNotificationCenter defaultCenter] addObserver:self selector:@selector(handle_route_change:) name:AVAudioSessionRouteChangeNotification object:[AVAudioSession sharedInstance]]; + + return self; +} + +-(void)dealloc +{ + [self remove_handler]; +} + +-(void)remove_handler +{ + [[NSNotificationCenter defaultCenter] removeObserver:self name:@"AVAudioSessionRouteChangeNotification" object:nil]; +} + +-(void)handle_route_change:(NSNotification*)pNotification +{ + AVAudioSession* pSession = [AVAudioSession sharedInstance]; + + NSInteger reason = [[[pNotification userInfo] objectForKey:AVAudioSessionRouteChangeReasonKey] integerValue]; + switch (reason) + { + case AVAudioSessionRouteChangeReasonOldDeviceUnavailable: + { + #if defined(MA_DEBUG_OUTPUT) + printf("[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonOldDeviceUnavailable\n"); + #endif + } break; + + case AVAudioSessionRouteChangeReasonNewDeviceAvailable: + { + #if defined(MA_DEBUG_OUTPUT) + printf("[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonNewDeviceAvailable\n"); + #endif + } break; + + case AVAudioSessionRouteChangeReasonNoSuitableRouteForCategory: + { + #if defined(MA_DEBUG_OUTPUT) + printf("[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonNoSuitableRouteForCategory\n"); + #endif + } break; + + case AVAudioSessionRouteChangeReasonWakeFromSleep: + { + #if defined(MA_DEBUG_OUTPUT) + printf("[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonWakeFromSleep\n"); + #endif + } break; + + case AVAudioSessionRouteChangeReasonOverride: + { + #if defined(MA_DEBUG_OUTPUT) + printf("[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonOverride\n"); + #endif + } break; + + case AVAudioSessionRouteChangeReasonCategoryChange: + { + #if defined(MA_DEBUG_OUTPUT) + printf("[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonCategoryChange\n"); + #endif + } break; + + case AVAudioSessionRouteChangeReasonUnknown: + default: + { + #if defined(MA_DEBUG_OUTPUT) + printf("[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonUnknown\n"); + #endif + } break; + } + + m_pDevice->sampleRate = (ma_uint32)pSession.sampleRate; + + if (m_pDevice->type == ma_device_type_capture || m_pDevice->type == ma_device_type_duplex) { + m_pDevice->capture.channels = (ma_uint32)pSession.inputNumberOfChannels; + ma_device__post_init_setup(m_pDevice, ma_device_type_capture); + } + if (m_pDevice->type == ma_device_type_playback || m_pDevice->type == ma_device_type_duplex) { + m_pDevice->playback.channels = (ma_uint32)pSession.outputNumberOfChannels; + ma_device__post_init_setup(m_pDevice, ma_device_type_playback); + } +} +@end +#endif + +static void ma_device_uninit__coreaudio(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + MA_ASSERT(ma_device__get_state(pDevice) == MA_STATE_UNINITIALIZED); + +#if 
defined(MA_APPLE_DESKTOP) + /* + Make sure we're no longer tracking the device. It doesn't matter if we call this for a non-default device because it'll + just gracefully ignore it. + */ + ma_device__untrack__coreaudio(pDevice); +#endif +#if defined(MA_APPLE_MOBILE) + if (pDevice->coreaudio.pRouteChangeHandler != NULL) { + ma_router_change_handler* pRouteChangeHandler = (__bridge_transfer ma_router_change_handler*)pDevice->coreaudio.pRouteChangeHandler; + [pRouteChangeHandler remove_handler]; + } +#endif + + if (pDevice->coreaudio.audioUnitCapture != NULL) { + ((ma_AudioComponentInstanceDispose_proc)pDevice->pContext->coreaudio.AudioComponentInstanceDispose)((AudioUnit)pDevice->coreaudio.audioUnitCapture); + } + if (pDevice->coreaudio.audioUnitPlayback != NULL) { + ((ma_AudioComponentInstanceDispose_proc)pDevice->pContext->coreaudio.AudioComponentInstanceDispose)((AudioUnit)pDevice->coreaudio.audioUnitPlayback); + } + + if (pDevice->coreaudio.pAudioBufferList) { + ma__free_from_callbacks(pDevice->coreaudio.pAudioBufferList, &pDevice->pContext->allocationCallbacks); + } + + if (pDevice->type == ma_device_type_duplex) { + ma_pcm_rb_uninit(&pDevice->coreaudio.duplexRB); + } +} + +typedef struct +{ + /* Input. */ + ma_format formatIn; + ma_uint32 channelsIn; + ma_uint32 sampleRateIn; + ma_channel channelMapIn[MA_MAX_CHANNELS]; + ma_uint32 periodSizeInFramesIn; + ma_uint32 periodSizeInMillisecondsIn; + ma_uint32 periodsIn; + ma_bool32 usingDefaultFormat; + ma_bool32 usingDefaultChannels; + ma_bool32 usingDefaultSampleRate; + ma_bool32 usingDefaultChannelMap; + ma_share_mode shareMode; + ma_bool32 registerStopEvent; + + /* Output. */ +#if defined(MA_APPLE_DESKTOP) + AudioObjectID deviceObjectID; +#endif + AudioComponent component; + AudioUnit audioUnit; + AudioBufferList* pAudioBufferList; /* Only used for input devices. */ + ma_format formatOut; + ma_uint32 channelsOut; + ma_uint32 sampleRateOut; + ma_channel channelMapOut[MA_MAX_CHANNELS]; + ma_uint32 periodSizeInFramesOut; + ma_uint32 periodsOut; + char deviceName[256]; +} ma_device_init_internal_data__coreaudio; + +static ma_result ma_device_init_internal__coreaudio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_init_internal_data__coreaudio* pData, void* pDevice_DoNotReference) /* <-- pDevice is typed as void* intentionally so as to avoid accidentally referencing it. */ +{ + ma_result result; + OSStatus status; + UInt32 enableIOFlag; + AudioStreamBasicDescription bestFormat; + ma_uint32 actualPeriodSizeInFrames; + AURenderCallbackStruct callbackInfo; +#if defined(MA_APPLE_DESKTOP) + AudioObjectID deviceObjectID; +#endif + + /* This API should only be used for a single device type: playback or capture. No full-duplex mode. */ + if (deviceType == ma_device_type_duplex) { + return MA_INVALID_ARGS; + } + + MA_ASSERT(pContext != NULL); + MA_ASSERT(deviceType == ma_device_type_playback || deviceType == ma_device_type_capture); + +#if defined(MA_APPLE_DESKTOP) + pData->deviceObjectID = 0; +#endif + pData->component = NULL; + pData->audioUnit = NULL; + pData->pAudioBufferList = NULL; + +#if defined(MA_APPLE_DESKTOP) + result = ma_find_AudioObjectID(pContext, deviceType, pDeviceID, &deviceObjectID); + if (result != MA_SUCCESS) { + return result; + } + + pData->deviceObjectID = deviceObjectID; +#endif + + /* Core audio doesn't really use the notion of a period so we can leave this unmodified, but not too over the top. 
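+
+ (For example, with hypothetical inputs: periodsIn == 0 falls back to MA_DEFAULT_PERIODS, periodsIn == 4 is kept
+ as-is, and a request for 32 periods is clamped down to 16 by the code below.)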
*/
+ pData->periodsOut = pData->periodsIn;
+ if (pData->periodsOut == 0) {
+ pData->periodsOut = MA_DEFAULT_PERIODS;
+ }
+ if (pData->periodsOut > 16) {
+ pData->periodsOut = 16;
+ }
+
+
+ /* Audio unit. */
+ status = ((ma_AudioComponentInstanceNew_proc)pContext->coreaudio.AudioComponentInstanceNew)((AudioComponent)pContext->coreaudio.component, (AudioUnit*)&pData->audioUnit);
+ if (status != noErr) {
+ return ma_result_from_OSStatus(status);
+ }
+
+
+ /* The input/output buses need to be explicitly enabled and disabled. We set the flag based on the output unit first, then we just swap it for input. */
+ enableIOFlag = 1;
+ if (deviceType == ma_device_type_capture) {
+ enableIOFlag = 0;
+ }
+
+ status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, MA_COREAUDIO_OUTPUT_BUS, &enableIOFlag, sizeof(enableIOFlag));
+ if (status != noErr) {
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+ return ma_result_from_OSStatus(status);
+ }
+
+ enableIOFlag = (enableIOFlag == 0) ? 1 : 0;
+ status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, MA_COREAUDIO_INPUT_BUS, &enableIOFlag, sizeof(enableIOFlag));
+ if (status != noErr) {
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+ return ma_result_from_OSStatus(status);
+ }
+
+
+ /* Set the device to use with this audio unit. This is only used on desktop since we are using defaults on mobile. */
+#if defined(MA_APPLE_DESKTOP)
+ status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, (deviceType == ma_device_type_playback) ? MA_COREAUDIO_OUTPUT_BUS : MA_COREAUDIO_INPUT_BUS, &deviceObjectID, sizeof(AudioDeviceID));
+ if (status != noErr) {
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+ return ma_result_from_OSStatus(status);
+ }
+#endif
+
+ /*
+ Format. This is the hardest part of initialization because there are a few variables to take into account.
+ 1) The format must be supported by the device.
+ 2) The format must be supported by miniaudio.
+ 3) There's a priority that miniaudio prefers.
+
+ Ideally we would like to use a format that's as close to the hardware as possible so we can get as close to a passthrough as possible. The
+ most important property is the sample rate. miniaudio can do format conversion for any sample rate and channel count, but cannot do the same
+ for the sample data format. If the sample data format is not supported by miniaudio it must be ignored completely.
+
+ On mobile platforms this is a bit different. We just force the use of whatever the audio unit's current format is set to.
+ */
+ {
+ AudioUnitScope formatScope = (deviceType == ma_device_type_playback) ? kAudioUnitScope_Input : kAudioUnitScope_Output;
+ AudioUnitElement formatElement = (deviceType == ma_device_type_playback) ? MA_COREAUDIO_OUTPUT_BUS : MA_COREAUDIO_INPUT_BUS;
+
+ #if defined(MA_APPLE_DESKTOP)
+ AudioStreamBasicDescription origFormat;
+ UInt32 origFormatSize;
+
+ result = ma_find_best_format__coreaudio(pContext, deviceObjectID, deviceType, pData->formatIn, pData->channelsIn, pData->sampleRateIn, pData->usingDefaultFormat, pData->usingDefaultChannels, pData->usingDefaultSampleRate, &bestFormat);
+ if (result != MA_SUCCESS) {
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+ return result;
+ }
+
+ /* From what I can see, Apple's documentation implies that we should keep the sample rate consistent. */
+ origFormatSize = sizeof(origFormat);
+ if (deviceType == ma_device_type_playback) {
+ status = ((ma_AudioUnitGetProperty_proc)pContext->coreaudio.AudioUnitGetProperty)(pData->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, MA_COREAUDIO_OUTPUT_BUS, &origFormat, &origFormatSize);
+ } else {
+ status = ((ma_AudioUnitGetProperty_proc)pContext->coreaudio.AudioUnitGetProperty)(pData->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, MA_COREAUDIO_INPUT_BUS, &origFormat, &origFormatSize);
+ }
+
+ if (status != noErr) {
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+ return ma_result_from_OSStatus(status);
+ }
+
+ bestFormat.mSampleRate = origFormat.mSampleRate;
+
+ status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioUnitProperty_StreamFormat, formatScope, formatElement, &bestFormat, sizeof(bestFormat));
+ if (status != noErr) {
+ /* We failed to set the format, so fall back to the current format of the audio unit. */
+ bestFormat = origFormat;
+ }
+ #else
+ UInt32 propSize = sizeof(bestFormat);
+ status = ((ma_AudioUnitGetProperty_proc)pContext->coreaudio.AudioUnitGetProperty)(pData->audioUnit, kAudioUnitProperty_StreamFormat, formatScope, formatElement, &bestFormat, &propSize);
+ if (status != noErr) {
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+ return ma_result_from_OSStatus(status);
+ }
+
+ /*
+ Sample rate is a little different here because for some reason kAudioUnitProperty_StreamFormat returns 0... Oh well. We need to instead try
+ setting the sample rate to what the user has requested and then just see the results of it. Need to use some Objective-C here for this since
+ it depends on Apple's AVAudioSession API. To do this we just get the shared AVAudioSession instance and then set it. Note that from what I
+ can tell, it looks like the sample rate is shared between playback and capture for everything.
+ */
+ @autoreleasepool {
+ AVAudioSession* pAudioSession = [AVAudioSession sharedInstance];
+ MA_ASSERT(pAudioSession != NULL);
+
+ [pAudioSession setPreferredSampleRate:(double)pData->sampleRateIn error:nil];
+ bestFormat.mSampleRate = pAudioSession.sampleRate;
+
+ /*
+ I've had a report that the channel count returned by AudioUnitGetProperty above is inconsistent with
+ AVAudioSession outputNumberOfChannels. I'm going to try using the AVAudioSession values instead.
+ */ + if (deviceType == ma_device_type_playback) { + bestFormat.mChannelsPerFrame = (UInt32)pAudioSession.outputNumberOfChannels; + } + if (deviceType == ma_device_type_capture) { + bestFormat.mChannelsPerFrame = (UInt32)pAudioSession.inputNumberOfChannels; + } + } + + status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioUnitProperty_StreamFormat, formatScope, formatElement, &bestFormat, sizeof(bestFormat)); + if (status != noErr) { + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit); + return ma_result_from_OSStatus(status); + } + #endif + + result = ma_format_from_AudioStreamBasicDescription(&bestFormat, &pData->formatOut); + if (result != MA_SUCCESS) { + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit); + return result; + } + + if (pData->formatOut == ma_format_unknown) { + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit); + return MA_FORMAT_NOT_SUPPORTED; + } + + pData->channelsOut = bestFormat.mChannelsPerFrame; + pData->sampleRateOut = bestFormat.mSampleRate; + } + + /* + Internal channel map. This is weird in my testing. If I use the AudioObject to get the + channel map, the channel descriptions are set to "Unknown" for some reason. To work around + this it looks like retrieving it from the AudioUnit will work. However, and this is where + it gets weird, it doesn't seem to work with capture devices, nor at all on iOS... Therefore + I'm going to fall back to a default assumption in these cases. + */ +#if defined(MA_APPLE_DESKTOP) + result = ma_get_AudioUnit_channel_map(pContext, pData->audioUnit, deviceType, pData->channelMapOut); + if (result != MA_SUCCESS) { + #if 0 + /* Try falling back to the channel map from the AudioObject. */ + result = ma_get_AudioObject_channel_map(pContext, deviceObjectID, deviceType, pData->channelMapOut); + if (result != MA_SUCCESS) { + return result; + } + #else + /* Fall back to default assumptions. */ + ma_get_standard_channel_map(ma_standard_channel_map_default, pData->channelsOut, pData->channelMapOut); + #endif + } +#else + /* TODO: Figure out how to get the channel map using AVAudioSession. */ + ma_get_standard_channel_map(ma_standard_channel_map_default, pData->channelsOut, pData->channelMapOut); +#endif + + + /* Buffer size. Not allowing this to be configurable on iOS. */ + actualPeriodSizeInFrames = pData->periodSizeInFramesIn; + +#if defined(MA_APPLE_DESKTOP) + if (actualPeriodSizeInFrames == 0) { + actualPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pData->periodSizeInMillisecondsIn, pData->sampleRateOut); + } + + result = ma_set_AudioObject_buffer_size_in_frames(pContext, deviceObjectID, deviceType, &actualPeriodSizeInFrames); + if (result != MA_SUCCESS) { + return result; + } + + pData->periodSizeInFramesOut = actualPeriodSizeInFrames; +#else + actualPeriodSizeInFrames = 2048; + pData->periodSizeInFramesOut = actualPeriodSizeInFrames; +#endif + + + /* + During testing I discovered that the buffer size can be too big. You'll get an error like this: + + kAudioUnitErr_TooManyFramesToProcess : inFramesToProcess=4096, mMaxFramesPerSlice=512 + + Note how inFramesToProcess is smaller than mMaxFramesPerSlice. 
To fix, we need to set kAudioUnitProperty_MaximumFramesPerSlice to that + of the size of our buffer, or do it the other way around and set our buffer size to the kAudioUnitProperty_MaximumFramesPerSlice. + */ + { + /*AudioUnitScope propScope = (deviceType == ma_device_type_playback) ? kAudioUnitScope_Input : kAudioUnitScope_Output; + AudioUnitElement propBus = (deviceType == ma_device_type_playback) ? MA_COREAUDIO_OUTPUT_BUS : MA_COREAUDIO_INPUT_BUS; + + status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, propScope, propBus, &actualBufferSizeInFrames, sizeof(actualBufferSizeInFrames)); + if (status != noErr) { + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit); + return ma_result_from_OSStatus(status); + }*/ + + status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &actualPeriodSizeInFrames, sizeof(actualPeriodSizeInFrames)); + if (status != noErr) { + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit); + return ma_result_from_OSStatus(status); + } + } + + /* We need a buffer list if this is an input device. We render into this in the input callback. */ + if (deviceType == ma_device_type_capture) { + ma_bool32 isInterleaved = (bestFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved) == 0; + size_t allocationSize; + AudioBufferList* pBufferList; + + allocationSize = sizeof(AudioBufferList) - sizeof(AudioBuffer); /* Subtract sizeof(AudioBuffer) because that part is dynamically sized. */ + if (isInterleaved) { + /* Interleaved case. This is the simple case because we just have one buffer. */ + allocationSize += sizeof(AudioBuffer) * 1; + allocationSize += actualPeriodSizeInFrames * ma_get_bytes_per_frame(pData->formatOut, pData->channelsOut); + } else { + /* Non-interleaved case. This is the more complex case because there's more than one buffer. */ + allocationSize += sizeof(AudioBuffer) * pData->channelsOut; + allocationSize += actualPeriodSizeInFrames * ma_get_bytes_per_sample(pData->formatOut) * pData->channelsOut; + } + + pBufferList = (AudioBufferList*)ma__malloc_from_callbacks(allocationSize, &pContext->allocationCallbacks); + if (pBufferList == NULL) { + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit); + return MA_OUT_OF_MEMORY; + } + + if (isInterleaved) { + pBufferList->mNumberBuffers = 1; + pBufferList->mBuffers[0].mNumberChannels = pData->channelsOut; + pBufferList->mBuffers[0].mDataByteSize = actualPeriodSizeInFrames * ma_get_bytes_per_frame(pData->formatOut, pData->channelsOut); + pBufferList->mBuffers[0].mData = (ma_uint8*)pBufferList + sizeof(AudioBufferList); + } else { + ma_uint32 iBuffer; + pBufferList->mNumberBuffers = pData->channelsOut; + for (iBuffer = 0; iBuffer < pBufferList->mNumberBuffers; ++iBuffer) { + pBufferList->mBuffers[iBuffer].mNumberChannels = 1; + pBufferList->mBuffers[iBuffer].mDataByteSize = actualPeriodSizeInFrames * ma_get_bytes_per_sample(pData->formatOut); + pBufferList->mBuffers[iBuffer].mData = (ma_uint8*)pBufferList + ((sizeof(AudioBufferList) - sizeof(AudioBuffer)) + (sizeof(AudioBuffer) * pData->channelsOut)) + (actualPeriodSizeInFrames * ma_get_bytes_per_sample(pData->formatOut) * iBuffer); + } + } + + pData->pAudioBufferList = pBufferList; + } + + /* Callbacks. 
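+
+ (As an aside on the capture buffer sizing just above, a worked example with a hypothetical s16, 2 channel,
+ 2048-frame configuration: the interleaved case allocates the AudioBufferList header plus one AudioBuffer plus
+ 2048 * 4 = 8192 bytes of data; the non-interleaved case allocates the header plus two AudioBuffers plus
+ 2048 * 2 * 2 = 8192 bytes, laid out as two 4096-byte mono regions directly after the AudioBuffer array.)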
*/ + callbackInfo.inputProcRefCon = pDevice_DoNotReference; + if (deviceType == ma_device_type_playback) { + callbackInfo.inputProc = ma_on_output__coreaudio; + status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Global, MA_COREAUDIO_OUTPUT_BUS, &callbackInfo, sizeof(callbackInfo)); + if (status != noErr) { + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit); + return ma_result_from_OSStatus(status); + } + } else { + callbackInfo.inputProc = ma_on_input__coreaudio; + status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, MA_COREAUDIO_INPUT_BUS, &callbackInfo, sizeof(callbackInfo)); + if (status != noErr) { + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit); + return ma_result_from_OSStatus(status); + } + } + + /* We need to listen for stop events. */ + if (pData->registerStopEvent) { + status = ((ma_AudioUnitAddPropertyListener_proc)pContext->coreaudio.AudioUnitAddPropertyListener)(pData->audioUnit, kAudioOutputUnitProperty_IsRunning, on_start_stop__coreaudio, pDevice_DoNotReference); + if (status != noErr) { + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit); + return ma_result_from_OSStatus(status); + } + } + + /* Initialize the audio unit. */ + status = ((ma_AudioUnitInitialize_proc)pContext->coreaudio.AudioUnitInitialize)(pData->audioUnit); + if (status != noErr) { + ma__free_from_callbacks(pData->pAudioBufferList, &pContext->allocationCallbacks); + pData->pAudioBufferList = NULL; + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit); + return ma_result_from_OSStatus(status); + } + + /* Grab the name. */ +#if defined(MA_APPLE_DESKTOP) + ma_get_AudioObject_name(pContext, deviceObjectID, sizeof(pData->deviceName), pData->deviceName); +#else + if (deviceType == ma_device_type_playback) { + ma_strcpy_s(pData->deviceName, sizeof(pData->deviceName), MA_DEFAULT_PLAYBACK_DEVICE_NAME); + } else { + ma_strcpy_s(pData->deviceName, sizeof(pData->deviceName), MA_DEFAULT_CAPTURE_DEVICE_NAME); + } +#endif + + return result; +} + +#if defined(MA_APPLE_DESKTOP) +static ma_result ma_device_reinit_internal__coreaudio(ma_device* pDevice, ma_device_type deviceType, ma_bool32 disposePreviousAudioUnit) +{ + ma_device_init_internal_data__coreaudio data; + ma_result result; + + /* This should only be called for playback or capture, not duplex. 
*/ + if (deviceType == ma_device_type_duplex) { + return MA_INVALID_ARGS; + } + + if (deviceType == ma_device_type_capture) { + data.formatIn = pDevice->capture.format; + data.channelsIn = pDevice->capture.channels; + data.sampleRateIn = pDevice->sampleRate; + MA_COPY_MEMORY(data.channelMapIn, pDevice->capture.channelMap, sizeof(pDevice->capture.channelMap)); + data.usingDefaultFormat = pDevice->capture.usingDefaultFormat; + data.usingDefaultChannels = pDevice->capture.usingDefaultChannels; + data.usingDefaultSampleRate = pDevice->usingDefaultSampleRate; + data.usingDefaultChannelMap = pDevice->capture.usingDefaultChannelMap; + data.shareMode = pDevice->capture.shareMode; + data.registerStopEvent = MA_TRUE; + + if (disposePreviousAudioUnit) { + ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitCapture); + ((ma_AudioComponentInstanceDispose_proc)pDevice->pContext->coreaudio.AudioComponentInstanceDispose)((AudioUnit)pDevice->coreaudio.audioUnitCapture); + } + if (pDevice->coreaudio.pAudioBufferList) { + ma__free_from_callbacks(pDevice->coreaudio.pAudioBufferList, &pDevice->pContext->allocationCallbacks); + } + } else if (deviceType == ma_device_type_playback) { + data.formatIn = pDevice->playback.format; + data.channelsIn = pDevice->playback.channels; + data.sampleRateIn = pDevice->sampleRate; + MA_COPY_MEMORY(data.channelMapIn, pDevice->playback.channelMap, sizeof(pDevice->playback.channelMap)); + data.usingDefaultFormat = pDevice->playback.usingDefaultFormat; + data.usingDefaultChannels = pDevice->playback.usingDefaultChannels; + data.usingDefaultSampleRate = pDevice->usingDefaultSampleRate; + data.usingDefaultChannelMap = pDevice->playback.usingDefaultChannelMap; + data.shareMode = pDevice->playback.shareMode; + data.registerStopEvent = (pDevice->type != ma_device_type_duplex); + + if (disposePreviousAudioUnit) { + ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitPlayback); + ((ma_AudioComponentInstanceDispose_proc)pDevice->pContext->coreaudio.AudioComponentInstanceDispose)((AudioUnit)pDevice->coreaudio.audioUnitPlayback); + } + } + data.periodSizeInFramesIn = pDevice->coreaudio.originalPeriodSizeInFrames; + data.periodSizeInMillisecondsIn = pDevice->coreaudio.originalPeriodSizeInMilliseconds; + data.periodsIn = pDevice->coreaudio.originalPeriods; + + /* Need at least 3 periods for duplex. 
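+
+ (E.g., hypothetically, a device originally created with 2 periods that is running in duplex mode gets periodsIn
+ bumped to 3 by the check below, presumably to give the duplexRB ring buffer used by the duplex path some slack;
+ a playback-only or capture-only device keeps its original period count.)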
*/ + if (data.periodsIn < 3 && pDevice->type == ma_device_type_duplex) { + data.periodsIn = 3; + } + + result = ma_device_init_internal__coreaudio(pDevice->pContext, deviceType, NULL, &data, (void*)pDevice); + if (result != MA_SUCCESS) { + return result; + } + + if (deviceType == ma_device_type_capture) { + #if defined(MA_APPLE_DESKTOP) + pDevice->coreaudio.deviceObjectIDCapture = (ma_uint32)data.deviceObjectID; + #endif + pDevice->coreaudio.audioUnitCapture = (ma_ptr)data.audioUnit; + pDevice->coreaudio.pAudioBufferList = (ma_ptr)data.pAudioBufferList; + + pDevice->capture.internalFormat = data.formatOut; + pDevice->capture.internalChannels = data.channelsOut; + pDevice->capture.internalSampleRate = data.sampleRateOut; + MA_COPY_MEMORY(pDevice->capture.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut)); + pDevice->capture.internalPeriodSizeInFrames = data.periodSizeInFramesOut; + pDevice->capture.internalPeriods = data.periodsOut; + } else if (deviceType == ma_device_type_playback) { + #if defined(MA_APPLE_DESKTOP) + pDevice->coreaudio.deviceObjectIDPlayback = (ma_uint32)data.deviceObjectID; + #endif + pDevice->coreaudio.audioUnitPlayback = (ma_ptr)data.audioUnit; + + pDevice->playback.internalFormat = data.formatOut; + pDevice->playback.internalChannels = data.channelsOut; + pDevice->playback.internalSampleRate = data.sampleRateOut; + MA_COPY_MEMORY(pDevice->playback.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut)); + pDevice->playback.internalPeriodSizeInFrames = data.periodSizeInFramesOut; + pDevice->playback.internalPeriods = data.periodsOut; + } + + return MA_SUCCESS; +} +#endif /* MA_APPLE_DESKTOP */ + +static ma_result ma_device_init__coreaudio(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) +{ + ma_result result; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDevice != NULL); + + if (pConfig->deviceType == ma_device_type_loopback) { + return MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + /* No exclusive mode with the Core Audio backend for now. */ + if (((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.shareMode == ma_share_mode_exclusive) || + ((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.shareMode == ma_share_mode_exclusive)) { + return MA_SHARE_MODE_NOT_SUPPORTED; + } + + /* Capture needs to be initialized first. */ + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + ma_device_init_internal_data__coreaudio data; + data.formatIn = pConfig->capture.format; + data.channelsIn = pConfig->capture.channels; + data.sampleRateIn = pConfig->sampleRate; + MA_COPY_MEMORY(data.channelMapIn, pConfig->capture.channelMap, sizeof(pConfig->capture.channelMap)); + data.usingDefaultFormat = pDevice->capture.usingDefaultFormat; + data.usingDefaultChannels = pDevice->capture.usingDefaultChannels; + data.usingDefaultSampleRate = pDevice->usingDefaultSampleRate; + data.usingDefaultChannelMap = pDevice->capture.usingDefaultChannelMap; + data.shareMode = pConfig->capture.shareMode; + data.periodSizeInFramesIn = pConfig->periodSizeInFrames; + data.periodSizeInMillisecondsIn = pConfig->periodSizeInMilliseconds; + data.periodsIn = pConfig->periods; + data.registerStopEvent = MA_TRUE; + + /* Need at least 3 periods for duplex. 
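+        With fewer than 3 periods the intermediary duplex ring buffer created at the end of this
+        function (sized at periods * period size, with one whole period pre-filled with silence as
+        a cushion) is left with very little slack, so the requested count is bumped here. For
+        reference, a duplex request reaching this path might have been configured roughly as
+        follows. This is an illustrative sketch only: the values are not library defaults and
+        my_data_callback is a placeholder for the application's own callback, not something defined
+        in this file.
+
+            ma_device_config config = ma_device_config_init(ma_device_type_duplex);
+            config.capture.format  = ma_format_f32;
+            config.playback.format = ma_format_f32;
+            config.sampleRate      = 48000;
+            config.periods         = 2;               <-- bumped to 3 by the check below.
+            config.dataCallback    = my_data_callback;
+            ma_device_init(pContext, &config, pDevice);
+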
*/ + if (data.periodsIn < 3 && pConfig->deviceType == ma_device_type_duplex) { + data.periodsIn = 3; + } + + result = ma_device_init_internal__coreaudio(pDevice->pContext, ma_device_type_capture, pConfig->capture.pDeviceID, &data, (void*)pDevice); + if (result != MA_SUCCESS) { + return result; + } + + pDevice->coreaudio.isDefaultCaptureDevice = (pConfig->capture.pDeviceID == NULL); + #if defined(MA_APPLE_DESKTOP) + pDevice->coreaudio.deviceObjectIDCapture = (ma_uint32)data.deviceObjectID; + #endif + pDevice->coreaudio.audioUnitCapture = (ma_ptr)data.audioUnit; + pDevice->coreaudio.pAudioBufferList = (ma_ptr)data.pAudioBufferList; + + pDevice->capture.internalFormat = data.formatOut; + pDevice->capture.internalChannels = data.channelsOut; + pDevice->capture.internalSampleRate = data.sampleRateOut; + MA_COPY_MEMORY(pDevice->capture.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut)); + pDevice->capture.internalPeriodSizeInFrames = data.periodSizeInFramesOut; + pDevice->capture.internalPeriods = data.periodsOut; + + #if defined(MA_APPLE_DESKTOP) + /* + If we are using the default device we'll need to listen for changes to the system's default device so we can seemlessly + switch the device in the background. + */ + if (pConfig->capture.pDeviceID == NULL) { + ma_device__track__coreaudio(pDevice); + } + #endif + } + + /* Playback. */ + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + ma_device_init_internal_data__coreaudio data; + data.formatIn = pConfig->playback.format; + data.channelsIn = pConfig->playback.channels; + data.sampleRateIn = pConfig->sampleRate; + MA_COPY_MEMORY(data.channelMapIn, pConfig->playback.channelMap, sizeof(pConfig->playback.channelMap)); + data.usingDefaultFormat = pDevice->playback.usingDefaultFormat; + data.usingDefaultChannels = pDevice->playback.usingDefaultChannels; + data.usingDefaultSampleRate = pDevice->usingDefaultSampleRate; + data.usingDefaultChannelMap = pDevice->playback.usingDefaultChannelMap; + data.shareMode = pConfig->playback.shareMode; + + /* In full-duplex mode we want the playback buffer to be the same size as the capture buffer. 
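+        Concretely, the duplex branch below seeds the playback side with the capture side's
+        internal period size and period count rather than the values from pConfig, and it skips
+        registering a second stop event because the capture unit has already registered one.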
*/ + if (pConfig->deviceType == ma_device_type_duplex) { + data.periodSizeInFramesIn = pDevice->capture.internalPeriodSizeInFrames; + data.periodsIn = pDevice->capture.internalPeriods; + data.registerStopEvent = MA_FALSE; + } else { + data.periodSizeInFramesIn = pConfig->periodSizeInFrames; + data.periodSizeInMillisecondsIn = pConfig->periodSizeInMilliseconds; + data.periodsIn = pConfig->periods; + data.registerStopEvent = MA_TRUE; + } + + result = ma_device_init_internal__coreaudio(pDevice->pContext, ma_device_type_playback, pConfig->playback.pDeviceID, &data, (void*)pDevice); + if (result != MA_SUCCESS) { + if (pConfig->deviceType == ma_device_type_duplex) { + ((ma_AudioComponentInstanceDispose_proc)pDevice->pContext->coreaudio.AudioComponentInstanceDispose)((AudioUnit)pDevice->coreaudio.audioUnitCapture); + if (pDevice->coreaudio.pAudioBufferList) { + ma__free_from_callbacks(pDevice->coreaudio.pAudioBufferList, &pDevice->pContext->allocationCallbacks); + } + } + return result; + } + + pDevice->coreaudio.isDefaultPlaybackDevice = (pConfig->playback.pDeviceID == NULL); + #if defined(MA_APPLE_DESKTOP) + pDevice->coreaudio.deviceObjectIDPlayback = (ma_uint32)data.deviceObjectID; + #endif + pDevice->coreaudio.audioUnitPlayback = (ma_ptr)data.audioUnit; + + pDevice->playback.internalFormat = data.formatOut; + pDevice->playback.internalChannels = data.channelsOut; + pDevice->playback.internalSampleRate = data.sampleRateOut; + MA_COPY_MEMORY(pDevice->playback.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut)); + pDevice->playback.internalPeriodSizeInFrames = data.periodSizeInFramesOut; + pDevice->playback.internalPeriods = data.periodsOut; + + #if defined(MA_APPLE_DESKTOP) + /* + If we are using the default device we'll need to listen for changes to the system's default device so we can seemlessly + switch the device in the background. + */ + if (pConfig->playback.pDeviceID == NULL && (pConfig->deviceType != ma_device_type_duplex || pConfig->capture.pDeviceID != NULL)) { + ma_device__track__coreaudio(pDevice); + } + #endif + } + + pDevice->coreaudio.originalPeriodSizeInFrames = pConfig->periodSizeInFrames; + pDevice->coreaudio.originalPeriodSizeInMilliseconds = pConfig->periodSizeInMilliseconds; + pDevice->coreaudio.originalPeriods = pConfig->periods; + + /* + When stopping the device, a callback is called on another thread. We need to wait for this callback + before returning from ma_device_stop(). This event is used for this. + */ + ma_event_init(pContext, &pDevice->coreaudio.stopEvent); + + /* Need a ring buffer for duplex mode. */ + if (pConfig->deviceType == ma_device_type_duplex) { + ma_uint32 rbSizeInFrames = (ma_uint32)ma_calculate_frame_count_after_resampling(pDevice->sampleRate, pDevice->capture.internalSampleRate, pDevice->capture.internalPeriodSizeInFrames * pDevice->capture.internalPeriods); + ma_result result = ma_pcm_rb_init(pDevice->capture.format, pDevice->capture.channels, rbSizeInFrames, NULL, &pDevice->pContext->allocationCallbacks, &pDevice->coreaudio.duplexRB); + if (result != MA_SUCCESS) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[Core Audio] Failed to initialize ring buffer.", result); + } + + /* We need a period to act as a buffer for cases where the playback and capture device's end up desyncing. 
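+        As a rough example (illustrative numbers, not defaults): with 3 internal capture periods of
+        1024 frames each, rbSizeInFrames above is 3 * 1024 converted to the client sample rate, and
+        the block below zero-fills and commits roughly rbSizeInFrames / 3 frames so that playback
+        has about one period of silence to draw on before the first captured period arrives.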
*/ + { + ma_uint32 bufferSizeInFrames = rbSizeInFrames / pDevice->capture.internalPeriods; + void* pBufferData; + ma_pcm_rb_acquire_write(&pDevice->coreaudio.duplexRB, &bufferSizeInFrames, &pBufferData); + { + MA_ZERO_MEMORY(pBufferData, bufferSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels)); + } + ma_pcm_rb_commit_write(&pDevice->coreaudio.duplexRB, bufferSizeInFrames, pBufferData); + } + } + + /* + We need to detect when a route has changed so we can update the data conversion pipeline accordingly. This is done + differently on non-Desktop Apple platforms. + */ +#if defined(MA_APPLE_MOBILE) + pDevice->coreaudio.pRouteChangeHandler = (__bridge_retained void*)[[ma_router_change_handler alloc] init:pDevice]; +#endif + + return MA_SUCCESS; +} + + +static ma_result ma_device_start__coreaudio(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + OSStatus status = ((ma_AudioOutputUnitStart_proc)pDevice->pContext->coreaudio.AudioOutputUnitStart)((AudioUnit)pDevice->coreaudio.audioUnitCapture); + if (status != noErr) { + return ma_result_from_OSStatus(status); + } + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + OSStatus status = ((ma_AudioOutputUnitStart_proc)pDevice->pContext->coreaudio.AudioOutputUnitStart)((AudioUnit)pDevice->coreaudio.audioUnitPlayback); + if (status != noErr) { + if (pDevice->type == ma_device_type_duplex) { + ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitCapture); + } + return ma_result_from_OSStatus(status); + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device_stop__coreaudio(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + /* It's not clear from the documentation whether or not AudioOutputUnitStop() actually drains the device or not. */ + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + OSStatus status = ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitCapture); + if (status != noErr) { + return ma_result_from_OSStatus(status); + } + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + OSStatus status = ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitPlayback); + if (status != noErr) { + return ma_result_from_OSStatus(status); + } + } + + /* We need to wait for the callback to finish before returning. */ + ma_event_wait(&pDevice->coreaudio.stopEvent); + return MA_SUCCESS; +} + + +static ma_result ma_context_uninit__coreaudio(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_coreaudio); + +#if !defined(MA_NO_RUNTIME_LINKING) && !defined(MA_APPLE_MOBILE) + ma_dlclose(pContext, pContext->coreaudio.hAudioUnit); + ma_dlclose(pContext, pContext->coreaudio.hCoreAudio); + ma_dlclose(pContext, pContext->coreaudio.hCoreFoundation); +#endif + + (void)pContext; + return MA_SUCCESS; +} + +#if defined(MA_APPLE_MOBILE) +static AVAudioSessionCategory ma_to_AVAudioSessionCategory(ma_ios_session_category category) +{ + /* The "default" and "none" categories are treated different and should not be used as an input into this function. 
*/ + MA_ASSERT(category != ma_ios_session_category_default); + MA_ASSERT(category != ma_ios_session_category_none); + + switch (category) { + case ma_ios_session_category_ambient: return AVAudioSessionCategoryAmbient; + case ma_ios_session_category_solo_ambient: return AVAudioSessionCategorySoloAmbient; + case ma_ios_session_category_playback: return AVAudioSessionCategoryPlayback; + case ma_ios_session_category_record: return AVAudioSessionCategoryRecord; + case ma_ios_session_category_play_and_record: return AVAudioSessionCategoryPlayAndRecord; + case ma_ios_session_category_multi_route: return AVAudioSessionCategoryMultiRoute; + case ma_ios_session_category_none: return AVAudioSessionCategoryAmbient; + case ma_ios_session_category_default: return AVAudioSessionCategoryAmbient; + default: return AVAudioSessionCategoryAmbient; + } +} +#endif + +static ma_result ma_context_init__coreaudio(const ma_context_config* pConfig, ma_context* pContext) +{ + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pContext != NULL); + +#if defined(MA_APPLE_MOBILE) + @autoreleasepool { + AVAudioSession* pAudioSession = [AVAudioSession sharedInstance]; + AVAudioSessionCategoryOptions options = pConfig->coreaudio.sessionCategoryOptions; + + MA_ASSERT(pAudioSession != NULL); + + if (pConfig->coreaudio.sessionCategory == ma_ios_session_category_default) { + /* + I'm going to use trial and error to determine our default session category. First we'll try PlayAndRecord. If that fails + we'll try Playback and if that fails we'll try record. If all of these fail we'll just not set the category. + */ + #if !defined(MA_APPLE_TV) && !defined(MA_APPLE_WATCH) + options |= AVAudioSessionCategoryOptionDefaultToSpeaker; + #endif + + if ([pAudioSession setCategory: AVAudioSessionCategoryPlayAndRecord withOptions:options error:nil]) { + /* Using PlayAndRecord */ + } else if ([pAudioSession setCategory: AVAudioSessionCategoryPlayback withOptions:options error:nil]) { + /* Using Playback */ + } else if ([pAudioSession setCategory: AVAudioSessionCategoryRecord withOptions:options error:nil]) { + /* Using Record */ + } else { + /* Leave as default? */ + } + } else { + if (pConfig->coreaudio.sessionCategory != ma_ios_session_category_none) { + if (![pAudioSession setCategory: ma_to_AVAudioSessionCategory(pConfig->coreaudio.sessionCategory) withOptions:options error:nil]) { + return MA_INVALID_OPERATION; /* Failed to set session category. 
*/ + } + } + } + } +#endif + +#if !defined(MA_NO_RUNTIME_LINKING) && !defined(MA_APPLE_MOBILE) + pContext->coreaudio.hCoreFoundation = ma_dlopen(pContext, "CoreFoundation.framework/CoreFoundation"); + if (pContext->coreaudio.hCoreFoundation == NULL) { + return MA_API_NOT_FOUND; + } + + pContext->coreaudio.CFStringGetCString = ma_dlsym(pContext, pContext->coreaudio.hCoreFoundation, "CFStringGetCString"); + pContext->coreaudio.CFRelease = ma_dlsym(pContext, pContext->coreaudio.hCoreFoundation, "CFRelease"); + + + pContext->coreaudio.hCoreAudio = ma_dlopen(pContext, "CoreAudio.framework/CoreAudio"); + if (pContext->coreaudio.hCoreAudio == NULL) { + ma_dlclose(pContext, pContext->coreaudio.hCoreFoundation); + return MA_API_NOT_FOUND; + } + + pContext->coreaudio.AudioObjectGetPropertyData = ma_dlsym(pContext, pContext->coreaudio.hCoreAudio, "AudioObjectGetPropertyData"); + pContext->coreaudio.AudioObjectGetPropertyDataSize = ma_dlsym(pContext, pContext->coreaudio.hCoreAudio, "AudioObjectGetPropertyDataSize"); + pContext->coreaudio.AudioObjectSetPropertyData = ma_dlsym(pContext, pContext->coreaudio.hCoreAudio, "AudioObjectSetPropertyData"); + pContext->coreaudio.AudioObjectAddPropertyListener = ma_dlsym(pContext, pContext->coreaudio.hCoreAudio, "AudioObjectAddPropertyListener"); + pContext->coreaudio.AudioObjectRemovePropertyListener = ma_dlsym(pContext, pContext->coreaudio.hCoreAudio, "AudioObjectRemovePropertyListener"); + + /* + It looks like Apple has moved some APIs from AudioUnit into AudioToolbox on more recent versions of macOS. They are still + defined in AudioUnit, but just in case they decide to remove them from there entirely I'm going to implement a fallback. + The way it'll work is that it'll first try AudioUnit, and if the required symbols are not present there we'll fall back to + AudioToolbox. + */ + pContext->coreaudio.hAudioUnit = ma_dlopen(pContext, "AudioUnit.framework/AudioUnit"); + if (pContext->coreaudio.hAudioUnit == NULL) { + ma_dlclose(pContext, pContext->coreaudio.hCoreAudio); + ma_dlclose(pContext, pContext->coreaudio.hCoreFoundation); + return MA_API_NOT_FOUND; + } + + if (ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioComponentFindNext") == NULL) { + /* Couldn't find the required symbols in AudioUnit, so fall back to AudioToolbox. 
*/ + ma_dlclose(pContext, pContext->coreaudio.hAudioUnit); + pContext->coreaudio.hAudioUnit = ma_dlopen(pContext, "AudioToolbox.framework/AudioToolbox"); + if (pContext->coreaudio.hAudioUnit == NULL) { + ma_dlclose(pContext, pContext->coreaudio.hCoreAudio); + ma_dlclose(pContext, pContext->coreaudio.hCoreFoundation); + return MA_API_NOT_FOUND; + } + } + + pContext->coreaudio.AudioComponentFindNext = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioComponentFindNext"); + pContext->coreaudio.AudioComponentInstanceDispose = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioComponentInstanceDispose"); + pContext->coreaudio.AudioComponentInstanceNew = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioComponentInstanceNew"); + pContext->coreaudio.AudioOutputUnitStart = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioOutputUnitStart"); + pContext->coreaudio.AudioOutputUnitStop = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioOutputUnitStop"); + pContext->coreaudio.AudioUnitAddPropertyListener = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioUnitAddPropertyListener"); + pContext->coreaudio.AudioUnitGetPropertyInfo = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioUnitGetPropertyInfo"); + pContext->coreaudio.AudioUnitGetProperty = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioUnitGetProperty"); + pContext->coreaudio.AudioUnitSetProperty = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioUnitSetProperty"); + pContext->coreaudio.AudioUnitInitialize = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioUnitInitialize"); + pContext->coreaudio.AudioUnitRender = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioUnitRender"); +#else + pContext->coreaudio.CFStringGetCString = (ma_proc)CFStringGetCString; + pContext->coreaudio.CFRelease = (ma_proc)CFRelease; + + #if defined(MA_APPLE_DESKTOP) + pContext->coreaudio.AudioObjectGetPropertyData = (ma_proc)AudioObjectGetPropertyData; + pContext->coreaudio.AudioObjectGetPropertyDataSize = (ma_proc)AudioObjectGetPropertyDataSize; + pContext->coreaudio.AudioObjectSetPropertyData = (ma_proc)AudioObjectSetPropertyData; + pContext->coreaudio.AudioObjectAddPropertyListener = (ma_proc)AudioObjectAddPropertyListener; + pContext->coreaudio.AudioObjectRemovePropertyListener = (ma_proc)AudioObjectRemovePropertyListener; + #endif + + pContext->coreaudio.AudioComponentFindNext = (ma_proc)AudioComponentFindNext; + pContext->coreaudio.AudioComponentInstanceDispose = (ma_proc)AudioComponentInstanceDispose; + pContext->coreaudio.AudioComponentInstanceNew = (ma_proc)AudioComponentInstanceNew; + pContext->coreaudio.AudioOutputUnitStart = (ma_proc)AudioOutputUnitStart; + pContext->coreaudio.AudioOutputUnitStop = (ma_proc)AudioOutputUnitStop; + pContext->coreaudio.AudioUnitAddPropertyListener = (ma_proc)AudioUnitAddPropertyListener; + pContext->coreaudio.AudioUnitGetPropertyInfo = (ma_proc)AudioUnitGetPropertyInfo; + pContext->coreaudio.AudioUnitGetProperty = (ma_proc)AudioUnitGetProperty; + pContext->coreaudio.AudioUnitSetProperty = (ma_proc)AudioUnitSetProperty; + pContext->coreaudio.AudioUnitInitialize = (ma_proc)AudioUnitInitialize; + pContext->coreaudio.AudioUnitRender = (ma_proc)AudioUnitRender; +#endif + + pContext->isBackendAsynchronous = MA_TRUE; + + pContext->onUninit = ma_context_uninit__coreaudio; + pContext->onDeviceIDEqual = ma_context_is_device_id_equal__coreaudio; + pContext->onEnumDevices = ma_context_enumerate_devices__coreaudio; + pContext->onGetDeviceInfo = 
ma_context_get_device_info__coreaudio; + pContext->onDeviceInit = ma_device_init__coreaudio; + pContext->onDeviceUninit = ma_device_uninit__coreaudio; + pContext->onDeviceStart = ma_device_start__coreaudio; + pContext->onDeviceStop = ma_device_stop__coreaudio; + + /* Audio component. */ + { + AudioComponentDescription desc; + desc.componentType = kAudioUnitType_Output; + #if defined(MA_APPLE_DESKTOP) + desc.componentSubType = kAudioUnitSubType_HALOutput; + #else + desc.componentSubType = kAudioUnitSubType_RemoteIO; + #endif + desc.componentManufacturer = kAudioUnitManufacturer_Apple; + desc.componentFlags = 0; + desc.componentFlagsMask = 0; + + pContext->coreaudio.component = ((ma_AudioComponentFindNext_proc)pContext->coreaudio.AudioComponentFindNext)(NULL, &desc); + if (pContext->coreaudio.component == NULL) { + #if !defined(MA_NO_RUNTIME_LINKING) && !defined(MA_APPLE_MOBILE) + ma_dlclose(pContext, pContext->coreaudio.hAudioUnit); + ma_dlclose(pContext, pContext->coreaudio.hCoreAudio); + ma_dlclose(pContext, pContext->coreaudio.hCoreFoundation); + #endif + return MA_FAILED_TO_INIT_BACKEND; + } + } + + return MA_SUCCESS; +} +#endif /* Core Audio */ + + + +/****************************************************************************** + +sndio Backend + +******************************************************************************/ +#ifdef MA_HAS_SNDIO +#include +#include + +/* +Only supporting OpenBSD. This did not work very well at all on FreeBSD when I tried it. Not sure if this is due +to miniaudio's implementation or if it's some kind of system configuration issue, but basically the default device +just doesn't emit any sound, or at times you'll hear tiny pieces. I will consider enabling this when there's +demand for it or if I can get it tested and debugged more thoroughly. 
+*/ +#if 0 +#if defined(__NetBSD__) || defined(__OpenBSD__) +#include +#endif +#if defined(__FreeBSD__) || defined(__DragonFly__) +#include +#endif +#endif + +#define MA_SIO_DEVANY "default" +#define MA_SIO_PLAY 1 +#define MA_SIO_REC 2 +#define MA_SIO_NENC 8 +#define MA_SIO_NCHAN 8 +#define MA_SIO_NRATE 16 +#define MA_SIO_NCONF 4 + +struct ma_sio_hdl; /* <-- Opaque */ + +struct ma_sio_par +{ + unsigned int bits; + unsigned int bps; + unsigned int sig; + unsigned int le; + unsigned int msb; + unsigned int rchan; + unsigned int pchan; + unsigned int rate; + unsigned int bufsz; + unsigned int xrun; + unsigned int round; + unsigned int appbufsz; + int __pad[3]; + unsigned int __magic; +}; + +struct ma_sio_enc +{ + unsigned int bits; + unsigned int bps; + unsigned int sig; + unsigned int le; + unsigned int msb; +}; + +struct ma_sio_conf +{ + unsigned int enc; + unsigned int rchan; + unsigned int pchan; + unsigned int rate; +}; + +struct ma_sio_cap +{ + struct ma_sio_enc enc[MA_SIO_NENC]; + unsigned int rchan[MA_SIO_NCHAN]; + unsigned int pchan[MA_SIO_NCHAN]; + unsigned int rate[MA_SIO_NRATE]; + int __pad[7]; + unsigned int nconf; + struct ma_sio_conf confs[MA_SIO_NCONF]; +}; + +typedef struct ma_sio_hdl* (* ma_sio_open_proc) (const char*, unsigned int, int); +typedef void (* ma_sio_close_proc) (struct ma_sio_hdl*); +typedef int (* ma_sio_setpar_proc) (struct ma_sio_hdl*, struct ma_sio_par*); +typedef int (* ma_sio_getpar_proc) (struct ma_sio_hdl*, struct ma_sio_par*); +typedef int (* ma_sio_getcap_proc) (struct ma_sio_hdl*, struct ma_sio_cap*); +typedef size_t (* ma_sio_write_proc) (struct ma_sio_hdl*, const void*, size_t); +typedef size_t (* ma_sio_read_proc) (struct ma_sio_hdl*, void*, size_t); +typedef int (* ma_sio_start_proc) (struct ma_sio_hdl*); +typedef int (* ma_sio_stop_proc) (struct ma_sio_hdl*); +typedef int (* ma_sio_initpar_proc)(struct ma_sio_par*); + +static ma_uint32 ma_get_standard_sample_rate_priority_index__sndio(ma_uint32 sampleRate) /* Lower = higher priority */ +{ + ma_uint32 i; + for (i = 0; i < ma_countof(g_maStandardSampleRatePriorities); ++i) { + if (g_maStandardSampleRatePriorities[i] == sampleRate) { + return i; + } + } + + return (ma_uint32)-1; +} + +static ma_format ma_format_from_sio_enc__sndio(unsigned int bits, unsigned int bps, unsigned int sig, unsigned int le, unsigned int msb) +{ + /* We only support native-endian right now. 
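+    Concretely (illustrative): on a little-endian host an sndio encoding reported as bits=16,
+    bps=2, sig=1, le=1 maps to ma_format_s16 below, whereas the same encoding with le=0
+    (big-endian data) is rejected by the check below and reported as ma_format_unknown. Also note
+    that sndio has no floating point encodings, so ma_format_f32 can never be returned from here.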
*/ + if ((ma_is_little_endian() && le == 0) || (ma_is_big_endian() && le == 1)) { + return ma_format_unknown; + } + + if (bits == 8 && bps == 1 && sig == 0) { + return ma_format_u8; + } + if (bits == 16 && bps == 2 && sig == 1) { + return ma_format_s16; + } + if (bits == 24 && bps == 3 && sig == 1) { + return ma_format_s24; + } + if (bits == 24 && bps == 4 && sig == 1 && msb == 0) { + /*return ma_format_s24_32;*/ + } + if (bits == 32 && bps == 4 && sig == 1) { + return ma_format_s32; + } + + return ma_format_unknown; +} + +static ma_format ma_find_best_format_from_sio_cap__sndio(struct ma_sio_cap* caps) +{ + ma_format bestFormat; + unsigned int iConfig; + + MA_ASSERT(caps != NULL); + + bestFormat = ma_format_unknown; + for (iConfig = 0; iConfig < caps->nconf; iConfig += 1) { + unsigned int iEncoding; + for (iEncoding = 0; iEncoding < MA_SIO_NENC; iEncoding += 1) { + unsigned int bits; + unsigned int bps; + unsigned int sig; + unsigned int le; + unsigned int msb; + ma_format format; + + if ((caps->confs[iConfig].enc & (1UL << iEncoding)) == 0) { + continue; + } + + bits = caps->enc[iEncoding].bits; + bps = caps->enc[iEncoding].bps; + sig = caps->enc[iEncoding].sig; + le = caps->enc[iEncoding].le; + msb = caps->enc[iEncoding].msb; + format = ma_format_from_sio_enc__sndio(bits, bps, sig, le, msb); + if (format == ma_format_unknown) { + continue; /* Format not supported. */ + } + + if (bestFormat == ma_format_unknown) { + bestFormat = format; + } else { + if (ma_get_format_priority_index(bestFormat) > ma_get_format_priority_index(format)) { /* <-- Lower = better. */ + bestFormat = format; + } + } + } + } + + return bestFormat; +} + +static ma_uint32 ma_find_best_channels_from_sio_cap__sndio(struct ma_sio_cap* caps, ma_device_type deviceType, ma_format requiredFormat) +{ + ma_uint32 maxChannels; + unsigned int iConfig; + + MA_ASSERT(caps != NULL); + MA_ASSERT(requiredFormat != ma_format_unknown); + + /* Just pick whatever configuration has the most channels. */ + maxChannels = 0; + for (iConfig = 0; iConfig < caps->nconf; iConfig += 1) { + /* The encoding should be of requiredFormat. */ + unsigned int iEncoding; + for (iEncoding = 0; iEncoding < MA_SIO_NENC; iEncoding += 1) { + unsigned int iChannel; + unsigned int bits; + unsigned int bps; + unsigned int sig; + unsigned int le; + unsigned int msb; + ma_format format; + + if ((caps->confs[iConfig].enc & (1UL << iEncoding)) == 0) { + continue; + } + + bits = caps->enc[iEncoding].bits; + bps = caps->enc[iEncoding].bps; + sig = caps->enc[iEncoding].sig; + le = caps->enc[iEncoding].le; + msb = caps->enc[iEncoding].msb; + format = ma_format_from_sio_enc__sndio(bits, bps, sig, le, msb); + if (format != requiredFormat) { + continue; + } + + /* Getting here means the format is supported. Iterate over each channel count and grab the biggest one. 
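+            The pchan/rchan member of a config is a bitmask of indices into caps->pchan[] and
+            caps->rchan[] respectively. As a hypothetical example, a playback mask of 0x05 with
+            caps->pchan = {1, 2, 4, ...} means the 1-channel and 4-channel modes are available,
+            and 4 is what this loop would settle on.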
*/ + for (iChannel = 0; iChannel < MA_SIO_NCHAN; iChannel += 1) { + unsigned int chan = 0; + unsigned int channels; + + if (deviceType == ma_device_type_playback) { + chan = caps->confs[iConfig].pchan; + } else { + chan = caps->confs[iConfig].rchan; + } + + if ((chan & (1UL << iChannel)) == 0) { + continue; + } + + if (deviceType == ma_device_type_playback) { + channels = caps->pchan[iChannel]; + } else { + channels = caps->rchan[iChannel]; + } + + if (maxChannels < channels) { + maxChannels = channels; + } + } + } + } + + return maxChannels; +} + +static ma_uint32 ma_find_best_sample_rate_from_sio_cap__sndio(struct ma_sio_cap* caps, ma_device_type deviceType, ma_format requiredFormat, ma_uint32 requiredChannels) +{ + ma_uint32 firstSampleRate; + ma_uint32 bestSampleRate; + unsigned int iConfig; + + MA_ASSERT(caps != NULL); + MA_ASSERT(requiredFormat != ma_format_unknown); + MA_ASSERT(requiredChannels > 0); + MA_ASSERT(requiredChannels <= MA_MAX_CHANNELS); + + firstSampleRate = 0; /* <-- If the device does not support a standard rate we'll fall back to the first one that's found. */ + bestSampleRate = 0; + + for (iConfig = 0; iConfig < caps->nconf; iConfig += 1) { + /* The encoding should be of requiredFormat. */ + unsigned int iEncoding; + for (iEncoding = 0; iEncoding < MA_SIO_NENC; iEncoding += 1) { + unsigned int iChannel; + unsigned int bits; + unsigned int bps; + unsigned int sig; + unsigned int le; + unsigned int msb; + ma_format format; + + if ((caps->confs[iConfig].enc & (1UL << iEncoding)) == 0) { + continue; + } + + bits = caps->enc[iEncoding].bits; + bps = caps->enc[iEncoding].bps; + sig = caps->enc[iEncoding].sig; + le = caps->enc[iEncoding].le; + msb = caps->enc[iEncoding].msb; + format = ma_format_from_sio_enc__sndio(bits, bps, sig, le, msb); + if (format != requiredFormat) { + continue; + } + + /* Getting here means the format is supported. Iterate over each channel count and grab the biggest one. */ + for (iChannel = 0; iChannel < MA_SIO_NCHAN; iChannel += 1) { + unsigned int chan = 0; + unsigned int channels; + unsigned int iRate; + + if (deviceType == ma_device_type_playback) { + chan = caps->confs[iConfig].pchan; + } else { + chan = caps->confs[iConfig].rchan; + } + + if ((chan & (1UL << iChannel)) == 0) { + continue; + } + + if (deviceType == ma_device_type_playback) { + channels = caps->pchan[iChannel]; + } else { + channels = caps->rchan[iChannel]; + } + + if (channels != requiredChannels) { + continue; + } + + /* Getting here means we have found a compatible encoding/channel pair. */ + for (iRate = 0; iRate < MA_SIO_NRATE; iRate += 1) { + ma_uint32 rate = (ma_uint32)caps->rate[iRate]; + ma_uint32 ratePriority; + + if (firstSampleRate == 0) { + firstSampleRate = rate; + } + + /* Disregard this rate if it's not a standard one. */ + ratePriority = ma_get_standard_sample_rate_priority_index__sndio(rate); + if (ratePriority == (ma_uint32)-1) { + continue; + } + + if (ma_get_standard_sample_rate_priority_index__sndio(bestSampleRate) > ratePriority) { /* Lower = better. */ + bestSampleRate = rate; + } + } + } + } + } + + /* If a standard sample rate was not found just fall back to the first one that was iterated. 
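+    Note that bestSampleRate starts out as 0, and ma_get_standard_sample_rate_priority_index__sndio(0)
+    returns (ma_uint32)-1, so the first standard rate encountered always replaces it; after that a
+    rate only wins if its priority index is lower (i.e. it is a more preferred standard rate).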
*/ + if (bestSampleRate == 0) { + bestSampleRate = firstSampleRate; + } + + return bestSampleRate; +} + + +static ma_bool32 ma_context_is_device_id_equal__sndio(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pID0 != NULL); + MA_ASSERT(pID1 != NULL); + (void)pContext; + + return ma_strcmp(pID0->sndio, pID1->sndio) == 0; +} + +static ma_result ma_context_enumerate_devices__sndio(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + ma_bool32 isTerminating = MA_FALSE; + struct ma_sio_hdl* handle; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + /* sndio doesn't seem to have a good device enumeration API, so I'm therefore only enumerating over default devices for now. */ + + /* Playback. */ + if (!isTerminating) { + handle = ((ma_sio_open_proc)pContext->sndio.sio_open)(MA_SIO_DEVANY, MA_SIO_PLAY, 0); + if (handle != NULL) { + /* Supports playback. */ + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_strcpy_s(deviceInfo.id.sndio, sizeof(deviceInfo.id.sndio), MA_SIO_DEVANY); + ma_strcpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME); + + isTerminating = !callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + + ((ma_sio_close_proc)pContext->sndio.sio_close)(handle); + } + } + + /* Capture. */ + if (!isTerminating) { + handle = ((ma_sio_open_proc)pContext->sndio.sio_open)(MA_SIO_DEVANY, MA_SIO_REC, 0); + if (handle != NULL) { + /* Supports capture. */ + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_strcpy_s(deviceInfo.id.sndio, sizeof(deviceInfo.id.sndio), "default"); + ma_strcpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME); + + isTerminating = !callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + + ((ma_sio_close_proc)pContext->sndio.sio_close)(handle); + } + } + + return MA_SUCCESS; +} + +static ma_result ma_context_get_device_info__sndio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) +{ + char devid[256]; + struct ma_sio_hdl* handle; + struct ma_sio_cap caps; + unsigned int iConfig; + + MA_ASSERT(pContext != NULL); + (void)shareMode; + + /* We need to open the device before we can get information about it. */ + if (pDeviceID == NULL) { + ma_strcpy_s(devid, sizeof(devid), MA_SIO_DEVANY); + ma_strcpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), (deviceType == ma_device_type_playback) ? MA_DEFAULT_PLAYBACK_DEVICE_NAME : MA_DEFAULT_CAPTURE_DEVICE_NAME); + } else { + ma_strcpy_s(devid, sizeof(devid), pDeviceID->sndio); + ma_strcpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), devid); + } + + handle = ((ma_sio_open_proc)pContext->sndio.sio_open)(devid, (deviceType == ma_device_type_playback) ? MA_SIO_PLAY : MA_SIO_REC, 0); + if (handle == NULL) { + return MA_NO_DEVICE; + } + + if (((ma_sio_getcap_proc)pContext->sndio.sio_getcap)(handle, &caps) == 0) { + return MA_ERROR; + } + + for (iConfig = 0; iConfig < caps.nconf; iConfig += 1) { + /* + The main thing we care about is that the encoding is supported by miniaudio. If it is, we want to give + preference to some formats over others. 
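+        In this function that just means collecting every supported ma_format into
+        pDeviceInfo->formats[] along with the channel count and sample rate ranges; the actual
+        preference ordering is applied elsewhere via ma_get_format_priority_index(), as in
+        ma_find_best_format_from_sio_cap__sndio() above.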
+ */ + unsigned int iEncoding; + unsigned int iChannel; + unsigned int iRate; + + for (iEncoding = 0; iEncoding < MA_SIO_NENC; iEncoding += 1) { + unsigned int bits; + unsigned int bps; + unsigned int sig; + unsigned int le; + unsigned int msb; + ma_format format; + ma_bool32 formatExists = MA_FALSE; + ma_uint32 iExistingFormat; + + if ((caps.confs[iConfig].enc & (1UL << iEncoding)) == 0) { + continue; + } + + bits = caps.enc[iEncoding].bits; + bps = caps.enc[iEncoding].bps; + sig = caps.enc[iEncoding].sig; + le = caps.enc[iEncoding].le; + msb = caps.enc[iEncoding].msb; + format = ma_format_from_sio_enc__sndio(bits, bps, sig, le, msb); + if (format == ma_format_unknown) { + continue; /* Format not supported. */ + } + + /* Add this format if it doesn't already exist. */ + for (iExistingFormat = 0; iExistingFormat < pDeviceInfo->formatCount; iExistingFormat += 1) { + if (pDeviceInfo->formats[iExistingFormat] == format) { + formatExists = MA_TRUE; + break; + } + } + + if (!formatExists) { + pDeviceInfo->formats[pDeviceInfo->formatCount++] = format; + } + } + + /* Channels. */ + for (iChannel = 0; iChannel < MA_SIO_NCHAN; iChannel += 1) { + unsigned int chan = 0; + unsigned int channels; + + if (deviceType == ma_device_type_playback) { + chan = caps.confs[iConfig].pchan; + } else { + chan = caps.confs[iConfig].rchan; + } + + if ((chan & (1UL << iChannel)) == 0) { + continue; + } + + if (deviceType == ma_device_type_playback) { + channels = caps.pchan[iChannel]; + } else { + channels = caps.rchan[iChannel]; + } + + if (pDeviceInfo->minChannels > channels) { + pDeviceInfo->minChannels = channels; + } + if (pDeviceInfo->maxChannels < channels) { + pDeviceInfo->maxChannels = channels; + } + } + + /* Sample rates. */ + for (iRate = 0; iRate < MA_SIO_NRATE; iRate += 1) { + if ((caps.confs[iConfig].rate & (1UL << iRate)) != 0) { + unsigned int rate = caps.rate[iRate]; + if (pDeviceInfo->minSampleRate > rate) { + pDeviceInfo->minSampleRate = rate; + } + if (pDeviceInfo->maxSampleRate < rate) { + pDeviceInfo->maxSampleRate = rate; + } + } + } + } + + ((ma_sio_close_proc)pContext->sndio.sio_close)(handle); + return MA_SUCCESS; +} + +static void ma_device_uninit__sndio(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + ((ma_sio_close_proc)pDevice->pContext->sndio.sio_close)((struct ma_sio_hdl*)pDevice->sndio.handleCapture); + } + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + ((ma_sio_close_proc)pDevice->pContext->sndio.sio_close)((struct ma_sio_hdl*)pDevice->sndio.handlePlayback); + } +} + +static ma_result ma_device_init_handle__sndio(ma_context* pContext, const ma_device_config* pConfig, ma_device_type deviceType, ma_device* pDevice) +{ + const char* pDeviceName; + ma_ptr handle; + int openFlags = 0; + struct ma_sio_cap caps; + struct ma_sio_par par; + ma_device_id* pDeviceID; + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + ma_format internalFormat; + ma_uint32 internalChannels; + ma_uint32 internalSampleRate; + ma_uint32 internalPeriodSizeInFrames; + ma_uint32 internalPeriods; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pConfig != NULL); + MA_ASSERT(deviceType != ma_device_type_duplex); + MA_ASSERT(pDevice != NULL); + + if (deviceType == ma_device_type_capture) { + openFlags = MA_SIO_REC; + pDeviceID = pConfig->capture.pDeviceID; + format = pConfig->capture.format; + channels = pConfig->capture.channels; + sampleRate = pConfig->sampleRate; 
+ } else { + openFlags = MA_SIO_PLAY; + pDeviceID = pConfig->playback.pDeviceID; + format = pConfig->playback.format; + channels = pConfig->playback.channels; + sampleRate = pConfig->sampleRate; + } + + pDeviceName = MA_SIO_DEVANY; + if (pDeviceID != NULL) { + pDeviceName = pDeviceID->sndio; + } + + handle = (ma_ptr)((ma_sio_open_proc)pContext->sndio.sio_open)(pDeviceName, openFlags, 0); + if (handle == NULL) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[sndio] Failed to open device.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); + } + + /* We need to retrieve the device caps to determine the most appropriate format to use. */ + if (((ma_sio_getcap_proc)pContext->sndio.sio_getcap)((struct ma_sio_hdl*)handle, &caps) == 0) { + ((ma_sio_close_proc)pContext->sndio.sio_close)((struct ma_sio_hdl*)handle); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[sndio] Failed to retrieve device caps.", MA_ERROR); + } + + /* + Note: sndio reports a huge range of available channels. This is inconvenient for us because there's no real + way, as far as I can tell, to get the _actual_ channel count of the device. I'm therefore restricting this + to the requested channels, regardless of whether or not the default channel count is requested. + + For hardware devices, I'm suspecting only a single channel count will be reported and we can safely use the + value returned by ma_find_best_channels_from_sio_cap__sndio(). + */ + if (deviceType == ma_device_type_capture) { + if (pDevice->capture.usingDefaultFormat) { + format = ma_find_best_format_from_sio_cap__sndio(&caps); + } + if (pDevice->capture.usingDefaultChannels) { + if (strlen(pDeviceName) > strlen("rsnd/") && strncmp(pDeviceName, "rsnd/", strlen("rsnd/")) == 0) { + channels = ma_find_best_channels_from_sio_cap__sndio(&caps, deviceType, format); + } + } + } else { + if (pDevice->playback.usingDefaultFormat) { + format = ma_find_best_format_from_sio_cap__sndio(&caps); + } + if (pDevice->playback.usingDefaultChannels) { + if (strlen(pDeviceName) > strlen("rsnd/") && strncmp(pDeviceName, "rsnd/", strlen("rsnd/")) == 0) { + channels = ma_find_best_channels_from_sio_cap__sndio(&caps, deviceType, format); + } + } + } + + if (pDevice->usingDefaultSampleRate) { + sampleRate = ma_find_best_sample_rate_from_sio_cap__sndio(&caps, pConfig->deviceType, format, channels); + } + + + ((ma_sio_initpar_proc)pDevice->pContext->sndio.sio_initpar)(&par); + par.msb = 0; + par.le = ma_is_little_endian(); + + switch (format) { + case ma_format_u8: + { + par.bits = 8; + par.bps = 1; + par.sig = 0; + } break; + + case ma_format_s24: + { + par.bits = 24; + par.bps = 3; + par.sig = 1; + } break; + + case ma_format_s32: + { + par.bits = 32; + par.bps = 4; + par.sig = 1; + } break; + + case ma_format_s16: + case ma_format_f32: + default: + { + par.bits = 16; + par.bps = 2; + par.sig = 1; + } break; + } + + if (deviceType == ma_device_type_capture) { + par.rchan = channels; + } else { + par.pchan = channels; + } + + par.rate = sampleRate; + + internalPeriodSizeInFrames = pConfig->periodSizeInFrames; + if (internalPeriodSizeInFrames == 0) { + internalPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->periodSizeInMilliseconds, par.rate); + } + + par.round = internalPeriodSizeInFrames; + par.appbufsz = par.round * pConfig->periods; + + if (((ma_sio_setpar_proc)pContext->sndio.sio_setpar)((struct ma_sio_hdl*)handle, &par) == 0) { + ((ma_sio_close_proc)pContext->sndio.sio_close)((struct ma_sio_hdl*)handle); + return ma_post_error(pDevice, 
MA_LOG_LEVEL_ERROR, "[sndio] Failed to set buffer size.", MA_FORMAT_NOT_SUPPORTED); + } + if (((ma_sio_getpar_proc)pContext->sndio.sio_getpar)((struct ma_sio_hdl*)handle, &par) == 0) { + ((ma_sio_close_proc)pContext->sndio.sio_close)((struct ma_sio_hdl*)handle); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[sndio] Failed to retrieve buffer size.", MA_FORMAT_NOT_SUPPORTED); + } + + internalFormat = ma_format_from_sio_enc__sndio(par.bits, par.bps, par.sig, par.le, par.msb); + internalChannels = (deviceType == ma_device_type_capture) ? par.rchan : par.pchan; + internalSampleRate = par.rate; + internalPeriods = par.appbufsz / par.round; + internalPeriodSizeInFrames = par.round; + + if (deviceType == ma_device_type_capture) { + pDevice->sndio.handleCapture = handle; + pDevice->capture.internalFormat = internalFormat; + pDevice->capture.internalChannels = internalChannels; + pDevice->capture.internalSampleRate = internalSampleRate; + ma_get_standard_channel_map(ma_standard_channel_map_sndio, pDevice->capture.internalChannels, pDevice->capture.internalChannelMap); + pDevice->capture.internalPeriodSizeInFrames = internalPeriodSizeInFrames; + pDevice->capture.internalPeriods = internalPeriods; + } else { + pDevice->sndio.handlePlayback = handle; + pDevice->playback.internalFormat = internalFormat; + pDevice->playback.internalChannels = internalChannels; + pDevice->playback.internalSampleRate = internalSampleRate; + ma_get_standard_channel_map(ma_standard_channel_map_sndio, pDevice->playback.internalChannels, pDevice->playback.internalChannelMap); + pDevice->playback.internalPeriodSizeInFrames = internalPeriodSizeInFrames; + pDevice->playback.internalPeriods = internalPeriods; + } + +#ifdef MA_DEBUG_OUTPUT + printf("DEVICE INFO\n"); + printf(" Format: %s\n", ma_get_format_name(internalFormat)); + printf(" Channels: %d\n", internalChannels); + printf(" Sample Rate: %d\n", internalSampleRate); + printf(" Period Size: %d\n", internalPeriodSizeInFrames); + printf(" Periods: %d\n", internalPeriods); + printf(" appbufsz: %d\n", par.appbufsz); + printf(" round: %d\n", par.round); +#endif + + return MA_SUCCESS; +} + +static ma_result ma_device_init__sndio(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + MA_ZERO_OBJECT(&pDevice->sndio); + + if (pConfig->deviceType == ma_device_type_loopback) { + return MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + ma_result result = ma_device_init_handle__sndio(pContext, pConfig, ma_device_type_capture, pDevice); + if (result != MA_SUCCESS) { + return result; + } + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + ma_result result = ma_device_init_handle__sndio(pContext, pConfig, ma_device_type_playback, pDevice); + if (result != MA_SUCCESS) { + return result; + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device_stop__sndio(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + /* + From the documentation: + + The sio_stop() function puts the audio subsystem in the same state as before sio_start() is called. It stops recording, drains the play buffer and then + stops playback. If samples to play are queued but playback hasn't started yet then playback is forced immediately; playback will actually stop once the + buffer is drained. In no case are samples in the play buffer discarded. 
+ + Therefore, sio_stop() performs all of the necessary draining for us. + */ + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + ((ma_sio_stop_proc)pDevice->pContext->sndio.sio_stop)((struct ma_sio_hdl*)pDevice->sndio.handleCapture); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ((ma_sio_stop_proc)pDevice->pContext->sndio.sio_stop)((struct ma_sio_hdl*)pDevice->sndio.handlePlayback); + } + + return MA_SUCCESS; +} + +static ma_result ma_device_write__sndio(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten) +{ + int result; + + if (pFramesWritten != NULL) { + *pFramesWritten = 0; + } + + result = ((ma_sio_write_proc)pDevice->pContext->sndio.sio_write)((struct ma_sio_hdl*)pDevice->sndio.handlePlayback, pPCMFrames, frameCount * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels)); + if (result == 0) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[sndio] Failed to send data from the client to the device.", MA_IO_ERROR); + } + + if (pFramesWritten != NULL) { + *pFramesWritten = frameCount; + } + + return MA_SUCCESS; +} + +static ma_result ma_device_read__sndio(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead) +{ + int result; + + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + result = ((ma_sio_read_proc)pDevice->pContext->sndio.sio_read)((struct ma_sio_hdl*)pDevice->sndio.handleCapture, pPCMFrames, frameCount * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels)); + if (result == 0) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[sndio] Failed to read data from the device to be sent to the device.", MA_IO_ERROR); + } + + if (pFramesRead != NULL) { + *pFramesRead = frameCount; + } + + return MA_SUCCESS; +} + +static ma_result ma_device_main_loop__sndio(ma_device* pDevice) +{ + ma_result result = MA_SUCCESS; + ma_bool32 exitLoop = MA_FALSE; + + /* Devices need to be started here. */ + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + ((ma_sio_start_proc)pDevice->pContext->sndio.sio_start)((struct ma_sio_hdl*)pDevice->sndio.handleCapture); + } + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ((ma_sio_start_proc)pDevice->pContext->sndio.sio_start)((struct ma_sio_hdl*)pDevice->sndio.handlePlayback); /* <-- Doesn't actually playback until data is written. 
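+        sio_start() does not begin output by itself, so playback starts once the loop below pushes
+        the first period with ma_device_write__sndio(). From there the loop pumps whole periods:
+        the duplex path reads a capture period, converts it from device format to client format,
+        invokes the data callback, converts the callback's output back to device format and writes
+        it, while the capture-only and playback-only paths run the simpler one-directional versions
+        of the same thing.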
*/ + } + + while (ma_device__get_state(pDevice) == MA_STATE_STARTED && !exitLoop) { + switch (pDevice->type) + { + case ma_device_type_duplex: + { + /* The process is: device_read -> convert -> callback -> convert -> device_write */ + ma_uint32 totalCapturedDeviceFramesProcessed = 0; + ma_uint32 capturedDevicePeriodSizeInFrames = ma_min(pDevice->capture.internalPeriodSizeInFrames, pDevice->playback.internalPeriodSizeInFrames); + + while (totalCapturedDeviceFramesProcessed < capturedDevicePeriodSizeInFrames) { + ma_uint8 capturedDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint8 playbackDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 capturedDeviceDataCapInFrames = sizeof(capturedDeviceData) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + ma_uint32 playbackDeviceDataCapInFrames = sizeof(playbackDeviceData) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + ma_uint32 capturedDeviceFramesRemaining; + ma_uint32 capturedDeviceFramesProcessed; + ma_uint32 capturedDeviceFramesToProcess; + ma_uint32 capturedDeviceFramesToTryProcessing = capturedDevicePeriodSizeInFrames - totalCapturedDeviceFramesProcessed; + if (capturedDeviceFramesToTryProcessing > capturedDeviceDataCapInFrames) { + capturedDeviceFramesToTryProcessing = capturedDeviceDataCapInFrames; + } + + result = ma_device_read__sndio(pDevice, capturedDeviceData, capturedDeviceFramesToTryProcessing, &capturedDeviceFramesToProcess); + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + + capturedDeviceFramesRemaining = capturedDeviceFramesToProcess; + capturedDeviceFramesProcessed = 0; + + for (;;) { + ma_uint8 capturedClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint8 playbackClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 capturedClientDataCapInFrames = sizeof(capturedClientData) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); + ma_uint32 playbackClientDataCapInFrames = sizeof(playbackClientData) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); + ma_uint64 capturedClientFramesToProcessThisIteration = ma_min(capturedClientDataCapInFrames, playbackClientDataCapInFrames); + ma_uint64 capturedDeviceFramesToProcessThisIteration = capturedDeviceFramesRemaining; + ma_uint8* pRunningCapturedDeviceFrames = ma_offset_ptr(capturedDeviceData, capturedDeviceFramesProcessed * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels)); + + /* Convert capture data from device format to client format. */ + result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningCapturedDeviceFrames, &capturedDeviceFramesToProcessThisIteration, capturedClientData, &capturedClientFramesToProcessThisIteration); + if (result != MA_SUCCESS) { + break; + } + + /* + If we weren't able to generate any output frames it must mean we've exhaused all of our input. The only time this would not be the case is if capturedClientData was too small + which should never be the case when it's of the size MA_DATA_CONVERTER_STACK_BUFFER_SIZE. + */ + if (capturedClientFramesToProcessThisIteration == 0) { + break; + } + + ma_device__on_data(pDevice, playbackClientData, capturedClientData, (ma_uint32)capturedClientFramesToProcessThisIteration); /* Safe cast .*/ + + capturedDeviceFramesProcessed += (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. 
*/ + capturedDeviceFramesRemaining -= (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */ + + /* At this point the playbackClientData buffer should be holding data that needs to be written to the device. */ + for (;;) { + ma_uint64 convertedClientFrameCount = capturedClientFramesToProcessThisIteration; + ma_uint64 convertedDeviceFrameCount = playbackDeviceDataCapInFrames; + result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, playbackClientData, &convertedClientFrameCount, playbackDeviceData, &convertedDeviceFrameCount); + if (result != MA_SUCCESS) { + break; + } + + result = ma_device_write__sndio(pDevice, playbackDeviceData, (ma_uint32)convertedDeviceFrameCount, NULL); /* Safe cast. */ + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + + capturedClientFramesToProcessThisIteration -= (ma_uint32)convertedClientFrameCount; /* Safe cast. */ + if (capturedClientFramesToProcessThisIteration == 0) { + break; + } + } + + /* In case an error happened from ma_device_write__sndio()... */ + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + } + + totalCapturedDeviceFramesProcessed += capturedDeviceFramesProcessed; + } + } break; + + case ma_device_type_capture: + { + /* We read in chunks of the period size, but use a stack allocated buffer for the intermediary. */ + ma_uint8 intermediaryBuffer[8192]; + ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + ma_uint32 periodSizeInFrames = pDevice->capture.internalPeriodSizeInFrames; + ma_uint32 framesReadThisPeriod = 0; + while (framesReadThisPeriod < periodSizeInFrames) { + ma_uint32 framesRemainingInPeriod = periodSizeInFrames - framesReadThisPeriod; + ma_uint32 framesProcessed; + ma_uint32 framesToReadThisIteration = framesRemainingInPeriod; + if (framesToReadThisIteration > intermediaryBufferSizeInFrames) { + framesToReadThisIteration = intermediaryBufferSizeInFrames; + } + + result = ma_device_read__sndio(pDevice, intermediaryBuffer, framesToReadThisIteration, &framesProcessed); + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + + ma_device__send_frames_to_client(pDevice, framesProcessed, intermediaryBuffer); + + framesReadThisPeriod += framesProcessed; + } + } break; + + case ma_device_type_playback: + { + /* We write in chunks of the period size, but use a stack allocated buffer for the intermediary. */ + ma_uint8 intermediaryBuffer[8192]; + ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + ma_uint32 periodSizeInFrames = pDevice->playback.internalPeriodSizeInFrames; + ma_uint32 framesWrittenThisPeriod = 0; + while (framesWrittenThisPeriod < periodSizeInFrames) { + ma_uint32 framesRemainingInPeriod = periodSizeInFrames - framesWrittenThisPeriod; + ma_uint32 framesProcessed; + ma_uint32 framesToWriteThisIteration = framesRemainingInPeriod; + if (framesToWriteThisIteration > intermediaryBufferSizeInFrames) { + framesToWriteThisIteration = intermediaryBufferSizeInFrames; + } + + ma_device__read_frames_from_client(pDevice, framesToWriteThisIteration, intermediaryBuffer); + + result = ma_device_write__sndio(pDevice, intermediaryBuffer, framesToWriteThisIteration, &framesProcessed); + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + + framesWrittenThisPeriod += framesProcessed; + } + } break; + + /* To silence a warning. 
Will never hit this. */ + case ma_device_type_loopback: + default: break; + } + } + + + /* Here is where the device is stopped. */ + ma_device_stop__sndio(pDevice); + + return result; +} + +static ma_result ma_context_uninit__sndio(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_sndio); + + (void)pContext; + return MA_SUCCESS; +} + +static ma_result ma_context_init__sndio(const ma_context_config* pConfig, ma_context* pContext) +{ +#ifndef MA_NO_RUNTIME_LINKING + const char* libsndioNames[] = { + "libsndio.so" + }; + size_t i; + + for (i = 0; i < ma_countof(libsndioNames); ++i) { + pContext->sndio.sndioSO = ma_dlopen(pContext, libsndioNames[i]); + if (pContext->sndio.sndioSO != NULL) { + break; + } + } + + if (pContext->sndio.sndioSO == NULL) { + return MA_NO_BACKEND; + } + + pContext->sndio.sio_open = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_open"); + pContext->sndio.sio_close = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_close"); + pContext->sndio.sio_setpar = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_setpar"); + pContext->sndio.sio_getpar = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_getpar"); + pContext->sndio.sio_getcap = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_getcap"); + pContext->sndio.sio_write = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_write"); + pContext->sndio.sio_read = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_read"); + pContext->sndio.sio_start = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_start"); + pContext->sndio.sio_stop = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_stop"); + pContext->sndio.sio_initpar = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_initpar"); +#else + pContext->sndio.sio_open = sio_open; + pContext->sndio.sio_close = sio_close; + pContext->sndio.sio_setpar = sio_setpar; + pContext->sndio.sio_getpar = sio_getpar; + pContext->sndio.sio_getcap = sio_getcap; + pContext->sndio.sio_write = sio_write; + pContext->sndio.sio_read = sio_read; + pContext->sndio.sio_start = sio_start; + pContext->sndio.sio_stop = sio_stop; + pContext->sndio.sio_initpar = sio_initpar; +#endif + + pContext->onUninit = ma_context_uninit__sndio; + pContext->onDeviceIDEqual = ma_context_is_device_id_equal__sndio; + pContext->onEnumDevices = ma_context_enumerate_devices__sndio; + pContext->onGetDeviceInfo = ma_context_get_device_info__sndio; + pContext->onDeviceInit = ma_device_init__sndio; + pContext->onDeviceUninit = ma_device_uninit__sndio; + pContext->onDeviceStart = NULL; /* Not required for synchronous backends. */ + pContext->onDeviceStop = NULL; /* Not required for synchronous backends. 
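+    sndio is a blocking read/write backend, so rather than start/stop callbacks it supplies
+    onDeviceMainLoop (assigned just below) and miniaudio drives it from the device's own worker
+    thread. Contrast this with the Core Audio backend above, which marks the context with
+    isBackendAsynchronous and registers onDeviceStart/onDeviceStop instead.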
*/
+    pContext->onDeviceMainLoop = ma_device_main_loop__sndio;
+
+    (void)pConfig;
+    return MA_SUCCESS;
+}
+#endif /* sndio */
+
+
+
+/******************************************************************************
+
+audio(4) Backend
+
+******************************************************************************/
+#ifdef MA_HAS_AUDIO4
+#include <fcntl.h>
+#include <poll.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <sys/audioio.h>
+
+#if defined(__OpenBSD__)
+    #include <sys/param.h>
+    #if defined(OpenBSD) && OpenBSD >= 201709
+        #define MA_AUDIO4_USE_NEW_API
+    #endif
+#endif
+
+static void ma_construct_device_id__audio4(char* id, size_t idSize, const char* base, int deviceIndex)
+{
+    size_t baseLen;
+
+    MA_ASSERT(id != NULL);
+    MA_ASSERT(idSize > 0);
+    MA_ASSERT(deviceIndex >= 0);
+
+    baseLen = strlen(base);
+    MA_ASSERT(idSize > baseLen);
+
+    ma_strcpy_s(id, idSize, base);
+    ma_itoa_s(deviceIndex, id+baseLen, idSize-baseLen, 10);
+}
+
+static ma_result ma_extract_device_index_from_id__audio4(const char* id, const char* base, int* pIndexOut)
+{
+    size_t idLen;
+    size_t baseLen;
+    const char* deviceIndexStr;
+
+    MA_ASSERT(id != NULL);
+    MA_ASSERT(base != NULL);
+    MA_ASSERT(pIndexOut != NULL);
+
+    idLen = strlen(id);
+    baseLen = strlen(base);
+    if (idLen <= baseLen) {
+        return MA_ERROR; /* Doesn't look like the id starts with the base. */
+    }
+
+    if (strncmp(id, base, baseLen) != 0) {
+        return MA_ERROR; /* ID does not begin with base. */
+    }
+
+    deviceIndexStr = id + baseLen;
+    if (deviceIndexStr[0] == '\0') {
+        return MA_ERROR; /* No index specified in the ID. */
+    }
+
+    if (pIndexOut) {
+        *pIndexOut = atoi(deviceIndexStr);
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_bool32 ma_context_is_device_id_equal__audio4(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1)
+{
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pID0 != NULL);
+    MA_ASSERT(pID1 != NULL);
+    (void)pContext;
+
+    return ma_strcmp(pID0->audio4, pID1->audio4) == 0;
+}
+
+#if !defined(MA_AUDIO4_USE_NEW_API) /* Old API */
+static ma_format ma_format_from_encoding__audio4(unsigned int encoding, unsigned int precision)
+{
+    if (precision == 8 && (encoding == AUDIO_ENCODING_ULINEAR || encoding == AUDIO_ENCODING_ULINEAR || encoding == AUDIO_ENCODING_ULINEAR_LE || encoding == AUDIO_ENCODING_ULINEAR_BE)) {
+        return ma_format_u8;
+    } else {
+        if (ma_is_little_endian() && encoding == AUDIO_ENCODING_SLINEAR_LE) {
+            if (precision == 16) {
+                return ma_format_s16;
+            } else if (precision == 24) {
+                return ma_format_s24;
+            } else if (precision == 32) {
+                return ma_format_s32;
+            }
+        } else if (ma_is_big_endian() && encoding == AUDIO_ENCODING_SLINEAR_BE) {
+            if (precision == 16) {
+                return ma_format_s16;
+            } else if (precision == 24) {
+                return ma_format_s24;
+            } else if (precision == 32) {
+                return ma_format_s32;
+            }
+        }
+    }
+
+    return ma_format_unknown; /* Encoding not supported. */
+}
+
+static void ma_encoding_from_format__audio4(ma_format format, unsigned int* pEncoding, unsigned int* pPrecision)
+{
+    MA_ASSERT(format != ma_format_unknown);
+    MA_ASSERT(pEncoding != NULL);
+    MA_ASSERT(pPrecision != NULL);
+
+    switch (format)
+    {
+        case ma_format_u8:
+        {
+            *pEncoding = AUDIO_ENCODING_ULINEAR;
+            *pPrecision = 8;
+        } break;
+
+        case ma_format_s24:
+        {
+            *pEncoding = (ma_is_little_endian()) ? AUDIO_ENCODING_SLINEAR_LE : AUDIO_ENCODING_SLINEAR_BE;
+            *pPrecision = 24;
+        } break;
+
+        case ma_format_s32:
+        {
+            *pEncoding = (ma_is_little_endian()) ?
AUDIO_ENCODING_SLINEAR_LE : AUDIO_ENCODING_SLINEAR_BE; + *pPrecision = 32; + } break; + + case ma_format_s16: + case ma_format_f32: + default: + { + *pEncoding = (ma_is_little_endian()) ? AUDIO_ENCODING_SLINEAR_LE : AUDIO_ENCODING_SLINEAR_BE; + *pPrecision = 16; + } break; + } +} + +static ma_format ma_format_from_prinfo__audio4(struct audio_prinfo* prinfo) +{ + return ma_format_from_encoding__audio4(prinfo->encoding, prinfo->precision); +} +#else +static ma_format ma_format_from_swpar__audio4(struct audio_swpar* par) +{ + if (par->bits == 8 && par->bps == 1 && par->sig == 0) { + return ma_format_u8; + } + if (par->bits == 16 && par->bps == 2 && par->sig == 1 && par->le == ma_is_little_endian()) { + return ma_format_s16; + } + if (par->bits == 24 && par->bps == 3 && par->sig == 1 && par->le == ma_is_little_endian()) { + return ma_format_s24; + } + if (par->bits == 32 && par->bps == 4 && par->sig == 1 && par->le == ma_is_little_endian()) { + return ma_format_f32; + } + + /* Format not supported. */ + return ma_format_unknown; +} +#endif + +static ma_result ma_context_get_device_info_from_fd__audio4(ma_context* pContext, ma_device_type deviceType, int fd, ma_device_info* pInfoOut) +{ + audio_device_t fdDevice; +#if !defined(MA_AUDIO4_USE_NEW_API) + int counter = 0; + audio_info_t fdInfo; +#else + struct audio_swpar fdPar; + ma_format format; +#endif + + MA_ASSERT(pContext != NULL); + MA_ASSERT(fd >= 0); + MA_ASSERT(pInfoOut != NULL); + + (void)pContext; + (void)deviceType; + + if (ioctl(fd, AUDIO_GETDEV, &fdDevice) < 0) { + return MA_ERROR; /* Failed to retrieve device info. */ + } + + /* Name. */ + ma_strcpy_s(pInfoOut->name, sizeof(pInfoOut->name), fdDevice.name); + +#if !defined(MA_AUDIO4_USE_NEW_API) + /* Supported formats. We get this by looking at the encodings. 
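Each supported encoding is enumerated with AUDIO_GETENC (incrementing the index until the ioctl fails) and mapped to a miniaudio format below.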
*/ + for (;;) { + audio_encoding_t encoding; + ma_format format; + + MA_ZERO_OBJECT(&encoding); + encoding.index = counter; + if (ioctl(fd, AUDIO_GETENC, &encoding) < 0) { + break; + } + + format = ma_format_from_encoding__audio4(encoding.encoding, encoding.precision); + if (format != ma_format_unknown) { + pInfoOut->formats[pInfoOut->formatCount++] = format; + } + + counter += 1; + } + + if (ioctl(fd, AUDIO_GETINFO, &fdInfo) < 0) { + return MA_ERROR; + } + + if (deviceType == ma_device_type_playback) { + pInfoOut->minChannels = fdInfo.play.channels; + pInfoOut->maxChannels = fdInfo.play.channels; + pInfoOut->minSampleRate = fdInfo.play.sample_rate; + pInfoOut->maxSampleRate = fdInfo.play.sample_rate; + } else { + pInfoOut->minChannels = fdInfo.record.channels; + pInfoOut->maxChannels = fdInfo.record.channels; + pInfoOut->minSampleRate = fdInfo.record.sample_rate; + pInfoOut->maxSampleRate = fdInfo.record.sample_rate; + } +#else + if (ioctl(fd, AUDIO_GETPAR, &fdPar) < 0) { + return MA_ERROR; + } + + format = ma_format_from_swpar__audio4(&fdPar); + if (format == ma_format_unknown) { + return MA_FORMAT_NOT_SUPPORTED; + } + pInfoOut->formats[pInfoOut->formatCount++] = format; + + if (deviceType == ma_device_type_playback) { + pInfoOut->minChannels = fdPar.pchan; + pInfoOut->maxChannels = fdPar.pchan; + } else { + pInfoOut->minChannels = fdPar.rchan; + pInfoOut->maxChannels = fdPar.rchan; + } + + pInfoOut->minSampleRate = fdPar.rate; + pInfoOut->maxSampleRate = fdPar.rate; +#endif + + return MA_SUCCESS; +} + +static ma_result ma_context_enumerate_devices__audio4(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + const int maxDevices = 64; + char devpath[256]; + int iDevice; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + /* + Every device will be named "/dev/audioN", with a "/dev/audioctlN" equivalent. We use the "/dev/audioctlN" + version here since we can open it even when another process has control of the "/dev/audioN" device. + */ + for (iDevice = 0; iDevice < maxDevices; ++iDevice) { + struct stat st; + int fd; + ma_bool32 isTerminating = MA_FALSE; + + ma_strcpy_s(devpath, sizeof(devpath), "/dev/audioctl"); + ma_itoa_s(iDevice, devpath+strlen(devpath), sizeof(devpath)-strlen(devpath), 10); + + if (stat(devpath, &st) < 0) { + break; + } + + /* The device exists, but we need to check if it's usable as playback and/or capture. */ + + /* Playback. */ + if (!isTerminating) { + fd = open(devpath, O_RDONLY, 0); + if (fd >= 0) { + /* Supports playback. */ + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_construct_device_id__audio4(deviceInfo.id.audio4, sizeof(deviceInfo.id.audio4), "/dev/audio", iDevice); + if (ma_context_get_device_info_from_fd__audio4(pContext, ma_device_type_playback, fd, &deviceInfo) == MA_SUCCESS) { + isTerminating = !callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + } + + close(fd); + } + } + + /* Capture. */ + if (!isTerminating) { + fd = open(devpath, O_WRONLY, 0); + if (fd >= 0) { + /* Supports capture. 
*/ + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_construct_device_id__audio4(deviceInfo.id.audio4, sizeof(deviceInfo.id.audio4), "/dev/audio", iDevice); + if (ma_context_get_device_info_from_fd__audio4(pContext, ma_device_type_capture, fd, &deviceInfo) == MA_SUCCESS) { + isTerminating = !callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + } + + close(fd); + } + } + + if (isTerminating) { + break; + } + } + + return MA_SUCCESS; +} + +static ma_result ma_context_get_device_info__audio4(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) +{ + int fd = -1; + int deviceIndex = -1; + char ctlid[256]; + ma_result result; + + MA_ASSERT(pContext != NULL); + (void)shareMode; + + /* + We need to open the "/dev/audioctlN" device to get the info. To do this we need to extract the number + from the device ID which will be in "/dev/audioN" format. + */ + if (pDeviceID == NULL) { + /* Default device. */ + ma_strcpy_s(ctlid, sizeof(ctlid), "/dev/audioctl"); + } else { + /* Specific device. We need to convert from "/dev/audioN" to "/dev/audioctlN". */ + result = ma_extract_device_index_from_id__audio4(pDeviceID->audio4, "/dev/audio", &deviceIndex); + if (result != MA_SUCCESS) { + return result; + } + + ma_construct_device_id__audio4(ctlid, sizeof(ctlid), "/dev/audioctl", deviceIndex); + } + + fd = open(ctlid, (deviceType == ma_device_type_playback) ? O_WRONLY : O_RDONLY, 0); + if (fd == -1) { + return MA_NO_DEVICE; + } + + if (deviceIndex == -1) { + ma_strcpy_s(pDeviceInfo->id.audio4, sizeof(pDeviceInfo->id.audio4), "/dev/audio"); + } else { + ma_construct_device_id__audio4(pDeviceInfo->id.audio4, sizeof(pDeviceInfo->id.audio4), "/dev/audio", deviceIndex); + } + + result = ma_context_get_device_info_from_fd__audio4(pContext, deviceType, fd, pDeviceInfo); + + close(fd); + return result; +} + +static void ma_device_uninit__audio4(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + close(pDevice->audio4.fdCapture); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + close(pDevice->audio4.fdPlayback); + } +} + +static ma_result ma_device_init_fd__audio4(ma_context* pContext, const ma_device_config* pConfig, ma_device_type deviceType, ma_device* pDevice) +{ + const char* pDefaultDeviceNames[] = { + "/dev/audio", + "/dev/audio0" + }; + int fd; + int fdFlags = 0; +#if !defined(MA_AUDIO4_USE_NEW_API) /* Old API */ + audio_info_t fdInfo; +#else + struct audio_swpar fdPar; +#endif + ma_format internalFormat; + ma_uint32 internalChannels; + ma_uint32 internalSampleRate; + ma_uint32 internalPeriodSizeInFrames; + ma_uint32 internalPeriods; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pConfig != NULL); + MA_ASSERT(deviceType != ma_device_type_duplex); + MA_ASSERT(pDevice != NULL); + + (void)pContext; + + /* The first thing to do is open the file. */ + if (deviceType == ma_device_type_capture) { + fdFlags = O_RDONLY; + } else { + fdFlags = O_WRONLY; + } + /*fdFlags |= O_NONBLOCK;*/ + + if ((deviceType == ma_device_type_capture && pConfig->capture.pDeviceID == NULL) || (deviceType == ma_device_type_playback && pConfig->playback.pDeviceID == NULL)) { + /* Default device. 
*/ + size_t iDevice; + for (iDevice = 0; iDevice < ma_countof(pDefaultDeviceNames); ++iDevice) { + fd = open(pDefaultDeviceNames[iDevice], fdFlags, 0); + if (fd != -1) { + break; + } + } + } else { + /* Specific device. */ + fd = open((deviceType == ma_device_type_capture) ? pConfig->capture.pDeviceID->audio4 : pConfig->playback.pDeviceID->audio4, fdFlags, 0); + } + + if (fd == -1) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] Failed to open device.", ma_result_from_errno(errno)); + } + +#if !defined(MA_AUDIO4_USE_NEW_API) /* Old API */ + AUDIO_INITINFO(&fdInfo); + + /* We get the driver to do as much of the data conversion as possible. */ + if (deviceType == ma_device_type_capture) { + fdInfo.mode = AUMODE_RECORD; + ma_encoding_from_format__audio4(pConfig->capture.format, &fdInfo.record.encoding, &fdInfo.record.precision); + fdInfo.record.channels = pConfig->capture.channels; + fdInfo.record.sample_rate = pConfig->sampleRate; + } else { + fdInfo.mode = AUMODE_PLAY; + ma_encoding_from_format__audio4(pConfig->playback.format, &fdInfo.play.encoding, &fdInfo.play.precision); + fdInfo.play.channels = pConfig->playback.channels; + fdInfo.play.sample_rate = pConfig->sampleRate; + } + + if (ioctl(fd, AUDIO_SETINFO, &fdInfo) < 0) { + close(fd); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] Failed to set device format. AUDIO_SETINFO failed.", MA_FORMAT_NOT_SUPPORTED); + } + + if (ioctl(fd, AUDIO_GETINFO, &fdInfo) < 0) { + close(fd); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] AUDIO_GETINFO failed.", MA_FORMAT_NOT_SUPPORTED); + } + + if (deviceType == ma_device_type_capture) { + internalFormat = ma_format_from_prinfo__audio4(&fdInfo.record); + internalChannels = fdInfo.record.channels; + internalSampleRate = fdInfo.record.sample_rate; + } else { + internalFormat = ma_format_from_prinfo__audio4(&fdInfo.play); + internalChannels = fdInfo.play.channels; + internalSampleRate = fdInfo.play.sample_rate; + } + + if (internalFormat == ma_format_unknown) { + close(fd); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] The device's internal device format is not supported by miniaudio. The device is unusable.", MA_FORMAT_NOT_SUPPORTED); + } + + /* Buffer. */ + { + ma_uint32 internalPeriodSizeInBytes; + + internalPeriodSizeInFrames = pConfig->periodSizeInFrames; + if (internalPeriodSizeInFrames == 0) { + internalPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->periodSizeInMilliseconds, internalSampleRate); + } + + internalPeriodSizeInBytes = internalPeriodSizeInFrames * ma_get_bytes_per_frame(internalFormat, internalChannels); + if (internalPeriodSizeInBytes < 16) { + internalPeriodSizeInBytes = 16; + } + + internalPeriods = pConfig->periods; + if (internalPeriods < 2) { + internalPeriods = 2; + } + + /* What miniaudio calls a period, audio4 calls a block. */ + AUDIO_INITINFO(&fdInfo); + fdInfo.hiwat = internalPeriods; + fdInfo.lowat = internalPeriods-1; + fdInfo.blocksize = internalPeriodSizeInBytes; + if (ioctl(fd, AUDIO_SETINFO, &fdInfo) < 0) { + close(fd); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] Failed to set internal buffer size. AUDIO_SETINFO failed.", MA_FORMAT_NOT_SUPPORTED); + } + + internalPeriods = fdInfo.hiwat; + internalPeriodSizeInFrames = fdInfo.blocksize / ma_get_bytes_per_frame(internalFormat, internalChannels); + } +#else + /* We need to retrieve the format of the device so we can know the channel count and sample rate. Then we can calculate the buffer size. 
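AUDIO_GETPAR fills out a struct audio_swpar with the current software parameters; the block size and count are then adjusted and applied with AUDIO_SETPAR further down.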
*/ + if (ioctl(fd, AUDIO_GETPAR, &fdPar) < 0) { + close(fd); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] Failed to retrieve initial device parameters.", MA_FORMAT_NOT_SUPPORTED); + } + + internalFormat = ma_format_from_swpar__audio4(&fdPar); + internalChannels = (deviceType == ma_device_type_capture) ? fdPar.rchan : fdPar.pchan; + internalSampleRate = fdPar.rate; + + if (internalFormat == ma_format_unknown) { + close(fd); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] The device's internal device format is not supported by miniaudio. The device is unusable.", MA_FORMAT_NOT_SUPPORTED); + } + + /* Buffer. */ + { + ma_uint32 internalPeriodSizeInBytes; + + internalPeriodSizeInFrames = pConfig->periodSizeInFrames; + if (internalPeriodSizeInFrames == 0) { + internalPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->periodSizeInMilliseconds, internalSampleRate); + } + + /* What miniaudio calls a period, audio4 calls a block. */ + internalPeriodSizeInBytes = internalPeriodSizeInFrames * ma_get_bytes_per_frame(internalFormat, internalChannels); + if (internalPeriodSizeInBytes < 16) { + internalPeriodSizeInBytes = 16; + } + + fdPar.nblks = pConfig->periods; + fdPar.round = internalPeriodSizeInBytes; + + if (ioctl(fd, AUDIO_SETPAR, &fdPar) < 0) { + close(fd); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] Failed to set device parameters.", MA_FORMAT_NOT_SUPPORTED); + } + + if (ioctl(fd, AUDIO_GETPAR, &fdPar) < 0) { + close(fd); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] Failed to retrieve actual device parameters.", MA_FORMAT_NOT_SUPPORTED); + } + } + + internalFormat = ma_format_from_swpar__audio4(&fdPar); + internalChannels = (deviceType == ma_device_type_capture) ? fdPar.rchan : fdPar.pchan; + internalSampleRate = fdPar.rate; + internalPeriods = fdPar.nblks; + internalPeriodSizeInFrames = fdPar.round / ma_get_bytes_per_frame(internalFormat, internalChannels); +#endif + + if (internalFormat == ma_format_unknown) { + close(fd); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] The device's internal device format is not supported by miniaudio. 
The device is unusable.", MA_FORMAT_NOT_SUPPORTED); + } + + if (deviceType == ma_device_type_capture) { + pDevice->audio4.fdCapture = fd; + pDevice->capture.internalFormat = internalFormat; + pDevice->capture.internalChannels = internalChannels; + pDevice->capture.internalSampleRate = internalSampleRate; + ma_get_standard_channel_map(ma_standard_channel_map_sound4, internalChannels, pDevice->capture.internalChannelMap); + pDevice->capture.internalPeriodSizeInFrames = internalPeriodSizeInFrames; + pDevice->capture.internalPeriods = internalPeriods; + } else { + pDevice->audio4.fdPlayback = fd; + pDevice->playback.internalFormat = internalFormat; + pDevice->playback.internalChannels = internalChannels; + pDevice->playback.internalSampleRate = internalSampleRate; + ma_get_standard_channel_map(ma_standard_channel_map_sound4, internalChannels, pDevice->playback.internalChannelMap); + pDevice->playback.internalPeriodSizeInFrames = internalPeriodSizeInFrames; + pDevice->playback.internalPeriods = internalPeriods; + } + + return MA_SUCCESS; +} + +static ma_result ma_device_init__audio4(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + MA_ZERO_OBJECT(&pDevice->audio4); + + if (pConfig->deviceType == ma_device_type_loopback) { + return MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + pDevice->audio4.fdCapture = -1; + pDevice->audio4.fdPlayback = -1; + + /* + The version of the operating system dictates whether or not the device is exclusive or shared. NetBSD + introduced in-kernel mixing which means it's shared. All other BSD flavours are exclusive as far as + I'm aware. + */ +#if defined(__NetBSD_Version__) && __NetBSD_Version__ >= 800000000 + /* NetBSD 8.0+ */ + if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.shareMode == ma_share_mode_exclusive) || + ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.shareMode == ma_share_mode_exclusive)) { + return MA_SHARE_MODE_NOT_SUPPORTED; + } +#else + /* All other flavors. */ +#endif + + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + ma_result result = ma_device_init_fd__audio4(pContext, pConfig, ma_device_type_capture, pDevice); + if (result != MA_SUCCESS) { + return result; + } + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + ma_result result = ma_device_init_fd__audio4(pContext, pConfig, ma_device_type_playback, pDevice); + if (result != MA_SUCCESS) { + if (pConfig->deviceType == ma_device_type_duplex) { + close(pDevice->audio4.fdCapture); + } + return result; + } + } + + return MA_SUCCESS; +} + +#if 0 +static ma_result ma_device_start__audio4(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + if (pDevice->audio4.fdCapture == -1) { + return MA_INVALID_ARGS; + } + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + if (pDevice->audio4.fdPlayback == -1) { + return MA_INVALID_ARGS; + } + } + + return MA_SUCCESS; +} +#endif + +static ma_result ma_device_stop_fd__audio4(ma_device* pDevice, int fd) +{ + if (fd == -1) { + return MA_INVALID_ARGS; + } + +#if !defined(MA_AUDIO4_USE_NEW_API) + if (ioctl(fd, AUDIO_FLUSH, 0) < 0) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] Failed to stop device. 
AUDIO_FLUSH failed.", ma_result_from_errno(errno)); + } +#else + if (ioctl(fd, AUDIO_STOP, 0) < 0) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] Failed to stop device. AUDIO_STOP failed.", ma_result_from_errno(errno)); + } +#endif + + return MA_SUCCESS; +} + +static ma_result ma_device_stop__audio4(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + ma_result result; + + result = ma_device_stop_fd__audio4(pDevice, pDevice->audio4.fdCapture); + if (result != MA_SUCCESS) { + return result; + } + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ma_result result; + + /* Drain the device first. If this fails we'll just need to flush without draining. Unfortunately draining isn't available on newer version of OpenBSD. */ + #if !defined(MA_AUDIO4_USE_NEW_API) + ioctl(pDevice->audio4.fdPlayback, AUDIO_DRAIN, 0); + #endif + + /* Here is where the device is stopped immediately. */ + result = ma_device_stop_fd__audio4(pDevice, pDevice->audio4.fdPlayback); + if (result != MA_SUCCESS) { + return result; + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device_write__audio4(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten) +{ + int result; + + if (pFramesWritten != NULL) { + *pFramesWritten = 0; + } + + result = write(pDevice->audio4.fdPlayback, pPCMFrames, frameCount * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels)); + if (result < 0) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] Failed to write data to the device.", ma_result_from_errno(errno)); + } + + if (pFramesWritten != NULL) { + *pFramesWritten = (ma_uint32)result / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + } + + return MA_SUCCESS; +} + +static ma_result ma_device_read__audio4(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead) +{ + int result; + + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + result = read(pDevice->audio4.fdCapture, pPCMFrames, frameCount * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels)); + if (result < 0) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] Failed to read data from the device.", ma_result_from_errno(errno)); + } + + if (pFramesRead != NULL) { + *pFramesRead = (ma_uint32)result / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + } + + return MA_SUCCESS; +} + +static ma_result ma_device_main_loop__audio4(ma_device* pDevice) +{ + ma_result result = MA_SUCCESS; + ma_bool32 exitLoop = MA_FALSE; + + /* No need to explicitly start the device like the other backends. 
*/ + + while (ma_device__get_state(pDevice) == MA_STATE_STARTED && !exitLoop) { + switch (pDevice->type) + { + case ma_device_type_duplex: + { + /* The process is: device_read -> convert -> callback -> convert -> device_write */ + ma_uint32 totalCapturedDeviceFramesProcessed = 0; + ma_uint32 capturedDevicePeriodSizeInFrames = ma_min(pDevice->capture.internalPeriodSizeInFrames, pDevice->playback.internalPeriodSizeInFrames); + + while (totalCapturedDeviceFramesProcessed < capturedDevicePeriodSizeInFrames) { + ma_uint8 capturedDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint8 playbackDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 capturedDeviceDataCapInFrames = sizeof(capturedDeviceData) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + ma_uint32 playbackDeviceDataCapInFrames = sizeof(playbackDeviceData) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + ma_uint32 capturedDeviceFramesRemaining; + ma_uint32 capturedDeviceFramesProcessed; + ma_uint32 capturedDeviceFramesToProcess; + ma_uint32 capturedDeviceFramesToTryProcessing = capturedDevicePeriodSizeInFrames - totalCapturedDeviceFramesProcessed; + if (capturedDeviceFramesToTryProcessing > capturedDeviceDataCapInFrames) { + capturedDeviceFramesToTryProcessing = capturedDeviceDataCapInFrames; + } + + result = ma_device_read__audio4(pDevice, capturedDeviceData, capturedDeviceFramesToTryProcessing, &capturedDeviceFramesToProcess); + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + + capturedDeviceFramesRemaining = capturedDeviceFramesToProcess; + capturedDeviceFramesProcessed = 0; + + for (;;) { + ma_uint8 capturedClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint8 playbackClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 capturedClientDataCapInFrames = sizeof(capturedClientData) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); + ma_uint32 playbackClientDataCapInFrames = sizeof(playbackClientData) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); + ma_uint64 capturedClientFramesToProcessThisIteration = ma_min(capturedClientDataCapInFrames, playbackClientDataCapInFrames); + ma_uint64 capturedDeviceFramesToProcessThisIteration = capturedDeviceFramesRemaining; + ma_uint8* pRunningCapturedDeviceFrames = ma_offset_ptr(capturedDeviceData, capturedDeviceFramesProcessed * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels)); + + /* Convert capture data from device format to client format. */ + result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningCapturedDeviceFrames, &capturedDeviceFramesToProcessThisIteration, capturedClientData, &capturedClientFramesToProcessThisIteration); + if (result != MA_SUCCESS) { + break; + } + + /* + If we weren't able to generate any output frames it must mean we've exhaused all of our input. The only time this would not be the case is if capturedClientData was too small + which should never be the case when it's of the size MA_DATA_CONVERTER_STACK_BUFFER_SIZE. + */ + if (capturedClientFramesToProcessThisIteration == 0) { + break; + } + + ma_device__on_data(pDevice, playbackClientData, capturedClientData, (ma_uint32)capturedClientFramesToProcessThisIteration); /* Safe cast .*/ + + capturedDeviceFramesProcessed += (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. 
*/ + capturedDeviceFramesRemaining -= (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */ + + /* At this point the playbackClientData buffer should be holding data that needs to be written to the device. */ + for (;;) { + ma_uint64 convertedClientFrameCount = capturedClientFramesToProcessThisIteration; + ma_uint64 convertedDeviceFrameCount = playbackDeviceDataCapInFrames; + result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, playbackClientData, &convertedClientFrameCount, playbackDeviceData, &convertedDeviceFrameCount); + if (result != MA_SUCCESS) { + break; + } + + result = ma_device_write__audio4(pDevice, playbackDeviceData, (ma_uint32)convertedDeviceFrameCount, NULL); /* Safe cast. */ + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + + capturedClientFramesToProcessThisIteration -= (ma_uint32)convertedClientFrameCount; /* Safe cast. */ + if (capturedClientFramesToProcessThisIteration == 0) { + break; + } + } + + /* In case an error happened from ma_device_write__audio4()... */ + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + } + + totalCapturedDeviceFramesProcessed += capturedDeviceFramesProcessed; + } + } break; + + case ma_device_type_capture: + { + /* We read in chunks of the period size, but use a stack allocated buffer for the intermediary. */ + ma_uint8 intermediaryBuffer[8192]; + ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + ma_uint32 periodSizeInFrames = pDevice->capture.internalPeriodSizeInFrames; + ma_uint32 framesReadThisPeriod = 0; + while (framesReadThisPeriod < periodSizeInFrames) { + ma_uint32 framesRemainingInPeriod = periodSizeInFrames - framesReadThisPeriod; + ma_uint32 framesProcessed; + ma_uint32 framesToReadThisIteration = framesRemainingInPeriod; + if (framesToReadThisIteration > intermediaryBufferSizeInFrames) { + framesToReadThisIteration = intermediaryBufferSizeInFrames; + } + + result = ma_device_read__audio4(pDevice, intermediaryBuffer, framesToReadThisIteration, &framesProcessed); + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + + ma_device__send_frames_to_client(pDevice, framesProcessed, intermediaryBuffer); + + framesReadThisPeriod += framesProcessed; + } + } break; + + case ma_device_type_playback: + { + /* We write in chunks of the period size, but use a stack allocated buffer for the intermediary. 
*/
+                ma_uint8 intermediaryBuffer[8192];
+                ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+                ma_uint32 periodSizeInFrames = pDevice->playback.internalPeriodSizeInFrames;
+                ma_uint32 framesWrittenThisPeriod = 0;
+                while (framesWrittenThisPeriod < periodSizeInFrames) {
+                    ma_uint32 framesRemainingInPeriod = periodSizeInFrames - framesWrittenThisPeriod;
+                    ma_uint32 framesProcessed;
+                    ma_uint32 framesToWriteThisIteration = framesRemainingInPeriod;
+                    if (framesToWriteThisIteration > intermediaryBufferSizeInFrames) {
+                        framesToWriteThisIteration = intermediaryBufferSizeInFrames;
+                    }
+
+                    ma_device__read_frames_from_client(pDevice, framesToWriteThisIteration, intermediaryBuffer);
+
+                    result = ma_device_write__audio4(pDevice, intermediaryBuffer, framesToWriteThisIteration, &framesProcessed);
+                    if (result != MA_SUCCESS) {
+                        exitLoop = MA_TRUE;
+                        break;
+                    }
+
+                    framesWrittenThisPeriod += framesProcessed;
+                }
+            } break;
+
+            /* To silence a warning. Will never hit this. */
+            case ma_device_type_loopback:
+            default: break;
+        }
+    }
+
+
+    /* Here is where the device is stopped. */
+    ma_device_stop__audio4(pDevice);
+
+    return result;
+}
+
+static ma_result ma_context_uninit__audio4(ma_context* pContext)
+{
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pContext->backend == ma_backend_audio4);
+
+    (void)pContext;
+    return MA_SUCCESS;
+}
+
+static ma_result ma_context_init__audio4(const ma_context_config* pConfig, ma_context* pContext)
+{
+    MA_ASSERT(pContext != NULL);
+
+    (void)pConfig;
+
+    pContext->onUninit = ma_context_uninit__audio4;
+    pContext->onDeviceIDEqual = ma_context_is_device_id_equal__audio4;
+    pContext->onEnumDevices = ma_context_enumerate_devices__audio4;
+    pContext->onGetDeviceInfo = ma_context_get_device_info__audio4;
+    pContext->onDeviceInit = ma_device_init__audio4;
+    pContext->onDeviceUninit = ma_device_uninit__audio4;
+    pContext->onDeviceStart = NULL; /* Not required for synchronous backends. */
+    pContext->onDeviceStop = NULL; /* Not required for synchronous backends. */
+    pContext->onDeviceMainLoop = ma_device_main_loop__audio4;
+
+    return MA_SUCCESS;
+}
+#endif /* audio4 */
+
+
+/******************************************************************************
+
+OSS Backend
+
+******************************************************************************/
+#ifdef MA_HAS_OSS
+#include <sys/ioctl.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/soundcard.h>
+
+#ifndef SNDCTL_DSP_HALT
+#define SNDCTL_DSP_HALT SNDCTL_DSP_RESET
+#endif
+
+static int ma_open_temp_device__oss()
+{
+    /* The OSS sample code uses "/dev/mixer" as the device for getting system properties so I'm going to do the same. */
+    int fd = open("/dev/mixer", O_RDONLY, 0);
+    if (fd >= 0) {
+        return fd;
+    }
+
+    return -1;
+}
+
+static ma_result ma_context_open_device__oss(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, int* pfd)
+{
+    const char* deviceName;
+    int flags;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pfd != NULL);
+    (void)pContext;
+
+    *pfd = -1;
+
+    /* This function should only be called for playback or capture, not duplex. */
+    if (deviceType == ma_device_type_duplex) {
+        return MA_INVALID_ARGS;
+    }
+
+    deviceName = "/dev/dsp";
+    if (pDeviceID != NULL) {
+        deviceName = pDeviceID->oss;
+    }
+
+    flags = (deviceType == ma_device_type_playback) ?
O_WRONLY : O_RDONLY; + if (shareMode == ma_share_mode_exclusive) { + flags |= O_EXCL; + } + + *pfd = open(deviceName, flags, 0); + if (*pfd == -1) { + return ma_result_from_errno(errno); + } + + return MA_SUCCESS; +} + +static ma_bool32 ma_context_is_device_id_equal__oss(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pID0 != NULL); + MA_ASSERT(pID1 != NULL); + (void)pContext; + + return ma_strcmp(pID0->oss, pID1->oss) == 0; +} + +static ma_result ma_context_enumerate_devices__oss(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + int fd; + oss_sysinfo si; + int result; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + fd = ma_open_temp_device__oss(); + if (fd == -1) { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[OSS] Failed to open a temporary device for retrieving system information used for device enumeration.", MA_NO_BACKEND); + } + + result = ioctl(fd, SNDCTL_SYSINFO, &si); + if (result != -1) { + int iAudioDevice; + for (iAudioDevice = 0; iAudioDevice < si.numaudios; ++iAudioDevice) { + oss_audioinfo ai; + ai.dev = iAudioDevice; + result = ioctl(fd, SNDCTL_AUDIOINFO, &ai); + if (result != -1) { + if (ai.devnode[0] != '\0') { /* <-- Can be blank, according to documentation. */ + ma_device_info deviceInfo; + ma_bool32 isTerminating = MA_FALSE; + + MA_ZERO_OBJECT(&deviceInfo); + + /* ID */ + ma_strncpy_s(deviceInfo.id.oss, sizeof(deviceInfo.id.oss), ai.devnode, (size_t)-1); + + /* + The human readable device name should be in the "ai.handle" variable, but it can + sometimes be empty in which case we just fall back to "ai.name" which is less user + friendly, but usually has a value. + */ + if (ai.handle[0] != '\0') { + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), ai.handle, (size_t)-1); + } else { + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), ai.name, (size_t)-1); + } + + /* The device can be both playback and capture. */ + if (!isTerminating && (ai.caps & PCM_CAP_OUTPUT) != 0) { + isTerminating = !callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + } + if (!isTerminating && (ai.caps & PCM_CAP_INPUT) != 0) { + isTerminating = !callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + } + + if (isTerminating) { + break; + } + } + } + } + } else { + close(fd); + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[OSS] Failed to retrieve system information for device enumeration.", MA_NO_BACKEND); + } + + close(fd); + return MA_SUCCESS; +} + +static ma_result ma_context_get_device_info__oss(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) +{ + ma_bool32 foundDevice; + int fdTemp; + oss_sysinfo si; + int result; + + MA_ASSERT(pContext != NULL); + (void)shareMode; + + /* Handle the default device a little differently. */ + if (pDeviceID == NULL) { + if (deviceType == ma_device_type_playback) { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + } else { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + } + + return MA_SUCCESS; + } + + + /* If we get here it means we are _not_ using the default device. 
*/ + foundDevice = MA_FALSE; + + fdTemp = ma_open_temp_device__oss(); + if (fdTemp == -1) { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[OSS] Failed to open a temporary device for retrieving system information used for device enumeration.", MA_NO_BACKEND); + } + + result = ioctl(fdTemp, SNDCTL_SYSINFO, &si); + if (result != -1) { + int iAudioDevice; + for (iAudioDevice = 0; iAudioDevice < si.numaudios; ++iAudioDevice) { + oss_audioinfo ai; + ai.dev = iAudioDevice; + result = ioctl(fdTemp, SNDCTL_AUDIOINFO, &ai); + if (result != -1) { + if (ma_strcmp(ai.devnode, pDeviceID->oss) == 0) { + /* It has the same name, so now just confirm the type. */ + if ((deviceType == ma_device_type_playback && ((ai.caps & PCM_CAP_OUTPUT) != 0)) || + (deviceType == ma_device_type_capture && ((ai.caps & PCM_CAP_INPUT) != 0))) { + unsigned int formatMask; + + /* ID */ + ma_strncpy_s(pDeviceInfo->id.oss, sizeof(pDeviceInfo->id.oss), ai.devnode, (size_t)-1); + + /* + The human readable device name should be in the "ai.handle" variable, but it can + sometimes be empty in which case we just fall back to "ai.name" which is less user + friendly, but usually has a value. + */ + if (ai.handle[0] != '\0') { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), ai.handle, (size_t)-1); + } else { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), ai.name, (size_t)-1); + } + + pDeviceInfo->minChannels = ai.min_channels; + pDeviceInfo->maxChannels = ai.max_channels; + pDeviceInfo->minSampleRate = ai.min_rate; + pDeviceInfo->maxSampleRate = ai.max_rate; + pDeviceInfo->formatCount = 0; + + if (deviceType == ma_device_type_playback) { + formatMask = ai.oformats; + } else { + formatMask = ai.iformats; + } + + if ((formatMask & AFMT_U8) != 0) { + pDeviceInfo->formats[pDeviceInfo->formatCount++] = ma_format_u8; + } + if (((formatMask & AFMT_S16_LE) != 0 && ma_is_little_endian()) || (AFMT_S16_BE && ma_is_big_endian())) { + pDeviceInfo->formats[pDeviceInfo->formatCount++] = ma_format_s16; + } + if (((formatMask & AFMT_S32_LE) != 0 && ma_is_little_endian()) || (AFMT_S32_BE && ma_is_big_endian())) { + pDeviceInfo->formats[pDeviceInfo->formatCount++] = ma_format_s32; + } + + foundDevice = MA_TRUE; + break; + } + } + } + } + } else { + close(fdTemp); + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[OSS] Failed to retrieve system information for device enumeration.", MA_NO_BACKEND); + } + + + close(fdTemp); + + if (!foundDevice) { + return MA_NO_DEVICE; + } + + return MA_SUCCESS; +} + +static void ma_device_uninit__oss(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + close(pDevice->oss.fdCapture); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + close(pDevice->oss.fdPlayback); + } +} + +static int ma_format_to_oss(ma_format format) +{ + int ossFormat = AFMT_U8; + switch (format) { + case ma_format_s16: ossFormat = (ma_is_little_endian()) ? AFMT_S16_LE : AFMT_S16_BE; break; + case ma_format_s24: ossFormat = (ma_is_little_endian()) ? AFMT_S32_LE : AFMT_S32_BE; break; + case ma_format_s32: ossFormat = (ma_is_little_endian()) ? AFMT_S32_LE : AFMT_S32_BE; break; + case ma_format_f32: ossFormat = (ma_is_little_endian()) ? 
AFMT_S16_LE : AFMT_S16_BE; break; + case ma_format_u8: + default: ossFormat = AFMT_U8; break; + } + + return ossFormat; +} + +static ma_format ma_format_from_oss(int ossFormat) +{ + if (ossFormat == AFMT_U8) { + return ma_format_u8; + } else { + if (ma_is_little_endian()) { + switch (ossFormat) { + case AFMT_S16_LE: return ma_format_s16; + case AFMT_S32_LE: return ma_format_s32; + default: return ma_format_unknown; + } + } else { + switch (ossFormat) { + case AFMT_S16_BE: return ma_format_s16; + case AFMT_S32_BE: return ma_format_s32; + default: return ma_format_unknown; + } + } + } + + return ma_format_unknown; +} + +static ma_result ma_device_init_fd__oss(ma_context* pContext, const ma_device_config* pConfig, ma_device_type deviceType, ma_device* pDevice) +{ + ma_result result; + int ossResult; + int fd; + const ma_device_id* pDeviceID = NULL; + ma_share_mode shareMode; + int ossFormat; + int ossChannels; + int ossSampleRate; + int ossFragment; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pConfig != NULL); + MA_ASSERT(deviceType != ma_device_type_duplex); + MA_ASSERT(pDevice != NULL); + + (void)pContext; + + if (deviceType == ma_device_type_capture) { + pDeviceID = pConfig->capture.pDeviceID; + shareMode = pConfig->capture.shareMode; + ossFormat = ma_format_to_oss(pConfig->capture.format); + ossChannels = (int)pConfig->capture.channels; + ossSampleRate = (int)pConfig->sampleRate; + } else { + pDeviceID = pConfig->playback.pDeviceID; + shareMode = pConfig->playback.shareMode; + ossFormat = ma_format_to_oss(pConfig->playback.format); + ossChannels = (int)pConfig->playback.channels; + ossSampleRate = (int)pConfig->sampleRate; + } + + result = ma_context_open_device__oss(pContext, deviceType, pDeviceID, shareMode, &fd); + if (result != MA_SUCCESS) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] Failed to open device.", result); + } + + /* + The OSS documantation is very clear about the order we should be initializing the device's properties: + 1) Format + 2) Channels + 3) Sample rate. + */ + + /* Format. */ + ossResult = ioctl(fd, SNDCTL_DSP_SETFMT, &ossFormat); + if (ossResult == -1) { + close(fd); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] Failed to set format.", MA_FORMAT_NOT_SUPPORTED); + } + + /* Channels. */ + ossResult = ioctl(fd, SNDCTL_DSP_CHANNELS, &ossChannels); + if (ossResult == -1) { + close(fd); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] Failed to set channel count.", MA_FORMAT_NOT_SUPPORTED); + } + + /* Sample Rate. */ + ossResult = ioctl(fd, SNDCTL_DSP_SPEED, &ossSampleRate); + if (ossResult == -1) { + close(fd); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] Failed to set sample rate.", MA_FORMAT_NOT_SUPPORTED); + } + + /* + Buffer. + + The documentation says that the fragment settings should be set as soon as possible, but I'm not sure if + it should be done before or after format/channels/rate. + + OSS wants the fragment size in bytes and a power of 2. When setting, we specify the power, not the actual + value. 
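For example, requesting 4 fragments of 1024 bytes each means passing (4 << 16) | 10, since 1 << 10 == 1024.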
+ */ + { + ma_uint32 periodSizeInFrames; + ma_uint32 periodSizeInBytes; + ma_uint32 ossFragmentSizePower; + + periodSizeInFrames = pConfig->periodSizeInFrames; + if (periodSizeInFrames == 0) { + periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->periodSizeInMilliseconds, (ma_uint32)ossSampleRate); + } + + periodSizeInBytes = ma_round_to_power_of_2(periodSizeInFrames * ma_get_bytes_per_frame(ma_format_from_oss(ossFormat), ossChannels)); + if (periodSizeInBytes < 16) { + periodSizeInBytes = 16; + } + + ossFragmentSizePower = 4; + periodSizeInBytes >>= 4; + while (periodSizeInBytes >>= 1) { + ossFragmentSizePower += 1; + } + + ossFragment = (int)((pConfig->periods << 16) | ossFragmentSizePower); + ossResult = ioctl(fd, SNDCTL_DSP_SETFRAGMENT, &ossFragment); + if (ossResult == -1) { + close(fd); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] Failed to set fragment size and period count.", MA_FORMAT_NOT_SUPPORTED); + } + } + + /* Internal settings. */ + if (deviceType == ma_device_type_capture) { + pDevice->oss.fdCapture = fd; + pDevice->capture.internalFormat = ma_format_from_oss(ossFormat); + pDevice->capture.internalChannels = ossChannels; + pDevice->capture.internalSampleRate = ossSampleRate; + ma_get_standard_channel_map(ma_standard_channel_map_sound4, pDevice->capture.internalChannels, pDevice->capture.internalChannelMap); + pDevice->capture.internalPeriods = (ma_uint32)(ossFragment >> 16); + pDevice->capture.internalPeriodSizeInFrames = (ma_uint32)(1 << (ossFragment & 0xFFFF)) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + + if (pDevice->capture.internalFormat == ma_format_unknown) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] The device's internal format is not supported by miniaudio.", MA_FORMAT_NOT_SUPPORTED); + } + } else { + pDevice->oss.fdPlayback = fd; + pDevice->playback.internalFormat = ma_format_from_oss(ossFormat); + pDevice->playback.internalChannels = ossChannels; + pDevice->playback.internalSampleRate = ossSampleRate; + ma_get_standard_channel_map(ma_standard_channel_map_sound4, pDevice->playback.internalChannels, pDevice->playback.internalChannelMap); + pDevice->playback.internalPeriods = (ma_uint32)(ossFragment >> 16); + pDevice->playback.internalPeriodSizeInFrames = (ma_uint32)(1 << (ossFragment & 0xFFFF)) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + + if (pDevice->playback.internalFormat == ma_format_unknown) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] The device's internal format is not supported by miniaudio.", MA_FORMAT_NOT_SUPPORTED); + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device_init__oss(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDevice != NULL); + + MA_ZERO_OBJECT(&pDevice->oss); + + if (pConfig->deviceType == ma_device_type_loopback) { + return MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + ma_result result = ma_device_init_fd__oss(pContext, pConfig, ma_device_type_capture, pDevice); + if (result != MA_SUCCESS) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] Failed to open device.", result); + } + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + ma_result result = ma_device_init_fd__oss(pContext, 
pConfig, ma_device_type_playback, pDevice); + if (result != MA_SUCCESS) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] Failed to open device.", result); + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device_stop__oss(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + /* + We want to use SNDCTL_DSP_HALT. From the documentation: + + In multithreaded applications SNDCTL_DSP_HALT (SNDCTL_DSP_RESET) must only be called by the thread + that actually reads/writes the audio device. It must not be called by some master thread to kill the + audio thread. The audio thread will not stop or get any kind of notification that the device was + stopped by the master thread. The device gets stopped but the next read or write call will silently + restart the device. + + This is actually safe in our case, because this function is only ever called from within our worker + thread anyway. Just keep this in mind, though... + */ + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + int result = ioctl(pDevice->oss.fdCapture, SNDCTL_DSP_HALT, 0); + if (result == -1) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] Failed to stop device. SNDCTL_DSP_HALT failed.", ma_result_from_errno(errno)); + } + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + int result = ioctl(pDevice->oss.fdPlayback, SNDCTL_DSP_HALT, 0); + if (result == -1) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] Failed to stop device. SNDCTL_DSP_HALT failed.", ma_result_from_errno(errno)); + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device_write__oss(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten) +{ + int resultOSS; + + if (pFramesWritten != NULL) { + *pFramesWritten = 0; + } + + resultOSS = write(pDevice->oss.fdPlayback, pPCMFrames, frameCount * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels)); + if (resultOSS < 0) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] Failed to send data from the client to the device.", ma_result_from_errno(errno)); + } + + if (pFramesWritten != NULL) { + *pFramesWritten = (ma_uint32)resultOSS / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + } + + return MA_SUCCESS; +} + +static ma_result ma_device_read__oss(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead) +{ + int resultOSS; + + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + resultOSS = read(pDevice->oss.fdCapture, pPCMFrames, frameCount * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels)); + if (resultOSS < 0) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] Failed to read data from the device to be sent to the client.", ma_result_from_errno(errno)); + } + + if (pFramesRead != NULL) { + *pFramesRead = (ma_uint32)resultOSS / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + } + + return MA_SUCCESS; +} + +static ma_result ma_device_main_loop__oss(ma_device* pDevice) +{ + ma_result result = MA_SUCCESS; + ma_bool32 exitLoop = MA_FALSE; + + /* No need to explicitly start the device like the other backends. 
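As with the other synchronous backends, the next read()/write() call (re)starts the transfer, so no explicit start is required here.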
*/ + + while (ma_device__get_state(pDevice) == MA_STATE_STARTED && !exitLoop) { + switch (pDevice->type) + { + case ma_device_type_duplex: + { + /* The process is: device_read -> convert -> callback -> convert -> device_write */ + ma_uint32 totalCapturedDeviceFramesProcessed = 0; + ma_uint32 capturedDevicePeriodSizeInFrames = ma_min(pDevice->capture.internalPeriodSizeInFrames, pDevice->playback.internalPeriodSizeInFrames); + + while (totalCapturedDeviceFramesProcessed < capturedDevicePeriodSizeInFrames) { + ma_uint8 capturedDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint8 playbackDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 capturedDeviceDataCapInFrames = sizeof(capturedDeviceData) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + ma_uint32 playbackDeviceDataCapInFrames = sizeof(playbackDeviceData) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + ma_uint32 capturedDeviceFramesRemaining; + ma_uint32 capturedDeviceFramesProcessed; + ma_uint32 capturedDeviceFramesToProcess; + ma_uint32 capturedDeviceFramesToTryProcessing = capturedDevicePeriodSizeInFrames - totalCapturedDeviceFramesProcessed; + if (capturedDeviceFramesToTryProcessing > capturedDeviceDataCapInFrames) { + capturedDeviceFramesToTryProcessing = capturedDeviceDataCapInFrames; + } + + result = ma_device_read__oss(pDevice, capturedDeviceData, capturedDeviceFramesToTryProcessing, &capturedDeviceFramesToProcess); + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + + capturedDeviceFramesRemaining = capturedDeviceFramesToProcess; + capturedDeviceFramesProcessed = 0; + + for (;;) { + ma_uint8 capturedClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint8 playbackClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 capturedClientDataCapInFrames = sizeof(capturedClientData) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); + ma_uint32 playbackClientDataCapInFrames = sizeof(playbackClientData) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); + ma_uint64 capturedClientFramesToProcessThisIteration = ma_min(capturedClientDataCapInFrames, playbackClientDataCapInFrames); + ma_uint64 capturedDeviceFramesToProcessThisIteration = capturedDeviceFramesRemaining; + ma_uint8* pRunningCapturedDeviceFrames = ma_offset_ptr(capturedDeviceData, capturedDeviceFramesProcessed * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels)); + + /* Convert capture data from device format to client format. */ + result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningCapturedDeviceFrames, &capturedDeviceFramesToProcessThisIteration, capturedClientData, &capturedClientFramesToProcessThisIteration); + if (result != MA_SUCCESS) { + break; + } + + /* + If we weren't able to generate any output frames it must mean we've exhaused all of our input. The only time this would not be the case is if capturedClientData was too small + which should never be the case when it's of the size MA_DATA_CONVERTER_STACK_BUFFER_SIZE. + */ + if (capturedClientFramesToProcessThisIteration == 0) { + break; + } + + ma_device__on_data(pDevice, playbackClientData, capturedClientData, (ma_uint32)capturedClientFramesToProcessThisIteration); /* Safe cast .*/ + + capturedDeviceFramesProcessed += (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. 
*/ + capturedDeviceFramesRemaining -= (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */ + + /* At this point the playbackClientData buffer should be holding data that needs to be written to the device. */ + for (;;) { + ma_uint64 convertedClientFrameCount = capturedClientFramesToProcessThisIteration; + ma_uint64 convertedDeviceFrameCount = playbackDeviceDataCapInFrames; + result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, playbackClientData, &convertedClientFrameCount, playbackDeviceData, &convertedDeviceFrameCount); + if (result != MA_SUCCESS) { + break; + } + + result = ma_device_write__oss(pDevice, playbackDeviceData, (ma_uint32)convertedDeviceFrameCount, NULL); /* Safe cast. */ + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + + capturedClientFramesToProcessThisIteration -= (ma_uint32)convertedClientFrameCount; /* Safe cast. */ + if (capturedClientFramesToProcessThisIteration == 0) { + break; + } + } + + /* In case an error happened from ma_device_write__oss()... */ + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + } + + totalCapturedDeviceFramesProcessed += capturedDeviceFramesProcessed; + } + } break; + + case ma_device_type_capture: + { + /* We read in chunks of the period size, but use a stack allocated buffer for the intermediary. */ + ma_uint8 intermediaryBuffer[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + ma_uint32 periodSizeInFrames = pDevice->capture.internalPeriodSizeInFrames; + ma_uint32 framesReadThisPeriod = 0; + while (framesReadThisPeriod < periodSizeInFrames) { + ma_uint32 framesRemainingInPeriod = periodSizeInFrames - framesReadThisPeriod; + ma_uint32 framesProcessed; + ma_uint32 framesToReadThisIteration = framesRemainingInPeriod; + if (framesToReadThisIteration > intermediaryBufferSizeInFrames) { + framesToReadThisIteration = intermediaryBufferSizeInFrames; + } + + result = ma_device_read__oss(pDevice, intermediaryBuffer, framesToReadThisIteration, &framesProcessed); + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + + ma_device__send_frames_to_client(pDevice, framesProcessed, intermediaryBuffer); + + framesReadThisPeriod += framesProcessed; + } + } break; + + case ma_device_type_playback: + { + /* We write in chunks of the period size, but use a stack allocated buffer for the intermediary. 
*/ + ma_uint8 intermediaryBuffer[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + ma_uint32 periodSizeInFrames = pDevice->playback.internalPeriodSizeInFrames; + ma_uint32 framesWrittenThisPeriod = 0; + while (framesWrittenThisPeriod < periodSizeInFrames) { + ma_uint32 framesRemainingInPeriod = periodSizeInFrames - framesWrittenThisPeriod; + ma_uint32 framesProcessed; + ma_uint32 framesToWriteThisIteration = framesRemainingInPeriod; + if (framesToWriteThisIteration > intermediaryBufferSizeInFrames) { + framesToWriteThisIteration = intermediaryBufferSizeInFrames; + } + + ma_device__read_frames_from_client(pDevice, framesToWriteThisIteration, intermediaryBuffer); + + result = ma_device_write__oss(pDevice, intermediaryBuffer, framesToWriteThisIteration, &framesProcessed); + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + + framesWrittenThisPeriod += framesProcessed; + } + } break; + + /* To silence a warning. Will never hit this. */ + case ma_device_type_loopback: + default: break; + } + } + + + /* Here is where the device is stopped. */ + ma_device_stop__oss(pDevice); + + return result; +} + +static ma_result ma_context_uninit__oss(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_oss); + + (void)pContext; + return MA_SUCCESS; +} + +static ma_result ma_context_init__oss(const ma_context_config* pConfig, ma_context* pContext) +{ + int fd; + int ossVersion; + int result; + + MA_ASSERT(pContext != NULL); + + (void)pConfig; + + /* Try opening a temporary device first so we can get version information. This is closed at the end. */ + fd = ma_open_temp_device__oss(); + if (fd == -1) { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[OSS] Failed to open temporary device for retrieving system properties.", MA_NO_BACKEND); /* Looks liks OSS isn't installed, or there are no available devices. */ + } + + /* Grab the OSS version. */ + ossVersion = 0; + result = ioctl(fd, OSS_GETVERSION, &ossVersion); + if (result == -1) { + close(fd); + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[OSS] Failed to retrieve OSS version.", MA_NO_BACKEND); + } + + pContext->oss.versionMajor = ((ossVersion & 0xFF0000) >> 16); + pContext->oss.versionMinor = ((ossVersion & 0x00FF00) >> 8); + + pContext->onUninit = ma_context_uninit__oss; + pContext->onDeviceIDEqual = ma_context_is_device_id_equal__oss; + pContext->onEnumDevices = ma_context_enumerate_devices__oss; + pContext->onGetDeviceInfo = ma_context_get_device_info__oss; + pContext->onDeviceInit = ma_device_init__oss; + pContext->onDeviceUninit = ma_device_uninit__oss; + pContext->onDeviceStart = NULL; /* Not required for synchronous backends. */ + pContext->onDeviceStop = NULL; /* Not required for synchronous backends. 
*/ + pContext->onDeviceMainLoop = ma_device_main_loop__oss; + + close(fd); + return MA_SUCCESS; +} +#endif /* OSS */ + + +/****************************************************************************** + +AAudio Backend + +******************************************************************************/ +#ifdef MA_HAS_AAUDIO +/*#include */ + +#define MA_AAUDIO_UNSPECIFIED 0 + +typedef int32_t ma_aaudio_result_t; +typedef int32_t ma_aaudio_direction_t; +typedef int32_t ma_aaudio_sharing_mode_t; +typedef int32_t ma_aaudio_format_t; +typedef int32_t ma_aaudio_stream_state_t; +typedef int32_t ma_aaudio_performance_mode_t; +typedef int32_t ma_aaudio_data_callback_result_t; + +/* Result codes. miniaudio only cares about the success code. */ +#define MA_AAUDIO_OK 0 + +/* Directions. */ +#define MA_AAUDIO_DIRECTION_OUTPUT 0 +#define MA_AAUDIO_DIRECTION_INPUT 1 + +/* Sharing modes. */ +#define MA_AAUDIO_SHARING_MODE_EXCLUSIVE 0 +#define MA_AAUDIO_SHARING_MODE_SHARED 1 + +/* Formats. */ +#define MA_AAUDIO_FORMAT_PCM_I16 1 +#define MA_AAUDIO_FORMAT_PCM_FLOAT 2 + +/* Stream states. */ +#define MA_AAUDIO_STREAM_STATE_UNINITIALIZED 0 +#define MA_AAUDIO_STREAM_STATE_UNKNOWN 1 +#define MA_AAUDIO_STREAM_STATE_OPEN 2 +#define MA_AAUDIO_STREAM_STATE_STARTING 3 +#define MA_AAUDIO_STREAM_STATE_STARTED 4 +#define MA_AAUDIO_STREAM_STATE_PAUSING 5 +#define MA_AAUDIO_STREAM_STATE_PAUSED 6 +#define MA_AAUDIO_STREAM_STATE_FLUSHING 7 +#define MA_AAUDIO_STREAM_STATE_FLUSHED 8 +#define MA_AAUDIO_STREAM_STATE_STOPPING 9 +#define MA_AAUDIO_STREAM_STATE_STOPPED 10 +#define MA_AAUDIO_STREAM_STATE_CLOSING 11 +#define MA_AAUDIO_STREAM_STATE_CLOSED 12 +#define MA_AAUDIO_STREAM_STATE_DISCONNECTED 13 + +/* Performance modes. */ +#define MA_AAUDIO_PERFORMANCE_MODE_NONE 10 +#define MA_AAUDIO_PERFORMANCE_MODE_POWER_SAVING 11 +#define MA_AAUDIO_PERFORMANCE_MODE_LOW_LATENCY 12 + +/* Callback results. */ +#define MA_AAUDIO_CALLBACK_RESULT_CONTINUE 0 +#define MA_AAUDIO_CALLBACK_RESULT_STOP 1 + +/* Objects. 
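+   These are declared as opaque pointer types because miniaudio loads libaaudio.so at runtime via
+   ma_dlopen()/ma_dlsym() rather than linking against the NDK headers (hence the commented out
+   include above), so only the handles and the function pointer signatures are needed here.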
*/ +typedef struct ma_AAudioStreamBuilder_t* ma_AAudioStreamBuilder; +typedef struct ma_AAudioStream_t* ma_AAudioStream; + +typedef ma_aaudio_data_callback_result_t (* ma_AAudioStream_dataCallback) (ma_AAudioStream* pStream, void* pUserData, void* pAudioData, int32_t numFrames); +typedef void (* ma_AAudioStream_errorCallback)(ma_AAudioStream *pStream, void *pUserData, ma_aaudio_result_t error); + +typedef ma_aaudio_result_t (* MA_PFN_AAudio_createStreamBuilder) (ma_AAudioStreamBuilder** ppBuilder); +typedef ma_aaudio_result_t (* MA_PFN_AAudioStreamBuilder_delete) (ma_AAudioStreamBuilder* pBuilder); +typedef void (* MA_PFN_AAudioStreamBuilder_setDeviceId) (ma_AAudioStreamBuilder* pBuilder, int32_t deviceId); +typedef void (* MA_PFN_AAudioStreamBuilder_setDirection) (ma_AAudioStreamBuilder* pBuilder, ma_aaudio_direction_t direction); +typedef void (* MA_PFN_AAudioStreamBuilder_setSharingMode) (ma_AAudioStreamBuilder* pBuilder, ma_aaudio_sharing_mode_t sharingMode); +typedef void (* MA_PFN_AAudioStreamBuilder_setFormat) (ma_AAudioStreamBuilder* pBuilder, ma_aaudio_format_t format); +typedef void (* MA_PFN_AAudioStreamBuilder_setChannelCount) (ma_AAudioStreamBuilder* pBuilder, int32_t channelCount); +typedef void (* MA_PFN_AAudioStreamBuilder_setSampleRate) (ma_AAudioStreamBuilder* pBuilder, int32_t sampleRate); +typedef void (* MA_PFN_AAudioStreamBuilder_setBufferCapacityInFrames)(ma_AAudioStreamBuilder* pBuilder, int32_t numFrames); +typedef void (* MA_PFN_AAudioStreamBuilder_setFramesPerDataCallback) (ma_AAudioStreamBuilder* pBuilder, int32_t numFrames); +typedef void (* MA_PFN_AAudioStreamBuilder_setDataCallback) (ma_AAudioStreamBuilder* pBuilder, ma_AAudioStream_dataCallback callback, void* pUserData); +typedef void (* MA_PFN_AAudioStreamBuilder_setErrorCallback) (ma_AAudioStreamBuilder* pBuilder, ma_AAudioStream_errorCallback callback, void* pUserData); +typedef void (* MA_PFN_AAudioStreamBuilder_setPerformanceMode) (ma_AAudioStreamBuilder* pBuilder, ma_aaudio_performance_mode_t mode); +typedef ma_aaudio_result_t (* MA_PFN_AAudioStreamBuilder_openStream) (ma_AAudioStreamBuilder* pBuilder, ma_AAudioStream** ppStream); +typedef ma_aaudio_result_t (* MA_PFN_AAudioStream_close) (ma_AAudioStream* pStream); +typedef ma_aaudio_stream_state_t (* MA_PFN_AAudioStream_getState) (ma_AAudioStream* pStream); +typedef ma_aaudio_result_t (* MA_PFN_AAudioStream_waitForStateChange) (ma_AAudioStream* pStream, ma_aaudio_stream_state_t inputState, ma_aaudio_stream_state_t* pNextState, int64_t timeoutInNanoseconds); +typedef ma_aaudio_format_t (* MA_PFN_AAudioStream_getFormat) (ma_AAudioStream* pStream); +typedef int32_t (* MA_PFN_AAudioStream_getChannelCount) (ma_AAudioStream* pStream); +typedef int32_t (* MA_PFN_AAudioStream_getSampleRate) (ma_AAudioStream* pStream); +typedef int32_t (* MA_PFN_AAudioStream_getBufferCapacityInFrames) (ma_AAudioStream* pStream); +typedef int32_t (* MA_PFN_AAudioStream_getFramesPerDataCallback) (ma_AAudioStream* pStream); +typedef int32_t (* MA_PFN_AAudioStream_getFramesPerBurst) (ma_AAudioStream* pStream); +typedef ma_aaudio_result_t (* MA_PFN_AAudioStream_requestStart) (ma_AAudioStream* pStream); +typedef ma_aaudio_result_t (* MA_PFN_AAudioStream_requestStop) (ma_AAudioStream* pStream); + +static ma_result ma_result_from_aaudio(ma_aaudio_result_t resultAA) +{ + switch (resultAA) + { + case MA_AAUDIO_OK: return MA_SUCCESS; + default: break; + } + + return MA_ERROR; +} + +static void ma_stream_error_callback__aaudio(ma_AAudioStream* pStream, void* pUserData, ma_aaudio_result_t 
error) +{ + ma_device* pDevice = (ma_device*)pUserData; + MA_ASSERT(pDevice != NULL); + + (void)error; + +#if defined(MA_DEBUG_OUTPUT) + printf("[AAudio] ERROR CALLBACK: error=%d, AAudioStream_getState()=%d\n", error, ((MA_PFN_AAudioStream_getState)pDevice->pContext->aaudio.AAudioStream_getState)(pStream)); +#endif + + /* + From the documentation for AAudio, when a device is disconnected all we can do is stop it. However, we cannot stop it from the callback - we need + to do it from another thread. Therefore we are going to use an event thread for the AAudio backend to do this cleanly and safely. + */ + if (((MA_PFN_AAudioStream_getState)pDevice->pContext->aaudio.AAudioStream_getState)(pStream) == MA_AAUDIO_STREAM_STATE_DISCONNECTED) { +#if defined(MA_DEBUG_OUTPUT) + printf("[AAudio] Device Disconnected.\n"); +#endif + } +} + +static ma_aaudio_data_callback_result_t ma_stream_data_callback_capture__aaudio(ma_AAudioStream* pStream, void* pUserData, void* pAudioData, int32_t frameCount) +{ + ma_device* pDevice = (ma_device*)pUserData; + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_duplex) { + ma_device__handle_duplex_callback_capture(pDevice, frameCount, pAudioData, &pDevice->aaudio.duplexRB); + } else { + ma_device__send_frames_to_client(pDevice, frameCount, pAudioData); /* Send directly to the client. */ + } + + (void)pStream; + return MA_AAUDIO_CALLBACK_RESULT_CONTINUE; +} + +static ma_aaudio_data_callback_result_t ma_stream_data_callback_playback__aaudio(ma_AAudioStream* pStream, void* pUserData, void* pAudioData, int32_t frameCount) +{ + ma_device* pDevice = (ma_device*)pUserData; + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_duplex) { + ma_device__handle_duplex_callback_playback(pDevice, frameCount, pAudioData, &pDevice->aaudio.duplexRB); + } else { + ma_device__read_frames_from_client(pDevice, frameCount, pAudioData); /* Read directly from the client. */ + } + + (void)pStream; + return MA_AAUDIO_CALLBACK_RESULT_CONTINUE; +} + +static ma_result ma_open_stream__aaudio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, const ma_device_config* pConfig, const ma_device* pDevice, ma_AAudioStream** ppStream) +{ + ma_AAudioStreamBuilder* pBuilder; + ma_aaudio_result_t resultAA; + + MA_ASSERT(deviceType != ma_device_type_duplex); /* This function should not be called for a full-duplex device type. */ + + *ppStream = NULL; + + resultAA = ((MA_PFN_AAudio_createStreamBuilder)pContext->aaudio.AAudio_createStreamBuilder)(&pBuilder); + if (resultAA != MA_AAUDIO_OK) { + return ma_result_from_aaudio(resultAA); + } + + if (pDeviceID != NULL) { + ((MA_PFN_AAudioStreamBuilder_setDeviceId)pContext->aaudio.AAudioStreamBuilder_setDeviceId)(pBuilder, pDeviceID->aaudio); + } + + ((MA_PFN_AAudioStreamBuilder_setDirection)pContext->aaudio.AAudioStreamBuilder_setDirection)(pBuilder, (deviceType == ma_device_type_playback) ? MA_AAUDIO_DIRECTION_OUTPUT : MA_AAUDIO_DIRECTION_INPUT); + ((MA_PFN_AAudioStreamBuilder_setSharingMode)pContext->aaudio.AAudioStreamBuilder_setSharingMode)(pBuilder, (shareMode == ma_share_mode_shared) ? 
MA_AAUDIO_SHARING_MODE_SHARED : MA_AAUDIO_SHARING_MODE_EXCLUSIVE); + + if (pConfig != NULL) { + ma_uint32 bufferCapacityInFrames; + + if (pDevice == NULL || !pDevice->usingDefaultSampleRate) { + ((MA_PFN_AAudioStreamBuilder_setSampleRate)pContext->aaudio.AAudioStreamBuilder_setSampleRate)(pBuilder, pConfig->sampleRate); + } + + if (deviceType == ma_device_type_capture) { + if (pDevice == NULL || !pDevice->capture.usingDefaultChannels) { + ((MA_PFN_AAudioStreamBuilder_setChannelCount)pContext->aaudio.AAudioStreamBuilder_setChannelCount)(pBuilder, pConfig->capture.channels); + } + if (pDevice == NULL || !pDevice->capture.usingDefaultFormat) { + ((MA_PFN_AAudioStreamBuilder_setFormat)pContext->aaudio.AAudioStreamBuilder_setFormat)(pBuilder, (pConfig->capture.format == ma_format_s16) ? MA_AAUDIO_FORMAT_PCM_I16 : MA_AAUDIO_FORMAT_PCM_FLOAT); + } + } else { + if (pDevice == NULL || !pDevice->playback.usingDefaultChannels) { + ((MA_PFN_AAudioStreamBuilder_setChannelCount)pContext->aaudio.AAudioStreamBuilder_setChannelCount)(pBuilder, pConfig->playback.channels); + } + if (pDevice == NULL || !pDevice->playback.usingDefaultFormat) { + ((MA_PFN_AAudioStreamBuilder_setFormat)pContext->aaudio.AAudioStreamBuilder_setFormat)(pBuilder, (pConfig->playback.format == ma_format_s16) ? MA_AAUDIO_FORMAT_PCM_I16 : MA_AAUDIO_FORMAT_PCM_FLOAT); + } + } + + bufferCapacityInFrames = pConfig->periodSizeInFrames * pConfig->periods; + if (bufferCapacityInFrames == 0) { + bufferCapacityInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->periodSizeInMilliseconds, pConfig->sampleRate) * pConfig->periods; + } + + ((MA_PFN_AAudioStreamBuilder_setBufferCapacityInFrames)pContext->aaudio.AAudioStreamBuilder_setBufferCapacityInFrames)(pBuilder, bufferCapacityInFrames); + ((MA_PFN_AAudioStreamBuilder_setFramesPerDataCallback)pContext->aaudio.AAudioStreamBuilder_setFramesPerDataCallback)(pBuilder, bufferCapacityInFrames / pConfig->periods); + + if (deviceType == ma_device_type_capture) { + ((MA_PFN_AAudioStreamBuilder_setDataCallback)pContext->aaudio.AAudioStreamBuilder_setDataCallback)(pBuilder, ma_stream_data_callback_capture__aaudio, (void*)pDevice); + } else { + ((MA_PFN_AAudioStreamBuilder_setDataCallback)pContext->aaudio.AAudioStreamBuilder_setDataCallback)(pBuilder, ma_stream_data_callback_playback__aaudio, (void*)pDevice); + } + + /* Not sure how this affects things, but since there's a mapping between miniaudio's performance profiles and AAudio's performance modes, let go ahead and set it. */ + ((MA_PFN_AAudioStreamBuilder_setPerformanceMode)pContext->aaudio.AAudioStreamBuilder_setPerformanceMode)(pBuilder, (pConfig->performanceProfile == ma_performance_profile_low_latency) ? 
MA_AAUDIO_PERFORMANCE_MODE_LOW_LATENCY : MA_AAUDIO_PERFORMANCE_MODE_NONE); + } + + ((MA_PFN_AAudioStreamBuilder_setErrorCallback)pContext->aaudio.AAudioStreamBuilder_setErrorCallback)(pBuilder, ma_stream_error_callback__aaudio, (void*)pDevice); + + resultAA = ((MA_PFN_AAudioStreamBuilder_openStream)pContext->aaudio.AAudioStreamBuilder_openStream)(pBuilder, ppStream); + if (resultAA != MA_AAUDIO_OK) { + *ppStream = NULL; + ((MA_PFN_AAudioStreamBuilder_delete)pContext->aaudio.AAudioStreamBuilder_delete)(pBuilder); + return ma_result_from_aaudio(resultAA); + } + + ((MA_PFN_AAudioStreamBuilder_delete)pContext->aaudio.AAudioStreamBuilder_delete)(pBuilder); + return MA_SUCCESS; +} + +static ma_result ma_close_stream__aaudio(ma_context* pContext, ma_AAudioStream* pStream) +{ + return ma_result_from_aaudio(((MA_PFN_AAudioStream_close)pContext->aaudio.AAudioStream_close)(pStream)); +} + +static ma_bool32 ma_has_default_device__aaudio(ma_context* pContext, ma_device_type deviceType) +{ + /* The only way to know this is to try creating a stream. */ + ma_AAudioStream* pStream; + ma_result result = ma_open_stream__aaudio(pContext, deviceType, NULL, ma_share_mode_shared, NULL, NULL, &pStream); + if (result != MA_SUCCESS) { + return MA_FALSE; + } + + ma_close_stream__aaudio(pContext, pStream); + return MA_TRUE; +} + +static ma_result ma_wait_for_simple_state_transition__aaudio(ma_context* pContext, ma_AAudioStream* pStream, ma_aaudio_stream_state_t oldState, ma_aaudio_stream_state_t newState) +{ + ma_aaudio_stream_state_t actualNewState; + ma_aaudio_result_t resultAA = ((MA_PFN_AAudioStream_waitForStateChange)pContext->aaudio.AAudioStream_waitForStateChange)(pStream, oldState, &actualNewState, 5000000000); /* 5 second timeout. */ + if (resultAA != MA_AAUDIO_OK) { + return ma_result_from_aaudio(resultAA); + } + + if (newState != actualNewState) { + return MA_ERROR; /* Failed to transition into the expected state. */ + } + + return MA_SUCCESS; +} + + +static ma_bool32 ma_context_is_device_id_equal__aaudio(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pID0 != NULL); + MA_ASSERT(pID1 != NULL); + (void)pContext; + + return pID0->aaudio == pID1->aaudio; +} + +static ma_result ma_context_enumerate_devices__aaudio(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + ma_bool32 cbResult = MA_TRUE; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + /* Unfortunately AAudio does not have an enumeration API. Therefore I'm only going to report default devices, but only if it can instantiate a stream. */ + + /* Playback. */ + if (cbResult) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + deviceInfo.id.aaudio = MA_AAUDIO_UNSPECIFIED; + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + + if (ma_has_default_device__aaudio(pContext, ma_device_type_playback)) { + cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + } + } + + /* Capture. 
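+       Same pattern as playback: since AAudio has no enumeration API, the default capture device is
+       only reported when ma_has_default_device__aaudio() can actually open a stream for it.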
*/ + if (cbResult) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + deviceInfo.id.aaudio = MA_AAUDIO_UNSPECIFIED; + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + + if (ma_has_default_device__aaudio(pContext, ma_device_type_capture)) { + cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + } + } + + return MA_SUCCESS; +} + +static ma_result ma_context_get_device_info__aaudio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) +{ + ma_AAudioStream* pStream; + ma_result result; + + MA_ASSERT(pContext != NULL); + + /* No exclusive mode with AAudio. */ + if (shareMode == ma_share_mode_exclusive) { + return MA_SHARE_MODE_NOT_SUPPORTED; + } + + /* ID */ + if (pDeviceID != NULL) { + pDeviceInfo->id.aaudio = pDeviceID->aaudio; + } else { + pDeviceInfo->id.aaudio = MA_AAUDIO_UNSPECIFIED; + } + + /* Name */ + if (deviceType == ma_device_type_playback) { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + } else { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + } + + + /* We'll need to open the device to get accurate sample rate and channel count information. */ + result = ma_open_stream__aaudio(pContext, deviceType, pDeviceID, shareMode, NULL, NULL, &pStream); + if (result != MA_SUCCESS) { + return result; + } + + pDeviceInfo->minChannels = ((MA_PFN_AAudioStream_getChannelCount)pContext->aaudio.AAudioStream_getChannelCount)(pStream); + pDeviceInfo->maxChannels = pDeviceInfo->minChannels; + pDeviceInfo->minSampleRate = ((MA_PFN_AAudioStream_getSampleRate)pContext->aaudio.AAudioStream_getSampleRate)(pStream); + pDeviceInfo->maxSampleRate = pDeviceInfo->minSampleRate; + + ma_close_stream__aaudio(pContext, pStream); + pStream = NULL; + + + /* AAudio supports s16 and f32. */ + pDeviceInfo->formatCount = 2; + pDeviceInfo->formats[0] = ma_format_s16; + pDeviceInfo->formats[1] = ma_format_f32; + + return MA_SUCCESS; +} + + +static void ma_device_uninit__aaudio(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + ma_close_stream__aaudio(pDevice->pContext, (ma_AAudioStream*)pDevice->aaudio.pStreamCapture); + pDevice->aaudio.pStreamCapture = NULL; + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ma_close_stream__aaudio(pDevice->pContext, (ma_AAudioStream*)pDevice->aaudio.pStreamPlayback); + pDevice->aaudio.pStreamPlayback = NULL; + } + + if (pDevice->type == ma_device_type_duplex) { + ma_pcm_rb_uninit(&pDevice->aaudio.duplexRB); + } +} + +static ma_result ma_device_init__aaudio(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) +{ + ma_result result; + + MA_ASSERT(pDevice != NULL); + + if (pConfig->deviceType == ma_device_type_loopback) { + return MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + /* No exclusive mode with AAudio. */ + if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.shareMode == ma_share_mode_exclusive) || + ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.shareMode == ma_share_mode_exclusive)) { + return MA_SHARE_MODE_NOT_SUPPORTED; + } + + /* We first need to try opening the stream. 
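+    AAudio settles on the actual format, channel count and sample rate when the stream is opened,
+    so the internal device properties are queried back from the opened stream below rather than
+    taken directly from the requested config.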
*/ + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + int32_t bufferCapacityInFrames; + int32_t framesPerDataCallback; + + result = ma_open_stream__aaudio(pContext, ma_device_type_capture, pConfig->capture.pDeviceID, pConfig->capture.shareMode, pConfig, pDevice, (ma_AAudioStream**)&pDevice->aaudio.pStreamCapture); + if (result != MA_SUCCESS) { + return result; /* Failed to open the AAudio stream. */ + } + + pDevice->capture.internalFormat = (((MA_PFN_AAudioStream_getFormat)pContext->aaudio.AAudioStream_getFormat)((ma_AAudioStream*)pDevice->aaudio.pStreamCapture) == MA_AAUDIO_FORMAT_PCM_I16) ? ma_format_s16 : ma_format_f32; + pDevice->capture.internalChannels = ((MA_PFN_AAudioStream_getChannelCount)pContext->aaudio.AAudioStream_getChannelCount)((ma_AAudioStream*)pDevice->aaudio.pStreamCapture); + pDevice->capture.internalSampleRate = ((MA_PFN_AAudioStream_getSampleRate)pContext->aaudio.AAudioStream_getSampleRate)((ma_AAudioStream*)pDevice->aaudio.pStreamCapture); + ma_get_standard_channel_map(ma_standard_channel_map_default, pDevice->capture.internalChannels, pDevice->capture.internalChannelMap); /* <-- Cannot find info on channel order, so assuming a default. */ + + bufferCapacityInFrames = ((MA_PFN_AAudioStream_getBufferCapacityInFrames)pContext->aaudio.AAudioStream_getBufferCapacityInFrames)((ma_AAudioStream*)pDevice->aaudio.pStreamCapture); + framesPerDataCallback = ((MA_PFN_AAudioStream_getFramesPerDataCallback)pContext->aaudio.AAudioStream_getFramesPerDataCallback)((ma_AAudioStream*)pDevice->aaudio.pStreamCapture); + + if (framesPerDataCallback > 0) { + pDevice->capture.internalPeriodSizeInFrames = framesPerDataCallback; + pDevice->capture.internalPeriods = bufferCapacityInFrames / framesPerDataCallback; + } else { + pDevice->capture.internalPeriodSizeInFrames = bufferCapacityInFrames; + pDevice->capture.internalPeriods = 1; + } + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + int32_t bufferCapacityInFrames; + int32_t framesPerDataCallback; + + result = ma_open_stream__aaudio(pContext, ma_device_type_playback, pConfig->playback.pDeviceID, pConfig->playback.shareMode, pConfig, pDevice, (ma_AAudioStream**)&pDevice->aaudio.pStreamPlayback); + if (result != MA_SUCCESS) { + return result; /* Failed to open the AAudio stream. */ + } + + pDevice->playback.internalFormat = (((MA_PFN_AAudioStream_getFormat)pContext->aaudio.AAudioStream_getFormat)((ma_AAudioStream*)pDevice->aaudio.pStreamPlayback) == MA_AAUDIO_FORMAT_PCM_I16) ? ma_format_s16 : ma_format_f32; + pDevice->playback.internalChannels = ((MA_PFN_AAudioStream_getChannelCount)pContext->aaudio.AAudioStream_getChannelCount)((ma_AAudioStream*)pDevice->aaudio.pStreamPlayback); + pDevice->playback.internalSampleRate = ((MA_PFN_AAudioStream_getSampleRate)pContext->aaudio.AAudioStream_getSampleRate)((ma_AAudioStream*)pDevice->aaudio.pStreamPlayback); + ma_get_standard_channel_map(ma_standard_channel_map_default, pDevice->playback.internalChannels, pDevice->playback.internalChannelMap); /* <-- Cannot find info on channel order, so assuming a default. 
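+        (The capture path above makes the same assumption.) The period layout that follows also
+        mirrors the capture path: a non-zero framesPerDataCallback becomes the internal period size,
+        otherwise the entire buffer capacity is treated as a single period.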
*/ + + bufferCapacityInFrames = ((MA_PFN_AAudioStream_getBufferCapacityInFrames)pContext->aaudio.AAudioStream_getBufferCapacityInFrames)((ma_AAudioStream*)pDevice->aaudio.pStreamPlayback); + framesPerDataCallback = ((MA_PFN_AAudioStream_getFramesPerDataCallback)pContext->aaudio.AAudioStream_getFramesPerDataCallback)((ma_AAudioStream*)pDevice->aaudio.pStreamPlayback); + + if (framesPerDataCallback > 0) { + pDevice->playback.internalPeriodSizeInFrames = framesPerDataCallback; + pDevice->playback.internalPeriods = bufferCapacityInFrames / framesPerDataCallback; + } else { + pDevice->playback.internalPeriodSizeInFrames = bufferCapacityInFrames; + pDevice->playback.internalPeriods = 1; + } + } + + if (pConfig->deviceType == ma_device_type_duplex) { + ma_uint32 rbSizeInFrames = (ma_uint32)ma_calculate_frame_count_after_resampling(pDevice->sampleRate, pDevice->capture.internalSampleRate, pDevice->capture.internalPeriodSizeInFrames) * pDevice->capture.internalPeriods; + ma_result result = ma_pcm_rb_init(pDevice->capture.format, pDevice->capture.channels, rbSizeInFrames, NULL, &pDevice->pContext->allocationCallbacks, &pDevice->aaudio.duplexRB); + if (result != MA_SUCCESS) { + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + ma_close_stream__aaudio(pDevice->pContext, (ma_AAudioStream*)pDevice->aaudio.pStreamCapture); + } + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ma_close_stream__aaudio(pDevice->pContext, (ma_AAudioStream*)pDevice->aaudio.pStreamPlayback); + } + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[AAudio] Failed to initialize ring buffer.", result); + } + + /* We need a period to act as a buffer for cases where the playback and capture device's end up desyncing. */ + { + ma_uint32 marginSizeInFrames = rbSizeInFrames / pDevice->capture.internalPeriods; + void* pMarginData; + ma_pcm_rb_acquire_write(&pDevice->aaudio.duplexRB, &marginSizeInFrames, &pMarginData); + { + MA_ZERO_MEMORY(pMarginData, marginSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels)); + } + ma_pcm_rb_commit_write(&pDevice->aaudio.duplexRB, marginSizeInFrames, pMarginData); + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device_start_stream__aaudio(ma_device* pDevice, ma_AAudioStream* pStream) +{ + ma_aaudio_result_t resultAA; + ma_aaudio_stream_state_t currentState; + + MA_ASSERT(pDevice != NULL); + + resultAA = ((MA_PFN_AAudioStream_requestStart)pDevice->pContext->aaudio.AAudioStream_requestStart)(pStream); + if (resultAA != MA_AAUDIO_OK) { + return ma_result_from_aaudio(resultAA); + } + + /* Do we actually need to wait for the device to transition into it's started state? */ + + /* The device should be in either a starting or started state. If it's not set to started we need to wait for it to transition. It should go from starting to started. */ + currentState = ((MA_PFN_AAudioStream_getState)pDevice->pContext->aaudio.AAudioStream_getState)(pStream); + if (currentState != MA_AAUDIO_STREAM_STATE_STARTED) { + ma_result result; + + if (currentState != MA_AAUDIO_STREAM_STATE_STARTING) { + return MA_ERROR; /* Expecting the stream to be a starting or started state. 
*/ + } + + result = ma_wait_for_simple_state_transition__aaudio(pDevice->pContext, pStream, currentState, MA_AAUDIO_STREAM_STATE_STARTED); + if (result != MA_SUCCESS) { + return result; + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device_stop_stream__aaudio(ma_device* pDevice, ma_AAudioStream* pStream) +{ + ma_aaudio_result_t resultAA; + ma_aaudio_stream_state_t currentState; + + MA_ASSERT(pDevice != NULL); + + /* + From the AAudio documentation: + + The stream will stop after all of the data currently buffered has been played. + + This maps with miniaudio's requirement that device's be drained which means we don't need to implement any draining logic. + */ + + resultAA = ((MA_PFN_AAudioStream_requestStop)pDevice->pContext->aaudio.AAudioStream_requestStop)(pStream); + if (resultAA != MA_AAUDIO_OK) { + return ma_result_from_aaudio(resultAA); + } + + /* The device should be in either a stopping or stopped state. If it's not set to started we need to wait for it to transition. It should go from stopping to stopped. */ + currentState = ((MA_PFN_AAudioStream_getState)pDevice->pContext->aaudio.AAudioStream_getState)(pStream); + if (currentState != MA_AAUDIO_STREAM_STATE_STOPPED) { + ma_result result; + + if (currentState != MA_AAUDIO_STREAM_STATE_STOPPING) { + return MA_ERROR; /* Expecting the stream to be a stopping or stopped state. */ + } + + result = ma_wait_for_simple_state_transition__aaudio(pDevice->pContext, pStream, currentState, MA_AAUDIO_STREAM_STATE_STOPPED); + if (result != MA_SUCCESS) { + return result; + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device_start__aaudio(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + ma_result result = ma_device_start_stream__aaudio(pDevice, (ma_AAudioStream*)pDevice->aaudio.pStreamCapture); + if (result != MA_SUCCESS) { + return result; + } + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ma_result result = ma_device_start_stream__aaudio(pDevice, (ma_AAudioStream*)pDevice->aaudio.pStreamPlayback); + if (result != MA_SUCCESS) { + if (pDevice->type == ma_device_type_duplex) { + ma_device_stop_stream__aaudio(pDevice, (ma_AAudioStream*)pDevice->aaudio.pStreamCapture); + } + return result; + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device_stop__aaudio(ma_device* pDevice) +{ + ma_stop_proc onStop; + + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + ma_result result = ma_device_stop_stream__aaudio(pDevice, (ma_AAudioStream*)pDevice->aaudio.pStreamCapture); + if (result != MA_SUCCESS) { + return result; + } + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ma_result result = ma_device_stop_stream__aaudio(pDevice, (ma_AAudioStream*)pDevice->aaudio.pStreamPlayback); + if (result != MA_SUCCESS) { + return result; + } + } + + onStop = pDevice->onStop; + if (onStop) { + onStop(pDevice); + } + + return MA_SUCCESS; +} + + +static ma_result ma_context_uninit__aaudio(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_aaudio); + + ma_dlclose(pContext, pContext->aaudio.hAAudio); + pContext->aaudio.hAAudio = NULL; + + return MA_SUCCESS; +} + +static ma_result ma_context_init__aaudio(const ma_context_config* pConfig, ma_context* pContext) +{ + const char* libNames[] = { + "libaaudio.so" + }; + size_t i; + + for (i 
= 0; i < ma_countof(libNames); ++i) { + pContext->aaudio.hAAudio = ma_dlopen(pContext, libNames[i]); + if (pContext->aaudio.hAAudio != NULL) { + break; + } + } + + if (pContext->aaudio.hAAudio == NULL) { + return MA_FAILED_TO_INIT_BACKEND; + } + + pContext->aaudio.AAudio_createStreamBuilder = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudio_createStreamBuilder"); + pContext->aaudio.AAudioStreamBuilder_delete = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_delete"); + pContext->aaudio.AAudioStreamBuilder_setDeviceId = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setDeviceId"); + pContext->aaudio.AAudioStreamBuilder_setDirection = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setDirection"); + pContext->aaudio.AAudioStreamBuilder_setSharingMode = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setSharingMode"); + pContext->aaudio.AAudioStreamBuilder_setFormat = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setFormat"); + pContext->aaudio.AAudioStreamBuilder_setChannelCount = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setChannelCount"); + pContext->aaudio.AAudioStreamBuilder_setSampleRate = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setSampleRate"); + pContext->aaudio.AAudioStreamBuilder_setBufferCapacityInFrames = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setBufferCapacityInFrames"); + pContext->aaudio.AAudioStreamBuilder_setFramesPerDataCallback = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setFramesPerDataCallback"); + pContext->aaudio.AAudioStreamBuilder_setDataCallback = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setDataCallback"); + pContext->aaudio.AAudioStreamBuilder_setErrorCallback = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setErrorCallback"); + pContext->aaudio.AAudioStreamBuilder_setPerformanceMode = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setPerformanceMode"); + pContext->aaudio.AAudioStreamBuilder_openStream = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_openStream"); + pContext->aaudio.AAudioStream_close = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStream_close"); + pContext->aaudio.AAudioStream_getState = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStream_getState"); + pContext->aaudio.AAudioStream_waitForStateChange = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStream_waitForStateChange"); + pContext->aaudio.AAudioStream_getFormat = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStream_getFormat"); + pContext->aaudio.AAudioStream_getChannelCount = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStream_getChannelCount"); + pContext->aaudio.AAudioStream_getSampleRate = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStream_getSampleRate"); + pContext->aaudio.AAudioStream_getBufferCapacityInFrames = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStream_getBufferCapacityInFrames"); + pContext->aaudio.AAudioStream_getFramesPerDataCallback = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStream_getFramesPerDataCallback"); + pContext->aaudio.AAudioStream_getFramesPerBurst = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, 
"AAudioStream_getFramesPerBurst"); + pContext->aaudio.AAudioStream_requestStart = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStream_requestStart"); + pContext->aaudio.AAudioStream_requestStop = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStream_requestStop"); + + pContext->isBackendAsynchronous = MA_TRUE; + + pContext->onUninit = ma_context_uninit__aaudio; + pContext->onDeviceIDEqual = ma_context_is_device_id_equal__aaudio; + pContext->onEnumDevices = ma_context_enumerate_devices__aaudio; + pContext->onGetDeviceInfo = ma_context_get_device_info__aaudio; + pContext->onDeviceInit = ma_device_init__aaudio; + pContext->onDeviceUninit = ma_device_uninit__aaudio; + pContext->onDeviceStart = ma_device_start__aaudio; + pContext->onDeviceStop = ma_device_stop__aaudio; + + (void)pConfig; + return MA_SUCCESS; +} +#endif /* AAudio */ + + +/****************************************************************************** + +OpenSL|ES Backend + +******************************************************************************/ +#ifdef MA_HAS_OPENSL +#include +#ifdef MA_ANDROID +#include +#endif + +/* OpenSL|ES has one-per-application objects :( */ +SLObjectItf g_maEngineObjectSL = NULL; +SLEngineItf g_maEngineSL = NULL; +ma_uint32 g_maOpenSLInitCounter = 0; + +#define MA_OPENSL_OBJ(p) (*((SLObjectItf)(p))) +#define MA_OPENSL_OUTPUTMIX(p) (*((SLOutputMixItf)(p))) +#define MA_OPENSL_PLAY(p) (*((SLPlayItf)(p))) +#define MA_OPENSL_RECORD(p) (*((SLRecordItf)(p))) + +#ifdef MA_ANDROID +#define MA_OPENSL_BUFFERQUEUE(p) (*((SLAndroidSimpleBufferQueueItf)(p))) +#else +#define MA_OPENSL_BUFFERQUEUE(p) (*((SLBufferQueueItf)(p))) +#endif + +static ma_result ma_result_from_OpenSL(SLuint32 result) +{ + switch (result) + { + case SL_RESULT_SUCCESS: return MA_SUCCESS; + case SL_RESULT_PRECONDITIONS_VIOLATED: return MA_ERROR; + case SL_RESULT_PARAMETER_INVALID: return MA_INVALID_ARGS; + case SL_RESULT_MEMORY_FAILURE: return MA_OUT_OF_MEMORY; + case SL_RESULT_RESOURCE_ERROR: return MA_INVALID_DATA; + case SL_RESULT_RESOURCE_LOST: return MA_ERROR; + case SL_RESULT_IO_ERROR: return MA_IO_ERROR; + case SL_RESULT_BUFFER_INSUFFICIENT: return MA_NO_SPACE; + case SL_RESULT_CONTENT_CORRUPTED: return MA_INVALID_DATA; + case SL_RESULT_CONTENT_UNSUPPORTED: return MA_FORMAT_NOT_SUPPORTED; + case SL_RESULT_CONTENT_NOT_FOUND: return MA_ERROR; + case SL_RESULT_PERMISSION_DENIED: return MA_ACCESS_DENIED; + case SL_RESULT_FEATURE_UNSUPPORTED: return MA_NOT_IMPLEMENTED; + case SL_RESULT_INTERNAL_ERROR: return MA_ERROR; + case SL_RESULT_UNKNOWN_ERROR: return MA_ERROR; + case SL_RESULT_OPERATION_ABORTED: return MA_ERROR; + case SL_RESULT_CONTROL_LOST: return MA_ERROR; + default: return MA_ERROR; + } +} + +/* Converts an individual OpenSL-style channel identifier (SL_SPEAKER_FRONT_LEFT, etc.) to miniaudio. 
*/ +static ma_uint8 ma_channel_id_to_ma__opensl(SLuint32 id) +{ + switch (id) + { + case SL_SPEAKER_FRONT_LEFT: return MA_CHANNEL_FRONT_LEFT; + case SL_SPEAKER_FRONT_RIGHT: return MA_CHANNEL_FRONT_RIGHT; + case SL_SPEAKER_FRONT_CENTER: return MA_CHANNEL_FRONT_CENTER; + case SL_SPEAKER_LOW_FREQUENCY: return MA_CHANNEL_LFE; + case SL_SPEAKER_BACK_LEFT: return MA_CHANNEL_BACK_LEFT; + case SL_SPEAKER_BACK_RIGHT: return MA_CHANNEL_BACK_RIGHT; + case SL_SPEAKER_FRONT_LEFT_OF_CENTER: return MA_CHANNEL_FRONT_LEFT_CENTER; + case SL_SPEAKER_FRONT_RIGHT_OF_CENTER: return MA_CHANNEL_FRONT_RIGHT_CENTER; + case SL_SPEAKER_BACK_CENTER: return MA_CHANNEL_BACK_CENTER; + case SL_SPEAKER_SIDE_LEFT: return MA_CHANNEL_SIDE_LEFT; + case SL_SPEAKER_SIDE_RIGHT: return MA_CHANNEL_SIDE_RIGHT; + case SL_SPEAKER_TOP_CENTER: return MA_CHANNEL_TOP_CENTER; + case SL_SPEAKER_TOP_FRONT_LEFT: return MA_CHANNEL_TOP_FRONT_LEFT; + case SL_SPEAKER_TOP_FRONT_CENTER: return MA_CHANNEL_TOP_FRONT_CENTER; + case SL_SPEAKER_TOP_FRONT_RIGHT: return MA_CHANNEL_TOP_FRONT_RIGHT; + case SL_SPEAKER_TOP_BACK_LEFT: return MA_CHANNEL_TOP_BACK_LEFT; + case SL_SPEAKER_TOP_BACK_CENTER: return MA_CHANNEL_TOP_BACK_CENTER; + case SL_SPEAKER_TOP_BACK_RIGHT: return MA_CHANNEL_TOP_BACK_RIGHT; + default: return 0; + } +} + +/* Converts an individual miniaudio channel identifier (MA_CHANNEL_FRONT_LEFT, etc.) to OpenSL-style. */ +static SLuint32 ma_channel_id_to_opensl(ma_uint8 id) +{ + switch (id) + { + case MA_CHANNEL_MONO: return SL_SPEAKER_FRONT_CENTER; + case MA_CHANNEL_FRONT_LEFT: return SL_SPEAKER_FRONT_LEFT; + case MA_CHANNEL_FRONT_RIGHT: return SL_SPEAKER_FRONT_RIGHT; + case MA_CHANNEL_FRONT_CENTER: return SL_SPEAKER_FRONT_CENTER; + case MA_CHANNEL_LFE: return SL_SPEAKER_LOW_FREQUENCY; + case MA_CHANNEL_BACK_LEFT: return SL_SPEAKER_BACK_LEFT; + case MA_CHANNEL_BACK_RIGHT: return SL_SPEAKER_BACK_RIGHT; + case MA_CHANNEL_FRONT_LEFT_CENTER: return SL_SPEAKER_FRONT_LEFT_OF_CENTER; + case MA_CHANNEL_FRONT_RIGHT_CENTER: return SL_SPEAKER_FRONT_RIGHT_OF_CENTER; + case MA_CHANNEL_BACK_CENTER: return SL_SPEAKER_BACK_CENTER; + case MA_CHANNEL_SIDE_LEFT: return SL_SPEAKER_SIDE_LEFT; + case MA_CHANNEL_SIDE_RIGHT: return SL_SPEAKER_SIDE_RIGHT; + case MA_CHANNEL_TOP_CENTER: return SL_SPEAKER_TOP_CENTER; + case MA_CHANNEL_TOP_FRONT_LEFT: return SL_SPEAKER_TOP_FRONT_LEFT; + case MA_CHANNEL_TOP_FRONT_CENTER: return SL_SPEAKER_TOP_FRONT_CENTER; + case MA_CHANNEL_TOP_FRONT_RIGHT: return SL_SPEAKER_TOP_FRONT_RIGHT; + case MA_CHANNEL_TOP_BACK_LEFT: return SL_SPEAKER_TOP_BACK_LEFT; + case MA_CHANNEL_TOP_BACK_CENTER: return SL_SPEAKER_TOP_BACK_CENTER; + case MA_CHANNEL_TOP_BACK_RIGHT: return SL_SPEAKER_TOP_BACK_RIGHT; + default: return 0; + } +} + +/* Converts a channel mapping to an OpenSL-style channel mask. */ +static SLuint32 ma_channel_map_to_channel_mask__opensl(const ma_channel channelMap[MA_MAX_CHANNELS], ma_uint32 channels) +{ + SLuint32 channelMask = 0; + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; ++iChannel) { + channelMask |= ma_channel_id_to_opensl(channelMap[iChannel]); + } + + return channelMask; +} + +/* Converts an OpenSL-style channel mask to a miniaudio channel map. 
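+   Zero masks for mono and stereo are special cased; otherwise the mask is scanned bit by bit from
+   the least significant bit and each set bit is translated with ma_channel_id_to_ma__opensl(), so
+   the resulting map follows OpenSL's speaker bit ordering.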
*/ +static void ma_channel_mask_to_channel_map__opensl(SLuint32 channelMask, ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS]) +{ + if (channels == 1 && channelMask == 0) { + channelMap[0] = MA_CHANNEL_MONO; + } else if (channels == 2 && channelMask == 0) { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + } else { + if (channels == 1 && (channelMask & SL_SPEAKER_FRONT_CENTER) != 0) { + channelMap[0] = MA_CHANNEL_MONO; + } else { + /* Just iterate over each bit. */ + ma_uint32 iChannel = 0; + ma_uint32 iBit; + for (iBit = 0; iBit < 32; ++iBit) { + SLuint32 bitValue = (channelMask & (1UL << iBit)); + if (bitValue != 0) { + /* The bit is set. */ + channelMap[iChannel] = ma_channel_id_to_ma__opensl(bitValue); + iChannel += 1; + } + } + } + } +} + +static SLuint32 ma_round_to_standard_sample_rate__opensl(SLuint32 samplesPerSec) +{ + if (samplesPerSec <= SL_SAMPLINGRATE_8) { + return SL_SAMPLINGRATE_8; + } + if (samplesPerSec <= SL_SAMPLINGRATE_11_025) { + return SL_SAMPLINGRATE_11_025; + } + if (samplesPerSec <= SL_SAMPLINGRATE_12) { + return SL_SAMPLINGRATE_12; + } + if (samplesPerSec <= SL_SAMPLINGRATE_16) { + return SL_SAMPLINGRATE_16; + } + if (samplesPerSec <= SL_SAMPLINGRATE_22_05) { + return SL_SAMPLINGRATE_22_05; + } + if (samplesPerSec <= SL_SAMPLINGRATE_24) { + return SL_SAMPLINGRATE_24; + } + if (samplesPerSec <= SL_SAMPLINGRATE_32) { + return SL_SAMPLINGRATE_32; + } + if (samplesPerSec <= SL_SAMPLINGRATE_44_1) { + return SL_SAMPLINGRATE_44_1; + } + if (samplesPerSec <= SL_SAMPLINGRATE_48) { + return SL_SAMPLINGRATE_48; + } + + /* Android doesn't support more than 48000. */ +#ifndef MA_ANDROID + if (samplesPerSec <= SL_SAMPLINGRATE_64) { + return SL_SAMPLINGRATE_64; + } + if (samplesPerSec <= SL_SAMPLINGRATE_88_2) { + return SL_SAMPLINGRATE_88_2; + } + if (samplesPerSec <= SL_SAMPLINGRATE_96) { + return SL_SAMPLINGRATE_96; + } + if (samplesPerSec <= SL_SAMPLINGRATE_192) { + return SL_SAMPLINGRATE_192; + } +#endif + + return SL_SAMPLINGRATE_16; +} + + +static ma_bool32 ma_context_is_device_id_equal__opensl(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pID0 != NULL); + MA_ASSERT(pID1 != NULL); + (void)pContext; + + return pID0->opensl == pID1->opensl; +} + +static ma_result ma_context_enumerate_devices__opensl(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + ma_bool32 cbResult; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + MA_ASSERT(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it and then attempted to enumerate devices. */ + if (g_maOpenSLInitCounter == 0) { + return MA_INVALID_OPERATION; + } + + /* + TODO: Test Me. + + This is currently untested, so for now we are just returning default devices. + */ +#if 0 && !defined(MA_ANDROID) + ma_bool32 isTerminated = MA_FALSE; + + SLuint32 pDeviceIDs[128]; + SLint32 deviceCount = sizeof(pDeviceIDs) / sizeof(pDeviceIDs[0]); + + SLAudioIODeviceCapabilitiesItf deviceCaps; + SLresult resultSL = (*g_maEngineObjectSL)->GetInterface(g_maEngineObjectSL, SL_IID_AUDIOIODEVICECAPABILITIES, &deviceCaps); + if (resultSL != SL_RESULT_SUCCESS) { + /* The interface may not be supported so just report a default device. 
*/ + goto return_default_device; + } + + /* Playback */ + if (!isTerminated) { + resultSL = (*deviceCaps)->GetAvailableAudioOutputs(deviceCaps, &deviceCount, pDeviceIDs); + if (resultSL != SL_RESULT_SUCCESS) { + return ma_result_from_OpenSL(resultSL); + } + + for (SLint32 iDevice = 0; iDevice < deviceCount; ++iDevice) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + deviceInfo.id.opensl = pDeviceIDs[iDevice]; + + SLAudioOutputDescriptor desc; + resultSL = (*deviceCaps)->QueryAudioOutputCapabilities(deviceCaps, deviceInfo.id.opensl, &desc); + if (resultSL == SL_RESULT_SUCCESS) { + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), (const char*)desc.pDeviceName, (size_t)-1); + + ma_bool32 cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + if (cbResult == MA_FALSE) { + isTerminated = MA_TRUE; + break; + } + } + } + } + + /* Capture */ + if (!isTerminated) { + resultSL = (*deviceCaps)->GetAvailableAudioInputs(deviceCaps, &deviceCount, pDeviceIDs); + if (resultSL != SL_RESULT_SUCCESS) { + return ma_result_from_OpenSL(resultSL); + } + + for (SLint32 iDevice = 0; iDevice < deviceCount; ++iDevice) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + deviceInfo.id.opensl = pDeviceIDs[iDevice]; + + SLAudioInputDescriptor desc; + resultSL = (*deviceCaps)->QueryAudioInputCapabilities(deviceCaps, deviceInfo.id.opensl, &desc); + if (resultSL == SL_RESULT_SUCCESS) { + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), (const char*)desc.deviceName, (size_t)-1); + + ma_bool32 cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + if (cbResult == MA_FALSE) { + isTerminated = MA_TRUE; + break; + } + } + } + } + + return MA_SUCCESS; +#else + goto return_default_device; +#endif + +return_default_device:; + cbResult = MA_TRUE; + + /* Playback. */ + if (cbResult) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + } + + /* Capture. */ + if (cbResult) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + } + + return MA_SUCCESS; +} + +static ma_result ma_context_get_device_info__opensl(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) +{ + MA_ASSERT(pContext != NULL); + + MA_ASSERT(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it and then attempted to get device info. */ + if (g_maOpenSLInitCounter == 0) { + return MA_INVALID_OPERATION; + } + + /* No exclusive mode with OpenSL|ES. */ + if (shareMode == ma_share_mode_exclusive) { + return MA_SHARE_MODE_NOT_SUPPORTED; + } + + /* + TODO: Test Me. + + This is currently untested, so for now we are just returning default devices. + */ +#if 0 && !defined(MA_ANDROID) + SLAudioIODeviceCapabilitiesItf deviceCaps; + SLresult resultSL = (*g_maEngineObjectSL)->GetInterface(g_maEngineObjectSL, SL_IID_AUDIOIODEVICECAPABILITIES, &deviceCaps); + if (resultSL != SL_RESULT_SUCCESS) { + /* The interface may not be supported so just report a default device. 
*/ + goto return_default_device; + } + + if (deviceType == ma_device_type_playback) { + SLAudioOutputDescriptor desc; + resultSL = (*deviceCaps)->QueryAudioOutputCapabilities(deviceCaps, pDeviceID->opensl, &desc); + if (resultSL != SL_RESULT_SUCCESS) { + return ma_result_from_OpenSL(resultSL); + } + + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), (const char*)desc.pDeviceName, (size_t)-1); + } else { + SLAudioInputDescriptor desc; + resultSL = (*deviceCaps)->QueryAudioInputCapabilities(deviceCaps, pDeviceID->opensl, &desc); + if (resultSL != SL_RESULT_SUCCESS) { + return ma_result_from_OpenSL(resultSL); + } + + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), (const char*)desc.deviceName, (size_t)-1); + } + + goto return_detailed_info; +#else + goto return_default_device; +#endif + +return_default_device: + if (pDeviceID != NULL) { + if ((deviceType == ma_device_type_playback && pDeviceID->opensl != SL_DEFAULTDEVICEID_AUDIOOUTPUT) || + (deviceType == ma_device_type_capture && pDeviceID->opensl != SL_DEFAULTDEVICEID_AUDIOINPUT)) { + return MA_NO_DEVICE; /* Don't know the device. */ + } + } + + /* Name / Description */ + if (deviceType == ma_device_type_playback) { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + } else { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + } + + goto return_detailed_info; + + +return_detailed_info: + + /* + For now we're just outputting a set of values that are supported by the API but not necessarily supported + by the device natively. Later on we should work on this so that it more closely reflects the device's + actual native format. + */ + pDeviceInfo->minChannels = 1; + pDeviceInfo->maxChannels = 2; + pDeviceInfo->minSampleRate = 8000; + pDeviceInfo->maxSampleRate = 48000; + pDeviceInfo->formatCount = 2; + pDeviceInfo->formats[0] = ma_format_u8; + pDeviceInfo->formats[1] = ma_format_s16; +#if defined(MA_ANDROID) && __ANDROID_API__ >= 21 + pDeviceInfo->formats[pDeviceInfo->formatCount] = ma_format_f32; + pDeviceInfo->formatCount += 1; +#endif + + return MA_SUCCESS; +} + + +#ifdef MA_ANDROID +/*void ma_buffer_queue_callback_capture__opensl_android(SLAndroidSimpleBufferQueueItf pBufferQueue, SLuint32 eventFlags, const void* pBuffer, SLuint32 bufferSize, SLuint32 dataUsed, void* pContext)*/ +static void ma_buffer_queue_callback_capture__opensl_android(SLAndroidSimpleBufferQueueItf pBufferQueue, void* pUserData) +{ + ma_device* pDevice = (ma_device*)pUserData; + size_t periodSizeInBytes; + ma_uint8* pBuffer; + SLresult resultSL; + + MA_ASSERT(pDevice != NULL); + + (void)pBufferQueue; + + /* + For now, don't do anything unless the buffer was fully processed. From what I can tell, it looks like + OpenSL|ES 1.1 improves on buffer queues to the point that we could much more intelligently handle this, + but unfortunately it looks like Android is only supporting OpenSL|ES 1.0.1 for now :( + */ + + /* Don't do anything if the device is not started. */ + if (pDevice->state != MA_STATE_STARTED) { + return; + } + + /* Don't do anything if the device is being drained. 
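+    Otherwise one period's worth of captured frames is handed to the client (or to the duplex ring
+    buffer), the same period buffer is re-enqueued, and currentBufferIndexCapture advances modulo
+    internalPeriods.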
*/ + if (pDevice->opensl.isDrainingCapture) { + return; + } + + periodSizeInBytes = pDevice->capture.internalPeriodSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + pBuffer = pDevice->opensl.pBufferCapture + (pDevice->opensl.currentBufferIndexCapture * periodSizeInBytes); + + if (pDevice->type == ma_device_type_duplex) { + ma_device__handle_duplex_callback_capture(pDevice, pDevice->capture.internalPeriodSizeInFrames, pBuffer, &pDevice->opensl.duplexRB); + } else { + ma_device__send_frames_to_client(pDevice, pDevice->capture.internalPeriodSizeInFrames, pBuffer); + } + + resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueueCapture)->Enqueue((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueueCapture, pBuffer, periodSizeInBytes); + if (resultSL != SL_RESULT_SUCCESS) { + return; + } + + pDevice->opensl.currentBufferIndexCapture = (pDevice->opensl.currentBufferIndexCapture + 1) % pDevice->capture.internalPeriods; +} + +static void ma_buffer_queue_callback_playback__opensl_android(SLAndroidSimpleBufferQueueItf pBufferQueue, void* pUserData) +{ + ma_device* pDevice = (ma_device*)pUserData; + size_t periodSizeInBytes; + ma_uint8* pBuffer; + SLresult resultSL; + + MA_ASSERT(pDevice != NULL); + + (void)pBufferQueue; + + /* Don't do anything if the device is not started. */ + if (pDevice->state != MA_STATE_STARTED) { + return; + } + + /* Don't do anything if the device is being drained. */ + if (pDevice->opensl.isDrainingPlayback) { + return; + } + + periodSizeInBytes = pDevice->playback.internalPeriodSizeInFrames * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + pBuffer = pDevice->opensl.pBufferPlayback + (pDevice->opensl.currentBufferIndexPlayback * periodSizeInBytes); + + if (pDevice->type == ma_device_type_duplex) { + ma_device__handle_duplex_callback_playback(pDevice, pDevice->playback.internalPeriodSizeInFrames, pBuffer, &pDevice->opensl.duplexRB); + } else { + ma_device__read_frames_from_client(pDevice, pDevice->playback.internalPeriodSizeInFrames, pBuffer); + } + + resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueuePlayback)->Enqueue((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueuePlayback, pBuffer, periodSizeInBytes); + if (resultSL != SL_RESULT_SUCCESS) { + return; + } + + pDevice->opensl.currentBufferIndexPlayback = (pDevice->opensl.currentBufferIndexPlayback + 1) % pDevice->playback.internalPeriods; +} +#endif + +static void ma_device_uninit__opensl(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + MA_ASSERT(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it before uninitializing the device. 
*/ + if (g_maOpenSLInitCounter == 0) { + return; + } + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + if (pDevice->opensl.pAudioRecorderObj) { + MA_OPENSL_OBJ(pDevice->opensl.pAudioRecorderObj)->Destroy((SLObjectItf)pDevice->opensl.pAudioRecorderObj); + } + + ma__free_from_callbacks(pDevice->opensl.pBufferCapture, &pDevice->pContext->allocationCallbacks); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + if (pDevice->opensl.pAudioPlayerObj) { + MA_OPENSL_OBJ(pDevice->opensl.pAudioPlayerObj)->Destroy((SLObjectItf)pDevice->opensl.pAudioPlayerObj); + } + if (pDevice->opensl.pOutputMixObj) { + MA_OPENSL_OBJ(pDevice->opensl.pOutputMixObj)->Destroy((SLObjectItf)pDevice->opensl.pOutputMixObj); + } + + ma__free_from_callbacks(pDevice->opensl.pBufferPlayback, &pDevice->pContext->allocationCallbacks); + } + + if (pDevice->type == ma_device_type_duplex) { + ma_pcm_rb_uninit(&pDevice->opensl.duplexRB); + } +} + +#if defined(MA_ANDROID) && __ANDROID_API__ >= 21 +typedef SLAndroidDataFormat_PCM_EX ma_SLDataFormat_PCM; +#else +typedef SLDataFormat_PCM ma_SLDataFormat_PCM; +#endif + +static ma_result ma_SLDataFormat_PCM_init__opensl(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, const ma_channel* channelMap, ma_SLDataFormat_PCM* pDataFormat) +{ +#if defined(MA_ANDROID) && __ANDROID_API__ >= 21 + if (format == ma_format_f32) { + pDataFormat->formatType = SL_ANDROID_DATAFORMAT_PCM_EX; + pDataFormat->representation = SL_ANDROID_PCM_REPRESENTATION_FLOAT; + } else { + pDataFormat->formatType = SL_DATAFORMAT_PCM; + } +#else + pDataFormat->formatType = SL_DATAFORMAT_PCM; +#endif + + pDataFormat->numChannels = channels; + ((SLDataFormat_PCM*)pDataFormat)->samplesPerSec = ma_round_to_standard_sample_rate__opensl(sampleRate * 1000); /* In millihertz. Annoyingly, the sample rate variable is named differently between SLAndroidDataFormat_PCM_EX and SLDataFormat_PCM */ + pDataFormat->bitsPerSample = ma_get_bytes_per_sample(format)*8; + pDataFormat->channelMask = ma_channel_map_to_channel_mask__opensl(channelMap, channels); + pDataFormat->endianness = (ma_is_little_endian()) ? SL_BYTEORDER_LITTLEENDIAN : SL_BYTEORDER_BIGENDIAN; + + /* + Android has a few restrictions on the format as documented here: https://developer.android.com/ndk/guides/audio/opensl-for-android.html + - Only mono and stereo is supported. + - Only u8 and s16 formats are supported. + - Maximum sample rate of 48000. + */ +#ifdef MA_ANDROID + if (pDataFormat->numChannels > 2) { + pDataFormat->numChannels = 2; + } +#if __ANDROID_API__ >= 21 + if (pDataFormat->formatType == SL_ANDROID_DATAFORMAT_PCM_EX) { + /* It's floating point. */ + MA_ASSERT(pDataFormat->representation == SL_ANDROID_PCM_REPRESENTATION_FLOAT); + if (pDataFormat->bitsPerSample > 32) { + pDataFormat->bitsPerSample = 32; + } + } else { + if (pDataFormat->bitsPerSample > 16) { + pDataFormat->bitsPerSample = 16; + } + } +#else + if (pDataFormat->bitsPerSample > 16) { + pDataFormat->bitsPerSample = 16; + } +#endif + if (((SLDataFormat_PCM*)pDataFormat)->samplesPerSec > SL_SAMPLINGRATE_48) { + ((SLDataFormat_PCM*)pDataFormat)->samplesPerSec = SL_SAMPLINGRATE_48; + } +#endif + + pDataFormat->containerSize = pDataFormat->bitsPerSample; /* Always tightly packed for now. 
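+    In other words containerSize carries no padding bits, e.g. s16 occupies 16 valid bits in a
+    16 bit container. Note also that samplesPerSec above is expressed in millihertz, which is why
+    the sample rate is multiplied by 1000 (48000 Hz maps to SL_SAMPLINGRATE_48, i.e. 48000000).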
*/ + + return MA_SUCCESS; +} + +static ma_result ma_deconstruct_SLDataFormat_PCM__opensl(ma_SLDataFormat_PCM* pDataFormat, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap) +{ + ma_bool32 isFloatingPoint = MA_FALSE; +#if defined(MA_ANDROID) && __ANDROID_API__ >= 21 + if (pDataFormat->formatType == SL_ANDROID_DATAFORMAT_PCM_EX) { + MA_ASSERT(pDataFormat->representation == SL_ANDROID_PCM_REPRESENTATION_FLOAT); + isFloatingPoint = MA_TRUE; + } +#endif + if (isFloatingPoint) { + if (pDataFormat->bitsPerSample == 32) { + *pFormat = ma_format_f32; + } + } else { + if (pDataFormat->bitsPerSample == 8) { + *pFormat = ma_format_u8; + } else if (pDataFormat->bitsPerSample == 16) { + *pFormat = ma_format_s16; + } else if (pDataFormat->bitsPerSample == 24) { + *pFormat = ma_format_s24; + } else if (pDataFormat->bitsPerSample == 32) { + *pFormat = ma_format_s32; + } + } + + *pChannels = pDataFormat->numChannels; + *pSampleRate = ((SLDataFormat_PCM*)pDataFormat)->samplesPerSec / 1000; + ma_channel_mask_to_channel_map__opensl(pDataFormat->channelMask, pDataFormat->numChannels, pChannelMap); + + return MA_SUCCESS; +} + +static ma_result ma_device_init__opensl(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) +{ +#ifdef MA_ANDROID + SLDataLocator_AndroidSimpleBufferQueue queue; + SLresult resultSL; + ma_uint32 periodSizeInFrames; + size_t bufferSizeInBytes; + const SLInterfaceID itfIDs1[] = {SL_IID_ANDROIDSIMPLEBUFFERQUEUE}; + const SLboolean itfIDsRequired1[] = {SL_BOOLEAN_TRUE}; +#endif + + (void)pContext; + + MA_ASSERT(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it and then attempted to initialize a new device. */ + if (g_maOpenSLInitCounter == 0) { + return MA_INVALID_OPERATION; + } + + if (pConfig->deviceType == ma_device_type_loopback) { + return MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + /* + For now, only supporting Android implementations of OpenSL|ES since that's the only one I've + been able to test with and I currently depend on Android-specific extensions (simple buffer + queues). + */ +#ifdef MA_ANDROID + /* No exclusive mode with OpenSL|ES. */ + if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.shareMode == ma_share_mode_exclusive) || + ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.shareMode == ma_share_mode_exclusive)) { + return MA_SHARE_MODE_NOT_SUPPORTED; + } + + /* Now we can start initializing the device properly. */ + MA_ASSERT(pDevice != NULL); + MA_ZERO_OBJECT(&pDevice->opensl); + + queue.locatorType = SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE; + queue.numBuffers = pConfig->periods; + + + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + ma_SLDataFormat_PCM pcm; + SLDataLocator_IODevice locatorDevice; + SLDataSource source; + SLDataSink sink; + + ma_SLDataFormat_PCM_init__opensl(pConfig->capture.format, pConfig->capture.channels, pConfig->sampleRate, pConfig->capture.channelMap, &pcm); + + locatorDevice.locatorType = SL_DATALOCATOR_IODEVICE; + locatorDevice.deviceType = SL_IODEVICE_AUDIOINPUT; + locatorDevice.deviceID = (pConfig->capture.pDeviceID == NULL) ? 
SL_DEFAULTDEVICEID_AUDIOINPUT : pConfig->capture.pDeviceID->opensl; + locatorDevice.device = NULL; + + source.pLocator = &locatorDevice; + source.pFormat = NULL; + + sink.pLocator = &queue; + sink.pFormat = (SLDataFormat_PCM*)&pcm; + + resultSL = (*g_maEngineSL)->CreateAudioRecorder(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pAudioRecorderObj, &source, &sink, 1, itfIDs1, itfIDsRequired1); + if (resultSL == SL_RESULT_CONTENT_UNSUPPORTED) { + /* Unsupported format. Fall back to something safer and try again. If this fails, just abort. */ + pcm.formatType = SL_DATAFORMAT_PCM; + pcm.numChannels = 1; + ((SLDataFormat_PCM*)&pcm)->samplesPerSec = SL_SAMPLINGRATE_16; /* The name of the sample rate variable is different between SLAndroidDataFormat_PCM_EX and SLDataFormat_PCM. */ + pcm.bitsPerSample = 16; + pcm.containerSize = pcm.bitsPerSample; /* Always tightly packed for now. */ + pcm.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT; + resultSL = (*g_maEngineSL)->CreateAudioRecorder(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pAudioRecorderObj, &source, &sink, 1, itfIDs1, itfIDsRequired1); + } + + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to create audio recorder.", ma_result_from_OpenSL(resultSL)); + } + + resultSL = MA_OPENSL_OBJ(pDevice->opensl.pAudioRecorderObj)->Realize((SLObjectItf)pDevice->opensl.pAudioRecorderObj, SL_BOOLEAN_FALSE); + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to realize audio recorder.", ma_result_from_OpenSL(resultSL)); + } + + resultSL = MA_OPENSL_OBJ(pDevice->opensl.pAudioRecorderObj)->GetInterface((SLObjectItf)pDevice->opensl.pAudioRecorderObj, SL_IID_RECORD, &pDevice->opensl.pAudioRecorder); + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_RECORD interface.", ma_result_from_OpenSL(resultSL)); + } + + resultSL = MA_OPENSL_OBJ(pDevice->opensl.pAudioRecorderObj)->GetInterface((SLObjectItf)pDevice->opensl.pAudioRecorderObj, SL_IID_ANDROIDSIMPLEBUFFERQUEUE, &pDevice->opensl.pBufferQueueCapture); + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_ANDROIDSIMPLEBUFFERQUEUE interface.", ma_result_from_OpenSL(resultSL)); + } + + resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueueCapture)->RegisterCallback((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueueCapture, ma_buffer_queue_callback_capture__opensl_android, pDevice); + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to register buffer queue callback.", ma_result_from_OpenSL(resultSL)); + } + + /* The internal format is determined by the "pcm" object. */ + ma_deconstruct_SLDataFormat_PCM__opensl(&pcm, &pDevice->capture.internalFormat, &pDevice->capture.internalChannels, &pDevice->capture.internalSampleRate, pDevice->capture.internalChannelMap); + + /* Buffer. 
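+        The capture buffer allocated below is one contiguous block covering all periods:
+        internalPeriods * internalPeriodSizeInFrames * bytes-per-frame. As a purely illustrative
+        sizing example, 3 periods of 1024 frames of s16 stereo would come to
+        3 * 1024 * (2 bytes * 2 channels) = 12288 bytes. currentBufferIndexCapture starts at the
+        first period-sized chunk of that block.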
*/ + periodSizeInFrames = pConfig->periodSizeInFrames; + if (periodSizeInFrames == 0) { + periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->periodSizeInMilliseconds, pDevice->capture.internalSampleRate); + } + pDevice->capture.internalPeriods = pConfig->periods; + pDevice->capture.internalPeriodSizeInFrames = periodSizeInFrames; + pDevice->opensl.currentBufferIndexCapture = 0; + + bufferSizeInBytes = pDevice->capture.internalPeriodSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels) * pDevice->capture.internalPeriods; + pDevice->opensl.pBufferCapture = (ma_uint8*)ma__calloc_from_callbacks(bufferSizeInBytes, &pContext->allocationCallbacks); + if (pDevice->opensl.pBufferCapture == NULL) { + ma_device_uninit__opensl(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to allocate memory for data buffer.", MA_OUT_OF_MEMORY); + } + MA_ZERO_MEMORY(pDevice->opensl.pBufferCapture, bufferSizeInBytes); + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + ma_SLDataFormat_PCM pcm; + SLDataSource source; + SLDataLocator_OutputMix outmixLocator; + SLDataSink sink; + + ma_SLDataFormat_PCM_init__opensl(pConfig->playback.format, pConfig->playback.channels, pConfig->sampleRate, pConfig->playback.channelMap, &pcm); + + resultSL = (*g_maEngineSL)->CreateOutputMix(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pOutputMixObj, 0, NULL, NULL); + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to create output mix.", ma_result_from_OpenSL(resultSL)); + } + + resultSL = MA_OPENSL_OBJ(pDevice->opensl.pOutputMixObj)->Realize((SLObjectItf)pDevice->opensl.pOutputMixObj, SL_BOOLEAN_FALSE); + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to realize output mix object.", ma_result_from_OpenSL(resultSL)); + } + + resultSL = MA_OPENSL_OBJ(pDevice->opensl.pOutputMixObj)->GetInterface((SLObjectItf)pDevice->opensl.pOutputMixObj, SL_IID_OUTPUTMIX, &pDevice->opensl.pOutputMix); + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_OUTPUTMIX interface.", ma_result_from_OpenSL(resultSL)); + } + + /* Set the output device. */ + if (pConfig->playback.pDeviceID != NULL) { + SLuint32 deviceID_OpenSL = pConfig->playback.pDeviceID->opensl; + MA_OPENSL_OUTPUTMIX(pDevice->opensl.pOutputMix)->ReRoute((SLOutputMixItf)pDevice->opensl.pOutputMix, 1, &deviceID_OpenSL); + } + + source.pLocator = &queue; + source.pFormat = (SLDataFormat_PCM*)&pcm; + + outmixLocator.locatorType = SL_DATALOCATOR_OUTPUTMIX; + outmixLocator.outputMix = (SLObjectItf)pDevice->opensl.pOutputMixObj; + + sink.pLocator = &outmixLocator; + sink.pFormat = NULL; + + resultSL = (*g_maEngineSL)->CreateAudioPlayer(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pAudioPlayerObj, &source, &sink, 1, itfIDs1, itfIDsRequired1); + if (resultSL == SL_RESULT_CONTENT_UNSUPPORTED) { + /* Unsupported format. Fall back to something safer and try again. If this fails, just abort. */ + pcm.formatType = SL_DATAFORMAT_PCM; + pcm.numChannels = 2; + ((SLDataFormat_PCM*)&pcm)->samplesPerSec = SL_SAMPLINGRATE_16; + pcm.bitsPerSample = 16; + pcm.containerSize = pcm.bitsPerSample; /* Always tightly packed for now. 
*/ + pcm.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT; + resultSL = (*g_maEngineSL)->CreateAudioPlayer(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pAudioPlayerObj, &source, &sink, 1, itfIDs1, itfIDsRequired1); + } + + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to create audio player.", ma_result_from_OpenSL(resultSL)); + } + + resultSL = MA_OPENSL_OBJ(pDevice->opensl.pAudioPlayerObj)->Realize((SLObjectItf)pDevice->opensl.pAudioPlayerObj, SL_BOOLEAN_FALSE); + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to realize audio player.", ma_result_from_OpenSL(resultSL)); + } + + resultSL = MA_OPENSL_OBJ(pDevice->opensl.pAudioPlayerObj)->GetInterface((SLObjectItf)pDevice->opensl.pAudioPlayerObj, SL_IID_PLAY, &pDevice->opensl.pAudioPlayer); + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_PLAY interface.", ma_result_from_OpenSL(resultSL)); + } + + resultSL = MA_OPENSL_OBJ(pDevice->opensl.pAudioPlayerObj)->GetInterface((SLObjectItf)pDevice->opensl.pAudioPlayerObj, SL_IID_ANDROIDSIMPLEBUFFERQUEUE, &pDevice->opensl.pBufferQueuePlayback); + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_ANDROIDSIMPLEBUFFERQUEUE interface.", ma_result_from_OpenSL(resultSL)); + } + + resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueuePlayback)->RegisterCallback((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueuePlayback, ma_buffer_queue_callback_playback__opensl_android, pDevice); + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to register buffer queue callback.", ma_result_from_OpenSL(resultSL)); + } + + /* The internal format is determined by the "pcm" object. */ + ma_deconstruct_SLDataFormat_PCM__opensl(&pcm, &pDevice->playback.internalFormat, &pDevice->playback.internalChannels, &pDevice->playback.internalSampleRate, pDevice->playback.internalChannelMap); + + /* Buffer. 
*/
+        periodSizeInFrames = pConfig->periodSizeInFrames;
+        if (periodSizeInFrames == 0) {
+            periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->periodSizeInMilliseconds, pDevice->playback.internalSampleRate);
+        }
+        pDevice->playback.internalPeriods = pConfig->periods;
+        pDevice->playback.internalPeriodSizeInFrames = periodSizeInFrames;
+        pDevice->opensl.currentBufferIndexPlayback = 0;
+
+        bufferSizeInBytes = pDevice->playback.internalPeriodSizeInFrames * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels) * pDevice->playback.internalPeriods;
+        pDevice->opensl.pBufferPlayback = (ma_uint8*)ma__calloc_from_callbacks(bufferSizeInBytes, &pContext->allocationCallbacks);
+        if (pDevice->opensl.pBufferPlayback == NULL) {
+            ma_device_uninit__opensl(pDevice);
+            return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to allocate memory for data buffer.", MA_OUT_OF_MEMORY);
+        }
+        MA_ZERO_MEMORY(pDevice->opensl.pBufferPlayback, bufferSizeInBytes);
+    }
+
+    if (pConfig->deviceType == ma_device_type_duplex) {
+        ma_uint32 rbSizeInFrames = (ma_uint32)ma_calculate_frame_count_after_resampling(pDevice->sampleRate, pDevice->capture.internalSampleRate, pDevice->capture.internalPeriodSizeInFrames) * pDevice->capture.internalPeriods;
+        ma_result result = ma_pcm_rb_init(pDevice->capture.format, pDevice->capture.channels, rbSizeInFrames, NULL, &pDevice->pContext->allocationCallbacks, &pDevice->opensl.duplexRB);
+        if (result != MA_SUCCESS) {
+            ma_device_uninit__opensl(pDevice);
+            return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to initialize ring buffer.", result);
+        }
+
+        /* We need a period to act as a buffer for cases where the playback and capture devices end up desyncing. */
+        {
+            ma_uint32 marginSizeInFrames = rbSizeInFrames / pDevice->capture.internalPeriods;
+            void* pMarginData;
+            ma_pcm_rb_acquire_write(&pDevice->opensl.duplexRB, &marginSizeInFrames, &pMarginData);
+            {
+                MA_ZERO_MEMORY(pMarginData, marginSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels));
+            }
+            ma_pcm_rb_commit_write(&pDevice->opensl.duplexRB, marginSizeInFrames, pMarginData);
+        }
+    }
+
+    return MA_SUCCESS;
+#else
+    return MA_NO_BACKEND; /* Non-Android implementations are not supported. */
+#endif
+}
+
+static ma_result ma_device_start__opensl(ma_device* pDevice)
+{
+    SLresult resultSL;
+    size_t periodSizeInBytes;
+    ma_uint32 iPeriod;
+
+    MA_ASSERT(pDevice != NULL);
+
+    MA_ASSERT(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it and then attempted to start the device.
*/
+    if (g_maOpenSLInitCounter == 0) {
+        return MA_INVALID_OPERATION;
+    }
+
+    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+        resultSL = MA_OPENSL_RECORD(pDevice->opensl.pAudioRecorder)->SetRecordState((SLRecordItf)pDevice->opensl.pAudioRecorder, SL_RECORDSTATE_RECORDING);
+        if (resultSL != SL_RESULT_SUCCESS) {
+            return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to start internal capture device.", ma_result_from_OpenSL(resultSL));
+        }
+
+        periodSizeInBytes = pDevice->capture.internalPeriodSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+        for (iPeriod = 0; iPeriod < pDevice->capture.internalPeriods; ++iPeriod) {
+            resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueueCapture)->Enqueue((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueueCapture, pDevice->opensl.pBufferCapture + (periodSizeInBytes * iPeriod), periodSizeInBytes);
+            if (resultSL != SL_RESULT_SUCCESS) {
+                MA_OPENSL_RECORD(pDevice->opensl.pAudioRecorder)->SetRecordState((SLRecordItf)pDevice->opensl.pAudioRecorder, SL_RECORDSTATE_STOPPED);
+                return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to enqueue buffer for capture device.", ma_result_from_OpenSL(resultSL));
+            }
+        }
+    }
+
+    if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+        resultSL = MA_OPENSL_PLAY(pDevice->opensl.pAudioPlayer)->SetPlayState((SLPlayItf)pDevice->opensl.pAudioPlayer, SL_PLAYSTATE_PLAYING);
+        if (resultSL != SL_RESULT_SUCCESS) {
+            return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to start internal playback device.", ma_result_from_OpenSL(resultSL));
+        }
+
+        /* In playback mode (no duplex) we need to load some initial buffers. In duplex mode we need to enqueue silent buffers.
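+        The Enqueue loop below primes the buffer queue: OpenSL only fires the buffer queue callback
+        as previously enqueued buffers complete, so every period is submitted up front. In duplex
+        mode there is no captured data yet, which is why the playback buffer is zeroed and the first
+        periods go out as silence; in playback-only mode the first periods are filled from the client
+        via ma_device__read_frames_from_client().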
*/ + if (pDevice->type == ma_device_type_duplex) { + MA_ZERO_MEMORY(pDevice->opensl.pBufferPlayback, pDevice->playback.internalPeriodSizeInFrames * pDevice->playback.internalPeriods * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels)); + } else { + ma_device__read_frames_from_client(pDevice, pDevice->playback.internalPeriodSizeInFrames * pDevice->playback.internalPeriods, pDevice->opensl.pBufferPlayback); + } + + periodSizeInBytes = pDevice->playback.internalPeriodSizeInFrames * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + for (iPeriod = 0; iPeriod < pDevice->playback.internalPeriods; ++iPeriod) { + resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueuePlayback)->Enqueue((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueuePlayback, pDevice->opensl.pBufferPlayback + (periodSizeInBytes * iPeriod), periodSizeInBytes); + if (resultSL != SL_RESULT_SUCCESS) { + MA_OPENSL_PLAY(pDevice->opensl.pAudioPlayer)->SetPlayState((SLPlayItf)pDevice->opensl.pAudioPlayer, SL_PLAYSTATE_STOPPED); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to enqueue buffer for playback device.", ma_result_from_OpenSL(resultSL)); + } + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device_drain__opensl(ma_device* pDevice, ma_device_type deviceType) +{ + SLAndroidSimpleBufferQueueItf pBufferQueue; + + MA_ASSERT(deviceType == ma_device_type_capture || deviceType == ma_device_type_playback); + + if (pDevice->type == ma_device_type_capture) { + pBufferQueue = (SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueueCapture; + pDevice->opensl.isDrainingCapture = MA_TRUE; + } else { + pBufferQueue = (SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueuePlayback; + pDevice->opensl.isDrainingPlayback = MA_TRUE; + } + + for (;;) { + SLAndroidSimpleBufferQueueState state; + + MA_OPENSL_BUFFERQUEUE(pBufferQueue)->GetState(pBufferQueue, &state); + if (state.count == 0) { + break; + } + + ma_sleep(10); + } + + if (pDevice->type == ma_device_type_capture) { + pDevice->opensl.isDrainingCapture = MA_FALSE; + } else { + pDevice->opensl.isDrainingPlayback = MA_FALSE; + } + + return MA_SUCCESS; +} + +static ma_result ma_device_stop__opensl(ma_device* pDevice) +{ + SLresult resultSL; + ma_stop_proc onStop; + + MA_ASSERT(pDevice != NULL); + + MA_ASSERT(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it before stopping/uninitializing the device. 
*/ + if (g_maOpenSLInitCounter == 0) { + return MA_INVALID_OPERATION; + } + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + ma_device_drain__opensl(pDevice, ma_device_type_capture); + + resultSL = MA_OPENSL_RECORD(pDevice->opensl.pAudioRecorder)->SetRecordState((SLRecordItf)pDevice->opensl.pAudioRecorder, SL_RECORDSTATE_STOPPED); + if (resultSL != SL_RESULT_SUCCESS) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to stop internal capture device.", ma_result_from_OpenSL(resultSL)); + } + + MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueueCapture)->Clear((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueueCapture); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ma_device_drain__opensl(pDevice, ma_device_type_playback); + + resultSL = MA_OPENSL_PLAY(pDevice->opensl.pAudioPlayer)->SetPlayState((SLPlayItf)pDevice->opensl.pAudioPlayer, SL_PLAYSTATE_STOPPED); + if (resultSL != SL_RESULT_SUCCESS) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to stop internal playback device.", ma_result_from_OpenSL(resultSL)); + } + + MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueuePlayback)->Clear((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueuePlayback); + } + + /* Make sure the client is aware that the device has stopped. There may be an OpenSL|ES callback for this, but I haven't found it. */ + onStop = pDevice->onStop; + if (onStop) { + onStop(pDevice); + } + + return MA_SUCCESS; +} + + +static ma_result ma_context_uninit__opensl(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_opensl); + (void)pContext; + + /* Uninit global data. */ + if (g_maOpenSLInitCounter > 0) { + if (ma_atomic_decrement_32(&g_maOpenSLInitCounter) == 0) { + (*g_maEngineObjectSL)->Destroy(g_maEngineObjectSL); + } + } + + return MA_SUCCESS; +} + +static ma_result ma_context_init__opensl(const ma_context_config* pConfig, ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + + (void)pConfig; + + /* Initialize global data first if applicable. 
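+    The engine object is shared by every OpenSL context: it is created only when g_maOpenSLInitCounter
+    goes from 0 to 1 and is destroyed again in ma_context_uninit__opensl() when the counter drops back
+    to 0. As a minimal usage sketch from the application side (illustrative only, using the public API
+    declared elsewhere in this file):
+
+        ma_backend backends[] = { ma_backend_opensl };
+        ma_context context;
+        if (ma_context_init(backends, 1, NULL, &context) != MA_SUCCESS) {
+            // Backend could not be initialized.
+        }
+        // ... create devices with ma_device_init(&context, ...) ...
+        ma_context_uninit(&context);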
*/
+    if (ma_atomic_increment_32(&g_maOpenSLInitCounter) == 1) {
+        SLresult resultSL = slCreateEngine(&g_maEngineObjectSL, 0, NULL, 0, NULL, NULL);
+        if (resultSL != SL_RESULT_SUCCESS) {
+            ma_atomic_decrement_32(&g_maOpenSLInitCounter);
+            return ma_result_from_OpenSL(resultSL);
+        }
+
+        (*g_maEngineObjectSL)->Realize(g_maEngineObjectSL, SL_BOOLEAN_FALSE);
+
+        resultSL = (*g_maEngineObjectSL)->GetInterface(g_maEngineObjectSL, SL_IID_ENGINE, &g_maEngineSL);
+        if (resultSL != SL_RESULT_SUCCESS) {
+            (*g_maEngineObjectSL)->Destroy(g_maEngineObjectSL);
+            ma_atomic_decrement_32(&g_maOpenSLInitCounter);
+            return ma_result_from_OpenSL(resultSL);
+        }
+    }
+
+    pContext->isBackendAsynchronous = MA_TRUE;
+
+    pContext->onUninit = ma_context_uninit__opensl;
+    pContext->onDeviceIDEqual = ma_context_is_device_id_equal__opensl;
+    pContext->onEnumDevices = ma_context_enumerate_devices__opensl;
+    pContext->onGetDeviceInfo = ma_context_get_device_info__opensl;
+    pContext->onDeviceInit = ma_device_init__opensl;
+    pContext->onDeviceUninit = ma_device_uninit__opensl;
+    pContext->onDeviceStart = ma_device_start__opensl;
+    pContext->onDeviceStop = ma_device_stop__opensl;
+
+    return MA_SUCCESS;
+}
+#endif /* OpenSL|ES */
+
+
+/******************************************************************************
+
+Web Audio Backend
+
+******************************************************************************/
+#ifdef MA_HAS_WEBAUDIO
+#include <emscripten/emscripten.h>
+
+static ma_bool32 ma_is_capture_supported__webaudio()
+{
+    return EM_ASM_INT({
+        return (navigator.mediaDevices !== undefined && navigator.mediaDevices.getUserMedia !== undefined);
+    }, 0) != 0; /* Must pass in a dummy argument for C99 compatibility. */
+}
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+void EMSCRIPTEN_KEEPALIVE ma_device_process_pcm_frames_capture__webaudio(ma_device* pDevice, int frameCount, float* pFrames)
+{
+    if (pDevice->type == ma_device_type_duplex) {
+        ma_device__handle_duplex_callback_capture(pDevice, (ma_uint32)frameCount, pFrames, &pDevice->webaudio.duplexRB);
+    } else {
+        ma_device__send_frames_to_client(pDevice, (ma_uint32)frameCount, pFrames); /* Send directly to the client. */
+    }
+}
+
+void EMSCRIPTEN_KEEPALIVE ma_device_process_pcm_frames_playback__webaudio(ma_device* pDevice, int frameCount, float* pFrames)
+{
+    if (pDevice->type == ma_device_type_duplex) {
+        ma_device__handle_duplex_callback_playback(pDevice, (ma_uint32)frameCount, pFrames, &pDevice->webaudio.duplexRB);
+    } else {
+        ma_device__read_frames_from_client(pDevice, (ma_uint32)frameCount, pFrames); /* Read directly from the device. */
+    }
+}
+#ifdef __cplusplus
+}
+#endif
+
+static ma_bool32 ma_context_is_device_id_equal__webaudio(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1)
+{
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pID0 != NULL);
+    MA_ASSERT(pID1 != NULL);
+    (void)pContext;
+
+    return ma_strcmp(pID0->webaudio, pID1->webaudio) == 0;
+}
+
+static ma_result ma_context_enumerate_devices__webaudio(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+{
+    ma_bool32 cbResult = MA_TRUE;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(callback != NULL);
+
+    /* Only supporting default devices for now. */
+
+    /* Playback. */
+    if (cbResult) {
+        ma_device_info deviceInfo;
+        MA_ZERO_OBJECT(&deviceInfo);
+        ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
+        cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData);
+    }
+
+    /* Capture.
*/ + if (cbResult) { + if (ma_is_capture_supported__webaudio()) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + } + } + + return MA_SUCCESS; +} + +static ma_result ma_context_get_device_info__webaudio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) +{ + MA_ASSERT(pContext != NULL); + + /* No exclusive mode with Web Audio. */ + if (shareMode == ma_share_mode_exclusive) { + return MA_SHARE_MODE_NOT_SUPPORTED; + } + + if (deviceType == ma_device_type_capture && !ma_is_capture_supported__webaudio()) { + return MA_NO_DEVICE; + } + + + MA_ZERO_MEMORY(pDeviceInfo->id.webaudio, sizeof(pDeviceInfo->id.webaudio)); + + /* Only supporting default devices for now. */ + (void)pDeviceID; + if (deviceType == ma_device_type_playback) { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + } else { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + } + + /* Web Audio can support any number of channels and sample rates. It only supports f32 formats, however. */ + pDeviceInfo->minChannels = 1; + pDeviceInfo->maxChannels = MA_MAX_CHANNELS; + if (pDeviceInfo->maxChannels > 32) { + pDeviceInfo->maxChannels = 32; /* Maximum output channel count is 32 for createScriptProcessor() (JavaScript). */ + } + + /* We can query the sample rate by just using a temporary audio context. */ + pDeviceInfo->minSampleRate = EM_ASM_INT({ + try { + var temp = new (window.AudioContext || window.webkitAudioContext)(); + var sampleRate = temp.sampleRate; + temp.close(); + return sampleRate; + } catch(e) { + return 0; + } + }, 0); /* Must pass in a dummy argument for C99 compatibility. */ + pDeviceInfo->maxSampleRate = pDeviceInfo->minSampleRate; + if (pDeviceInfo->minSampleRate == 0) { + return MA_NO_DEVICE; + } + + /* Web Audio only supports f32. */ + pDeviceInfo->formatCount = 1; + pDeviceInfo->formats[0] = ma_format_f32; + + return MA_SUCCESS; +} + + +static void ma_device_uninit_by_index__webaudio(ma_device* pDevice, ma_device_type deviceType, int deviceIndex) +{ + MA_ASSERT(pDevice != NULL); + + EM_ASM({ + var device = miniaudio.get_device_by_index($0); + + /* Make sure all nodes are disconnected and marked for collection. */ + if (device.scriptNode !== undefined) { + device.scriptNode.onaudioprocess = function(e) {}; /* We want to reset the callback to ensure it doesn't get called after AudioContext.close() has returned. Shouldn't happen since we're disconnecting, but just to be safe... */ + device.scriptNode.disconnect(); + device.scriptNode = undefined; + } + if (device.streamNode !== undefined) { + device.streamNode.disconnect(); + device.streamNode = undefined; + } + + /* + Stop the device. I think there is a chance the callback could get fired after calling this, hence why we want + to clear the callback before closing. + */ + device.webaudio.close(); + device.webaudio = undefined; + + /* Can't forget to free the intermediary buffer. This is the buffer that's shared between JavaScript and C. 
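+        It was allocated with Module._malloc() in ma_device_init_by_type__webaudio(), so it lives on
+        the Emscripten heap and will not be garbage collected. Freeing it here and clearing the related
+        fields also lets a late onaudioprocess callback detect that the device has been uninitialized
+        (it checks intermediaryBuffer === undefined and bails out).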
*/ + if (device.intermediaryBuffer !== undefined) { + Module._free(device.intermediaryBuffer); + device.intermediaryBuffer = undefined; + device.intermediaryBufferView = undefined; + device.intermediaryBufferSizeInBytes = undefined; + } + + /* Make sure the device is untracked so the slot can be reused later. */ + miniaudio.untrack_device_by_index($0); + }, deviceIndex, deviceType); +} + +static void ma_device_uninit__webaudio(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + ma_device_uninit_by_index__webaudio(pDevice, ma_device_type_capture, pDevice->webaudio.indexCapture); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ma_device_uninit_by_index__webaudio(pDevice, ma_device_type_playback, pDevice->webaudio.indexPlayback); + } + + if (pDevice->type == ma_device_type_duplex) { + ma_pcm_rb_uninit(&pDevice->webaudio.duplexRB); + } +} + +static ma_result ma_device_init_by_type__webaudio(ma_context* pContext, const ma_device_config* pConfig, ma_device_type deviceType, ma_device* pDevice) +{ + int deviceIndex; + ma_uint32 internalPeriodSizeInFrames; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pConfig != NULL); + MA_ASSERT(deviceType != ma_device_type_duplex); + MA_ASSERT(pDevice != NULL); + + if (deviceType == ma_device_type_capture && !ma_is_capture_supported__webaudio()) { + return MA_NO_DEVICE; + } + + /* Try calculating an appropriate buffer size. */ + internalPeriodSizeInFrames = pConfig->periodSizeInFrames; + if (internalPeriodSizeInFrames == 0) { + internalPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->periodSizeInMilliseconds, pConfig->sampleRate); + } + + /* The size of the buffer must be a power of 2 and between 256 and 16384. */ + if (internalPeriodSizeInFrames < 256) { + internalPeriodSizeInFrames = 256; + } else if (internalPeriodSizeInFrames > 16384) { + internalPeriodSizeInFrames = 16384; + } else { + internalPeriodSizeInFrames = ma_next_power_of_2(internalPeriodSizeInFrames); + } + + /* We create the device on the JavaScript side and reference it using an index. We use this to make it possible to reference the device between JavaScript and C. */ + deviceIndex = EM_ASM_INT({ + var channels = $0; + var sampleRate = $1; + var bufferSize = $2; /* In PCM frames. */ + var isCapture = $3; + var pDevice = $4; + + if (typeof(miniaudio) === 'undefined') { + return -1; /* Context not initialized. */ + } + + var device = {}; + + /* The AudioContext must be created in a suspended state. */ + device.webaudio = new (window.AudioContext || window.webkitAudioContext)({sampleRate:sampleRate}); + device.webaudio.suspend(); + + /* + We need an intermediary buffer which we use for JavaScript and C interop. This buffer stores interleaved f32 PCM data. Because it's passed between + JavaScript and C it needs to be allocated and freed using Module._malloc() and Module._free(). + */ + device.intermediaryBufferSizeInBytes = channels * bufferSize * 4; + device.intermediaryBuffer = Module._malloc(device.intermediaryBufferSizeInBytes); + device.intermediaryBufferView = new Float32Array(Module.HEAPF32.buffer, device.intermediaryBuffer, device.intermediaryBufferSizeInBytes); + + /* + Both playback and capture devices use a ScriptProcessorNode for performing per-sample operations. + + ScriptProcessorNode is actually deprecated so this is likely to be temporary. The way this works for playback is very simple. 
You just set a callback
+        that's periodically fired, just like a normal audio callback function. But apparently this design is "flawed" and is now deprecated in favour of
+        something called AudioWorklets which _forces_ you to load a _separate_ .js file at run time... nice... Hopefully ScriptProcessorNode will continue to
+        work for years to come, but this may need to change to use AudioSourceBufferNode instead, which I think is what Emscripten uses for its built-in SDL
+        implementation. I'll be avoiding that insane AudioWorklet API like the plague...
+
+        For capture it is a bit unintuitive. We use the ScriptProcessorNode _only_ to get the raw PCM data. It is connected to an AudioContext just like the
+        playback case, however we just output silence to the AudioContext instead of passing any real data. It would make more sense to me to use the
+        MediaRecorder API, but unfortunately you need to specify a MIME type (Opus, Vorbis, etc.) for the binary blob that's returned to the client, but I've
+        been unable to figure out how to get this as raw PCM. The closest I can think of is to use the MIME type for WAV files and just parse it, but I don't know
+        how well this would work. Although ScriptProcessorNode is deprecated, in practice it seems to have pretty good browser support so I'm leaving it like
+        this for now. If anyone knows how I could get raw PCM data using the MediaRecorder API please let me know!
+        */
+        device.scriptNode = device.webaudio.createScriptProcessor(bufferSize, channels, channels);
+
+        if (isCapture) {
+            device.scriptNode.onaudioprocess = function(e) {
+                if (device.intermediaryBuffer === undefined) {
+                    return; /* This means the device has been uninitialized. */
+                }
+
+                /* Make sure silence is output to the AudioContext destination. Not doing this will cause sound to come out of the speakers! */
+                for (var iChannel = 0; iChannel < e.outputBuffer.numberOfChannels; ++iChannel) {
+                    e.outputBuffer.getChannelData(iChannel).fill(0.0);
+                }
+
+                /* There are some situations where we may want to send silence to the client. */
+                var sendSilence = false;
+                if (device.streamNode === undefined) {
+                    sendSilence = true;
+                }
+
+                /* Sanity check. This will never happen, right? */
+                if (e.inputBuffer.numberOfChannels != channels) {
+                    console.log("Capture: Channel count mismatch. " + e.inputBuffer.numberOfChannels + " != " + channels + ". Sending silence.");
+                    sendSilence = true;
+                }
+
+                /* This looped design guards against the situation where e.inputBuffer is a different size to the original buffer size. Should never happen in practice. */
+                var totalFramesProcessed = 0;
+                while (totalFramesProcessed < e.inputBuffer.length) {
+                    var framesRemaining = e.inputBuffer.length - totalFramesProcessed;
+                    var framesToProcess = framesRemaining;
+                    if (framesToProcess > (device.intermediaryBufferSizeInBytes/channels/4)) {
+                        framesToProcess = (device.intermediaryBufferSizeInBytes/channels/4);
+                    }
+
+                    /* We need to do the reverse of the playback case. We need to interleave the input data and copy it into the intermediary buffer. Then we send it to the client. */
+                    if (sendSilence) {
+                        device.intermediaryBufferView.fill(0.0);
+                    } else {
+                        for (var iFrame = 0; iFrame < framesToProcess; ++iFrame) {
+                            for (var iChannel = 0; iChannel < e.inputBuffer.numberOfChannels; ++iChannel) {
+                                device.intermediaryBufferView[iFrame*channels + iChannel] = e.inputBuffer.getChannelData(iChannel)[totalFramesProcessed + iFrame];
+                            }
+                        }
+                    }
+
+                    /* Send data to the client from our intermediary buffer.
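+                    The ccall() below invokes ma_device_process_pcm_frames_capture__webaudio(), the
+                    EMSCRIPTEN_KEEPALIVE function defined near the top of this backend, passing the
+                    ma_device pointer, the number of frames just interleaved, and the heap address of
+                    the intermediary buffer.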
*/
+                    ccall("ma_device_process_pcm_frames_capture__webaudio", "undefined", ["number", "number", "number"], [pDevice, framesToProcess, device.intermediaryBuffer]);
+
+                    totalFramesProcessed += framesToProcess;
+                }
+            };
+
+            navigator.mediaDevices.getUserMedia({audio:true, video:false})
+                .then(function(stream) {
+                    device.streamNode = device.webaudio.createMediaStreamSource(stream);
+                    device.streamNode.connect(device.scriptNode);
+                    device.scriptNode.connect(device.webaudio.destination);
+                })
+                .catch(function(error) {
+                    /* I think this should output silence... */
+                    device.scriptNode.connect(device.webaudio.destination);
+                });
+        } else {
+            device.scriptNode.onaudioprocess = function(e) {
+                if (device.intermediaryBuffer === undefined) {
+                    return; /* This means the device has been uninitialized. */
+                }
+
+                var outputSilence = false;
+
+                /* Sanity check. This will never happen, right? */
+                if (e.outputBuffer.numberOfChannels != channels) {
+                    console.log("Playback: Channel count mismatch. " + e.outputBuffer.numberOfChannels + " != " + channels + ". Outputting silence.");
+                    outputSilence = true;
+                    return;
+                }
+
+                /* This looped design guards against the situation where e.outputBuffer is a different size to the original buffer size. Should never happen in practice. */
+                var totalFramesProcessed = 0;
+                while (totalFramesProcessed < e.outputBuffer.length) {
+                    var framesRemaining = e.outputBuffer.length - totalFramesProcessed;
+                    var framesToProcess = framesRemaining;
+                    if (framesToProcess > (device.intermediaryBufferSizeInBytes/channels/4)) {
+                        framesToProcess = (device.intermediaryBufferSizeInBytes/channels/4);
+                    }
+
+                    /* Read data from the client into our intermediary buffer. */
+                    ccall("ma_device_process_pcm_frames_playback__webaudio", "undefined", ["number", "number", "number"], [pDevice, framesToProcess, device.intermediaryBuffer]);
+
+                    /* At this point we'll have data in our intermediary buffer which we now need to deinterleave and copy over to the output buffers. */
+                    if (outputSilence) {
+                        for (var iChannel = 0; iChannel < e.outputBuffer.numberOfChannels; ++iChannel) {
+                            e.outputBuffer.getChannelData(iChannel).fill(0.0);
+                        }
+                    } else {
+                        for (var iChannel = 0; iChannel < e.outputBuffer.numberOfChannels; ++iChannel) {
+                            for (var iFrame = 0; iFrame < framesToProcess; ++iFrame) {
+                                e.outputBuffer.getChannelData(iChannel)[totalFramesProcessed + iFrame] = device.intermediaryBufferView[iFrame*channels + iChannel];
+                            }
+                        }
+                    }
+
+                    totalFramesProcessed += framesToProcess;
+                }
+            };
+
+            device.scriptNode.connect(device.webaudio.destination);
+        }
+
+        return miniaudio.track_device(device);
+    }, (deviceType == ma_device_type_capture) ?
pConfig->capture.channels : pConfig->playback.channels, pConfig->sampleRate, internalPeriodSizeInFrames, deviceType == ma_device_type_capture, pDevice); + + if (deviceIndex < 0) { + return MA_FAILED_TO_OPEN_BACKEND_DEVICE; + } + + if (deviceType == ma_device_type_capture) { + pDevice->webaudio.indexCapture = deviceIndex; + pDevice->capture.internalFormat = ma_format_f32; + pDevice->capture.internalChannels = pConfig->capture.channels; + ma_get_standard_channel_map(ma_standard_channel_map_webaudio, pDevice->capture.internalChannels, pDevice->capture.internalChannelMap); + pDevice->capture.internalSampleRate = EM_ASM_INT({ return miniaudio.get_device_by_index($0).webaudio.sampleRate; }, deviceIndex); + pDevice->capture.internalPeriodSizeInFrames = internalPeriodSizeInFrames; + pDevice->capture.internalPeriods = 1; + } else { + pDevice->webaudio.indexPlayback = deviceIndex; + pDevice->playback.internalFormat = ma_format_f32; + pDevice->playback.internalChannels = pConfig->playback.channels; + ma_get_standard_channel_map(ma_standard_channel_map_webaudio, pDevice->playback.internalChannels, pDevice->playback.internalChannelMap); + pDevice->playback.internalSampleRate = EM_ASM_INT({ return miniaudio.get_device_by_index($0).webaudio.sampleRate; }, deviceIndex); + pDevice->playback.internalPeriodSizeInFrames = internalPeriodSizeInFrames; + pDevice->playback.internalPeriods = 1; + } + + return MA_SUCCESS; +} + +static ma_result ma_device_init__webaudio(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) +{ + ma_result result; + + if (pConfig->deviceType == ma_device_type_loopback) { + return MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + /* No exclusive mode with Web Audio. */ + if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.shareMode == ma_share_mode_exclusive) || + ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.shareMode == ma_share_mode_exclusive)) { + return MA_SHARE_MODE_NOT_SUPPORTED; + } + + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + result = ma_device_init_by_type__webaudio(pContext, pConfig, ma_device_type_capture, pDevice); + if (result != MA_SUCCESS) { + return result; + } + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + result = ma_device_init_by_type__webaudio(pContext, pConfig, ma_device_type_playback, pDevice); + if (result != MA_SUCCESS) { + if (pConfig->deviceType == ma_device_type_duplex) { + ma_device_uninit_by_index__webaudio(pDevice, ma_device_type_capture, pDevice->webaudio.indexCapture); + } + return result; + } + } + + /* + We need a ring buffer for moving data from the capture device to the playback device. The capture callback is the producer + and the playback callback is the consumer. The buffer needs to be large enough to hold internalPeriodSizeInFrames based on + the external sample rate. 
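+    Roughly, the capture period size is rescaled from the internal capture rate to the client
+    sample rate and then doubled. As a purely illustrative example, a 1024-frame internal period
+    captured at 48000 Hz with a client rate of 44100 Hz works out to about
+    1024 * 44100 / 48000 = 941 frames, so the ring buffer would hold roughly 1882 frames.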
+
+    */
+    if (pConfig->deviceType == ma_device_type_duplex) {
+        ma_uint32 rbSizeInFrames = (ma_uint32)ma_calculate_frame_count_after_resampling(pDevice->sampleRate, pDevice->capture.internalSampleRate, pDevice->capture.internalPeriodSizeInFrames) * 2;
+        result = ma_pcm_rb_init(pDevice->capture.format, pDevice->capture.channels, rbSizeInFrames, NULL, &pDevice->pContext->allocationCallbacks, &pDevice->webaudio.duplexRB);
+        if (result != MA_SUCCESS) {
+            if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+                ma_device_uninit_by_index__webaudio(pDevice, ma_device_type_capture, pDevice->webaudio.indexCapture);
+            }
+            if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+                ma_device_uninit_by_index__webaudio(pDevice, ma_device_type_playback, pDevice->webaudio.indexPlayback);
+            }
+            return result;
+        }
+
+        /* We need a period to act as a buffer for cases where the playback and capture devices end up desyncing. */
+        {
+            ma_uint32 marginSizeInFrames = rbSizeInFrames / 3; /* <-- Dividing by 3 because internalPeriods is always set to 1 for WebAudio. */
+            void* pMarginData;
+            ma_pcm_rb_acquire_write(&pDevice->webaudio.duplexRB, &marginSizeInFrames, &pMarginData);
+            {
+                MA_ZERO_MEMORY(pMarginData, marginSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels));
+            }
+            ma_pcm_rb_commit_write(&pDevice->webaudio.duplexRB, marginSizeInFrames, pMarginData);
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_device_start__webaudio(ma_device* pDevice)
+{
+    MA_ASSERT(pDevice != NULL);
+
+    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+        EM_ASM({
+            miniaudio.get_device_by_index($0).webaudio.resume();
+        }, pDevice->webaudio.indexCapture);
+    }
+
+    if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+        EM_ASM({
+            miniaudio.get_device_by_index($0).webaudio.resume();
+        }, pDevice->webaudio.indexPlayback);
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_device_stop__webaudio(ma_device* pDevice)
+{
+    MA_ASSERT(pDevice != NULL);
+
+    /*
+    From the WebAudio API documentation for AudioContext.suspend():
+
+        Suspends the progression of AudioContext's currentTime, allows any current context processing blocks that are already processed to be played to the
+        destination, and then allows the system to release its claim on audio hardware.
+
+    I read this to mean that "any current context processing blocks" are processed by suspend() - i.e. they are drained. We therefore shouldn't need to
+    do any kind of explicit draining.
+    */
+
+    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+        EM_ASM({
+            miniaudio.get_device_by_index($0).webaudio.suspend();
+        }, pDevice->webaudio.indexCapture);
+    }
+
+    if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+        EM_ASM({
+            miniaudio.get_device_by_index($0).webaudio.suspend();
+        }, pDevice->webaudio.indexPlayback);
+    }
+
+    ma_stop_proc onStop = pDevice->onStop;
+    if (onStop) {
+        onStop(pDevice);
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_context_uninit__webaudio(ma_context* pContext)
+{
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pContext->backend == ma_backend_webaudio);
+
+    /* Nothing needs to be done here.
*/ + (void)pContext; + + return MA_SUCCESS; +} + +static ma_result ma_context_init__webaudio(const ma_context_config* pConfig, ma_context* pContext) +{ + int resultFromJS; + + MA_ASSERT(pContext != NULL); + + /* Here is where our global JavaScript object is initialized. */ + resultFromJS = EM_ASM_INT({ + if ((window.AudioContext || window.webkitAudioContext) === undefined) { + return 0; /* Web Audio not supported. */ + } + + if (typeof(miniaudio) === 'undefined') { + miniaudio = {}; + miniaudio.devices = []; /* Device cache for mapping devices to indexes for JavaScript/C interop. */ + + miniaudio.track_device = function(device) { + /* Try inserting into a free slot first. */ + for (var iDevice = 0; iDevice < miniaudio.devices.length; ++iDevice) { + if (miniaudio.devices[iDevice] == null) { + miniaudio.devices[iDevice] = device; + return iDevice; + } + } + + /* Getting here means there is no empty slots in the array so we just push to the end. */ + miniaudio.devices.push(device); + return miniaudio.devices.length - 1; + }; + + miniaudio.untrack_device_by_index = function(deviceIndex) { + /* We just set the device's slot to null. The slot will get reused in the next call to ma_track_device. */ + miniaudio.devices[deviceIndex] = null; + + /* Trim the array if possible. */ + while (miniaudio.devices.length > 0) { + if (miniaudio.devices[miniaudio.devices.length-1] == null) { + miniaudio.devices.pop(); + } else { + break; + } + } + }; + + miniaudio.untrack_device = function(device) { + for (var iDevice = 0; iDevice < miniaudio.devices.length; ++iDevice) { + if (miniaudio.devices[iDevice] == device) { + return miniaudio.untrack_device_by_index(iDevice); + } + } + }; + + miniaudio.get_device_by_index = function(deviceIndex) { + return miniaudio.devices[deviceIndex]; + }; + } + + return 1; + }, 0); /* Must pass in a dummy argument for C99 compatibility. */ + + if (resultFromJS != 1) { + return MA_FAILED_TO_INIT_BACKEND; + } + + + pContext->isBackendAsynchronous = MA_TRUE; + + pContext->onUninit = ma_context_uninit__webaudio; + pContext->onDeviceIDEqual = ma_context_is_device_id_equal__webaudio; + pContext->onEnumDevices = ma_context_enumerate_devices__webaudio; + pContext->onGetDeviceInfo = ma_context_get_device_info__webaudio; + pContext->onDeviceInit = ma_device_init__webaudio; + pContext->onDeviceUninit = ma_device_uninit__webaudio; + pContext->onDeviceStart = ma_device_start__webaudio; + pContext->onDeviceStop = ma_device_stop__webaudio; + + (void)pConfig; /* Unused. */ + return MA_SUCCESS; +} +#endif /* Web Audio */ + + + +static ma_bool32 ma__is_channel_map_valid(const ma_channel* channelMap, ma_uint32 channels) +{ + /* A blank channel map should be allowed, in which case it should use an appropriate default which will depend on context. */ + if (channelMap[0] != MA_CHANNEL_NONE) { + ma_uint32 iChannel; + + if (channels == 0) { + return MA_FALSE; /* No channels. */ + } + + /* A channel cannot be present in the channel map more than once. 
*/ + for (iChannel = 0; iChannel < channels; ++iChannel) { + ma_uint32 jChannel; + for (jChannel = iChannel + 1; jChannel < channels; ++jChannel) { + if (channelMap[iChannel] == channelMap[jChannel]) { + return MA_FALSE; + } + } + } + } + + return MA_TRUE; +} + + +static ma_result ma_device__post_init_setup(ma_device* pDevice, ma_device_type deviceType) +{ + ma_result result; + + MA_ASSERT(pDevice != NULL); + + if (deviceType == ma_device_type_capture || deviceType == ma_device_type_duplex) { + if (pDevice->capture.usingDefaultFormat) { + pDevice->capture.format = pDevice->capture.internalFormat; + } + if (pDevice->capture.usingDefaultChannels) { + pDevice->capture.channels = pDevice->capture.internalChannels; + } + if (pDevice->capture.usingDefaultChannelMap) { + if (pDevice->capture.internalChannels == pDevice->capture.channels) { + ma_channel_map_copy(pDevice->capture.channelMap, pDevice->capture.internalChannelMap, pDevice->capture.channels); + } else { + ma_get_standard_channel_map(ma_standard_channel_map_default, pDevice->capture.channels, pDevice->capture.channelMap); + } + } + } + + if (deviceType == ma_device_type_playback || deviceType == ma_device_type_duplex) { + if (pDevice->playback.usingDefaultFormat) { + pDevice->playback.format = pDevice->playback.internalFormat; + } + if (pDevice->playback.usingDefaultChannels) { + pDevice->playback.channels = pDevice->playback.internalChannels; + } + if (pDevice->playback.usingDefaultChannelMap) { + if (pDevice->playback.internalChannels == pDevice->playback.channels) { + ma_channel_map_copy(pDevice->playback.channelMap, pDevice->playback.internalChannelMap, pDevice->playback.channels); + } else { + ma_get_standard_channel_map(ma_standard_channel_map_default, pDevice->playback.channels, pDevice->playback.channelMap); + } + } + } + + if (pDevice->usingDefaultSampleRate) { + if (deviceType == ma_device_type_capture || deviceType == ma_device_type_duplex) { + pDevice->sampleRate = pDevice->capture.internalSampleRate; + } else { + pDevice->sampleRate = pDevice->playback.internalSampleRate; + } + } + + /* PCM converters. */ + if (deviceType == ma_device_type_capture || deviceType == ma_device_type_duplex || deviceType == ma_device_type_loopback) { + /* Converting from internal device format to client format. 
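+        The config below feeds the backend-reported internal format/channels/rate in on one side and
+        the client-requested values out on the other; the playback branch further down sets up the
+        mirror image (client in, internal out). A minimal standalone sketch of the same idea, with
+        illustrative values only:
+
+            ma_data_converter_config cfg = ma_data_converter_config_init_default();
+            cfg.formatIn      = ma_format_s16;   /* e.g. what the backend delivers. */
+            cfg.channelsIn    = 2;
+            cfg.sampleRateIn  = 48000;
+            cfg.formatOut     = ma_format_f32;   /* e.g. what the client asked for. */
+            cfg.channelsOut   = 2;
+            cfg.sampleRateOut = 44100;
+
+            ma_data_converter converter;
+            ma_data_converter_init(&cfg, &converter);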
*/ + ma_data_converter_config converterConfig = ma_data_converter_config_init_default(); + converterConfig.formatIn = pDevice->capture.internalFormat; + converterConfig.channelsIn = pDevice->capture.internalChannels; + converterConfig.sampleRateIn = pDevice->capture.internalSampleRate; + ma_channel_map_copy(converterConfig.channelMapIn, pDevice->capture.internalChannelMap, pDevice->capture.internalChannels); + converterConfig.formatOut = pDevice->capture.format; + converterConfig.channelsOut = pDevice->capture.channels; + converterConfig.sampleRateOut = pDevice->sampleRate; + ma_channel_map_copy(converterConfig.channelMapOut, pDevice->capture.channelMap, pDevice->capture.channels); + converterConfig.resampling.allowDynamicSampleRate = MA_FALSE; + converterConfig.resampling.algorithm = pDevice->resampling.algorithm; + converterConfig.resampling.linear.lpfOrder = pDevice->resampling.linear.lpfOrder; + converterConfig.resampling.speex.quality = pDevice->resampling.speex.quality; + + result = ma_data_converter_init(&converterConfig, &pDevice->capture.converter); + if (result != MA_SUCCESS) { + return result; + } + } + + if (deviceType == ma_device_type_playback || deviceType == ma_device_type_duplex) { + /* Converting from client format to device format. */ + ma_data_converter_config converterConfig = ma_data_converter_config_init_default(); + converterConfig.formatIn = pDevice->playback.format; + converterConfig.channelsIn = pDevice->playback.channels; + converterConfig.sampleRateIn = pDevice->sampleRate; + ma_channel_map_copy(converterConfig.channelMapIn, pDevice->playback.channelMap, pDevice->playback.channels); + converterConfig.formatOut = pDevice->playback.internalFormat; + converterConfig.channelsOut = pDevice->playback.internalChannels; + converterConfig.sampleRateOut = pDevice->playback.internalSampleRate; + ma_channel_map_copy(converterConfig.channelMapOut, pDevice->playback.internalChannelMap, pDevice->playback.internalChannels); + converterConfig.resampling.allowDynamicSampleRate = MA_FALSE; + converterConfig.resampling.algorithm = pDevice->resampling.algorithm; + converterConfig.resampling.linear.lpfOrder = pDevice->resampling.linear.lpfOrder; + converterConfig.resampling.speex.quality = pDevice->resampling.speex.quality; + + result = ma_data_converter_init(&converterConfig, &pDevice->playback.converter); + if (result != MA_SUCCESS) { + return result; + } + } + + return MA_SUCCESS; +} + + +static ma_thread_result MA_THREADCALL ma_worker_thread(void* pData) +{ + ma_device* pDevice = (ma_device*)pData; + MA_ASSERT(pDevice != NULL); + +#ifdef MA_WIN32 + ma_CoInitializeEx(pDevice->pContext, NULL, MA_COINIT_VALUE); +#endif + + /* + When the device is being initialized it's initial state is set to MA_STATE_UNINITIALIZED. Before returning from + ma_device_init(), the state needs to be set to something valid. In miniaudio the device's default state immediately + after initialization is stopped, so therefore we need to mark the device as such. miniaudio will wait on the worker + thread to signal an event to know when the worker thread is ready for action. + */ + ma_device__set_state(pDevice, MA_STATE_STOPPED); + ma_event_signal(&pDevice->stopEvent); + + for (;;) { /* <-- This loop just keeps the thread alive. The main audio loop is inside. */ + ma_stop_proc onStop; + + /* We wait on an event to know when something has requested that the device be started and the main loop entered. */ + ma_event_wait(&pDevice->wakeupEvent); + + /* Default result code. 
*/
+        pDevice->workResult = MA_SUCCESS;
+
+        /* If the reason for the wake up is that we are terminating, just break from the loop. */
+        if (ma_device__get_state(pDevice) == MA_STATE_UNINITIALIZED) {
+            break;
+        }
+
+        /*
+        Getting to this point means the device is wanting to get started. The function that has requested that the device
+        be started will be waiting on an event (pDevice->startEvent) which means we need to make sure we signal the event
+        in both the success and error case. It's important that the state of the device is set _before_ signaling the event.
+        */
+        MA_ASSERT(ma_device__get_state(pDevice) == MA_STATE_STARTING);
+
+        /* Make sure the state is set appropriately. */
+        ma_device__set_state(pDevice, MA_STATE_STARTED);
+        ma_event_signal(&pDevice->startEvent);
+
+        if (pDevice->pContext->onDeviceMainLoop != NULL) {
+            pDevice->pContext->onDeviceMainLoop(pDevice);
+        } else {
+            ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "No main loop implementation.", MA_API_NOT_FOUND);
+        }
+
+        /*
+        Getting here means we have broken from the main loop, which happens when the application has requested that the device be stopped. Note that this
+        may have actually already happened above if the device was lost and miniaudio has attempted to re-initialize the device. In this case we
+        don't want to be doing this a second time.
+        */
+        if (ma_device__get_state(pDevice) != MA_STATE_UNINITIALIZED) {
+            if (pDevice->pContext->onDeviceStop) {
+                pDevice->pContext->onDeviceStop(pDevice);
+            }
+        }
+
+        /* After the device has stopped, make sure an event is posted. */
+        onStop = pDevice->onStop;
+        if (onStop) {
+            onStop(pDevice);
+        }
+
+        /*
+        A function somewhere is waiting for the device to have stopped for real so we need to signal an event to allow it to continue. Note that
+        it's possible that the device has been uninitialized which means we need to _not_ change the status to stopped. We cannot go from an
+        uninitialized state to stopped state.
+        */
+        if (ma_device__get_state(pDevice) != MA_STATE_UNINITIALIZED) {
+            ma_device__set_state(pDevice, MA_STATE_STOPPED);
+            ma_event_signal(&pDevice->stopEvent);
+        }
+    }
+
+    /* Make sure we aren't continuously waiting on a stop event. */
+    ma_event_signal(&pDevice->stopEvent); /* <-- Is this still needed? */
+
+#ifdef MA_WIN32
+    ma_CoUninitialize(pDevice->pContext);
+#endif
+
+    return (ma_thread_result)0;
+}
+
+
+/* Helper for determining whether or not the given device is initialized.
*/ +static ma_bool32 ma_device__is_initialized(ma_device* pDevice) +{ + if (pDevice == NULL) { + return MA_FALSE; + } + + return ma_device__get_state(pDevice) != MA_STATE_UNINITIALIZED; +} + + +#ifdef MA_WIN32 +static ma_result ma_context_uninit_backend_apis__win32(ma_context* pContext) +{ + ma_CoUninitialize(pContext); + ma_dlclose(pContext, pContext->win32.hUser32DLL); + ma_dlclose(pContext, pContext->win32.hOle32DLL); + ma_dlclose(pContext, pContext->win32.hAdvapi32DLL); + + return MA_SUCCESS; +} + +static ma_result ma_context_init_backend_apis__win32(ma_context* pContext) +{ +#ifdef MA_WIN32_DESKTOP + /* Ole32.dll */ + pContext->win32.hOle32DLL = ma_dlopen(pContext, "ole32.dll"); + if (pContext->win32.hOle32DLL == NULL) { + return MA_FAILED_TO_INIT_BACKEND; + } + + pContext->win32.CoInitializeEx = (ma_proc)ma_dlsym(pContext, pContext->win32.hOle32DLL, "CoInitializeEx"); + pContext->win32.CoUninitialize = (ma_proc)ma_dlsym(pContext, pContext->win32.hOle32DLL, "CoUninitialize"); + pContext->win32.CoCreateInstance = (ma_proc)ma_dlsym(pContext, pContext->win32.hOle32DLL, "CoCreateInstance"); + pContext->win32.CoTaskMemFree = (ma_proc)ma_dlsym(pContext, pContext->win32.hOle32DLL, "CoTaskMemFree"); + pContext->win32.PropVariantClear = (ma_proc)ma_dlsym(pContext, pContext->win32.hOle32DLL, "PropVariantClear"); + pContext->win32.StringFromGUID2 = (ma_proc)ma_dlsym(pContext, pContext->win32.hOle32DLL, "StringFromGUID2"); + + + /* User32.dll */ + pContext->win32.hUser32DLL = ma_dlopen(pContext, "user32.dll"); + if (pContext->win32.hUser32DLL == NULL) { + return MA_FAILED_TO_INIT_BACKEND; + } + + pContext->win32.GetForegroundWindow = (ma_proc)ma_dlsym(pContext, pContext->win32.hUser32DLL, "GetForegroundWindow"); + pContext->win32.GetDesktopWindow = (ma_proc)ma_dlsym(pContext, pContext->win32.hUser32DLL, "GetDesktopWindow"); + + + /* Advapi32.dll */ + pContext->win32.hAdvapi32DLL = ma_dlopen(pContext, "advapi32.dll"); + if (pContext->win32.hAdvapi32DLL == NULL) { + return MA_FAILED_TO_INIT_BACKEND; + } + + pContext->win32.RegOpenKeyExA = (ma_proc)ma_dlsym(pContext, pContext->win32.hAdvapi32DLL, "RegOpenKeyExA"); + pContext->win32.RegCloseKey = (ma_proc)ma_dlsym(pContext, pContext->win32.hAdvapi32DLL, "RegCloseKey"); + pContext->win32.RegQueryValueExA = (ma_proc)ma_dlsym(pContext, pContext->win32.hAdvapi32DLL, "RegQueryValueExA"); +#endif + + ma_CoInitializeEx(pContext, NULL, MA_COINIT_VALUE); + return MA_SUCCESS; +} +#else +static ma_result ma_context_uninit_backend_apis__nix(ma_context* pContext) +{ +#if defined(MA_USE_RUNTIME_LINKING_FOR_PTHREAD) && !defined(MA_NO_RUNTIME_LINKING) + ma_dlclose(pContext, pContext->posix.pthreadSO); +#else + (void)pContext; +#endif + + return MA_SUCCESS; +} + +static ma_result ma_context_init_backend_apis__nix(ma_context* pContext) +{ + /* pthread */ +#if defined(MA_USE_RUNTIME_LINKING_FOR_PTHREAD) && !defined(MA_NO_RUNTIME_LINKING) + const char* libpthreadFileNames[] = { + "libpthread.so", + "libpthread.so.0", + "libpthread.dylib" + }; + size_t i; + + for (i = 0; i < sizeof(libpthreadFileNames) / sizeof(libpthreadFileNames[0]); ++i) { + pContext->posix.pthreadSO = ma_dlopen(pContext, libpthreadFileNames[i]); + if (pContext->posix.pthreadSO != NULL) { + break; + } + } + + if (pContext->posix.pthreadSO == NULL) { + return MA_FAILED_TO_INIT_BACKEND; + } + + pContext->posix.pthread_create = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_create"); + pContext->posix.pthread_join = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_join"); 
+ pContext->posix.pthread_mutex_init = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_mutex_init"); + pContext->posix.pthread_mutex_destroy = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_mutex_destroy"); + pContext->posix.pthread_mutex_lock = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_mutex_lock"); + pContext->posix.pthread_mutex_unlock = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_mutex_unlock"); + pContext->posix.pthread_cond_init = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_cond_init"); + pContext->posix.pthread_cond_destroy = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_cond_destroy"); + pContext->posix.pthread_cond_wait = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_cond_wait"); + pContext->posix.pthread_cond_signal = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_cond_signal"); + pContext->posix.pthread_attr_init = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_attr_init"); + pContext->posix.pthread_attr_destroy = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_attr_destroy"); + pContext->posix.pthread_attr_setschedpolicy = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_attr_setschedpolicy"); + pContext->posix.pthread_attr_getschedparam = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_attr_getschedparam"); + pContext->posix.pthread_attr_setschedparam = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_attr_setschedparam"); +#else + pContext->posix.pthread_create = (ma_proc)pthread_create; + pContext->posix.pthread_join = (ma_proc)pthread_join; + pContext->posix.pthread_mutex_init = (ma_proc)pthread_mutex_init; + pContext->posix.pthread_mutex_destroy = (ma_proc)pthread_mutex_destroy; + pContext->posix.pthread_mutex_lock = (ma_proc)pthread_mutex_lock; + pContext->posix.pthread_mutex_unlock = (ma_proc)pthread_mutex_unlock; + pContext->posix.pthread_cond_init = (ma_proc)pthread_cond_init; + pContext->posix.pthread_cond_destroy = (ma_proc)pthread_cond_destroy; + pContext->posix.pthread_cond_wait = (ma_proc)pthread_cond_wait; + pContext->posix.pthread_cond_signal = (ma_proc)pthread_cond_signal; + pContext->posix.pthread_attr_init = (ma_proc)pthread_attr_init; + pContext->posix.pthread_attr_destroy = (ma_proc)pthread_attr_destroy; +#if !defined(__EMSCRIPTEN__) + pContext->posix.pthread_attr_setschedpolicy = (ma_proc)pthread_attr_setschedpolicy; + pContext->posix.pthread_attr_getschedparam = (ma_proc)pthread_attr_getschedparam; + pContext->posix.pthread_attr_setschedparam = (ma_proc)pthread_attr_setschedparam; +#endif +#endif + + return MA_SUCCESS; +} +#endif + +static ma_result ma_context_init_backend_apis(ma_context* pContext) +{ + ma_result result; +#ifdef MA_WIN32 + result = ma_context_init_backend_apis__win32(pContext); +#else + result = ma_context_init_backend_apis__nix(pContext); +#endif + + return result; +} + +static ma_result ma_context_uninit_backend_apis(ma_context* pContext) +{ + ma_result result; +#ifdef MA_WIN32 + result = ma_context_uninit_backend_apis__win32(pContext); +#else + result = ma_context_uninit_backend_apis__nix(pContext); +#endif + + return result; +} + + +static ma_bool32 ma_context_is_backend_asynchronous(ma_context* pContext) +{ + return pContext->isBackendAsynchronous; +} + + +MA_API ma_context_config ma_context_config_init() +{ + ma_context_config config; + MA_ZERO_OBJECT(&config); + + return config; +} + +MA_API ma_result 
ma_context_init(const ma_backend backends[], ma_uint32 backendCount, const ma_context_config* pConfig, ma_context* pContext) +{ + ma_result result; + ma_context_config config; + ma_backend defaultBackends[ma_backend_null+1]; + ma_uint32 iBackend; + ma_backend* pBackendsToIterate; + ma_uint32 backendsToIterateCount; + + if (pContext == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pContext); + + /* Always make sure the config is set first to ensure properties are available as soon as possible. */ + if (pConfig != NULL) { + config = *pConfig; + } else { + config = ma_context_config_init(); + } + + pContext->logCallback = config.logCallback; + pContext->threadPriority = config.threadPriority; + pContext->pUserData = config.pUserData; + + result = ma_allocation_callbacks_init_copy(&pContext->allocationCallbacks, &config.allocationCallbacks); + if (result != MA_SUCCESS) { + return result; + } + + /* Backend APIs need to be initialized first. This is where external libraries will be loaded and linked. */ + result = ma_context_init_backend_apis(pContext); + if (result != MA_SUCCESS) { + return result; + } + + for (iBackend = 0; iBackend <= ma_backend_null; ++iBackend) { + defaultBackends[iBackend] = (ma_backend)iBackend; + } + + pBackendsToIterate = (ma_backend*)backends; + backendsToIterateCount = backendCount; + if (pBackendsToIterate == NULL) { + pBackendsToIterate = (ma_backend*)defaultBackends; + backendsToIterateCount = ma_countof(defaultBackends); + } + + MA_ASSERT(pBackendsToIterate != NULL); + + for (iBackend = 0; iBackend < backendsToIterateCount; ++iBackend) { + ma_backend backend = pBackendsToIterate[iBackend]; + + result = MA_NO_BACKEND; + switch (backend) { + #ifdef MA_HAS_WASAPI + case ma_backend_wasapi: + { + result = ma_context_init__wasapi(&config, pContext); + } break; + #endif + #ifdef MA_HAS_DSOUND + case ma_backend_dsound: + { + result = ma_context_init__dsound(&config, pContext); + } break; + #endif + #ifdef MA_HAS_WINMM + case ma_backend_winmm: + { + result = ma_context_init__winmm(&config, pContext); + } break; + #endif + #ifdef MA_HAS_ALSA + case ma_backend_alsa: + { + result = ma_context_init__alsa(&config, pContext); + } break; + #endif + #ifdef MA_HAS_PULSEAUDIO + case ma_backend_pulseaudio: + { + result = ma_context_init__pulse(&config, pContext); + } break; + #endif + #ifdef MA_HAS_JACK + case ma_backend_jack: + { + result = ma_context_init__jack(&config, pContext); + } break; + #endif + #ifdef MA_HAS_COREAUDIO + case ma_backend_coreaudio: + { + result = ma_context_init__coreaudio(&config, pContext); + } break; + #endif + #ifdef MA_HAS_SNDIO + case ma_backend_sndio: + { + result = ma_context_init__sndio(&config, pContext); + } break; + #endif + #ifdef MA_HAS_AUDIO4 + case ma_backend_audio4: + { + result = ma_context_init__audio4(&config, pContext); + } break; + #endif + #ifdef MA_HAS_OSS + case ma_backend_oss: + { + result = ma_context_init__oss(&config, pContext); + } break; + #endif + #ifdef MA_HAS_AAUDIO + case ma_backend_aaudio: + { + result = ma_context_init__aaudio(&config, pContext); + } break; + #endif + #ifdef MA_HAS_OPENSL + case ma_backend_opensl: + { + result = ma_context_init__opensl(&config, pContext); + } break; + #endif + #ifdef MA_HAS_WEBAUDIO + case ma_backend_webaudio: + { + result = ma_context_init__webaudio(&config, pContext); + } break; + #endif + #ifdef MA_HAS_NULL + case ma_backend_null: + { + result = ma_context_init__null(&config, pContext); + } break; + #endif + + default: break; + } + + /* If this iteration was successful, 
return. */ + if (result == MA_SUCCESS) { + result = ma_mutex_init(pContext, &pContext->deviceEnumLock); + if (result != MA_SUCCESS) { + ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_WARNING, "Failed to initialize mutex for device enumeration. ma_context_get_devices() is not thread safe.", result); + } + result = ma_mutex_init(pContext, &pContext->deviceInfoLock); + if (result != MA_SUCCESS) { + ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_WARNING, "Failed to initialize mutex for device info retrieval. ma_context_get_device_info() is not thread safe.", result); + } + +#ifdef MA_DEBUG_OUTPUT + printf("[miniaudio] Endian: %s\n", ma_is_little_endian() ? "LE" : "BE"); + printf("[miniaudio] SSE2: %s\n", ma_has_sse2() ? "YES" : "NO"); + printf("[miniaudio] AVX2: %s\n", ma_has_avx2() ? "YES" : "NO"); + printf("[miniaudio] AVX512F: %s\n", ma_has_avx512f() ? "YES" : "NO"); + printf("[miniaudio] NEON: %s\n", ma_has_neon() ? "YES" : "NO"); +#endif + + pContext->backend = backend; + return result; + } + } + + /* If we get here it means an error occurred. */ + MA_ZERO_OBJECT(pContext); /* Safety. */ + return MA_NO_BACKEND; +} + +MA_API ma_result ma_context_uninit(ma_context* pContext) +{ + if (pContext == NULL) { + return MA_INVALID_ARGS; + } + + pContext->onUninit(pContext); + + ma_mutex_uninit(&pContext->deviceEnumLock); + ma_mutex_uninit(&pContext->deviceInfoLock); + ma__free_from_callbacks(pContext->pDeviceInfos, &pContext->allocationCallbacks); + ma_context_uninit_backend_apis(pContext); + + return MA_SUCCESS; +} + +MA_API size_t ma_context_sizeof() +{ + return sizeof(ma_context); +} + + +MA_API ma_result ma_context_enumerate_devices(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + ma_result result; + + if (pContext == NULL || pContext->onEnumDevices == NULL || callback == NULL) { + return MA_INVALID_ARGS; + } + + ma_mutex_lock(&pContext->deviceEnumLock); + { + result = pContext->onEnumDevices(pContext, callback, pUserData); + } + ma_mutex_unlock(&pContext->deviceEnumLock); + + return result; +} + + +static ma_bool32 ma_context_get_devices__enum_callback(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pInfo, void* pUserData) +{ + /* + We need to insert the device info into our main internal buffer. Where it goes depends on the device type. If it's a capture device + it's just appended to the end. If it's a playback device it's inserted just before the first capture device. + */ + + /* + First make sure we have room. Since the number of devices we add to the list is usually relatively small I've decided to use a + simple fixed size increment for buffer expansion. + */ + const ma_uint32 bufferExpansionCount = 2; + const ma_uint32 totalDeviceInfoCount = pContext->playbackDeviceInfoCount + pContext->captureDeviceInfoCount; + + if (pContext->deviceInfoCapacity >= totalDeviceInfoCount) { + ma_uint32 oldCapacity = pContext->deviceInfoCapacity; + ma_uint32 newCapacity = oldCapacity + bufferExpansionCount; + ma_device_info* pNewInfos = (ma_device_info*)ma__realloc_from_callbacks(pContext->pDeviceInfos, sizeof(*pContext->pDeviceInfos)*newCapacity, sizeof(*pContext->pDeviceInfos)*oldCapacity, &pContext->allocationCallbacks); + if (pNewInfos == NULL) { + return MA_FALSE; /* Out of memory. */ + } + + pContext->pDeviceInfos = pNewInfos; + pContext->deviceInfoCapacity = newCapacity; + } + + if (deviceType == ma_device_type_playback) { + /* Playback. Insert just before the first capture device. 
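+
+       As a concrete illustration (hypothetical device counts, not taken from any particular backend): with 2
+       playback devices and 3 capture devices already collected, the shared buffer is laid out as
+
+           [P0][P1][C0][C1][C2]
+
+       A newly enumerated playback device is written at index 2 after C0, C1 and C2 have each been shifted up
+       one slot, giving [P0][P1][P2][C0][C1][C2], whereas a capture device is simply appended at the end.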
*/ + + /* The first thing to do is move all of the capture devices down a slot. */ + ma_uint32 iFirstCaptureDevice = pContext->playbackDeviceInfoCount; + size_t iCaptureDevice; + for (iCaptureDevice = totalDeviceInfoCount; iCaptureDevice > iFirstCaptureDevice; --iCaptureDevice) { + pContext->pDeviceInfos[iCaptureDevice] = pContext->pDeviceInfos[iCaptureDevice-1]; + } + + /* Now just insert where the first capture device was before moving it down a slot. */ + pContext->pDeviceInfos[iFirstCaptureDevice] = *pInfo; + pContext->playbackDeviceInfoCount += 1; + } else { + /* Capture. Insert at the end. */ + pContext->pDeviceInfos[totalDeviceInfoCount] = *pInfo; + pContext->captureDeviceInfoCount += 1; + } + + (void)pUserData; + return MA_TRUE; +} + +MA_API ma_result ma_context_get_devices(ma_context* pContext, ma_device_info** ppPlaybackDeviceInfos, ma_uint32* pPlaybackDeviceCount, ma_device_info** ppCaptureDeviceInfos, ma_uint32* pCaptureDeviceCount) +{ + ma_result result; + + /* Safety. */ + if (ppPlaybackDeviceInfos != NULL) *ppPlaybackDeviceInfos = NULL; + if (pPlaybackDeviceCount != NULL) *pPlaybackDeviceCount = 0; + if (ppCaptureDeviceInfos != NULL) *ppCaptureDeviceInfos = NULL; + if (pCaptureDeviceCount != NULL) *pCaptureDeviceCount = 0; + + if (pContext == NULL || pContext->onEnumDevices == NULL) { + return MA_INVALID_ARGS; + } + + /* Note that we don't use ma_context_enumerate_devices() here because we want to do locking at a higher level. */ + ma_mutex_lock(&pContext->deviceEnumLock); + { + /* Reset everything first. */ + pContext->playbackDeviceInfoCount = 0; + pContext->captureDeviceInfoCount = 0; + + /* Now enumerate over available devices. */ + result = pContext->onEnumDevices(pContext, ma_context_get_devices__enum_callback, NULL); + if (result == MA_SUCCESS) { + /* Playback devices. */ + if (ppPlaybackDeviceInfos != NULL) { + *ppPlaybackDeviceInfos = pContext->pDeviceInfos; + } + if (pPlaybackDeviceCount != NULL) { + *pPlaybackDeviceCount = pContext->playbackDeviceInfoCount; + } + + /* Capture devices. */ + if (ppCaptureDeviceInfos != NULL) { + *ppCaptureDeviceInfos = pContext->pDeviceInfos + pContext->playbackDeviceInfoCount; /* Capture devices come after playback devices. */ + } + if (pCaptureDeviceCount != NULL) { + *pCaptureDeviceCount = pContext->captureDeviceInfoCount; + } + } + } + ma_mutex_unlock(&pContext->deviceEnumLock); + + return result; +} + +MA_API ma_result ma_context_get_device_info(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) +{ + ma_device_info deviceInfo; + + /* NOTE: Do not clear pDeviceInfo on entry. The reason is the pDeviceID may actually point to pDeviceInfo->id which will break things. */ + if (pContext == NULL || pDeviceInfo == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(&deviceInfo); + + /* Help the backend out by copying over the device ID if we have one. */ + if (pDeviceID != NULL) { + MA_COPY_MEMORY(&deviceInfo.id, pDeviceID, sizeof(*pDeviceID)); + } + + /* The backend may have an optimized device info retrieval function. If so, try that first. */ + if (pContext->onGetDeviceInfo != NULL) { + ma_result result; + ma_mutex_lock(&pContext->deviceInfoLock); + { + result = pContext->onGetDeviceInfo(pContext, deviceType, pDeviceID, shareMode, &deviceInfo); + } + ma_mutex_unlock(&pContext->deviceInfoLock); + + /* Clamp ranges. 
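+
+    A minimal enumeration sketch built from the functions above (illustrative only; the preferred backend order,
+    the variable names and the use of ma_device_info::name are assumptions based on declarations earlier in this
+    header):
+
+        ma_context context;
+        ma_backend backends[] = { ma_backend_wasapi, ma_backend_alsa, ma_backend_pulseaudio };
+        if (ma_context_init(backends, 3, NULL, &context) == MA_SUCCESS) {
+            ma_device_info* pPlaybackInfos; ma_uint32 playbackCount;
+            ma_device_info* pCaptureInfos;  ma_uint32 captureCount;
+            if (ma_context_get_devices(&context, &pPlaybackInfos, &playbackCount, &pCaptureInfos, &captureCount) == MA_SUCCESS) {
+                ma_uint32 iDevice;
+                for (iDevice = 0; iDevice < playbackCount; iDevice += 1) {
+                    printf("playback %u: %s\n", iDevice, pPlaybackInfos[iDevice].name);
+                }
+            }
+            ma_context_uninit(&context);
+        }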
*/ + deviceInfo.minChannels = ma_max(deviceInfo.minChannels, MA_MIN_CHANNELS); + deviceInfo.maxChannels = ma_min(deviceInfo.maxChannels, MA_MAX_CHANNELS); + deviceInfo.minSampleRate = ma_max(deviceInfo.minSampleRate, MA_MIN_SAMPLE_RATE); + deviceInfo.maxSampleRate = ma_min(deviceInfo.maxSampleRate, MA_MAX_SAMPLE_RATE); + + *pDeviceInfo = deviceInfo; + return result; + } + + /* Getting here means onGetDeviceInfo has not been set. */ + return MA_ERROR; +} + +MA_API ma_bool32 ma_context_is_loopback_supported(ma_context* pContext) +{ + if (pContext == NULL) { + return MA_FALSE; + } + + return ma_is_loopback_supported(pContext->backend); +} + + +MA_API ma_device_config ma_device_config_init(ma_device_type deviceType) +{ + ma_device_config config; + MA_ZERO_OBJECT(&config); + config.deviceType = deviceType; + + /* Resampling defaults. We must never use the Speex backend by default because it uses licensed third party code. */ + config.resampling.algorithm = ma_resample_algorithm_linear; + config.resampling.linear.lpfOrder = ma_min(MA_DEFAULT_RESAMPLER_LPF_ORDER, MA_MAX_FILTER_ORDER); + config.resampling.speex.quality = 3; + + return config; +} + +MA_API ma_result ma_device_init(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) +{ + ma_result result; + ma_device_config config; + + if (pContext == NULL) { + return ma_device_init_ex(NULL, 0, NULL, pConfig, pDevice); + } + if (pDevice == NULL) { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "ma_device_init() called with invalid arguments (pDevice == NULL).", MA_INVALID_ARGS); + } + if (pConfig == NULL) { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "ma_device_init() called with invalid arguments (pConfig == NULL).", MA_INVALID_ARGS); + } + + /* We need to make a copy of the config so we can set default values if they were left unset in the input config. */ + config = *pConfig; + + /* Basic config validation. */ + if (config.deviceType != ma_device_type_playback && config.deviceType != ma_device_type_capture && config.deviceType != ma_device_type_duplex && config.deviceType != ma_device_type_loopback) { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "ma_device_init() called with an invalid config. Device type is invalid. Make sure the device type has been set in the config.", MA_INVALID_DEVICE_CONFIG); + } + + if (config.deviceType == ma_device_type_capture || config.deviceType == ma_device_type_duplex) { + if (config.capture.channels > MA_MAX_CHANNELS) { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "ma_device_init() called with an invalid config. Capture channel count cannot exceed 32.", MA_INVALID_DEVICE_CONFIG); + } + if (!ma__is_channel_map_valid(config.capture.channelMap, config.capture.channels)) { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "ma_device_init() called with invalid config. Capture channel map is invalid.", MA_INVALID_DEVICE_CONFIG); + } + } + + if (config.deviceType == ma_device_type_playback || config.deviceType == ma_device_type_duplex || config.deviceType == ma_device_type_loopback) { + if (config.playback.channels > MA_MAX_CHANNELS) { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "ma_device_init() called with an invalid config. 
Playback channel count cannot exceed 32.", MA_INVALID_DEVICE_CONFIG); + } + if (!ma__is_channel_map_valid(config.playback.channelMap, config.playback.channels)) { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "ma_device_init() called with invalid config. Playback channel map is invalid.", MA_INVALID_DEVICE_CONFIG); + } + } + + + MA_ZERO_OBJECT(pDevice); + pDevice->pContext = pContext; + + /* Set the user data and log callback ASAP to ensure it is available for the entire initialization process. */ + pDevice->pUserData = config.pUserData; + pDevice->onData = config.dataCallback; + pDevice->onStop = config.stopCallback; + + if (((ma_uintptr)pDevice % sizeof(pDevice)) != 0) { + if (pContext->logCallback) { + pContext->logCallback(pContext, pDevice, MA_LOG_LEVEL_WARNING, "WARNING: ma_device_init() called for a device that is not properly aligned. Thread safety is not supported."); + } + } + + pDevice->noPreZeroedOutputBuffer = config.noPreZeroedOutputBuffer; + pDevice->noClip = config.noClip; + pDevice->masterVolumeFactor = 1; + + /* + When passing in 0 for the format/channels/rate/chmap it means the device will be using whatever is chosen by the backend. If everything is set + to defaults it means the format conversion pipeline will run on a fast path where data transfer is just passed straight through to the backend. + */ + if (config.sampleRate == 0) { + config.sampleRate = MA_DEFAULT_SAMPLE_RATE; + pDevice->usingDefaultSampleRate = MA_TRUE; + } + + if (config.capture.format == ma_format_unknown) { + config.capture.format = MA_DEFAULT_FORMAT; + pDevice->capture.usingDefaultFormat = MA_TRUE; + } + if (config.capture.channels == 0) { + config.capture.channels = MA_DEFAULT_CHANNELS; + pDevice->capture.usingDefaultChannels = MA_TRUE; + } + if (config.capture.channelMap[0] == MA_CHANNEL_NONE) { + pDevice->capture.usingDefaultChannelMap = MA_TRUE; + } + + if (config.playback.format == ma_format_unknown) { + config.playback.format = MA_DEFAULT_FORMAT; + pDevice->playback.usingDefaultFormat = MA_TRUE; + } + if (config.playback.channels == 0) { + config.playback.channels = MA_DEFAULT_CHANNELS; + pDevice->playback.usingDefaultChannels = MA_TRUE; + } + if (config.playback.channelMap[0] == MA_CHANNEL_NONE) { + pDevice->playback.usingDefaultChannelMap = MA_TRUE; + } + + + /* Default periods. */ + if (config.periods == 0) { + config.periods = MA_DEFAULT_PERIODS; + pDevice->usingDefaultPeriods = MA_TRUE; + } + + /* + Must have at least 3 periods for full-duplex mode. The idea is that the playback and capture positions hang out in the middle period, with the surrounding + periods acting as a buffer in case the capture and playback devices get's slightly out of sync. + */ + if (config.deviceType == ma_device_type_duplex && config.periods < 3) { + config.periods = 3; + } + + /* Default buffer size. */ + if (config.periodSizeInMilliseconds == 0 && config.periodSizeInFrames == 0) { + config.periodSizeInMilliseconds = (config.performanceProfile == ma_performance_profile_low_latency) ? 
MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_LOW_LATENCY : MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_CONSERVATIVE; + pDevice->usingDefaultBufferSize = MA_TRUE; + } + + + + pDevice->type = config.deviceType; + pDevice->sampleRate = config.sampleRate; + pDevice->resampling.algorithm = config.resampling.algorithm; + pDevice->resampling.linear.lpfOrder = config.resampling.linear.lpfOrder; + pDevice->resampling.speex.quality = config.resampling.speex.quality; + + pDevice->capture.shareMode = config.capture.shareMode; + pDevice->capture.format = config.capture.format; + pDevice->capture.channels = config.capture.channels; + ma_channel_map_copy(pDevice->capture.channelMap, config.capture.channelMap, config.capture.channels); + + pDevice->playback.shareMode = config.playback.shareMode; + pDevice->playback.format = config.playback.format; + pDevice->playback.channels = config.playback.channels; + ma_channel_map_copy(pDevice->playback.channelMap, config.playback.channelMap, config.playback.channels); + + + /* The internal format, channel count and sample rate can be modified by the backend. */ + pDevice->capture.internalFormat = pDevice->capture.format; + pDevice->capture.internalChannels = pDevice->capture.channels; + pDevice->capture.internalSampleRate = pDevice->sampleRate; + ma_channel_map_copy(pDevice->capture.internalChannelMap, pDevice->capture.channelMap, pDevice->capture.channels); + + pDevice->playback.internalFormat = pDevice->playback.format; + pDevice->playback.internalChannels = pDevice->playback.channels; + pDevice->playback.internalSampleRate = pDevice->sampleRate; + ma_channel_map_copy(pDevice->playback.internalChannelMap, pDevice->playback.channelMap, pDevice->playback.channels); + + result = ma_mutex_init(pContext, &pDevice->lock); + if (result != MA_SUCCESS) { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "Failed to create mutex.", result); + } + + /* + When the device is started, the worker thread is the one that does the actual startup of the backend device. We + use a semaphore to wait for the background thread to finish the work. The same applies for stopping the device. + + Each of these semaphores is released internally by the worker thread when the work is completed. The start + semaphore is also used to wake up the worker thread. + */ + result = ma_event_init(pContext, &pDevice->wakeupEvent); + if (result != MA_SUCCESS) { + ma_mutex_uninit(&pDevice->lock); + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "Failed to create worker thread wakeup event.", result); + } + + result = ma_event_init(pContext, &pDevice->startEvent); + if (result != MA_SUCCESS) { + ma_event_uninit(&pDevice->wakeupEvent); + ma_mutex_uninit(&pDevice->lock); + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "Failed to create worker thread start event.", result); + } + + result = ma_event_init(pContext, &pDevice->stopEvent); + if (result != MA_SUCCESS) { + ma_event_uninit(&pDevice->startEvent); + ma_event_uninit(&pDevice->wakeupEvent); + ma_mutex_uninit(&pDevice->lock); + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "Failed to create worker thread stop event.", result); + } + + + result = pContext->onDeviceInit(pContext, &config, pDevice); + if (result != MA_SUCCESS) { + return result; + } + + ma_device__post_init_setup(pDevice, pConfig->deviceType); + + + /* If the backend did not fill out a name for the device, try a generic method. 
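+
+    Usage note for the defaulting logic above: leaving format, channels and sampleRate at zero in the device
+    config asks miniaudio to use whatever the backend chooses, which keeps the conversion pipeline on its
+    passthrough fast path. A sketch (my_data_callback is a placeholder for an application-defined callback):
+
+        ma_device_config config = ma_device_config_init(ma_device_type_playback);
+        config.dataCallback = my_data_callback;
+        config.pUserData    = NULL;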
*/ + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + if (pDevice->capture.name[0] == '\0') { + if (ma_context__try_get_device_name_by_id(pContext, ma_device_type_capture, config.capture.pDeviceID, pDevice->capture.name, sizeof(pDevice->capture.name)) != MA_SUCCESS) { + ma_strncpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), (config.capture.pDeviceID == NULL) ? MA_DEFAULT_CAPTURE_DEVICE_NAME : "Capture Device", (size_t)-1); + } + } + } + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex || pDevice->type == ma_device_type_loopback) { + if (pDevice->playback.name[0] == '\0') { + if (ma_context__try_get_device_name_by_id(pContext, ma_device_type_playback, config.playback.pDeviceID, pDevice->playback.name, sizeof(pDevice->playback.name)) != MA_SUCCESS) { + ma_strncpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), (config.playback.pDeviceID == NULL) ? MA_DEFAULT_PLAYBACK_DEVICE_NAME : "Playback Device", (size_t)-1); + } + } + } + + + /* Some backends don't require the worker thread. */ + if (!ma_context_is_backend_asynchronous(pContext)) { + /* The worker thread. */ + result = ma_thread_create(pContext, &pDevice->thread, ma_worker_thread, pDevice); + if (result != MA_SUCCESS) { + ma_device_uninit(pDevice); + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "Failed to create worker thread.", result); + } + + /* Wait for the worker thread to put the device into it's stopped state for real. */ + ma_event_wait(&pDevice->stopEvent); + } else { + ma_device__set_state(pDevice, MA_STATE_STOPPED); + } + + +#ifdef MA_DEBUG_OUTPUT + printf("[%s]\n", ma_get_backend_name(pDevice->pContext->backend)); + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + printf(" %s (%s)\n", pDevice->capture.name, "Capture"); + printf(" Format: %s -> %s\n", ma_get_format_name(pDevice->capture.format), ma_get_format_name(pDevice->capture.internalFormat)); + printf(" Channels: %d -> %d\n", pDevice->capture.channels, pDevice->capture.internalChannels); + printf(" Sample Rate: %d -> %d\n", pDevice->sampleRate, pDevice->capture.internalSampleRate); + printf(" Buffer Size: %d*%d (%d)\n", pDevice->capture.internalPeriodSizeInFrames, pDevice->capture.internalPeriods, (pDevice->capture.internalPeriodSizeInFrames * pDevice->capture.internalPeriods)); + printf(" Conversion:\n"); + printf(" Pre Format Conversion: %s\n", pDevice->capture.converter.hasPreFormatConversion ? "YES" : "NO"); + printf(" Post Format Conversion: %s\n", pDevice->capture.converter.hasPostFormatConversion ? "YES" : "NO"); + printf(" Channel Routing: %s\n", pDevice->capture.converter.hasChannelConverter ? "YES" : "NO"); + printf(" Resampling: %s\n", pDevice->capture.converter.hasResampler ? "YES" : "NO"); + printf(" Passthrough: %s\n", pDevice->capture.converter.isPassthrough ? 
"YES" : "NO"); + } + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + printf(" %s (%s)\n", pDevice->playback.name, "Playback"); + printf(" Format: %s -> %s\n", ma_get_format_name(pDevice->playback.format), ma_get_format_name(pDevice->playback.internalFormat)); + printf(" Channels: %d -> %d\n", pDevice->playback.channels, pDevice->playback.internalChannels); + printf(" Sample Rate: %d -> %d\n", pDevice->sampleRate, pDevice->playback.internalSampleRate); + printf(" Buffer Size: %d*%d (%d)\n", pDevice->playback.internalPeriodSizeInFrames, pDevice->playback.internalPeriods, (pDevice->playback.internalPeriodSizeInFrames * pDevice->playback.internalPeriods)); + printf(" Conversion:\n"); + printf(" Pre Format Conversion: %s\n", pDevice->playback.converter.hasPreFormatConversion ? "YES" : "NO"); + printf(" Post Format Conversion: %s\n", pDevice->playback.converter.hasPostFormatConversion ? "YES" : "NO"); + printf(" Channel Routing: %s\n", pDevice->playback.converter.hasChannelConverter ? "YES" : "NO"); + printf(" Resampling: %s\n", pDevice->playback.converter.hasResampler ? "YES" : "NO"); + printf(" Passthrough: %s\n", pDevice->playback.converter.isPassthrough ? "YES" : "NO"); + } +#endif + + + MA_ASSERT(ma_device__get_state(pDevice) == MA_STATE_STOPPED); + return MA_SUCCESS; +} + +MA_API ma_result ma_device_init_ex(const ma_backend backends[], ma_uint32 backendCount, const ma_context_config* pContextConfig, const ma_device_config* pConfig, ma_device* pDevice) +{ + ma_result result; + ma_context* pContext; + ma_backend defaultBackends[ma_backend_null+1]; + ma_uint32 iBackend; + ma_backend* pBackendsToIterate; + ma_uint32 backendsToIterateCount; + ma_allocation_callbacks allocationCallbacks; + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pContextConfig != NULL) { + result = ma_allocation_callbacks_init_copy(&allocationCallbacks, &pContextConfig->allocationCallbacks); + if (result != MA_SUCCESS) { + return result; + } + } else { + allocationCallbacks = ma_allocation_callbacks_init_default(); + } + + + pContext = (ma_context*)ma__malloc_from_callbacks(sizeof(*pContext), &allocationCallbacks); + if (pContext == NULL) { + return MA_OUT_OF_MEMORY; + } + + for (iBackend = 0; iBackend <= ma_backend_null; ++iBackend) { + defaultBackends[iBackend] = (ma_backend)iBackend; + } + + pBackendsToIterate = (ma_backend*)backends; + backendsToIterateCount = backendCount; + if (pBackendsToIterate == NULL) { + pBackendsToIterate = (ma_backend*)defaultBackends; + backendsToIterateCount = ma_countof(defaultBackends); + } + + result = MA_NO_BACKEND; + + for (iBackend = 0; iBackend < backendsToIterateCount; ++iBackend) { + result = ma_context_init(&pBackendsToIterate[iBackend], 1, pContextConfig, pContext); + if (result == MA_SUCCESS) { + result = ma_device_init(pContext, pConfig, pDevice); + if (result == MA_SUCCESS) { + break; /* Success. */ + } else { + ma_context_uninit(pContext); /* Failure. */ + } + } + } + + if (result != MA_SUCCESS) { + ma__free_from_callbacks(pContext, &allocationCallbacks); + return result; + } + + pDevice->isOwnerOfContext = MA_TRUE; + return result; +} + +MA_API void ma_device_uninit(ma_device* pDevice) +{ + if (!ma_device__is_initialized(pDevice)) { + return; + } + + /* Make sure the device is stopped first. The backends will probably handle this naturally, but I like to do it explicitly for my own sanity. 
*/ + if (ma_device_is_started(pDevice)) { + ma_device_stop(pDevice); + } + + /* Putting the device into an uninitialized state will make the worker thread return. */ + ma_device__set_state(pDevice, MA_STATE_UNINITIALIZED); + + /* Wake up the worker thread and wait for it to properly terminate. */ + if (!ma_context_is_backend_asynchronous(pDevice->pContext)) { + ma_event_signal(&pDevice->wakeupEvent); + ma_thread_wait(&pDevice->thread); + } + + pDevice->pContext->onDeviceUninit(pDevice); + + ma_event_uninit(&pDevice->stopEvent); + ma_event_uninit(&pDevice->startEvent); + ma_event_uninit(&pDevice->wakeupEvent); + ma_mutex_uninit(&pDevice->lock); + + if (pDevice->isOwnerOfContext) { + ma_allocation_callbacks allocationCallbacks = pDevice->pContext->allocationCallbacks; + + ma_context_uninit(pDevice->pContext); + ma__free_from_callbacks(pDevice->pContext, &allocationCallbacks); + } + + MA_ZERO_OBJECT(pDevice); +} + +MA_API ma_result ma_device_start(ma_device* pDevice) +{ + ma_result result; + + if (pDevice == NULL) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "ma_device_start() called with invalid arguments (pDevice == NULL).", MA_INVALID_ARGS); + } + + if (ma_device__get_state(pDevice) == MA_STATE_UNINITIALIZED) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "ma_device_start() called for an uninitialized device.", MA_DEVICE_NOT_INITIALIZED); + } + + if (ma_device__get_state(pDevice) == MA_STATE_STARTED) { + return ma_post_error(pDevice, MA_LOG_LEVEL_WARNING, "ma_device_start() called when the device is already started.", MA_INVALID_OPERATION); /* Already started. Returning an error to let the application know because it probably means they're doing something wrong. */ + } + + result = MA_ERROR; + ma_mutex_lock(&pDevice->lock); + { + /* Starting and stopping are wrapped in a mutex which means we can assert that the device is in a stopped or paused state. */ + MA_ASSERT(ma_device__get_state(pDevice) == MA_STATE_STOPPED); + + ma_device__set_state(pDevice, MA_STATE_STARTING); + + /* Asynchronous backends need to be handled differently. */ + if (ma_context_is_backend_asynchronous(pDevice->pContext)) { + result = pDevice->pContext->onDeviceStart(pDevice); + if (result == MA_SUCCESS) { + ma_device__set_state(pDevice, MA_STATE_STARTED); + } + } else { + /* + Synchronous backends are started by signaling an event that's being waited on in the worker thread. We first wake up the + thread and then wait for the start event. + */ + ma_event_signal(&pDevice->wakeupEvent); + + /* + Wait for the worker thread to finish starting the device. Note that the worker thread will be the one who puts the device + into the started state. Don't call ma_device__set_state() here. + */ + ma_event_wait(&pDevice->startEvent); + result = pDevice->workResult; + } + } + ma_mutex_unlock(&pDevice->lock); + + return result; +} + +MA_API ma_result ma_device_stop(ma_device* pDevice) +{ + ma_result result; + + if (pDevice == NULL) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "ma_device_stop() called with invalid arguments (pDevice == NULL).", MA_INVALID_ARGS); + } + + if (ma_device__get_state(pDevice) == MA_STATE_UNINITIALIZED) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "ma_device_stop() called for an uninitialized device.", MA_DEVICE_NOT_INITIALIZED); + } + + if (ma_device__get_state(pDevice) == MA_STATE_STOPPED) { + return ma_post_error(pDevice, MA_LOG_LEVEL_WARNING, "ma_device_stop() called when the device is already stopped.", MA_INVALID_OPERATION); /* Already stopped. 
Returning an error to let the application know because it probably means they're doing something wrong. */ + } + + result = MA_ERROR; + ma_mutex_lock(&pDevice->lock); + { + /* Starting and stopping are wrapped in a mutex which means we can assert that the device is in a started or paused state. */ + MA_ASSERT(ma_device__get_state(pDevice) == MA_STATE_STARTED); + + ma_device__set_state(pDevice, MA_STATE_STOPPING); + + /* There's no need to wake up the thread like we do when starting. */ + + if (pDevice->pContext->onDeviceStop) { + result = pDevice->pContext->onDeviceStop(pDevice); + } else { + result = MA_SUCCESS; + } + + /* Asynchronous backends need to be handled differently. */ + if (ma_context_is_backend_asynchronous(pDevice->pContext)) { + ma_device__set_state(pDevice, MA_STATE_STOPPED); + } else { + /* Synchronous backends. */ + + /* + We need to wait for the worker thread to become available for work before returning. Note that the worker thread will be + the one who puts the device into the stopped state. Don't call ma_device__set_state() here. + */ + ma_event_wait(&pDevice->stopEvent); + result = MA_SUCCESS; + } + } + ma_mutex_unlock(&pDevice->lock); + + return result; +} + +MA_API ma_bool32 ma_device_is_started(ma_device* pDevice) +{ + if (pDevice == NULL) { + return MA_FALSE; + } + + return ma_device__get_state(pDevice) == MA_STATE_STARTED; +} + +MA_API ma_result ma_device_set_master_volume(ma_device* pDevice, float volume) +{ + if (pDevice == NULL) { + return MA_INVALID_ARGS; + } + + if (volume < 0.0f || volume > 1.0f) { + return MA_INVALID_ARGS; + } + + pDevice->masterVolumeFactor = volume; + + return MA_SUCCESS; +} + +MA_API ma_result ma_device_get_master_volume(ma_device* pDevice, float* pVolume) +{ + if (pVolume == NULL) { + return MA_INVALID_ARGS; + } + + if (pDevice == NULL) { + *pVolume = 0; + return MA_INVALID_ARGS; + } + + *pVolume = pDevice->masterVolumeFactor; + + return MA_SUCCESS; +} + +MA_API ma_result ma_device_set_master_gain_db(ma_device* pDevice, float gainDB) +{ + if (gainDB > 0) { + return MA_INVALID_ARGS; + } + + return ma_device_set_master_volume(pDevice, ma_gain_db_to_factor(gainDB)); +} + +MA_API ma_result ma_device_get_master_gain_db(ma_device* pDevice, float* pGainDB) +{ + float factor; + ma_result result; + + if (pGainDB == NULL) { + return MA_INVALID_ARGS; + } + + result = ma_device_get_master_volume(pDevice, &factor); + if (result != MA_SUCCESS) { + *pGainDB = 0; + return result; + } + + *pGainDB = ma_factor_to_gain_db(factor); + + return MA_SUCCESS; +} +#endif /* MA_NO_DEVICE_IO */ + + +/************************************************************************************************************************************************************** + +Biquad Filter + +**************************************************************************************************************************************************************/ +#ifndef MA_BIQUAD_FIXED_POINT_SHIFT +#define MA_BIQUAD_FIXED_POINT_SHIFT 14 +#endif + +static ma_int32 ma_biquad_float_to_fp(double x) +{ + return (ma_int32)(x * (1 << MA_BIQUAD_FIXED_POINT_SHIFT)); +} + +MA_API ma_biquad_config ma_biquad_config_init(ma_format format, ma_uint32 channels, double b0, double b1, double b2, double a0, double a1, double a2) +{ + ma_biquad_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.b0 = b0; + config.b1 = b1; + config.b2 = b2; + config.a0 = a0; + config.a1 = a1; + config.a2 = a2; + + return config; +} + +MA_API ma_result ma_biquad_init(const 
ma_biquad_config* pConfig, ma_biquad* pBQ) +{ + if (pBQ == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pBQ); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + return ma_biquad_reinit(pConfig, pBQ); +} + +MA_API ma_result ma_biquad_reinit(const ma_biquad_config* pConfig, ma_biquad* pBQ) +{ + if (pBQ == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->a0 == 0) { + return MA_INVALID_ARGS; /* Division by zero. */ + } + + /* Only supporting f32 and s16. */ + if (pConfig->format != ma_format_f32 && pConfig->format != ma_format_s16) { + return MA_INVALID_ARGS; + } + + /* The format cannot be changed after initialization. */ + if (pBQ->format != ma_format_unknown && pBQ->format != pConfig->format) { + return MA_INVALID_OPERATION; + } + + /* The channel count cannot be changed after initialization. */ + if (pBQ->channels != 0 && pBQ->channels != pConfig->channels) { + return MA_INVALID_OPERATION; + } + + + pBQ->format = pConfig->format; + pBQ->channels = pConfig->channels; + + /* Normalize. */ + if (pConfig->format == ma_format_f32) { + pBQ->b0.f32 = (float)(pConfig->b0 / pConfig->a0); + pBQ->b1.f32 = (float)(pConfig->b1 / pConfig->a0); + pBQ->b2.f32 = (float)(pConfig->b2 / pConfig->a0); + pBQ->a1.f32 = (float)(pConfig->a1 / pConfig->a0); + pBQ->a2.f32 = (float)(pConfig->a2 / pConfig->a0); + } else { + pBQ->b0.s32 = ma_biquad_float_to_fp(pConfig->b0 / pConfig->a0); + pBQ->b1.s32 = ma_biquad_float_to_fp(pConfig->b1 / pConfig->a0); + pBQ->b2.s32 = ma_biquad_float_to_fp(pConfig->b2 / pConfig->a0); + pBQ->a1.s32 = ma_biquad_float_to_fp(pConfig->a1 / pConfig->a0); + pBQ->a2.s32 = ma_biquad_float_to_fp(pConfig->a2 / pConfig->a0); + } + + return MA_SUCCESS; +} + +static MA_INLINE void ma_biquad_process_pcm_frame_f32__direct_form_2_transposed(ma_biquad* pBQ, float* pY, const float* pX) +{ + ma_uint32 c; + const float b0 = pBQ->b0.f32; + const float b1 = pBQ->b1.f32; + const float b2 = pBQ->b2.f32; + const float a1 = pBQ->a1.f32; + const float a2 = pBQ->a2.f32; + + for (c = 0; c < pBQ->channels; c += 1) { + float r1 = pBQ->r1[c].f32; + float r2 = pBQ->r2[c].f32; + float x = pX[c]; + float y; + + y = b0*x + r1; + r1 = b1*x - a1*y + r2; + r2 = b2*x - a2*y; + + pY[c] = y; + pBQ->r1[c].f32 = r1; + pBQ->r2[c].f32 = r2; + } +} + +static MA_INLINE void ma_biquad_process_pcm_frame_f32(ma_biquad* pBQ, float* pY, const float* pX) +{ + ma_biquad_process_pcm_frame_f32__direct_form_2_transposed(pBQ, pY, pX); +} + +static MA_INLINE void ma_biquad_process_pcm_frame_s16__direct_form_2_transposed(ma_biquad* pBQ, ma_int16* pY, const ma_int16* pX) +{ + ma_uint32 c; + const ma_int32 b0 = pBQ->b0.s32; + const ma_int32 b1 = pBQ->b1.s32; + const ma_int32 b2 = pBQ->b2.s32; + const ma_int32 a1 = pBQ->a1.s32; + const ma_int32 a2 = pBQ->a2.s32; + + for (c = 0; c < pBQ->channels; c += 1) { + ma_int32 r1 = pBQ->r1[c].s32; + ma_int32 r2 = pBQ->r2[c].s32; + ma_int32 x = pX[c]; + ma_int32 y; + + y = (b0*x + r1) >> MA_BIQUAD_FIXED_POINT_SHIFT; + r1 = (b1*x - a1*y + r2); + r2 = (b2*x - a2*y); + + pY[c] = (ma_int16)ma_clamp(y, -32768, 32767); + pBQ->r1[c].s32 = r1; + pBQ->r2[c].s32 = r2; + } +} + +static MA_INLINE void ma_biquad_process_pcm_frame_s16(ma_biquad* pBQ, ma_int16* pY, const ma_int16* pX) +{ + ma_biquad_process_pcm_frame_s16__direct_form_2_transposed(pBQ, pY, pX); +} + +MA_API ma_result ma_biquad_process_pcm_frames(ma_biquad* pBQ, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + ma_uint32 n; + + if (pBQ == NULL || pFramesOut == NULL || pFramesIn == NULL) { + 
return MA_INVALID_ARGS; + } + + /* Note that the logic below needs to support in-place filtering. That is, it must support the case where pFramesOut and pFramesIn are the same. */ + + if (pBQ->format == ma_format_f32) { + /* */ float* pY = ( float*)pFramesOut; + const float* pX = (const float*)pFramesIn; + + for (n = 0; n < frameCount; n += 1) { + ma_biquad_process_pcm_frame_f32__direct_form_2_transposed(pBQ, pY, pX); + pY += pBQ->channels; + pX += pBQ->channels; + } + } else if (pBQ->format == ma_format_s16) { + /* */ ma_int16* pY = ( ma_int16*)pFramesOut; + const ma_int16* pX = (const ma_int16*)pFramesIn; + + for (n = 0; n < frameCount; n += 1) { + ma_biquad_process_pcm_frame_s16__direct_form_2_transposed(pBQ, pY, pX); + pY += pBQ->channels; + pX += pBQ->channels; + } + } else { + MA_ASSERT(MA_FALSE); + return MA_INVALID_ARGS; /* Format not supported. Should never hit this because it's checked in ma_biquad_init() and ma_biquad_reinit(). */ + } + + return MA_SUCCESS; +} + +MA_API ma_uint32 ma_biquad_get_latency(ma_biquad* pBQ) +{ + if (pBQ == NULL) { + return 0; + } + + return 2; +} + + +/************************************************************************************************************************************************************** + +Low-Pass Filter + +**************************************************************************************************************************************************************/ +MA_API ma_lpf1_config ma_lpf1_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency) +{ + ma_lpf1_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.cutoffFrequency = cutoffFrequency; + config.q = 0.5; + + return config; +} + +MA_API ma_lpf2_config ma_lpf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, double q) +{ + ma_lpf2_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.cutoffFrequency = cutoffFrequency; + config.q = q; + + /* Q cannot be 0 or else it'll result in a division by 0. In this case just default to 0.707107. */ + if (config.q == 0) { + config.q = 0.707107; + } + + return config; +} + + +MA_API ma_result ma_lpf1_init(const ma_lpf1_config* pConfig, ma_lpf1* pLPF) +{ + if (pLPF == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pLPF); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + return ma_lpf1_reinit(pConfig, pLPF); +} + +MA_API ma_result ma_lpf1_reinit(const ma_lpf1_config* pConfig, ma_lpf1* pLPF) +{ + double a; + + if (pLPF == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + /* Only supporting f32 and s16. */ + if (pConfig->format != ma_format_f32 && pConfig->format != ma_format_s16) { + return MA_INVALID_ARGS; + } + + /* The format cannot be changed after initialization. */ + if (pLPF->format != ma_format_unknown && pLPF->format != pConfig->format) { + return MA_INVALID_OPERATION; + } + + /* The channel count cannot be changed after initialization. 
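+
+    A usage sketch for the biquad defined above, assuming b0..a2 come from your own filter design (a0 must be
+    non-zero because it is used for normalization; in-place processing is supported):
+
+        ma_biquad bq;
+        ma_biquad_config bqConfig = ma_biquad_config_init(ma_format_f32, 2, b0, b1, b2, a0, a1, a2);
+        if (ma_biquad_init(&bqConfig, &bq) == MA_SUCCESS) {
+            ma_biquad_process_pcm_frames(&bq, pFrames, pFrames, frameCount);
+        }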
*/ + if (pLPF->channels != 0 && pLPF->channels != pConfig->channels) { + return MA_INVALID_OPERATION; + } + + pLPF->format = pConfig->format; + pLPF->channels = pConfig->channels; + + a = ma_exp(-2 * MA_PI_D * pConfig->cutoffFrequency / pConfig->sampleRate); + if (pConfig->format == ma_format_f32) { + pLPF->a.f32 = (float)a; + } else { + pLPF->a.s32 = ma_biquad_float_to_fp(a); + } + + return MA_SUCCESS; +} + +static MA_INLINE void ma_lpf1_process_pcm_frame_f32(ma_lpf1* pLPF, float* pY, const float* pX) +{ + ma_uint32 c; + const float a = pLPF->a.f32; + const float b = 1 - a; + + for (c = 0; c < pLPF->channels; c += 1) { + float r1 = pLPF->r1[c].f32; + float x = pX[c]; + float y; + + y = b*x + a*r1; + + pY[c] = y; + pLPF->r1[c].f32 = y; + } +} + +static MA_INLINE void ma_lpf1_process_pcm_frame_s16(ma_lpf1* pLPF, ma_int16* pY, const ma_int16* pX) +{ + ma_uint32 c; + const ma_int32 a = pLPF->a.s32; + const ma_int32 b = ((1 << MA_BIQUAD_FIXED_POINT_SHIFT) - a); + + for (c = 0; c < pLPF->channels; c += 1) { + ma_int32 r1 = pLPF->r1[c].s32; + ma_int32 x = pX[c]; + ma_int32 y; + + y = (b*x + a*r1) >> MA_BIQUAD_FIXED_POINT_SHIFT; + + pY[c] = (ma_int16)y; + pLPF->r1[c].s32 = (ma_int32)y; + } +} + +MA_API ma_result ma_lpf1_process_pcm_frames(ma_lpf1* pLPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + ma_uint32 n; + + if (pLPF == NULL || pFramesOut == NULL || pFramesIn == NULL) { + return MA_INVALID_ARGS; + } + + /* Note that the logic below needs to support in-place filtering. That is, it must support the case where pFramesOut and pFramesIn are the same. */ + + if (pLPF->format == ma_format_f32) { + /* */ float* pY = ( float*)pFramesOut; + const float* pX = (const float*)pFramesIn; + + for (n = 0; n < frameCount; n += 1) { + ma_lpf1_process_pcm_frame_f32(pLPF, pY, pX); + pY += pLPF->channels; + pX += pLPF->channels; + } + } else if (pLPF->format == ma_format_s16) { + /* */ ma_int16* pY = ( ma_int16*)pFramesOut; + const ma_int16* pX = (const ma_int16*)pFramesIn; + + for (n = 0; n < frameCount; n += 1) { + ma_lpf1_process_pcm_frame_s16(pLPF, pY, pX); + pY += pLPF->channels; + pX += pLPF->channels; + } + } else { + MA_ASSERT(MA_FALSE); + return MA_INVALID_ARGS; /* Format not supported. Should never hit this because it's checked in ma_biquad_init() and ma_biquad_reinit(). 
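+
+    As a rough worked example of the coefficient computed in ma_lpf1_reinit(): with cutoffFrequency = 1000 and
+    sampleRate = 48000, a = exp(-2*pi*1000/48000) is approximately 0.877, so each channel is smoothed as
+    y = 0.123*x + 0.877*r1, i.e. the previous output is weighted at roughly 88%.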
*/ + } + + return MA_SUCCESS; +} + +MA_API ma_uint32 ma_lpf1_get_latency(ma_lpf1* pLPF) +{ + if (pLPF == NULL) { + return 0; + } + + return 1; +} + + +static MA_INLINE ma_biquad_config ma_lpf2__get_biquad_config(const ma_lpf2_config* pConfig) +{ + ma_biquad_config bqConfig; + double q; + double w; + double s; + double c; + double a; + + MA_ASSERT(pConfig != NULL); + + q = pConfig->q; + w = 2 * MA_PI_D * pConfig->cutoffFrequency / pConfig->sampleRate; + s = ma_sin(w); + c = ma_cos(w); + a = s / (2*q); + + bqConfig.b0 = (1 - c) / 2; + bqConfig.b1 = 1 - c; + bqConfig.b2 = (1 - c) / 2; + bqConfig.a0 = 1 + a; + bqConfig.a1 = -2 * c; + bqConfig.a2 = 1 - a; + + bqConfig.format = pConfig->format; + bqConfig.channels = pConfig->channels; + + return bqConfig; +} + +MA_API ma_result ma_lpf2_init(const ma_lpf2_config* pConfig, ma_lpf2* pLPF) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pLPF == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pLPF); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_lpf2__get_biquad_config(pConfig); + result = ma_biquad_init(&bqConfig, &pLPF->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_lpf2_reinit(const ma_lpf2_config* pConfig, ma_lpf2* pLPF) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pLPF == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_lpf2__get_biquad_config(pConfig); + result = ma_biquad_reinit(&bqConfig, &pLPF->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +static MA_INLINE void ma_lpf2_process_pcm_frame_s16(ma_lpf2* pLPF, ma_int16* pFrameOut, const ma_int16* pFrameIn) +{ + ma_biquad_process_pcm_frame_s16(&pLPF->bq, pFrameOut, pFrameIn); +} + +static MA_INLINE void ma_lpf2_process_pcm_frame_f32(ma_lpf2* pLPF, float* pFrameOut, const float* pFrameIn) +{ + ma_biquad_process_pcm_frame_f32(&pLPF->bq, pFrameOut, pFrameIn); +} + +MA_API ma_result ma_lpf2_process_pcm_frames(ma_lpf2* pLPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + if (pLPF == NULL) { + return MA_INVALID_ARGS; + } + + return ma_biquad_process_pcm_frames(&pLPF->bq, pFramesOut, pFramesIn, frameCount); +} + +MA_API ma_uint32 ma_lpf2_get_latency(ma_lpf2* pLPF) +{ + if (pLPF == NULL) { + return 0; + } + + return ma_biquad_get_latency(&pLPF->bq); +} + + +MA_API ma_lpf_config ma_lpf_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order) +{ + ma_lpf_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.cutoffFrequency = cutoffFrequency; + config.order = ma_min(order, MA_MAX_FILTER_ORDER); + + return config; +} + +static ma_result ma_lpf_reinit__internal(const ma_lpf_config* pConfig, ma_lpf* pLPF, ma_bool32 isNew) +{ + ma_result result; + ma_uint32 lpf1Count; + ma_uint32 lpf2Count; + ma_uint32 ilpf1; + ma_uint32 ilpf2; + + if (pLPF == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + /* Only supporting f32 and s16. */ + if (pConfig->format != ma_format_f32 && pConfig->format != ma_format_s16) { + return MA_INVALID_ARGS; + } + + /* The format cannot be changed after initialization. */ + if (pLPF->format != ma_format_unknown && pLPF->format != pConfig->format) { + return MA_INVALID_OPERATION; + } + + /* The channel count cannot be changed after initialization. 
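+
+    A usage sketch for the cascaded low-pass filter configured here (illustrative only; pFramesOut, pFramesIn and
+    frameCount are placeholders), e.g. a 4th order low-pass at 2 kHz on stereo f32 data:
+
+        ma_lpf lpf;
+        ma_lpf_config lpfConfig = ma_lpf_config_init(ma_format_f32, 2, 48000, 2000, 4);
+        if (ma_lpf_init(&lpfConfig, &lpf) == MA_SUCCESS) {
+            ma_lpf_process_pcm_frames(&lpf, pFramesOut, pFramesIn, frameCount);
+        }
+
+    ma_lpf_get_latency() reports the resulting latency in frames (lpf2Count*2 + lpf1Count).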
*/ + if (pLPF->channels != 0 && pLPF->channels != pConfig->channels) { + return MA_INVALID_OPERATION; + } + + if (pConfig->order > MA_MAX_FILTER_ORDER) { + return MA_INVALID_ARGS; + } + + lpf1Count = pConfig->order % 2; + lpf2Count = pConfig->order / 2; + + MA_ASSERT(lpf1Count <= ma_countof(pLPF->lpf1)); + MA_ASSERT(lpf2Count <= ma_countof(pLPF->lpf2)); + + /* The filter order can't change between reinits. */ + if (!isNew) { + if (pLPF->lpf1Count != lpf1Count || pLPF->lpf2Count != lpf2Count) { + return MA_INVALID_OPERATION; + } + } + + for (ilpf1 = 0; ilpf1 < lpf1Count; ilpf1 += 1) { + ma_lpf1_config lpf1Config = ma_lpf1_config_init(pConfig->format, pConfig->channels, pConfig->sampleRate, pConfig->cutoffFrequency); + + if (isNew) { + result = ma_lpf1_init(&lpf1Config, &pLPF->lpf1[ilpf1]); + } else { + result = ma_lpf1_reinit(&lpf1Config, &pLPF->lpf1[ilpf1]); + } + + if (result != MA_SUCCESS) { + return result; + } + } + + for (ilpf2 = 0; ilpf2 < lpf2Count; ilpf2 += 1) { + ma_lpf2_config lpf2Config; + double q; + double a; + + /* Tempting to use 0.707107, but won't result in a Butterworth filter if the order is > 2. */ + if (lpf1Count == 1) { + a = (1 + ilpf2*1) * (MA_PI_D/(pConfig->order*1)); /* Odd order. */ + } else { + a = (1 + ilpf2*2) * (MA_PI_D/(pConfig->order*2)); /* Even order. */ + } + q = 1 / (2*ma_cos(a)); + + lpf2Config = ma_lpf2_config_init(pConfig->format, pConfig->channels, pConfig->sampleRate, pConfig->cutoffFrequency, q); + + if (isNew) { + result = ma_lpf2_init(&lpf2Config, &pLPF->lpf2[ilpf2]); + } else { + result = ma_lpf2_reinit(&lpf2Config, &pLPF->lpf2[ilpf2]); + } + + if (result != MA_SUCCESS) { + return result; + } + } + + pLPF->lpf1Count = lpf1Count; + pLPF->lpf2Count = lpf2Count; + pLPF->format = pConfig->format; + pLPF->channels = pConfig->channels; + + return MA_SUCCESS; +} + +MA_API ma_result ma_lpf_init(const ma_lpf_config* pConfig, ma_lpf* pLPF) +{ + if (pLPF == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pLPF); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + return ma_lpf_reinit__internal(pConfig, pLPF, /*isNew*/MA_TRUE); +} + +MA_API ma_result ma_lpf_reinit(const ma_lpf_config* pConfig, ma_lpf* pLPF) +{ + return ma_lpf_reinit__internal(pConfig, pLPF, /*isNew*/MA_FALSE); +} + +static MA_INLINE void ma_lpf_process_pcm_frame_f32(ma_lpf* pLPF, float* pY, const void* pX) +{ + ma_uint32 ilpf1; + ma_uint32 ilpf2; + + MA_ASSERT(pLPF->format == ma_format_f32); + + MA_COPY_MEMORY(pY, pX, ma_get_bytes_per_frame(pLPF->format, pLPF->channels)); + + for (ilpf1 = 0; ilpf1 < pLPF->lpf1Count; ilpf1 += 1) { + ma_lpf1_process_pcm_frame_f32(&pLPF->lpf1[ilpf1], pY, pY); + } + + for (ilpf2 = 0; ilpf2 < pLPF->lpf2Count; ilpf2 += 1) { + ma_lpf2_process_pcm_frame_f32(&pLPF->lpf2[ilpf2], pY, pY); + } +} + +static MA_INLINE void ma_lpf_process_pcm_frame_s16(ma_lpf* pLPF, ma_int16* pY, const ma_int16* pX) +{ + ma_uint32 ilpf1; + ma_uint32 ilpf2; + + MA_ASSERT(pLPF->format == ma_format_s16); + + MA_COPY_MEMORY(pY, pX, ma_get_bytes_per_frame(pLPF->format, pLPF->channels)); + + for (ilpf1 = 0; ilpf1 < pLPF->lpf1Count; ilpf1 += 1) { + ma_lpf1_process_pcm_frame_s16(&pLPF->lpf1[ilpf1], pY, pY); + } + + for (ilpf2 = 0; ilpf2 < pLPF->lpf2Count; ilpf2 += 1) { + ma_lpf2_process_pcm_frame_s16(&pLPF->lpf2[ilpf2], pY, pY); + } +} + +MA_API ma_result ma_lpf_process_pcm_frames(ma_lpf* pLPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + ma_result result; + ma_uint32 ilpf1; + ma_uint32 ilpf2; + + if (pLPF == NULL) { + return MA_INVALID_ARGS; + } + + /* 
Faster path for in-place. */ + if (pFramesOut == pFramesIn) { + for (ilpf1 = 0; ilpf1 < pLPF->lpf1Count; ilpf1 += 1) { + result = ma_lpf1_process_pcm_frames(&pLPF->lpf1[ilpf1], pFramesOut, pFramesOut, frameCount); + if (result != MA_SUCCESS) { + return result; + } + } + + for (ilpf2 = 0; ilpf2 < pLPF->lpf2Count; ilpf2 += 1) { + result = ma_lpf2_process_pcm_frames(&pLPF->lpf2[ilpf2], pFramesOut, pFramesOut, frameCount); + if (result != MA_SUCCESS) { + return result; + } + } + } + + /* Slightly slower path for copying. */ + if (pFramesOut != pFramesIn) { + ma_uint32 iFrame; + + /* */ if (pLPF->format == ma_format_f32) { + /* */ float* pFramesOutF32 = ( float*)pFramesOut; + const float* pFramesInF32 = (const float*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_lpf_process_pcm_frame_f32(pLPF, pFramesOutF32, pFramesInF32); + pFramesOutF32 += pLPF->channels; + pFramesInF32 += pLPF->channels; + } + } else if (pLPF->format == ma_format_s16) { + /* */ ma_int16* pFramesOutS16 = ( ma_int16*)pFramesOut; + const ma_int16* pFramesInS16 = (const ma_int16*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_lpf_process_pcm_frame_s16(pLPF, pFramesOutS16, pFramesInS16); + pFramesOutS16 += pLPF->channels; + pFramesInS16 += pLPF->channels; + } + } else { + MA_ASSERT(MA_FALSE); + return MA_INVALID_OPERATION; /* Should never hit this. */ + } + } + + return MA_SUCCESS; +} + +MA_API ma_uint32 ma_lpf_get_latency(ma_lpf* pLPF) +{ + if (pLPF == NULL) { + return 0; + } + + return pLPF->lpf2Count*2 + pLPF->lpf1Count; +} + + +/************************************************************************************************************************************************************** + +High-Pass Filtering + +**************************************************************************************************************************************************************/ +MA_API ma_hpf1_config ma_hpf1_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency) +{ + ma_hpf1_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.cutoffFrequency = cutoffFrequency; + + return config; +} + +MA_API ma_hpf2_config ma_hpf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, double q) +{ + ma_hpf2_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.cutoffFrequency = cutoffFrequency; + config.q = q; + + /* Q cannot be 0 or else it'll result in a division by 0. In this case just default to 0.707107. */ + if (config.q == 0) { + config.q = 0.707107; + } + + return config; +} + + +MA_API ma_result ma_hpf1_init(const ma_hpf1_config* pConfig, ma_hpf1* pHPF) +{ + if (pHPF == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pHPF); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + return ma_hpf1_reinit(pConfig, pHPF); +} + +MA_API ma_result ma_hpf1_reinit(const ma_hpf1_config* pConfig, ma_hpf1* pHPF) +{ + double a; + + if (pHPF == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + /* Only supporting f32 and s16. */ + if (pConfig->format != ma_format_f32 && pConfig->format != ma_format_s16) { + return MA_INVALID_ARGS; + } + + /* The format cannot be changed after initialization. 
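+
+    A usage sketch for the first-order high-pass defined in this section, mirroring the ma_lpf1 API above
+    (illustrative only; pFrames and frameCount are placeholders):
+
+        ma_hpf1 hpf1;
+        ma_hpf1_config hpf1Config = ma_hpf1_config_init(ma_format_f32, 2, 48000, 200);
+        if (ma_hpf1_init(&hpf1Config, &hpf1) == MA_SUCCESS) {
+            ma_hpf1_process_pcm_frames(&hpf1, pFrames, pFrames, frameCount);
+        }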
*/ + if (pHPF->format != ma_format_unknown && pHPF->format != pConfig->format) { + return MA_INVALID_OPERATION; + } + + /* The channel count cannot be changed after initialization. */ + if (pHPF->channels != 0 && pHPF->channels != pConfig->channels) { + return MA_INVALID_OPERATION; + } + + pHPF->format = pConfig->format; + pHPF->channels = pConfig->channels; + + a = ma_exp(-2 * MA_PI_D * pConfig->cutoffFrequency / pConfig->sampleRate); + if (pConfig->format == ma_format_f32) { + pHPF->a.f32 = (float)a; + } else { + pHPF->a.s32 = ma_biquad_float_to_fp(a); + } + + return MA_SUCCESS; +} + +static MA_INLINE void ma_hpf1_process_pcm_frame_f32(ma_hpf1* pHPF, float* pY, const float* pX) +{ + ma_uint32 c; + const float a = 1 - pHPF->a.f32; + const float b = 1 - a; + + for (c = 0; c < pHPF->channels; c += 1) { + float r1 = pHPF->r1[c].f32; + float x = pX[c]; + float y; + + y = b*x - a*r1; + + pY[c] = y; + pHPF->r1[c].f32 = y; + } +} + +static MA_INLINE void ma_hpf1_process_pcm_frame_s16(ma_hpf1* pHPF, ma_int16* pY, const ma_int16* pX) +{ + ma_uint32 c; + const ma_int32 a = ((1 << MA_BIQUAD_FIXED_POINT_SHIFT) - pHPF->a.s32); + const ma_int32 b = ((1 << MA_BIQUAD_FIXED_POINT_SHIFT) - a); + + for (c = 0; c < pHPF->channels; c += 1) { + ma_int32 r1 = pHPF->r1[c].s32; + ma_int32 x = pX[c]; + ma_int32 y; + + y = (b*x - a*r1) >> MA_BIQUAD_FIXED_POINT_SHIFT; + + pY[c] = (ma_int16)y; + pHPF->r1[c].s32 = (ma_int32)y; + } +} + +MA_API ma_result ma_hpf1_process_pcm_frames(ma_hpf1* pHPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + ma_uint32 n; + + if (pHPF == NULL || pFramesOut == NULL || pFramesIn == NULL) { + return MA_INVALID_ARGS; + } + + /* Note that the logic below needs to support in-place filtering. That is, it must support the case where pFramesOut and pFramesIn are the same. */ + + if (pHPF->format == ma_format_f32) { + /* */ float* pY = ( float*)pFramesOut; + const float* pX = (const float*)pFramesIn; + + for (n = 0; n < frameCount; n += 1) { + ma_hpf1_process_pcm_frame_f32(pHPF, pY, pX); + pY += pHPF->channels; + pX += pHPF->channels; + } + } else if (pHPF->format == ma_format_s16) { + /* */ ma_int16* pY = ( ma_int16*)pFramesOut; + const ma_int16* pX = (const ma_int16*)pFramesIn; + + for (n = 0; n < frameCount; n += 1) { + ma_hpf1_process_pcm_frame_s16(pHPF, pY, pX); + pY += pHPF->channels; + pX += pHPF->channels; + } + } else { + MA_ASSERT(MA_FALSE); + return MA_INVALID_ARGS; /* Format not supported. Should never hit this because it's checked in ma_biquad_init() and ma_biquad_reinit(). 
*/ + } + + return MA_SUCCESS; +} + +MA_API ma_uint32 ma_hpf1_get_latency(ma_hpf1* pHPF) +{ + if (pHPF == NULL) { + return 0; + } + + return 1; +} + + +static MA_INLINE ma_biquad_config ma_hpf2__get_biquad_config(const ma_hpf2_config* pConfig) +{ + ma_biquad_config bqConfig; + double q; + double w; + double s; + double c; + double a; + + MA_ASSERT(pConfig != NULL); + + q = pConfig->q; + w = 2 * MA_PI_D * pConfig->cutoffFrequency / pConfig->sampleRate; + s = ma_sin(w); + c = ma_cos(w); + a = s / (2*q); + + bqConfig.b0 = (1 + c) / 2; + bqConfig.b1 = -(1 + c); + bqConfig.b2 = (1 + c) / 2; + bqConfig.a0 = 1 + a; + bqConfig.a1 = -2 * c; + bqConfig.a2 = 1 - a; + + bqConfig.format = pConfig->format; + bqConfig.channels = pConfig->channels; + + return bqConfig; +} + +MA_API ma_result ma_hpf2_init(const ma_hpf2_config* pConfig, ma_hpf2* pHPF) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pHPF == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pHPF); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_hpf2__get_biquad_config(pConfig); + result = ma_biquad_init(&bqConfig, &pHPF->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_hpf2_reinit(const ma_hpf2_config* pConfig, ma_hpf2* pHPF) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pHPF == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_hpf2__get_biquad_config(pConfig); + result = ma_biquad_reinit(&bqConfig, &pHPF->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +static MA_INLINE void ma_hpf2_process_pcm_frame_s16(ma_hpf2* pHPF, ma_int16* pFrameOut, const ma_int16* pFrameIn) +{ + ma_biquad_process_pcm_frame_s16(&pHPF->bq, pFrameOut, pFrameIn); +} + +static MA_INLINE void ma_hpf2_process_pcm_frame_f32(ma_hpf2* pHPF, float* pFrameOut, const float* pFrameIn) +{ + ma_biquad_process_pcm_frame_f32(&pHPF->bq, pFrameOut, pFrameIn); +} + +MA_API ma_result ma_hpf2_process_pcm_frames(ma_hpf2* pHPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + if (pHPF == NULL) { + return MA_INVALID_ARGS; + } + + return ma_biquad_process_pcm_frames(&pHPF->bq, pFramesOut, pFramesIn, frameCount); +} + +MA_API ma_uint32 ma_hpf2_get_latency(ma_hpf2* pHPF) +{ + if (pHPF == NULL) { + return 0; + } + + return ma_biquad_get_latency(&pHPF->bq); +} + + +MA_API ma_hpf_config ma_hpf_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order) +{ + ma_hpf_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.cutoffFrequency = cutoffFrequency; + config.order = ma_min(order, MA_MAX_FILTER_ORDER); + + return config; +} + +static ma_result ma_hpf_reinit__internal(const ma_hpf_config* pConfig, ma_hpf* pHPF, ma_bool32 isNew) +{ + ma_result result; + ma_uint32 hpf1Count; + ma_uint32 hpf2Count; + ma_uint32 ihpf1; + ma_uint32 ihpf2; + + if (pHPF == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + /* Only supporting f32 and s16. */ + if (pConfig->format != ma_format_f32 && pConfig->format != ma_format_s16) { + return MA_INVALID_ARGS; + } + + /* The format cannot be changed after initialization. */ + if (pHPF->format != ma_format_unknown && pHPF->format != pConfig->format) { + return MA_INVALID_OPERATION; + } + + /* The channel count cannot be changed after initialization. 
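+
+    A usage sketch for the cascaded high-pass filter configured here, mirroring the ma_lpf example earlier in
+    this file (illustrative only; pFramesOut, pFramesIn and frameCount are placeholders), e.g. a 2nd order
+    high-pass at 80 Hz on stereo f32 data:
+
+        ma_hpf hpf;
+        ma_hpf_config hpfConfig = ma_hpf_config_init(ma_format_f32, 2, 48000, 80, 2);
+        if (ma_hpf_init(&hpfConfig, &hpf) == MA_SUCCESS) {
+            ma_hpf_process_pcm_frames(&hpf, pFramesOut, pFramesIn, frameCount);
+        }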
*/ + if (pHPF->channels != 0 && pHPF->channels != pConfig->channels) { + return MA_INVALID_OPERATION; + } + + if (pConfig->order > MA_MAX_FILTER_ORDER) { + return MA_INVALID_ARGS; + } + + hpf1Count = pConfig->order % 2; + hpf2Count = pConfig->order / 2; + + MA_ASSERT(hpf1Count <= ma_countof(pHPF->hpf1)); + MA_ASSERT(hpf2Count <= ma_countof(pHPF->hpf2)); + + /* The filter order can't change between reinits. */ + if (!isNew) { + if (pHPF->hpf1Count != hpf1Count || pHPF->hpf2Count != hpf2Count) { + return MA_INVALID_OPERATION; + } + } + + for (ihpf1 = 0; ihpf1 < hpf1Count; ihpf1 += 1) { + ma_hpf1_config hpf1Config = ma_hpf1_config_init(pConfig->format, pConfig->channels, pConfig->sampleRate, pConfig->cutoffFrequency); + + if (isNew) { + result = ma_hpf1_init(&hpf1Config, &pHPF->hpf1[ihpf1]); + } else { + result = ma_hpf1_reinit(&hpf1Config, &pHPF->hpf1[ihpf1]); + } + + if (result != MA_SUCCESS) { + return result; + } + } + + for (ihpf2 = 0; ihpf2 < hpf2Count; ihpf2 += 1) { + ma_hpf2_config hpf2Config; + double q; + double a; + + /* Tempting to use 0.707107, but won't result in a Butterworth filter if the order is > 2. */ + if (hpf1Count == 1) { + a = (1 + ihpf2*1) * (MA_PI_D/(pConfig->order*1)); /* Odd order. */ + } else { + a = (1 + ihpf2*2) * (MA_PI_D/(pConfig->order*2)); /* Even order. */ + } + q = 1 / (2*ma_cos(a)); + + hpf2Config = ma_hpf2_config_init(pConfig->format, pConfig->channels, pConfig->sampleRate, pConfig->cutoffFrequency, q); + + if (isNew) { + result = ma_hpf2_init(&hpf2Config, &pHPF->hpf2[ihpf2]); + } else { + result = ma_hpf2_reinit(&hpf2Config, &pHPF->hpf2[ihpf2]); + } + + if (result != MA_SUCCESS) { + return result; + } + } + + pHPF->hpf1Count = hpf1Count; + pHPF->hpf2Count = hpf2Count; + pHPF->format = pConfig->format; + pHPF->channels = pConfig->channels; + + return MA_SUCCESS; +} + +MA_API ma_result ma_hpf_init(const ma_hpf_config* pConfig, ma_hpf* pHPF) +{ + if (pHPF == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pHPF); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + return ma_hpf_reinit__internal(pConfig, pHPF, /*isNew*/MA_TRUE); +} + +MA_API ma_result ma_hpf_reinit(const ma_hpf_config* pConfig, ma_hpf* pHPF) +{ + return ma_hpf_reinit__internal(pConfig, pHPF, /*isNew*/MA_FALSE); +} + +MA_API ma_result ma_hpf_process_pcm_frames(ma_hpf* pHPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + ma_result result; + ma_uint32 ihpf1; + ma_uint32 ihpf2; + + if (pHPF == NULL) { + return MA_INVALID_ARGS; + } + + /* Faster path for in-place. */ + if (pFramesOut == pFramesIn) { + for (ihpf1 = 0; ihpf1 < pHPF->hpf1Count; ihpf1 += 1) { + result = ma_hpf1_process_pcm_frames(&pHPF->hpf1[ihpf1], pFramesOut, pFramesOut, frameCount); + if (result != MA_SUCCESS) { + return result; + } + } + + for (ihpf2 = 0; ihpf2 < pHPF->hpf2Count; ihpf2 += 1) { + result = ma_hpf2_process_pcm_frames(&pHPF->hpf2[ihpf2], pFramesOut, pFramesOut, frameCount); + if (result != MA_SUCCESS) { + return result; + } + } + } + + /* Slightly slower path for copying. 
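+       Each frame is copied into the output buffer first and then run through the first and second order stages in place using
+       the per-frame helpers, so no scratch buffer is needed to chain the stages.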
*/ + if (pFramesOut != pFramesIn) { + ma_uint32 iFrame; + + /* */ if (pHPF->format == ma_format_f32) { + /* */ float* pFramesOutF32 = ( float*)pFramesOut; + const float* pFramesInF32 = (const float*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + MA_COPY_MEMORY(pFramesOutF32, pFramesInF32, ma_get_bytes_per_frame(pHPF->format, pHPF->channels)); + + for (ihpf1 = 0; ihpf1 < pHPF->hpf1Count; ihpf1 += 1) { + ma_hpf1_process_pcm_frame_f32(&pHPF->hpf1[ihpf1], pFramesOutF32, pFramesOutF32); + } + + for (ihpf2 = 0; ihpf2 < pHPF->hpf2Count; ihpf2 += 1) { + ma_hpf2_process_pcm_frame_f32(&pHPF->hpf2[ihpf2], pFramesOutF32, pFramesOutF32); + } + + pFramesOutF32 += pHPF->channels; + pFramesInF32 += pHPF->channels; + } + } else if (pHPF->format == ma_format_s16) { + /* */ ma_int16* pFramesOutS16 = ( ma_int16*)pFramesOut; + const ma_int16* pFramesInS16 = (const ma_int16*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + MA_COPY_MEMORY(pFramesOutS16, pFramesInS16, ma_get_bytes_per_frame(pHPF->format, pHPF->channels)); + + for (ihpf1 = 0; ihpf1 < pHPF->hpf1Count; ihpf1 += 1) { + ma_hpf1_process_pcm_frame_s16(&pHPF->hpf1[ihpf1], pFramesOutS16, pFramesOutS16); + } + + for (ihpf2 = 0; ihpf2 < pHPF->hpf2Count; ihpf2 += 1) { + ma_hpf2_process_pcm_frame_s16(&pHPF->hpf2[ihpf2], pFramesOutS16, pFramesOutS16); + } + + pFramesOutS16 += pHPF->channels; + pFramesInS16 += pHPF->channels; + } + } else { + MA_ASSERT(MA_FALSE); + return MA_INVALID_OPERATION; /* Should never hit this. */ + } + } + + return MA_SUCCESS; +} + +MA_API ma_uint32 ma_hpf_get_latency(ma_hpf* pHPF) +{ + if (pHPF == NULL) { + return 0; + } + + return pHPF->hpf2Count*2 + pHPF->hpf1Count; +} + + +/************************************************************************************************************************************************************** + +Band-Pass Filtering + +**************************************************************************************************************************************************************/ +MA_API ma_bpf2_config ma_bpf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, double q) +{ + ma_bpf2_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.cutoffFrequency = cutoffFrequency; + config.q = q; + + /* Q cannot be 0 or else it'll result in a division by 0. In this case just default to 0.707107. 
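+       0.707107 is an approximation of 1/sqrt(2) and is the same default Q used by the other second order filters in this file.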
*/ + if (config.q == 0) { + config.q = 0.707107; + } + + return config; +} + + +static MA_INLINE ma_biquad_config ma_bpf2__get_biquad_config(const ma_bpf2_config* pConfig) +{ + ma_biquad_config bqConfig; + double q; + double w; + double s; + double c; + double a; + + MA_ASSERT(pConfig != NULL); + + q = pConfig->q; + w = 2 * MA_PI_D * pConfig->cutoffFrequency / pConfig->sampleRate; + s = ma_sin(w); + c = ma_cos(w); + a = s / (2*q); + + bqConfig.b0 = q * a; + bqConfig.b1 = 0; + bqConfig.b2 = -q * a; + bqConfig.a0 = 1 + a; + bqConfig.a1 = -2 * c; + bqConfig.a2 = 1 - a; + + bqConfig.format = pConfig->format; + bqConfig.channels = pConfig->channels; + + return bqConfig; +} + +MA_API ma_result ma_bpf2_init(const ma_bpf2_config* pConfig, ma_bpf2* pBPF) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pBPF == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pBPF); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_bpf2__get_biquad_config(pConfig); + result = ma_biquad_init(&bqConfig, &pBPF->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_bpf2_reinit(const ma_bpf2_config* pConfig, ma_bpf2* pBPF) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pBPF == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_bpf2__get_biquad_config(pConfig); + result = ma_biquad_reinit(&bqConfig, &pBPF->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +static MA_INLINE void ma_bpf2_process_pcm_frame_s16(ma_bpf2* pBPF, ma_int16* pFrameOut, const ma_int16* pFrameIn) +{ + ma_biquad_process_pcm_frame_s16(&pBPF->bq, pFrameOut, pFrameIn); +} + +static MA_INLINE void ma_bpf2_process_pcm_frame_f32(ma_bpf2* pBPF, float* pFrameOut, const float* pFrameIn) +{ + ma_biquad_process_pcm_frame_f32(&pBPF->bq, pFrameOut, pFrameIn); +} + +MA_API ma_result ma_bpf2_process_pcm_frames(ma_bpf2* pBPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + if (pBPF == NULL) { + return MA_INVALID_ARGS; + } + + return ma_biquad_process_pcm_frames(&pBPF->bq, pFramesOut, pFramesIn, frameCount); +} + +MA_API ma_uint32 ma_bpf2_get_latency(ma_bpf2* pBPF) +{ + if (pBPF == NULL) { + return 0; + } + + return ma_biquad_get_latency(&pBPF->bq); +} + + +MA_API ma_bpf_config ma_bpf_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order) +{ + ma_bpf_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.cutoffFrequency = cutoffFrequency; + config.order = ma_min(order, MA_MAX_FILTER_ORDER); + + return config; +} + +static ma_result ma_bpf_reinit__internal(const ma_bpf_config* pConfig, ma_bpf* pBPF, ma_bool32 isNew) +{ + ma_result result; + ma_uint32 bpf2Count; + ma_uint32 ibpf2; + + if (pBPF == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + /* Only supporting f32 and s16. */ + if (pConfig->format != ma_format_f32 && pConfig->format != ma_format_s16) { + return MA_INVALID_ARGS; + } + + /* The format cannot be changed after initialization. */ + if (pBPF->format != ma_format_unknown && pBPF->format != pConfig->format) { + return MA_INVALID_OPERATION; + } + + /* The channel count cannot be changed after initialization. 
*/ + if (pBPF->channels != 0 && pBPF->channels != pConfig->channels) { + return MA_INVALID_OPERATION; + } + + if (pConfig->order > MA_MAX_FILTER_ORDER) { + return MA_INVALID_ARGS; + } + + /* We must have an even number of order. */ + if ((pConfig->order & 0x1) != 0) { + return MA_INVALID_ARGS; + } + + bpf2Count = pConfig->order / 2; + + MA_ASSERT(bpf2Count <= ma_countof(pBPF->bpf2)); + + /* The filter order can't change between reinits. */ + if (!isNew) { + if (pBPF->bpf2Count != bpf2Count) { + return MA_INVALID_OPERATION; + } + } + + for (ibpf2 = 0; ibpf2 < bpf2Count; ibpf2 += 1) { + ma_bpf2_config bpf2Config; + double q; + + /* TODO: Calculate Q to make this a proper Butterworth filter. */ + q = 0.707107; + + bpf2Config = ma_bpf2_config_init(pConfig->format, pConfig->channels, pConfig->sampleRate, pConfig->cutoffFrequency, q); + + if (isNew) { + result = ma_bpf2_init(&bpf2Config, &pBPF->bpf2[ibpf2]); + } else { + result = ma_bpf2_reinit(&bpf2Config, &pBPF->bpf2[ibpf2]); + } + + if (result != MA_SUCCESS) { + return result; + } + } + + pBPF->bpf2Count = bpf2Count; + pBPF->format = pConfig->format; + pBPF->channels = pConfig->channels; + + return MA_SUCCESS; +} + +MA_API ma_result ma_bpf_init(const ma_bpf_config* pConfig, ma_bpf* pBPF) +{ + if (pBPF == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pBPF); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + return ma_bpf_reinit__internal(pConfig, pBPF, /*isNew*/MA_TRUE); +} + +MA_API ma_result ma_bpf_reinit(const ma_bpf_config* pConfig, ma_bpf* pBPF) +{ + return ma_bpf_reinit__internal(pConfig, pBPF, /*isNew*/MA_FALSE); +} + +MA_API ma_result ma_bpf_process_pcm_frames(ma_bpf* pBPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + ma_result result; + ma_uint32 ibpf2; + + if (pBPF == NULL) { + return MA_INVALID_ARGS; + } + + /* Faster path for in-place. */ + if (pFramesOut == pFramesIn) { + for (ibpf2 = 0; ibpf2 < pBPF->bpf2Count; ibpf2 += 1) { + result = ma_bpf2_process_pcm_frames(&pBPF->bpf2[ibpf2], pFramesOut, pFramesOut, frameCount); + if (result != MA_SUCCESS) { + return result; + } + } + } + + /* Slightly slower path for copying. */ + if (pFramesOut != pFramesIn) { + ma_uint32 iFrame; + + /* */ if (pBPF->format == ma_format_f32) { + /* */ float* pFramesOutF32 = ( float*)pFramesOut; + const float* pFramesInF32 = (const float*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + MA_COPY_MEMORY(pFramesOutF32, pFramesInF32, ma_get_bytes_per_frame(pBPF->format, pBPF->channels)); + + for (ibpf2 = 0; ibpf2 < pBPF->bpf2Count; ibpf2 += 1) { + ma_bpf2_process_pcm_frame_f32(&pBPF->bpf2[ibpf2], pFramesOutF32, pFramesOutF32); + } + + pFramesOutF32 += pBPF->channels; + pFramesInF32 += pBPF->channels; + } + } else if (pBPF->format == ma_format_s16) { + /* */ ma_int16* pFramesOutS16 = ( ma_int16*)pFramesOut; + const ma_int16* pFramesInS16 = (const ma_int16*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + MA_COPY_MEMORY(pFramesOutS16, pFramesInS16, ma_get_bytes_per_frame(pBPF->format, pBPF->channels)); + + for (ibpf2 = 0; ibpf2 < pBPF->bpf2Count; ibpf2 += 1) { + ma_bpf2_process_pcm_frame_s16(&pBPF->bpf2[ibpf2], pFramesOutS16, pFramesOutS16); + } + + pFramesOutS16 += pBPF->channels; + pFramesInS16 += pBPF->channels; + } + } else { + MA_ASSERT(MA_FALSE); + return MA_INVALID_OPERATION; /* Should never hit this. 
*/ + } + } + + return MA_SUCCESS; +} + +MA_API ma_uint32 ma_bpf_get_latency(ma_bpf* pBPF) +{ + if (pBPF == NULL) { + return 0; + } + + return pBPF->bpf2Count*2; +} + + +/************************************************************************************************************************************************************** + +Notching Filter + +**************************************************************************************************************************************************************/ +MA_API ma_notch2_config ma_notch2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double q, double frequency) +{ + ma_notch2_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.q = q; + config.frequency = frequency; + + if (config.q == 0) { + config.q = 0.707107; + } + + return config; +} + + +static MA_INLINE ma_biquad_config ma_notch2__get_biquad_config(const ma_notch2_config* pConfig) +{ + ma_biquad_config bqConfig; + double q; + double w; + double s; + double c; + double a; + + MA_ASSERT(pConfig != NULL); + + q = pConfig->q; + w = 2 * MA_PI_D * pConfig->frequency / pConfig->sampleRate; + s = ma_sin(w); + c = ma_cos(w); + a = s / (2*q); + + bqConfig.b0 = 1; + bqConfig.b1 = -2 * c; + bqConfig.b2 = 1; + bqConfig.a0 = 1 + a; + bqConfig.a1 = -2 * c; + bqConfig.a2 = 1 - a; + + bqConfig.format = pConfig->format; + bqConfig.channels = pConfig->channels; + + return bqConfig; +} + +MA_API ma_result ma_notch2_init(const ma_notch2_config* pConfig, ma_notch2* pFilter) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pFilter == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pFilter); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_notch2__get_biquad_config(pConfig); + result = ma_biquad_init(&bqConfig, &pFilter->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_notch2_reinit(const ma_notch2_config* pConfig, ma_notch2* pFilter) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pFilter == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_notch2__get_biquad_config(pConfig); + result = ma_biquad_reinit(&bqConfig, &pFilter->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +static MA_INLINE void ma_notch2_process_pcm_frame_s16(ma_notch2* pFilter, ma_int16* pFrameOut, const ma_int16* pFrameIn) +{ + ma_biquad_process_pcm_frame_s16(&pFilter->bq, pFrameOut, pFrameIn); +} + +static MA_INLINE void ma_notch2_process_pcm_frame_f32(ma_notch2* pFilter, float* pFrameOut, const float* pFrameIn) +{ + ma_biquad_process_pcm_frame_f32(&pFilter->bq, pFrameOut, pFrameIn); +} + +MA_API ma_result ma_notch2_process_pcm_frames(ma_notch2* pFilter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + if (pFilter == NULL) { + return MA_INVALID_ARGS; + } + + return ma_biquad_process_pcm_frames(&pFilter->bq, pFramesOut, pFramesIn, frameCount); +} + +MA_API ma_uint32 ma_notch2_get_latency(ma_notch2* pFilter) +{ + if (pFilter == NULL) { + return 0; + } + + return ma_biquad_get_latency(&pFilter->bq); +} + + + +/************************************************************************************************************************************************************** + +Peaking EQ Filter + 
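+A peaking (bell) EQ boosts or cuts the signal by gainDB decibels around the given center frequency, with the width of the
+affected band controlled by q. A gainDB of 0 leaves the signal unchanged.
+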
+**************************************************************************************************************************************************************/ +MA_API ma_peak2_config ma_peak2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double q, double frequency) +{ + ma_peak2_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.gainDB = gainDB; + config.q = q; + config.frequency = frequency; + + if (config.q == 0) { + config.q = 0.707107; + } + + return config; +} + + +static MA_INLINE ma_biquad_config ma_peak2__get_biquad_config(const ma_peak2_config* pConfig) +{ + ma_biquad_config bqConfig; + double q; + double w; + double s; + double c; + double a; + double A; + + MA_ASSERT(pConfig != NULL); + + q = pConfig->q; + w = 2 * MA_PI_D * pConfig->frequency / pConfig->sampleRate; + s = ma_sin(w); + c = ma_cos(w); + a = s / (2*q); + A = ma_pow(10, (pConfig->gainDB / 40)); + + bqConfig.b0 = 1 + (a * A); + bqConfig.b1 = -2 * c; + bqConfig.b2 = 1 - (a * A); + bqConfig.a0 = 1 + (a / A); + bqConfig.a1 = -2 * c; + bqConfig.a2 = 1 - (a / A); + + bqConfig.format = pConfig->format; + bqConfig.channels = pConfig->channels; + + return bqConfig; +} + +MA_API ma_result ma_peak2_init(const ma_peak2_config* pConfig, ma_peak2* pFilter) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pFilter == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pFilter); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_peak2__get_biquad_config(pConfig); + result = ma_biquad_init(&bqConfig, &pFilter->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_peak2_reinit(const ma_peak2_config* pConfig, ma_peak2* pFilter) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pFilter == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_peak2__get_biquad_config(pConfig); + result = ma_biquad_reinit(&bqConfig, &pFilter->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +static MA_INLINE void ma_peak2_process_pcm_frame_s16(ma_peak2* pFilter, ma_int16* pFrameOut, const ma_int16* pFrameIn) +{ + ma_biquad_process_pcm_frame_s16(&pFilter->bq, pFrameOut, pFrameIn); +} + +static MA_INLINE void ma_peak2_process_pcm_frame_f32(ma_peak2* pFilter, float* pFrameOut, const float* pFrameIn) +{ + ma_biquad_process_pcm_frame_f32(&pFilter->bq, pFrameOut, pFrameIn); +} + +MA_API ma_result ma_peak2_process_pcm_frames(ma_peak2* pFilter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + if (pFilter == NULL) { + return MA_INVALID_ARGS; + } + + return ma_biquad_process_pcm_frames(&pFilter->bq, pFramesOut, pFramesIn, frameCount); +} + +MA_API ma_uint32 ma_peak2_get_latency(ma_peak2* pFilter) +{ + if (pFilter == NULL) { + return 0; + } + + return ma_biquad_get_latency(&pFilter->bq); +} + + +/************************************************************************************************************************************************************** + +Low Shelf Filter + +**************************************************************************************************************************************************************/ +MA_API ma_loshelf2_config ma_loshelf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double shelfSlope, double frequency) +{ + ma_loshelf2_config config; + + MA_ZERO_OBJECT(&config); + 
config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.gainDB = gainDB; + config.shelfSlope = shelfSlope; + config.frequency = frequency; + + return config; +} + + +static MA_INLINE ma_biquad_config ma_loshelf2__get_biquad_config(const ma_loshelf2_config* pConfig) +{ + ma_biquad_config bqConfig; + double w; + double s; + double c; + double A; + double S; + double a; + double sqrtA; + + MA_ASSERT(pConfig != NULL); + + w = 2 * MA_PI_D * pConfig->frequency / pConfig->sampleRate; + s = ma_sin(w); + c = ma_cos(w); + A = ma_pow(10, (pConfig->gainDB / 40)); + S = pConfig->shelfSlope; + a = s/2 * ma_sqrt((A + 1/A) * (1/S - 1) + 2); + sqrtA = 2*ma_sqrt(A)*a; + + bqConfig.b0 = A * ((A + 1) - (A - 1)*c + sqrtA); + bqConfig.b1 = 2 * A * ((A - 1) - (A + 1)*c); + bqConfig.b2 = A * ((A + 1) - (A - 1)*c - sqrtA); + bqConfig.a0 = (A + 1) + (A - 1)*c + sqrtA; + bqConfig.a1 = -2 * ((A - 1) + (A + 1)*c); + bqConfig.a2 = (A + 1) + (A - 1)*c - sqrtA; + + bqConfig.format = pConfig->format; + bqConfig.channels = pConfig->channels; + + return bqConfig; +} + +MA_API ma_result ma_loshelf2_init(const ma_loshelf2_config* pConfig, ma_loshelf2* pFilter) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pFilter == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pFilter); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_loshelf2__get_biquad_config(pConfig); + result = ma_biquad_init(&bqConfig, &pFilter->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_loshelf2_reinit(const ma_loshelf2_config* pConfig, ma_loshelf2* pFilter) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pFilter == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_loshelf2__get_biquad_config(pConfig); + result = ma_biquad_reinit(&bqConfig, &pFilter->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +static MA_INLINE void ma_loshelf2_process_pcm_frame_s16(ma_loshelf2* pFilter, ma_int16* pFrameOut, const ma_int16* pFrameIn) +{ + ma_biquad_process_pcm_frame_s16(&pFilter->bq, pFrameOut, pFrameIn); +} + +static MA_INLINE void ma_loshelf2_process_pcm_frame_f32(ma_loshelf2* pFilter, float* pFrameOut, const float* pFrameIn) +{ + ma_biquad_process_pcm_frame_f32(&pFilter->bq, pFrameOut, pFrameIn); +} + +MA_API ma_result ma_loshelf2_process_pcm_frames(ma_loshelf2* pFilter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + if (pFilter == NULL) { + return MA_INVALID_ARGS; + } + + return ma_biquad_process_pcm_frames(&pFilter->bq, pFramesOut, pFramesIn, frameCount); +} + +MA_API ma_uint32 ma_loshelf2_get_latency(ma_loshelf2* pFilter) +{ + if (pFilter == NULL) { + return 0; + } + + return ma_biquad_get_latency(&pFilter->bq); +} + + +/************************************************************************************************************************************************************** + +High Shelf Filter + +**************************************************************************************************************************************************************/ +MA_API ma_hishelf2_config ma_hishelf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double shelfSlope, double frequency) +{ + ma_hishelf2_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.gainDB = gainDB; + config.shelfSlope = shelfSlope; + 
config.frequency = frequency; + + return config; +} + + +static MA_INLINE ma_biquad_config ma_hishelf2__get_biquad_config(const ma_hishelf2_config* pConfig) +{ + ma_biquad_config bqConfig; + double w; + double s; + double c; + double A; + double S; + double a; + double sqrtA; + + MA_ASSERT(pConfig != NULL); + + w = 2 * MA_PI_D * pConfig->frequency / pConfig->sampleRate; + s = ma_sin(w); + c = ma_cos(w); + A = ma_pow(10, (pConfig->gainDB / 40)); + S = pConfig->shelfSlope; + a = s/2 * ma_sqrt((A + 1/A) * (1/S - 1) + 2); + sqrtA = 2*ma_sqrt(A)*a; + + bqConfig.b0 = A * ((A + 1) + (A - 1)*c + sqrtA); + bqConfig.b1 = -2 * A * ((A - 1) + (A + 1)*c); + bqConfig.b2 = A * ((A + 1) + (A - 1)*c - sqrtA); + bqConfig.a0 = (A + 1) - (A - 1)*c + sqrtA; + bqConfig.a1 = 2 * ((A - 1) - (A + 1)*c); + bqConfig.a2 = (A + 1) - (A - 1)*c - sqrtA; + + bqConfig.format = pConfig->format; + bqConfig.channels = pConfig->channels; + + return bqConfig; +} + +MA_API ma_result ma_hishelf2_init(const ma_hishelf2_config* pConfig, ma_hishelf2* pFilter) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pFilter == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pFilter); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_hishelf2__get_biquad_config(pConfig); + result = ma_biquad_init(&bqConfig, &pFilter->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_hishelf2_reinit(const ma_hishelf2_config* pConfig, ma_hishelf2* pFilter) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pFilter == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_hishelf2__get_biquad_config(pConfig); + result = ma_biquad_reinit(&bqConfig, &pFilter->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +static MA_INLINE void ma_hishelf2_process_pcm_frame_s16(ma_hishelf2* pFilter, ma_int16* pFrameOut, const ma_int16* pFrameIn) +{ + ma_biquad_process_pcm_frame_s16(&pFilter->bq, pFrameOut, pFrameIn); +} + +static MA_INLINE void ma_hishelf2_process_pcm_frame_f32(ma_hishelf2* pFilter, float* pFrameOut, const float* pFrameIn) +{ + ma_biquad_process_pcm_frame_f32(&pFilter->bq, pFrameOut, pFrameIn); +} + +MA_API ma_result ma_hishelf2_process_pcm_frames(ma_hishelf2* pFilter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + if (pFilter == NULL) { + return MA_INVALID_ARGS; + } + + return ma_biquad_process_pcm_frames(&pFilter->bq, pFramesOut, pFramesIn, frameCount); +} + +MA_API ma_uint32 ma_hishelf2_get_latency(ma_hishelf2* pFilter) +{ + if (pFilter == NULL) { + return 0; + } + + return ma_biquad_get_latency(&pFilter->bq); +} + + + +/************************************************************************************************************************************************************** + +Resampling + +**************************************************************************************************************************************************************/ +MA_API ma_linear_resampler_config ma_linear_resampler_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut) +{ + ma_linear_resampler_config config; + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRateIn = sampleRateIn; + config.sampleRateOut = sampleRateOut; + config.lpfOrder = ma_min(MA_DEFAULT_RESAMPLER_LPF_ORDER, MA_MAX_FILTER_ORDER); + config.lpfNyquistFactor = 1; + + return config; +} + +static ma_result 
ma_linear_resampler_set_rate_internal(ma_linear_resampler* pResampler, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut, ma_bool32 isResamplerAlreadyInitialized) +{ + ma_uint32 gcf; + + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + if (sampleRateIn == 0 || sampleRateOut == 0) { + return MA_INVALID_ARGS; + } + + pResampler->config.sampleRateIn = sampleRateIn; + pResampler->config.sampleRateOut = sampleRateOut; + + /* Simplify the sample rate. */ + gcf = ma_gcf_u32(pResampler->config.sampleRateIn, pResampler->config.sampleRateOut); + pResampler->config.sampleRateIn /= gcf; + pResampler->config.sampleRateOut /= gcf; + + if (pResampler->config.lpfOrder > 0) { + ma_result result; + ma_uint32 lpfSampleRate; + double lpfCutoffFrequency; + ma_lpf_config lpfConfig; + + if (pResampler->config.lpfOrder > MA_MAX_FILTER_ORDER) { + return MA_INVALID_ARGS; + } + + lpfSampleRate = (ma_uint32)(ma_max(pResampler->config.sampleRateIn, pResampler->config.sampleRateOut)); + lpfCutoffFrequency = ( double)(ma_min(pResampler->config.sampleRateIn, pResampler->config.sampleRateOut) * 0.5 * pResampler->config.lpfNyquistFactor); + + lpfConfig = ma_lpf_config_init(pResampler->config.format, pResampler->config.channels, lpfSampleRate, lpfCutoffFrequency, pResampler->config.lpfOrder); + + /* + If the resampler is alreay initialized we don't want to do a fresh initialization of the low-pass filter because it will result in the cached frames + getting cleared. Instead we re-initialize the filter which will maintain any cached frames. + */ + if (isResamplerAlreadyInitialized) { + result = ma_lpf_reinit(&lpfConfig, &pResampler->lpf); + } else { + result = ma_lpf_init(&lpfConfig, &pResampler->lpf); + } + + if (result != MA_SUCCESS) { + return result; + } + } + + pResampler->inAdvanceInt = pResampler->config.sampleRateIn / pResampler->config.sampleRateOut; + pResampler->inAdvanceFrac = pResampler->config.sampleRateIn % pResampler->config.sampleRateOut; + + /* Make sure the fractional part is less than the output sample rate. */ + pResampler->inTimeInt += pResampler->inTimeFrac / pResampler->config.sampleRateOut; + pResampler->inTimeFrac = pResampler->inTimeFrac % pResampler->config.sampleRateOut; + + return MA_SUCCESS; +} + +MA_API ma_result ma_linear_resampler_init(const ma_linear_resampler_config* pConfig, ma_linear_resampler* pResampler) +{ + ma_result result; + + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pResampler); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + pResampler->config = *pConfig; + + /* Setting the rate will set up the filter and time advances for us. */ + result = ma_linear_resampler_set_rate_internal(pResampler, pConfig->sampleRateIn, pConfig->sampleRateOut, /* isResamplerAlreadyInitialized = */ MA_FALSE); + if (result != MA_SUCCESS) { + return result; + } + + pResampler->inTimeInt = 1; /* Set this to one to force an input sample to always be loaded for the first output frame. 
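+       The loader loops in the process functions below consume one input frame into x0/x1 whenever inTimeInt is greater than
+       zero, so starting at 1 guarantees the first output frame interpolates real input rather than the zero-initialized history.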
*/
+    pResampler->inTimeFrac = 0;
+
+    return MA_SUCCESS;
+}
+
+MA_API void ma_linear_resampler_uninit(ma_linear_resampler* pResampler)
+{
+    if (pResampler == NULL) {
+        return;
+    }
+}
+
+static MA_INLINE ma_int16 ma_linear_resampler_mix_s16(ma_int16 x, ma_int16 y, ma_int32 a, const ma_int32 shift)
+{
+    ma_int32 b;
+    ma_int32 c;
+    ma_int32 r;
+
+    MA_ASSERT(a <= (1<<shift));
+
+    b = x * ((1<<shift) - a);
+    c = y * a;
+    r = b + c;
+
+    return (ma_int16)(r >> shift);
+}
+
+static void ma_linear_resampler_interpolate_frame_s16(ma_linear_resampler* pResampler, ma_int16* pFrameOut)
+{
+    ma_uint32 c;
+    ma_uint32 a;
+    const ma_uint32 shift = 12;
+
+    MA_ASSERT(pResampler != NULL);
+    MA_ASSERT(pFrameOut != NULL);
+
+    a = (pResampler->inTimeFrac << shift) / pResampler->config.sampleRateOut;
+
+    for (c = 0; c < pResampler->config.channels; c += 1) {
+        ma_int16 s = ma_linear_resampler_mix_s16(pResampler->x0.s16[c], pResampler->x1.s16[c], a, shift);
+        pFrameOut[c] = s;
+    }
+}
+
+
+static void ma_linear_resampler_interpolate_frame_f32(ma_linear_resampler* pResampler, float* pFrameOut)
+{
+    ma_uint32 c;
+    float a;
+
+    MA_ASSERT(pResampler != NULL);
+    MA_ASSERT(pFrameOut != NULL);
+
+    a = (float)pResampler->inTimeFrac / pResampler->config.sampleRateOut;
+
+    for (c = 0; c < pResampler->config.channels; c += 1) {
+        float s = ma_mix_f32_fast(pResampler->x0.f32[c], pResampler->x1.f32[c], a);
+        pFrameOut[c] = s;
+    }
+}
+
+static ma_result ma_linear_resampler_process_pcm_frames_s16_downsample(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut)
+{
+    const ma_int16* pFramesInS16;
+    /* */ ma_int16* pFramesOutS16;
+    ma_uint64 frameCountIn;
+    ma_uint64 frameCountOut;
+    ma_uint64 framesProcessedIn;
+    ma_uint64 framesProcessedOut;
+
+    MA_ASSERT(pResampler != NULL);
+    MA_ASSERT(pFrameCountIn != NULL);
+    MA_ASSERT(pFrameCountOut != NULL);
+
+    pFramesInS16 = (const ma_int16*)pFramesIn;
+    pFramesOutS16 = ( ma_int16*)pFramesOut;
+    frameCountIn = *pFrameCountIn;
+    frameCountOut = *pFrameCountOut;
+    framesProcessedIn = 0;
+    framesProcessedOut = 0;
+
+    for (;;) {
+        if (framesProcessedOut >= frameCountOut) {
+            break;
+        }
+
+        /* Before interpolating we need to load the buffers. When doing this we need to ensure we run every input sample through the filter. */
+        while (pResampler->inTimeInt > 0 && frameCountIn > 0) {
+            ma_uint32 iChannel;
+
+            if (pFramesInS16 != NULL) {
+                for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) {
+                    pResampler->x0.s16[iChannel] = pResampler->x1.s16[iChannel];
+                    pResampler->x1.s16[iChannel] = pFramesInS16[iChannel];
+                }
+                pFramesInS16 += pResampler->config.channels;
+            } else {
+                for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) {
+                    pResampler->x0.s16[iChannel] = pResampler->x1.s16[iChannel];
+                    pResampler->x1.s16[iChannel] = 0;
+                }
+            }
+
+            /* Filter. */
+            ma_lpf_process_pcm_frame_s16(&pResampler->lpf, pResampler->x1.s16, pResampler->x1.s16);
+
+            frameCountIn -= 1;
+            framesProcessedIn += 1;
+            pResampler->inTimeInt -= 1;
+        }
+
+        if (pResampler->inTimeInt > 0) {
+            break; /* Ran out of input data. */
+        }
+
+        /* Getting here means the frames have been loaded and filtered and we can generate the next output frame. */
+        if (pFramesOutS16 != NULL) {
+            MA_ASSERT(pResampler->inTimeInt == 0);
+            ma_linear_resampler_interpolate_frame_s16(pResampler, pFramesOutS16);
+
+            pFramesOutS16 += pResampler->config.channels;
+        }
+
+        framesProcessedOut += 1;
+
+        /* Advance time forward.
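+           Both the integer and fractional parts advance by sampleRateIn/sampleRateOut (in the reduced ratio) worth of input
+           frames per output frame; when the fractional part reaches sampleRateOut it carries into inTimeInt, which tells the
+           loader loop above how many fresh input frames to pull before the next interpolation.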
*/ + pResampler->inTimeInt += pResampler->inAdvanceInt; + pResampler->inTimeFrac += pResampler->inAdvanceFrac; + if (pResampler->inTimeFrac >= pResampler->config.sampleRateOut) { + pResampler->inTimeFrac -= pResampler->config.sampleRateOut; + pResampler->inTimeInt += 1; + } + } + + *pFrameCountIn = framesProcessedIn; + *pFrameCountOut = framesProcessedOut; + + return MA_SUCCESS; +} + +static ma_result ma_linear_resampler_process_pcm_frames_s16_upsample(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + const ma_int16* pFramesInS16; + /* */ ma_int16* pFramesOutS16; + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 framesProcessedIn; + ma_uint64 framesProcessedOut; + + MA_ASSERT(pResampler != NULL); + MA_ASSERT(pFrameCountIn != NULL); + MA_ASSERT(pFrameCountOut != NULL); + + pFramesInS16 = (const ma_int16*)pFramesIn; + pFramesOutS16 = ( ma_int16*)pFramesOut; + frameCountIn = *pFrameCountIn; + frameCountOut = *pFrameCountOut; + framesProcessedIn = 0; + framesProcessedOut = 0; + + for (;;) { + if (framesProcessedOut >= frameCountOut) { + break; + } + + /* Before interpolating we need to load the buffers. */ + while (pResampler->inTimeInt > 0 && frameCountIn > 0) { + ma_uint32 iChannel; + + if (pFramesInS16 != NULL) { + for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) { + pResampler->x0.s16[iChannel] = pResampler->x1.s16[iChannel]; + pResampler->x1.s16[iChannel] = pFramesInS16[iChannel]; + } + pFramesInS16 += pResampler->config.channels; + } else { + for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) { + pResampler->x0.s16[iChannel] = pResampler->x1.s16[iChannel]; + pResampler->x1.s16[iChannel] = 0; + } + } + + frameCountIn -= 1; + framesProcessedIn += 1; + pResampler->inTimeInt -= 1; + } + + if (pResampler->inTimeInt > 0) { + break; /* Ran out of input data. */ + } + + /* Getting here means the frames have been loaded and we can generate the next output frame. */ + if (pFramesOutS16 != NULL) { + MA_ASSERT(pResampler->inTimeInt == 0); + ma_linear_resampler_interpolate_frame_s16(pResampler, pFramesOutS16); + + /* Filter. */ + ma_lpf_process_pcm_frame_s16(&pResampler->lpf, pFramesOutS16, pFramesOutS16); + + pFramesOutS16 += pResampler->config.channels; + } + + framesProcessedOut += 1; + + /* Advance time forward. 
*/ + pResampler->inTimeInt += pResampler->inAdvanceInt; + pResampler->inTimeFrac += pResampler->inAdvanceFrac; + if (pResampler->inTimeFrac >= pResampler->config.sampleRateOut) { + pResampler->inTimeFrac -= pResampler->config.sampleRateOut; + pResampler->inTimeInt += 1; + } + } + + *pFrameCountIn = framesProcessedIn; + *pFrameCountOut = framesProcessedOut; + + return MA_SUCCESS; +} + +static ma_result ma_linear_resampler_process_pcm_frames_s16(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + MA_ASSERT(pResampler != NULL); + + if (pResampler->config.sampleRateIn > pResampler->config.sampleRateOut) { + return ma_linear_resampler_process_pcm_frames_s16_downsample(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } else { + return ma_linear_resampler_process_pcm_frames_s16_upsample(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } +} + + +static ma_result ma_linear_resampler_process_pcm_frames_f32_downsample(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + const float* pFramesInF32; + /* */ float* pFramesOutF32; + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 framesProcessedIn; + ma_uint64 framesProcessedOut; + + MA_ASSERT(pResampler != NULL); + MA_ASSERT(pFrameCountIn != NULL); + MA_ASSERT(pFrameCountOut != NULL); + + pFramesInF32 = (const float*)pFramesIn; + pFramesOutF32 = ( float*)pFramesOut; + frameCountIn = *pFrameCountIn; + frameCountOut = *pFrameCountOut; + framesProcessedIn = 0; + framesProcessedOut = 0; + + for (;;) { + if (framesProcessedOut >= frameCountOut) { + break; + } + + /* Before interpolating we need to load the buffers. When doing this we need to ensure we run every input sample through the filter. */ + while (pResampler->inTimeInt > 0 && frameCountIn > 0) { + ma_uint32 iChannel; + + if (pFramesInF32 != NULL) { + for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) { + pResampler->x0.f32[iChannel] = pResampler->x1.f32[iChannel]; + pResampler->x1.f32[iChannel] = pFramesInF32[iChannel]; + } + pFramesInF32 += pResampler->config.channels; + } else { + for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) { + pResampler->x0.f32[iChannel] = pResampler->x1.f32[iChannel]; + pResampler->x1.f32[iChannel] = 0; + } + } + + /* Filter. */ + ma_lpf_process_pcm_frame_f32(&pResampler->lpf, pResampler->x1.f32, pResampler->x1.f32); + + frameCountIn -= 1; + framesProcessedIn += 1; + pResampler->inTimeInt -= 1; + } + + if (pResampler->inTimeInt > 0) { + break; /* Ran out of input data. */ + } + + /* Getting here means the frames have been loaded and filtered and we can generate the next output frame. */ + if (pFramesOutF32 != NULL) { + MA_ASSERT(pResampler->inTimeInt == 0); + ma_linear_resampler_interpolate_frame_f32(pResampler, pFramesOutF32); + + pFramesOutF32 += pResampler->config.channels; + } + + framesProcessedOut += 1; + + /* Advance time forward. 
*/ + pResampler->inTimeInt += pResampler->inAdvanceInt; + pResampler->inTimeFrac += pResampler->inAdvanceFrac; + if (pResampler->inTimeFrac >= pResampler->config.sampleRateOut) { + pResampler->inTimeFrac -= pResampler->config.sampleRateOut; + pResampler->inTimeInt += 1; + } + } + + *pFrameCountIn = framesProcessedIn; + *pFrameCountOut = framesProcessedOut; + + return MA_SUCCESS; +} + +static ma_result ma_linear_resampler_process_pcm_frames_f32_upsample(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + const float* pFramesInF32; + /* */ float* pFramesOutF32; + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 framesProcessedIn; + ma_uint64 framesProcessedOut; + + MA_ASSERT(pResampler != NULL); + MA_ASSERT(pFrameCountIn != NULL); + MA_ASSERT(pFrameCountOut != NULL); + + pFramesInF32 = (const float*)pFramesIn; + pFramesOutF32 = ( float*)pFramesOut; + frameCountIn = *pFrameCountIn; + frameCountOut = *pFrameCountOut; + framesProcessedIn = 0; + framesProcessedOut = 0; + + for (;;) { + if (framesProcessedOut >= frameCountOut) { + break; + } + + /* Before interpolating we need to load the buffers. */ + while (pResampler->inTimeInt > 0 && frameCountIn > 0) { + ma_uint32 iChannel; + + if (pFramesInF32 != NULL) { + for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) { + pResampler->x0.f32[iChannel] = pResampler->x1.f32[iChannel]; + pResampler->x1.f32[iChannel] = pFramesInF32[iChannel]; + } + pFramesInF32 += pResampler->config.channels; + } else { + for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) { + pResampler->x0.f32[iChannel] = pResampler->x1.f32[iChannel]; + pResampler->x1.f32[iChannel] = 0; + } + } + + frameCountIn -= 1; + framesProcessedIn += 1; + pResampler->inTimeInt -= 1; + } + + if (pResampler->inTimeInt > 0) { + break; /* Ran out of input data. */ + } + + /* Getting here means the frames have been loaded and we can generate the next output frame. */ + if (pFramesOutF32 != NULL) { + MA_ASSERT(pResampler->inTimeInt == 0); + ma_linear_resampler_interpolate_frame_f32(pResampler, pFramesOutF32); + + /* Filter. */ + ma_lpf_process_pcm_frame_f32(&pResampler->lpf, pFramesOutF32, pFramesOutF32); + + pFramesOutF32 += pResampler->config.channels; + } + + framesProcessedOut += 1; + + /* Advance time forward. 
*/ + pResampler->inTimeInt += pResampler->inAdvanceInt; + pResampler->inTimeFrac += pResampler->inAdvanceFrac; + if (pResampler->inTimeFrac >= pResampler->config.sampleRateOut) { + pResampler->inTimeFrac -= pResampler->config.sampleRateOut; + pResampler->inTimeInt += 1; + } + } + + *pFrameCountIn = framesProcessedIn; + *pFrameCountOut = framesProcessedOut; + + return MA_SUCCESS; +} + +static ma_result ma_linear_resampler_process_pcm_frames_f32(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + MA_ASSERT(pResampler != NULL); + + if (pResampler->config.sampleRateIn > pResampler->config.sampleRateOut) { + return ma_linear_resampler_process_pcm_frames_f32_downsample(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } else { + return ma_linear_resampler_process_pcm_frames_f32_upsample(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } +} + + +MA_API ma_result ma_linear_resampler_process_pcm_frames(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + /* */ if (pResampler->config.format == ma_format_s16) { + return ma_linear_resampler_process_pcm_frames_s16(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } else if (pResampler->config.format == ma_format_f32) { + return ma_linear_resampler_process_pcm_frames_f32(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } else { + /* Should never get here. Getting here means the format is not supported and you didn't check the return value of ma_linear_resampler_init(). */ + MA_ASSERT(MA_FALSE); + return MA_INVALID_ARGS; + } +} + + +MA_API ma_result ma_linear_resampler_set_rate(ma_linear_resampler* pResampler, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut) +{ + return ma_linear_resampler_set_rate_internal(pResampler, sampleRateIn, sampleRateOut, /* isResamplerAlreadyInitialized = */ MA_TRUE); +} + +MA_API ma_result ma_linear_resampler_set_rate_ratio(ma_linear_resampler* pResampler, float ratioInOut) +{ + ma_uint32 n; + ma_uint32 d; + + d = 1000000; /* We use up to 6 decimal places. */ + n = (ma_uint32)(ratioInOut * d); + + if (n == 0) { + return MA_INVALID_ARGS; /* Ratio too small. */ + } + + MA_ASSERT(n != 0); + + return ma_linear_resampler_set_rate(pResampler, n, d); +} + + +MA_API ma_uint64 ma_linear_resampler_get_required_input_frame_count(ma_linear_resampler* pResampler, ma_uint64 outputFrameCount) +{ + ma_uint64 count; + + if (pResampler == NULL) { + return 0; + } + + if (outputFrameCount == 0) { + return 0; + } + + /* Any whole input frames are consumed before the first output frame is generated. */ + count = pResampler->inTimeInt; + outputFrameCount -= 1; + + /* The rest of the output frames can be calculated in constant time. */ + count += outputFrameCount * pResampler->inAdvanceInt; + count += (pResampler->inTimeFrac + (outputFrameCount * pResampler->inAdvanceFrac)) / pResampler->config.sampleRateOut; + + return count; +} + +MA_API ma_uint64 ma_linear_resampler_get_expected_output_frame_count(ma_linear_resampler* pResampler, ma_uint64 inputFrameCount) +{ + ma_uint64 outputFrameCount; + ma_uint64 inTimeInt; + ma_uint64 inTimeFrac; + + if (pResampler == NULL) { + return 0; + } + + /* TODO: Try making this run in constant time. 
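+       The loop below simply replays the same time-advance logic used during processing and counts how many output frames can
+       be produced before the remaining input runs out. The result is roughly inputFrameCount * sampleRateOut / sampleRateIn,
+       adjusted for the current fractional position, which is what a constant-time version would need to compute exactly,
+       mirroring ma_linear_resampler_get_required_input_frame_count() above.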
*/ + + outputFrameCount = 0; + inTimeInt = pResampler->inTimeInt; + inTimeFrac = pResampler->inTimeFrac; + + for (;;) { + while (inTimeInt > 0 && inputFrameCount > 0) { + inputFrameCount -= 1; + inTimeInt -= 1; + } + + if (inTimeInt > 0) { + break; + } + + outputFrameCount += 1; + + /* Advance time forward. */ + inTimeInt += pResampler->inAdvanceInt; + inTimeFrac += pResampler->inAdvanceFrac; + if (inTimeFrac >= pResampler->config.sampleRateOut) { + inTimeFrac -= pResampler->config.sampleRateOut; + inTimeInt += 1; + } + } + + return outputFrameCount; +} + +MA_API ma_uint64 ma_linear_resampler_get_input_latency(ma_linear_resampler* pResampler) +{ + if (pResampler == NULL) { + return 0; + } + + return 1 + ma_lpf_get_latency(&pResampler->lpf); +} + +MA_API ma_uint64 ma_linear_resampler_get_output_latency(ma_linear_resampler* pResampler) +{ + if (pResampler == NULL) { + return 0; + } + + return ma_linear_resampler_get_input_latency(pResampler) * pResampler->config.sampleRateOut / pResampler->config.sampleRateIn; +} + + +#if defined(ma_speex_resampler_h) +#define MA_HAS_SPEEX_RESAMPLER + +static ma_result ma_result_from_speex_err(int err) +{ + switch (err) + { + case RESAMPLER_ERR_SUCCESS: return MA_SUCCESS; + case RESAMPLER_ERR_ALLOC_FAILED: return MA_OUT_OF_MEMORY; + case RESAMPLER_ERR_BAD_STATE: return MA_ERROR; + case RESAMPLER_ERR_INVALID_ARG: return MA_INVALID_ARGS; + case RESAMPLER_ERR_PTR_OVERLAP: return MA_INVALID_ARGS; + case RESAMPLER_ERR_OVERFLOW: return MA_ERROR; + default: return MA_ERROR; + } +} +#endif /* ma_speex_resampler_h */ + +MA_API ma_resampler_config ma_resampler_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut, ma_resample_algorithm algorithm) +{ + ma_resampler_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRateIn = sampleRateIn; + config.sampleRateOut = sampleRateOut; + config.algorithm = algorithm; + + /* Linear. */ + config.linear.lpfOrder = ma_min(MA_DEFAULT_RESAMPLER_LPF_ORDER, MA_MAX_FILTER_ORDER); + config.linear.lpfNyquistFactor = 1; + + /* Speex. */ + config.speex.quality = 3; /* Cannot leave this as 0 as that is actually a valid value for Speex resampling quality. 
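+       Speex quality ranges from 0 (fastest) to 10 (highest quality).
+
+       For reference, a minimal caller-side sketch of setting up and using a resampler. The identifiers pInput, pOutput,
+       inputFrameCount and outputFrameCapacity below are hypothetical and stand in for the caller's own buffers and counts:
+
+           ma_resampler_config resamplerConfig = ma_resampler_config_init(ma_format_f32, 2, 48000, 44100, ma_resample_algorithm_linear);
+           ma_resampler resampler;
+           if (ma_resampler_init(&resamplerConfig, &resampler) == MA_SUCCESS) {
+               ma_uint64 frameCountIn = inputFrameCount;
+               ma_uint64 frameCountOut = outputFrameCapacity;
+               ma_resampler_process_pcm_frames(&resampler, pInput, &frameCountIn, pOutput, &frameCountOut);
+               ma_resampler_uninit(&resampler);
+           }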
*/ + + return config; +} + +MA_API ma_result ma_resampler_init(const ma_resampler_config* pConfig, ma_resampler* pResampler) +{ + ma_result result; + + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pResampler); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->format != ma_format_f32 && pConfig->format != ma_format_s16) { + return MA_INVALID_ARGS; + } + + pResampler->config = *pConfig; + + switch (pConfig->algorithm) + { + case ma_resample_algorithm_linear: + { + ma_linear_resampler_config linearConfig; + linearConfig = ma_linear_resampler_config_init(pConfig->format, pConfig->channels, pConfig->sampleRateIn, pConfig->sampleRateOut); + linearConfig.lpfOrder = pConfig->linear.lpfOrder; + linearConfig.lpfNyquistFactor = pConfig->linear.lpfNyquistFactor; + + result = ma_linear_resampler_init(&linearConfig, &pResampler->state.linear); + if (result != MA_SUCCESS) { + return result; + } + } break; + + case ma_resample_algorithm_speex: + { + #if defined(MA_HAS_SPEEX_RESAMPLER) + int speexErr; + pResampler->state.speex.pSpeexResamplerState = speex_resampler_init(pConfig->channels, pConfig->sampleRateIn, pConfig->sampleRateOut, pConfig->speex.quality, &speexErr); + if (pResampler->state.speex.pSpeexResamplerState == NULL) { + return ma_result_from_speex_err(speexErr); + } + #else + /* Speex resampler not available. */ + return MA_NO_BACKEND; + #endif + } break; + + default: return MA_INVALID_ARGS; + } + + return MA_SUCCESS; +} + +MA_API void ma_resampler_uninit(ma_resampler* pResampler) +{ + if (pResampler == NULL) { + return; + } + + if (pResampler->config.algorithm == ma_resample_algorithm_linear) { + ma_linear_resampler_uninit(&pResampler->state.linear); + } + +#if defined(MA_HAS_SPEEX_RESAMPLER) + if (pResampler->config.algorithm == ma_resample_algorithm_speex) { + speex_resampler_destroy((SpeexResamplerState*)pResampler->state.speex.pSpeexResamplerState); + } +#endif +} + +static ma_result ma_resampler_process_pcm_frames__read__linear(ma_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + return ma_linear_resampler_process_pcm_frames(&pResampler->state.linear, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); +} + +#if defined(MA_HAS_SPEEX_RESAMPLER) +static ma_result ma_resampler_process_pcm_frames__read__speex(ma_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + int speexErr; + ma_uint64 frameCountOut; + ma_uint64 frameCountIn; + ma_uint64 framesProcessedOut; + ma_uint64 framesProcessedIn; + unsigned int framesPerIteration = UINT_MAX; + + MA_ASSERT(pResampler != NULL); + MA_ASSERT(pFramesOut != NULL); + MA_ASSERT(pFrameCountOut != NULL); + MA_ASSERT(pFrameCountIn != NULL); + + /* + Reading from the Speex resampler requires a bit of dancing around for a few reasons. The first thing is that it's frame counts + are in unsigned int's whereas ours is in ma_uint64. We therefore need to run the conversion in a loop. The other, more complicated + problem, is that we need to keep track of the input time, similar to what we do with the linear resampler. The reason we need to + do this is for ma_resampler_get_required_input_frame_count() and ma_resampler_get_expected_output_frame_count(). 
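+    Each pass through the loop below therefore clamps both frame counts to what fits in an unsigned int, hands those to Speex,
+    and then adds whatever Speex reports as consumed and produced back onto the 64-bit running totals.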
+ */ + frameCountOut = *pFrameCountOut; + frameCountIn = *pFrameCountIn; + framesProcessedOut = 0; + framesProcessedIn = 0; + + while (framesProcessedOut < frameCountOut && framesProcessedIn < frameCountIn) { + unsigned int frameCountInThisIteration; + unsigned int frameCountOutThisIteration; + const void* pFramesInThisIteration; + void* pFramesOutThisIteration; + + frameCountInThisIteration = framesPerIteration; + if ((ma_uint64)frameCountInThisIteration > (frameCountIn - framesProcessedIn)) { + frameCountInThisIteration = (unsigned int)(frameCountIn - framesProcessedIn); + } + + frameCountOutThisIteration = framesPerIteration; + if ((ma_uint64)frameCountOutThisIteration > (frameCountOut - framesProcessedOut)) { + frameCountOutThisIteration = (unsigned int)(frameCountOut - framesProcessedOut); + } + + pFramesInThisIteration = ma_offset_ptr(pFramesIn, framesProcessedIn * ma_get_bytes_per_frame(pResampler->config.format, pResampler->config.channels)); + pFramesOutThisIteration = ma_offset_ptr(pFramesOut, framesProcessedOut * ma_get_bytes_per_frame(pResampler->config.format, pResampler->config.channels)); + + if (pResampler->config.format == ma_format_f32) { + speexErr = speex_resampler_process_interleaved_float((SpeexResamplerState*)pResampler->state.speex.pSpeexResamplerState, (const float*)pFramesInThisIteration, &frameCountInThisIteration, (float*)pFramesOutThisIteration, &frameCountOutThisIteration); + } else if (pResampler->config.format == ma_format_s16) { + speexErr = speex_resampler_process_interleaved_int((SpeexResamplerState*)pResampler->state.speex.pSpeexResamplerState, (const spx_int16_t*)pFramesInThisIteration, &frameCountInThisIteration, (spx_int16_t*)pFramesOutThisIteration, &frameCountOutThisIteration); + } else { + /* Format not supported. Should never get here. */ + MA_ASSERT(MA_FALSE); + return MA_INVALID_OPERATION; + } + + if (speexErr != RESAMPLER_ERR_SUCCESS) { + return ma_result_from_speex_err(speexErr); + } + + framesProcessedIn += frameCountInThisIteration; + framesProcessedOut += frameCountOutThisIteration; + } + + *pFrameCountOut = framesProcessedOut; + *pFrameCountIn = framesProcessedIn; + + return MA_SUCCESS; +} +#endif + +static ma_result ma_resampler_process_pcm_frames__read(ma_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + MA_ASSERT(pResampler != NULL); + MA_ASSERT(pFramesOut != NULL); + + /* pFramesOut is not NULL, which means we must have a capacity. */ + if (pFrameCountOut == NULL) { + return MA_INVALID_ARGS; + } + + /* It doesn't make sense to not have any input frames to process. */ + if (pFrameCountIn == NULL || pFramesIn == NULL) { + return MA_INVALID_ARGS; + } + + switch (pResampler->config.algorithm) + { + case ma_resample_algorithm_linear: + { + return ma_resampler_process_pcm_frames__read__linear(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } + + case ma_resample_algorithm_speex: + { + #if defined(MA_HAS_SPEEX_RESAMPLER) + return ma_resampler_process_pcm_frames__read__speex(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + #else + break; + #endif + } + + default: break; + } + + /* Should never get here. */ + MA_ASSERT(MA_FALSE); + return MA_INVALID_ARGS; +} + + +static ma_result ma_resampler_process_pcm_frames__seek__linear(ma_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, ma_uint64* pFrameCountOut) +{ + MA_ASSERT(pResampler != NULL); + + /* Seeking is supported natively by the linear resampler. 
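+       Passing NULL for the output buffer makes ma_linear_resampler_process_pcm_frames() consume input and advance its internal
+       timing without writing any output frames, which is exactly what a seek needs.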
*/ + return ma_linear_resampler_process_pcm_frames(&pResampler->state.linear, pFramesIn, pFrameCountIn, NULL, pFrameCountOut); +} + +#if defined(MA_HAS_SPEEX_RESAMPLER) +static ma_result ma_resampler_process_pcm_frames__seek__speex(ma_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, ma_uint64* pFrameCountOut) +{ + /* The generic seek method is implemented in on top of ma_resampler_process_pcm_frames__read() by just processing into a dummy buffer. */ + float devnull[8192]; + ma_uint64 totalOutputFramesToProcess; + ma_uint64 totalOutputFramesProcessed; + ma_uint64 totalInputFramesProcessed; + ma_uint32 bpf; + ma_result result; + + MA_ASSERT(pResampler != NULL); + + totalOutputFramesProcessed = 0; + totalInputFramesProcessed = 0; + bpf = ma_get_bytes_per_frame(pResampler->config.format, pResampler->config.channels); + + if (pFrameCountOut != NULL) { + /* Seek by output frames. */ + totalOutputFramesToProcess = *pFrameCountOut; + } else { + /* Seek by input frames. */ + MA_ASSERT(pFrameCountIn != NULL); + totalOutputFramesToProcess = ma_resampler_get_expected_output_frame_count(pResampler, *pFrameCountIn); + } + + if (pFramesIn != NULL) { + /* Process input data. */ + MA_ASSERT(pFrameCountIn != NULL); + while (totalOutputFramesProcessed < totalOutputFramesToProcess && totalInputFramesProcessed < *pFrameCountIn) { + ma_uint64 inputFramesToProcessThisIteration = (*pFrameCountIn - totalInputFramesProcessed); + ma_uint64 outputFramesToProcessThisIteration = (totalOutputFramesToProcess - totalOutputFramesProcessed); + if (outputFramesToProcessThisIteration > sizeof(devnull) / bpf) { + outputFramesToProcessThisIteration = sizeof(devnull) / bpf; + } + + result = ma_resampler_process_pcm_frames__read(pResampler, ma_offset_ptr(pFramesIn, totalInputFramesProcessed*bpf), &inputFramesToProcessThisIteration, ma_offset_ptr(devnull, totalOutputFramesProcessed*bpf), &outputFramesToProcessThisIteration); + if (result != MA_SUCCESS) { + return result; + } + + totalOutputFramesProcessed += outputFramesToProcessThisIteration; + totalInputFramesProcessed += inputFramesToProcessThisIteration; + } + } else { + /* Don't process input data - just update timing and filter state as if zeroes were passed in. 
*/ + while (totalOutputFramesProcessed < totalOutputFramesToProcess) { + ma_uint64 inputFramesToProcessThisIteration = 16384; + ma_uint64 outputFramesToProcessThisIteration = (totalOutputFramesToProcess - totalOutputFramesProcessed); + if (outputFramesToProcessThisIteration > sizeof(devnull) / bpf) { + outputFramesToProcessThisIteration = sizeof(devnull) / bpf; + } + + result = ma_resampler_process_pcm_frames__read(pResampler, NULL, &inputFramesToProcessThisIteration, ma_offset_ptr(devnull, totalOutputFramesProcessed*bpf), &outputFramesToProcessThisIteration); + if (result != MA_SUCCESS) { + return result; + } + + totalOutputFramesProcessed += outputFramesToProcessThisIteration; + totalInputFramesProcessed += inputFramesToProcessThisIteration; + } + } + + + if (pFrameCountIn != NULL) { + *pFrameCountIn = totalInputFramesProcessed; + } + if (pFrameCountOut != NULL) { + *pFrameCountOut = totalOutputFramesProcessed; + } + + return MA_SUCCESS; +} +#endif + +static ma_result ma_resampler_process_pcm_frames__seek(ma_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, ma_uint64* pFrameCountOut) +{ + MA_ASSERT(pResampler != NULL); + + switch (pResampler->config.algorithm) + { + case ma_resample_algorithm_linear: + { + return ma_resampler_process_pcm_frames__seek__linear(pResampler, pFramesIn, pFrameCountIn, pFrameCountOut); + } break; + + case ma_resample_algorithm_speex: + { + #if defined(MA_HAS_SPEEX_RESAMPLER) + return ma_resampler_process_pcm_frames__seek__speex(pResampler, pFramesIn, pFrameCountIn, pFrameCountOut); + #else + break; + #endif + }; + + default: break; + } + + /* Should never hit this. */ + MA_ASSERT(MA_FALSE); + return MA_INVALID_ARGS; +} + + +MA_API ma_result ma_resampler_process_pcm_frames(ma_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + if (pFrameCountOut == NULL && pFrameCountIn == NULL) { + return MA_INVALID_ARGS; + } + + if (pFramesOut != NULL) { + /* Reading. */ + return ma_resampler_process_pcm_frames__read(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } else { + /* Seeking. */ + return ma_resampler_process_pcm_frames__seek(pResampler, pFramesIn, pFrameCountIn, pFrameCountOut); + } +} + +MA_API ma_result ma_resampler_set_rate(ma_resampler* pResampler, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut) +{ + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + if (sampleRateIn == 0 || sampleRateOut == 0) { + return MA_INVALID_ARGS; + } + + pResampler->config.sampleRateIn = sampleRateIn; + pResampler->config.sampleRateOut = sampleRateOut; + + switch (pResampler->config.algorithm) + { + case ma_resample_algorithm_linear: + { + return ma_linear_resampler_set_rate(&pResampler->state.linear, sampleRateIn, sampleRateOut); + } break; + + case ma_resample_algorithm_speex: + { + #if defined(MA_HAS_SPEEX_RESAMPLER) + return ma_result_from_speex_err(speex_resampler_set_rate((SpeexResamplerState*)pResampler->state.speex.pSpeexResamplerState, sampleRateIn, sampleRateOut)); + #else + break; + #endif + }; + + default: break; + } + + /* Should never get here. 
*/ + MA_ASSERT(MA_FALSE); + return MA_INVALID_OPERATION; +} + +MA_API ma_result ma_resampler_set_rate_ratio(ma_resampler* pResampler, float ratio) +{ + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + if (pResampler->config.algorithm == ma_resample_algorithm_linear) { + return ma_linear_resampler_set_rate_ratio(&pResampler->state.linear, ratio); + } else { + /* Getting here means the backend does not have native support for setting the rate as a ratio so we just do it generically. */ + ma_uint32 n; + ma_uint32 d; + + d = 1000000; /* We use up to 6 decimal places. */ + n = (ma_uint32)(ratio * d); + + if (n == 0) { + return MA_INVALID_ARGS; /* Ratio too small. */ + } + + MA_ASSERT(n != 0); + + return ma_resampler_set_rate(pResampler, n, d); + } +} + +MA_API ma_uint64 ma_resampler_get_required_input_frame_count(ma_resampler* pResampler, ma_uint64 outputFrameCount) +{ + if (pResampler == NULL) { + return 0; + } + + if (outputFrameCount == 0) { + return 0; + } + + switch (pResampler->config.algorithm) + { + case ma_resample_algorithm_linear: + { + return ma_linear_resampler_get_required_input_frame_count(&pResampler->state.linear, outputFrameCount); + } + + case ma_resample_algorithm_speex: + { + #if defined(MA_HAS_SPEEX_RESAMPLER) + ma_uint64 count; + int speexErr = ma_speex_resampler_get_required_input_frame_count((SpeexResamplerState*)pResampler->state.speex.pSpeexResamplerState, outputFrameCount, &count); + if (speexErr != RESAMPLER_ERR_SUCCESS) { + return 0; + } + + return count; + #else + break; + #endif + } + + default: break; + } + + /* Should never get here. */ + MA_ASSERT(MA_FALSE); + return 0; +} + +MA_API ma_uint64 ma_resampler_get_expected_output_frame_count(ma_resampler* pResampler, ma_uint64 inputFrameCount) +{ + if (pResampler == NULL) { + return 0; /* Invalid args. */ + } + + if (inputFrameCount == 0) { + return 0; + } + + switch (pResampler->config.algorithm) + { + case ma_resample_algorithm_linear: + { + return ma_linear_resampler_get_expected_output_frame_count(&pResampler->state.linear, inputFrameCount); + } + + case ma_resample_algorithm_speex: + { + #if defined(MA_HAS_SPEEX_RESAMPLER) + ma_uint64 count; + int speexErr = ma_speex_resampler_get_expected_output_frame_count((SpeexResamplerState*)pResampler->state.speex.pSpeexResamplerState, inputFrameCount, &count); + if (speexErr != RESAMPLER_ERR_SUCCESS) { + return 0; + } + + return count; + #else + break; + #endif + } + + default: break; + } + + /* Should never get here. */ + MA_ASSERT(MA_FALSE); + return 0; +} + +MA_API ma_uint64 ma_resampler_get_input_latency(ma_resampler* pResampler) +{ + if (pResampler == NULL) { + return 0; + } + + switch (pResampler->config.algorithm) + { + case ma_resample_algorithm_linear: + { + return ma_linear_resampler_get_input_latency(&pResampler->state.linear); + } + + case ma_resample_algorithm_speex: + { + #if defined(MA_HAS_SPEEX_RESAMPLER) + return (ma_uint64)ma_speex_resampler_get_input_latency((SpeexResamplerState*)pResampler->state.speex.pSpeexResamplerState); + #else + break; + #endif + } + + default: break; + } + + /* Should never get here. 
*/ + MA_ASSERT(MA_FALSE); + return 0; +} + +MA_API ma_uint64 ma_resampler_get_output_latency(ma_resampler* pResampler) +{ + if (pResampler == NULL) { + return 0; + } + + switch (pResampler->config.algorithm) + { + case ma_resample_algorithm_linear: + { + return ma_linear_resampler_get_output_latency(&pResampler->state.linear); + } + + case ma_resample_algorithm_speex: + { + #if defined(MA_HAS_SPEEX_RESAMPLER) + return (ma_uint64)ma_speex_resampler_get_output_latency((SpeexResamplerState*)pResampler->state.speex.pSpeexResamplerState); + #else + break; + #endif + } + + default: break; + } + + /* Should never get here. */ + MA_ASSERT(MA_FALSE); + return 0; +} + +/************************************************************************************************************************************************************** + +Channel Conversion + +**************************************************************************************************************************************************************/ +#ifndef MA_CHANNEL_CONVERTER_FIXED_POINT_SHIFT +#define MA_CHANNEL_CONVERTER_FIXED_POINT_SHIFT 12 +#endif + +#define MA_PLANE_LEFT 0 +#define MA_PLANE_RIGHT 1 +#define MA_PLANE_FRONT 2 +#define MA_PLANE_BACK 3 +#define MA_PLANE_BOTTOM 4 +#define MA_PLANE_TOP 5 + +static float g_maChannelPlaneRatios[MA_CHANNEL_POSITION_COUNT][6] = { + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_NONE */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_MONO */ + { 0.5f, 0.0f, 0.5f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_FRONT_LEFT */ + { 0.0f, 0.5f, 0.5f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_FRONT_RIGHT */ + { 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_FRONT_CENTER */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_LFE */ + { 0.5f, 0.0f, 0.0f, 0.5f, 0.0f, 0.0f}, /* MA_CHANNEL_BACK_LEFT */ + { 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.0f}, /* MA_CHANNEL_BACK_RIGHT */ + { 0.25f, 0.0f, 0.75f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_FRONT_LEFT_CENTER */ + { 0.0f, 0.25f, 0.75f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_FRONT_RIGHT_CENTER */ + { 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f}, /* MA_CHANNEL_BACK_CENTER */ + { 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_SIDE_LEFT */ + { 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_SIDE_RIGHT */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f}, /* MA_CHANNEL_TOP_CENTER */ + { 0.33f, 0.0f, 0.33f, 0.0f, 0.0f, 0.34f}, /* MA_CHANNEL_TOP_FRONT_LEFT */ + { 0.0f, 0.0f, 0.5f, 0.0f, 0.0f, 0.5f}, /* MA_CHANNEL_TOP_FRONT_CENTER */ + { 0.0f, 0.33f, 0.33f, 0.0f, 0.0f, 0.34f}, /* MA_CHANNEL_TOP_FRONT_RIGHT */ + { 0.33f, 0.0f, 0.0f, 0.33f, 0.0f, 0.34f}, /* MA_CHANNEL_TOP_BACK_LEFT */ + { 0.0f, 0.0f, 0.0f, 0.5f, 0.0f, 0.5f}, /* MA_CHANNEL_TOP_BACK_CENTER */ + { 0.0f, 0.33f, 0.0f, 0.33f, 0.0f, 0.34f}, /* MA_CHANNEL_TOP_BACK_RIGHT */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_0 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_1 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_2 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_3 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_4 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_5 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_6 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_7 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_8 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_9 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_10 */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_11 */ + { 0.0f, 0.0f, 
0.0f,  0.0f,  0.0f,  0.0f},  /* MA_CHANNEL_AUX_12 */
+    { 0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f},  /* MA_CHANNEL_AUX_13 */
+    { 0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f},  /* MA_CHANNEL_AUX_14 */
+    { 0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f},  /* MA_CHANNEL_AUX_15 */
+    { 0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f},  /* MA_CHANNEL_AUX_16 */
+    { 0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f},  /* MA_CHANNEL_AUX_17 */
+    { 0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f},  /* MA_CHANNEL_AUX_18 */
+    { 0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f},  /* MA_CHANNEL_AUX_19 */
+    { 0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f},  /* MA_CHANNEL_AUX_20 */
+    { 0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f},  /* MA_CHANNEL_AUX_21 */
+    { 0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f},  /* MA_CHANNEL_AUX_22 */
+    { 0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f},  /* MA_CHANNEL_AUX_23 */
+    { 0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f},  /* MA_CHANNEL_AUX_24 */
+    { 0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f},  /* MA_CHANNEL_AUX_25 */
+    { 0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f},  /* MA_CHANNEL_AUX_26 */
+    { 0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f},  /* MA_CHANNEL_AUX_27 */
+    { 0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f},  /* MA_CHANNEL_AUX_28 */
+    { 0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f},  /* MA_CHANNEL_AUX_29 */
+    { 0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f},  /* MA_CHANNEL_AUX_30 */
+    { 0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f},  /* MA_CHANNEL_AUX_31 */
+};
+
+static float ma_calculate_channel_position_rectangular_weight(ma_channel channelPositionA, ma_channel channelPositionB)
+{
+    /*
+    Imagine the following simplified example: You have a single input speaker, the front/left speaker, which you want to convert to
+    the following output configuration:
+
+        - front/left
+        - side/left
+        - back/left
+
+    The front/left output is easy - it's the same speaker position, so it receives the full contribution of the front/left input. The amount
+    of contribution to apply to the side/left and back/left speakers, however, is a bit more complicated.
+
+    Imagine the front/left speaker as emitting audio from two planes - the front plane and the left plane. You can think of the front/left
+    speaker emitting half of its total volume from the front, and the other half from the left. Since part of its volume is being emitted
+    from the left side, and the side/left and back/left channels also emit audio from the left plane, one would expect that they would
+    receive some amount of contribution from the front/left speaker. The amount of contribution depends on how many planes are shared between
+    the two speakers. Note that in the examples below I've added a top/front/left speaker just to show how the math works across 3 spatial
+    dimensions.
+
+    The first thing to do is figure out how each speaker's volume is spread over each plane:
+        - front/left:     2 planes (front and left)      = 1/2 = half its total volume on each plane
+        - side/left:      1 plane  (left only)           = 1/1 = its entire volume from the left plane
+        - back/left:      2 planes (back and left)       = 1/2 = half its total volume on each plane
+        - top/front/left: 3 planes (top, front and left) = 1/3 = one third of its total volume on each plane
+
+    The amount of volume each channel contributes to each of its planes is what controls how much it is willing to give to, and take from,
+    other channels on the same plane. The volume that is willing to be given by one channel is multiplied by the volume that is willing to be
+    taken by the other to produce the final contribution.
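+
+    As a small worked illustration (using the plane ratios from the table above; the result is just the two ratios multiplied per plane
+    and summed): front/left is {0.5 left, 0.5 front} and side/left is {1.0 left}, so their contribution is
+
+        0.5*1.0 (left) + 0.5*0.0 (front) = 0.5
+
+    while front/left against back/left ({0.5 left, 0.5 back}) shares only the left plane and comes out to 0.5*0.5 = 0.25.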
+    */
+
+    /* Contribution = Sum(Volume to Give * Volume to Take) */
+    float contribution =
+        g_maChannelPlaneRatios[channelPositionA][0] * g_maChannelPlaneRatios[channelPositionB][0] +
+        g_maChannelPlaneRatios[channelPositionA][1] * g_maChannelPlaneRatios[channelPositionB][1] +
+        g_maChannelPlaneRatios[channelPositionA][2] * g_maChannelPlaneRatios[channelPositionB][2] +
+        g_maChannelPlaneRatios[channelPositionA][3] * g_maChannelPlaneRatios[channelPositionB][3] +
+        g_maChannelPlaneRatios[channelPositionA][4] * g_maChannelPlaneRatios[channelPositionB][4] +
+        g_maChannelPlaneRatios[channelPositionA][5] * g_maChannelPlaneRatios[channelPositionB][5];
+
+    return contribution;
+}
+
+MA_API ma_channel_converter_config ma_channel_converter_config_init(ma_format format, ma_uint32 channelsIn, const ma_channel channelMapIn[MA_MAX_CHANNELS], ma_uint32 channelsOut, const ma_channel channelMapOut[MA_MAX_CHANNELS], ma_channel_mix_mode mixingMode)
+{
+    ma_channel_converter_config config;
+    MA_ZERO_OBJECT(&config);
+    config.format      = format;
+    config.channelsIn  = channelsIn;
+    config.channelsOut = channelsOut;
+    ma_channel_map_copy(config.channelMapIn,  channelMapIn,  channelsIn);
+    ma_channel_map_copy(config.channelMapOut, channelMapOut, channelsOut);
+    config.mixingMode  = mixingMode;
+
+    return config;
+}
+
+static ma_int32 ma_channel_converter_float_to_fp(float x)
+{
+    return (ma_int32)(x * (1 << MA_CHANNEL_CONVERTER_FIXED_POINT_SHIFT));
+}
+
+static ma_bool32 ma_is_spatial_channel_position(ma_channel channelPosition)
+{
+    int i;
+
+    if (channelPosition == MA_CHANNEL_NONE || channelPosition == MA_CHANNEL_MONO || channelPosition == MA_CHANNEL_LFE) {
+        return MA_FALSE;
+    }
+
+    for (i = 0; i < 6; ++i) {
+        if (g_maChannelPlaneRatios[channelPosition][i] != 0) {
+            return MA_TRUE;
+        }
+    }
+
+    return MA_FALSE;
+}
+
+MA_API ma_result ma_channel_converter_init(const ma_channel_converter_config* pConfig, ma_channel_converter* pConverter)
+{
+    ma_uint32 iChannelIn;
+    ma_uint32 iChannelOut;
+
+    if (pConverter == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    MA_ZERO_OBJECT(pConverter);
+
+    if (pConfig == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (!ma_channel_map_valid(pConfig->channelsIn, pConfig->channelMapIn)) {
+        return MA_INVALID_ARGS;    /* Invalid input channel map. */
+    }
+    if (!ma_channel_map_valid(pConfig->channelsOut, pConfig->channelMapOut)) {
+        return MA_INVALID_ARGS;    /* Invalid output channel map. */
+    }
+
+    if (pConfig->format != ma_format_s16 && pConfig->format != ma_format_f32) {
+        return MA_INVALID_ARGS;    /* Invalid format. */
+    }
+
+    pConverter->format      = pConfig->format;
+    pConverter->channelsIn  = pConfig->channelsIn;
+    pConverter->channelsOut = pConfig->channelsOut;
+    ma_channel_map_copy(pConverter->channelMapIn,  pConfig->channelMapIn,  pConfig->channelsIn);
+    ma_channel_map_copy(pConverter->channelMapOut, pConfig->channelMapOut, pConfig->channelsOut);
+    pConverter->mixingMode  = pConfig->mixingMode;
+
+    for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; iChannelIn += 1) {
+        for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) {
+            if (pConverter->format == ma_format_s16) {
+                pConverter->weights.s16[iChannelIn][iChannelOut] = ma_channel_converter_float_to_fp(pConfig->weights[iChannelIn][iChannelOut]);
+            } else {
+                pConverter->weights.f32[iChannelIn][iChannelOut] = pConfig->weights[iChannelIn][iChannelOut];
+            }
+        }
+    }
+
+
+    /* If the input and output channels and channel maps are the same we should use a passthrough. */
+    if (pConverter->channelsIn == pConverter->channelsOut) {
+        if (ma_channel_map_equal(pConverter->channelsIn, pConverter->channelMapIn, pConverter->channelMapOut)) {
+            pConverter->isPassthrough = MA_TRUE;
+        }
+        if (ma_channel_map_blank(pConverter->channelsIn, pConverter->channelMapIn) || ma_channel_map_blank(pConverter->channelsOut, pConverter->channelMapOut)) {
+            pConverter->isPassthrough = MA_TRUE;
+        }
+    }
+
+
+    /*
+    We can use a simple case for expanding the mono channel. This will be used when expanding a mono input into any output so long
+    as no LFE is present in the output.
+    */
+    if (!pConverter->isPassthrough) {
+        if (pConverter->channelsIn == 1 && pConverter->channelMapIn[0] == MA_CHANNEL_MONO) {
+            /* Optimal case if no LFE is in the output channel map.
*/ + pConverter->isSimpleMonoExpansion = MA_TRUE; + if (ma_channel_map_contains_channel_position(pConverter->channelsOut, pConverter->channelMapOut, MA_CHANNEL_LFE)) { + pConverter->isSimpleMonoExpansion = MA_FALSE; + } + } + } + + /* Another optimized case is stereo to mono. */ + if (!pConverter->isPassthrough) { + if (pConverter->channelsOut == 1 && pConverter->channelMapOut[0] == MA_CHANNEL_MONO && pConverter->channelsIn == 2) { + /* Optimal case if no LFE is in the input channel map. */ + pConverter->isStereoToMono = MA_TRUE; + if (ma_channel_map_contains_channel_position(pConverter->channelsIn, pConverter->channelMapIn, MA_CHANNEL_LFE)) { + pConverter->isStereoToMono = MA_FALSE; + } + } + } + + + /* + Here is where we do a bit of pre-processing to know how each channel should be combined to make up the output. Rules: + + 1) If it's a passthrough, do nothing - it's just a simple memcpy(). + 2) If the channel counts are the same and every channel position in the input map is present in the output map, use a + simple shuffle. An example might be different 5.1 channel layouts. + 3) Otherwise channels are blended based on spatial locality. + */ + if (!pConverter->isPassthrough) { + if (pConverter->channelsIn == pConverter->channelsOut) { + ma_bool32 areAllChannelPositionsPresent = MA_TRUE; + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + ma_bool32 isInputChannelPositionInOutput = MA_FALSE; + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) { + if (pConverter->channelMapIn[iChannelIn] == pConverter->channelMapOut[iChannelOut]) { + isInputChannelPositionInOutput = MA_TRUE; + break; + } + } + + if (!isInputChannelPositionInOutput) { + areAllChannelPositionsPresent = MA_FALSE; + break; + } + } + + if (areAllChannelPositionsPresent) { + pConverter->isSimpleShuffle = MA_TRUE; + + /* + All the router will be doing is rearranging channels which means all we need to do is use a shuffling table which is just + a mapping between the index of the input channel to the index of the output channel. + */ + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) { + if (pConverter->channelMapIn[iChannelIn] == pConverter->channelMapOut[iChannelOut]) { + pConverter->shuffleTable[iChannelIn] = (ma_uint8)iChannelOut; + break; + } + } + } + } + } + } + + + /* + Here is where weights are calculated. Note that we calculate the weights at all times, even when using a passthrough and simple + shuffling. We use different algorithms for calculating weights depending on our mixing mode. + + In simple mode we don't do any blending (except for converting between mono, which is done in a later step). Instead we just + map 1:1 matching channels. In this mode, if no channels in the input channel map correspond to anything in the output channel + map, nothing will be heard! + */ + + /* In all cases we need to make sure all channels that are present in both channel maps have a 1:1 mapping. 
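+
+       For illustration (the numbers follow directly from the definitions above): a unity weight is stored as 1.0f for f32, while for
+       s16 it is stored in fixed point as (1 << MA_CHANNEL_CONVERTER_FIXED_POINT_SHIFT) = 4096, so the accumulation loop can use integer
+       multiplies and shift the product back down by the same amount.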
*/ + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + ma_channel channelPosIn = pConverter->channelMapIn[iChannelIn]; + + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) { + ma_channel channelPosOut = pConverter->channelMapOut[iChannelOut]; + + if (channelPosIn == channelPosOut) { + if (pConverter->format == ma_format_s16) { + pConverter->weights.s16[iChannelIn][iChannelOut] = (1 << MA_CHANNEL_CONVERTER_FIXED_POINT_SHIFT); + } else { + pConverter->weights.f32[iChannelIn][iChannelOut] = 1; + } + } + } + } + + /* + The mono channel is accumulated on all other channels, except LFE. Make sure in this loop we exclude output mono channels since + they were handled in the pass above. + */ + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + ma_channel channelPosIn = pConverter->channelMapIn[iChannelIn]; + + if (channelPosIn == MA_CHANNEL_MONO) { + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) { + ma_channel channelPosOut = pConverter->channelMapOut[iChannelOut]; + + if (channelPosOut != MA_CHANNEL_NONE && channelPosOut != MA_CHANNEL_MONO && channelPosOut != MA_CHANNEL_LFE) { + if (pConverter->format == ma_format_s16) { + pConverter->weights.s16[iChannelIn][iChannelOut] = (1 << MA_CHANNEL_CONVERTER_FIXED_POINT_SHIFT); + } else { + pConverter->weights.f32[iChannelIn][iChannelOut] = 1; + } + } + } + } + } + + /* The output mono channel is the average of all non-none, non-mono and non-lfe input channels. */ + { + ma_uint32 len = 0; + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + ma_channel channelPosIn = pConverter->channelMapIn[iChannelIn]; + + if (channelPosIn != MA_CHANNEL_NONE && channelPosIn != MA_CHANNEL_MONO && channelPosIn != MA_CHANNEL_LFE) { + len += 1; + } + } + + if (len > 0) { + float monoWeight = 1.0f / len; + + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) { + ma_channel channelPosOut = pConverter->channelMapOut[iChannelOut]; + + if (channelPosOut == MA_CHANNEL_MONO) { + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + ma_channel channelPosIn = pConverter->channelMapIn[iChannelIn]; + + if (channelPosIn != MA_CHANNEL_NONE && channelPosIn != MA_CHANNEL_MONO && channelPosIn != MA_CHANNEL_LFE) { + if (pConverter->format == ma_format_s16) { + pConverter->weights.s16[iChannelIn][iChannelOut] = ma_channel_converter_float_to_fp(monoWeight); + } else { + pConverter->weights.f32[iChannelIn][iChannelOut] = monoWeight; + } + } + } + } + } + } + } + + + /* Input and output channels that are not present on the other side need to be blended in based on spatial locality. */ + switch (pConverter->mixingMode) + { + case ma_channel_mix_mode_rectangular: + { + /* Unmapped input channels. 
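+
+            For illustration (weights taken from the plane-ratio table above): with a 5.1-style input and a stereo output, an input
+            back/left channel has no matching output position, so it gets blended into the spatial output channels below - e.g. back/left
+            into front/left shares only the left plane, giving a rectangular weight of 0.5*0.5 = 0.25.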
*/ + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + ma_channel channelPosIn = pConverter->channelMapIn[iChannelIn]; + + if (ma_is_spatial_channel_position(channelPosIn)) { + if (!ma_channel_map_contains_channel_position(pConverter->channelsOut, pConverter->channelMapOut, channelPosIn)) { + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) { + ma_channel channelPosOut = pConverter->channelMapOut[iChannelOut]; + + if (ma_is_spatial_channel_position(channelPosOut)) { + float weight = 0; + if (pConverter->mixingMode == ma_channel_mix_mode_rectangular) { + weight = ma_calculate_channel_position_rectangular_weight(channelPosIn, channelPosOut); + } + + /* Only apply the weight if we haven't already got some contribution from the respective channels. */ + if (pConverter->format == ma_format_s16) { + if (pConverter->weights.s16[iChannelIn][iChannelOut] == 0) { + pConverter->weights.s16[iChannelIn][iChannelOut] = ma_channel_converter_float_to_fp(weight); + } + } else { + if (pConverter->weights.f32[iChannelIn][iChannelOut] == 0) { + pConverter->weights.f32[iChannelIn][iChannelOut] = weight; + } + } + } + } + } + } + } + + /* Unmapped output channels. */ + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) { + ma_channel channelPosOut = pConverter->channelMapOut[iChannelOut]; + + if (ma_is_spatial_channel_position(channelPosOut)) { + if (!ma_channel_map_contains_channel_position(pConverter->channelsIn, pConverter->channelMapIn, channelPosOut)) { + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + ma_channel channelPosIn = pConverter->channelMapIn[iChannelIn]; + + if (ma_is_spatial_channel_position(channelPosIn)) { + float weight = 0; + if (pConverter->mixingMode == ma_channel_mix_mode_rectangular) { + weight = ma_calculate_channel_position_rectangular_weight(channelPosIn, channelPosOut); + } + + /* Only apply the weight if we haven't already got some contribution from the respective channels. */ + if (pConverter->format == ma_format_s16) { + if (pConverter->weights.s16[iChannelIn][iChannelOut] == 0) { + pConverter->weights.s16[iChannelIn][iChannelOut] = ma_channel_converter_float_to_fp(weight); + } + } else { + if (pConverter->weights.f32[iChannelIn][iChannelOut] == 0) { + pConverter->weights.f32[iChannelIn][iChannelOut] = weight; + } + } + } + } + } + } + } + } break; + + case ma_channel_mix_mode_custom_weights: + case ma_channel_mix_mode_simple: + default: + { + /* Fallthrough. 
*/
+        } break;
+    }
+
+
+    return MA_SUCCESS;
+}
+
+MA_API void ma_channel_converter_uninit(ma_channel_converter* pConverter)
+{
+    if (pConverter == NULL) {
+        return;
+    }
+}
+
+static ma_result ma_channel_converter_process_pcm_frames__passthrough(ma_channel_converter* pConverter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount)
+{
+    MA_ASSERT(pConverter != NULL);
+    MA_ASSERT(pFramesOut != NULL);
+    MA_ASSERT(pFramesIn  != NULL);
+
+    ma_copy_memory_64(pFramesOut, pFramesIn, frameCount * ma_get_bytes_per_frame(pConverter->format, pConverter->channelsOut));
+    return MA_SUCCESS;
+}
+
+static ma_result ma_channel_converter_process_pcm_frames__simple_shuffle(ma_channel_converter* pConverter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount)
+{
+    ma_uint64 iFrame;
+    ma_uint32 iChannelIn;
+
+    MA_ASSERT(pConverter != NULL);
+    MA_ASSERT(pFramesOut != NULL);
+    MA_ASSERT(pFramesIn  != NULL);
+    MA_ASSERT(pConverter->channelsIn == pConverter->channelsOut);
+
+    if (pConverter->format == ma_format_s16) {
+        /* */ ma_int16* pFramesOutS16 = (      ma_int16*)pFramesOut;
+        const ma_int16* pFramesInS16  = (const ma_int16*)pFramesIn;
+
+        for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+            for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) {
+                pFramesOutS16[iFrame*pConverter->channelsOut + pConverter->shuffleTable[iChannelIn]] = pFramesInS16[iFrame*pConverter->channelsIn + iChannelIn];
+            }
+        }
+    } else {
+        /* */ float* pFramesOutF32 = (      float*)pFramesOut;
+        const float* pFramesInF32  = (const float*)pFramesIn;
+
+        for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+            for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) {
+                pFramesOutF32[iFrame*pConverter->channelsOut + pConverter->shuffleTable[iChannelIn]] = pFramesInF32[iFrame*pConverter->channelsIn + iChannelIn];
+            }
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_channel_converter_process_pcm_frames__simple_mono_expansion(ma_channel_converter* pConverter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount)
+{
+    ma_uint64 iFrame;
+
+    MA_ASSERT(pConverter != NULL);
+    MA_ASSERT(pFramesOut != NULL);
+    MA_ASSERT(pFramesIn  != NULL);
+
+    if (pConverter->format == ma_format_s16) {
+        /* */ ma_int16* pFramesOutS16 = (      ma_int16*)pFramesOut;
+        const ma_int16* pFramesInS16  = (const ma_int16*)pFramesIn;
+
+        if (pConverter->channelsOut == 2) {
+            for (iFrame = 0; iFrame < frameCount; ++iFrame) {
+                pFramesOutS16[iFrame*2 + 0] = pFramesInS16[iFrame];
+                pFramesOutS16[iFrame*2 + 1] = pFramesInS16[iFrame];
+            }
+        } else {
+            for (iFrame = 0; iFrame < frameCount; ++iFrame) {
+                ma_uint32 iChannel;
+                for (iChannel = 0; iChannel < pConverter->channelsOut; iChannel += 1) {
+                    pFramesOutS16[iFrame*pConverter->channelsOut + iChannel] = pFramesInS16[iFrame];
+                }
+            }
+        }
+    } else {
+        /* */ float* pFramesOutF32 = (      float*)pFramesOut;
+        const float* pFramesInF32  = (const float*)pFramesIn;
+
+        if (pConverter->channelsOut == 2) {
+            for (iFrame = 0; iFrame < frameCount; ++iFrame) {
+                pFramesOutF32[iFrame*2 + 0] = pFramesInF32[iFrame];
+                pFramesOutF32[iFrame*2 + 1] = pFramesInF32[iFrame];
+            }
+        } else {
+            for (iFrame = 0; iFrame < frameCount; ++iFrame) {
+                ma_uint32 iChannel;
+                for (iChannel = 0; iChannel < pConverter->channelsOut; iChannel += 1) {
+                    pFramesOutF32[iFrame*pConverter->channelsOut + iChannel] = pFramesInF32[iFrame];
+                }
+            }
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_channel_converter_process_pcm_frames__stereo_to_mono(ma_channel_converter* pConverter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount)
+{
+    ma_uint64 iFrame;
+
+    MA_ASSERT(pConverter != NULL);
+    MA_ASSERT(pFramesOut != NULL);
+    MA_ASSERT(pFramesIn  != NULL);
+    MA_ASSERT(pConverter->channelsIn  == 2);
+    MA_ASSERT(pConverter->channelsOut == 1);
+
+    if (pConverter->format == ma_format_s16) {
+        /* */ ma_int16* pFramesOutS16 = (      ma_int16*)pFramesOut;
+        const ma_int16* pFramesInS16  = (const ma_int16*)pFramesIn;
+
+        for (iFrame = 0; iFrame < frameCount; ++iFrame) {
+            pFramesOutS16[iFrame] = (ma_int16)(((ma_int32)pFramesInS16[iFrame*2+0] + (ma_int32)pFramesInS16[iFrame*2+1]) / 2);
+        }
+    } else {
+        /* */ float* pFramesOutF32 = (      float*)pFramesOut;
+        const float* pFramesInF32  = (const float*)pFramesIn;
+
+        for (iFrame = 0; iFrame < frameCount; ++iFrame) {
+            pFramesOutF32[iFrame] = (pFramesInF32[iFrame*2+0] + pFramesInF32[iFrame*2+1]) * 0.5f;
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_channel_converter_process_pcm_frames__weights(ma_channel_converter* pConverter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount)
+{
+    ma_uint32 iFrame;
+    ma_uint32 iChannelIn;
+    ma_uint32 iChannelOut;
+
+    MA_ASSERT(pConverter != NULL);
+    MA_ASSERT(pFramesOut != NULL);
+    MA_ASSERT(pFramesIn  != NULL);
+
+    /* This is the more complicated case. Each of the output channels is accumulated with 0 or more input channels. */
+
+    /* Clear. */
+    ma_zero_memory_64(pFramesOut, frameCount * ma_get_bytes_per_frame(pConverter->format, pConverter->channelsOut));
+
+    /* Accumulate. */
+    if (pConverter->format == ma_format_s16) {
+        /* */ ma_int16* pFramesOutS16 = (      ma_int16*)pFramesOut;
+        const ma_int16* pFramesInS16  = (const ma_int16*)pFramesIn;
+
+        for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+            for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) {
+                for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) {
+                    ma_int32 s = pFramesOutS16[iFrame*pConverter->channelsOut + iChannelOut];
+                    s += (pFramesInS16[iFrame*pConverter->channelsIn + iChannelIn] * pConverter->weights.s16[iChannelIn][iChannelOut]) >> MA_CHANNEL_CONVERTER_FIXED_POINT_SHIFT;
+
+                    pFramesOutS16[iFrame*pConverter->channelsOut + iChannelOut] = (ma_int16)ma_clamp(s, -32768, 32767);
+                }
+            }
+        }
+    } else {
+        /* */ float* pFramesOutF32 = (      float*)pFramesOut;
+        const float* pFramesInF32  = (const float*)pFramesIn;
+
+        for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+            for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) {
+                for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) {
+                    pFramesOutF32[iFrame*pConverter->channelsOut + iChannelOut] += pFramesInF32[iFrame*pConverter->channelsIn + iChannelIn] * pConverter->weights.f32[iChannelIn][iChannelOut];
+                }
+            }
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+MA_API ma_result ma_channel_converter_process_pcm_frames(ma_channel_converter* pConverter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount)
+{
+    if (pConverter == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (pFramesOut == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (pFramesIn == NULL) {
+        ma_zero_memory_64(pFramesOut, frameCount * ma_get_bytes_per_frame(pConverter->format, pConverter->channelsOut));
+        return MA_SUCCESS;
+    }
+
+    if (pConverter->isPassthrough) {
+        return ma_channel_converter_process_pcm_frames__passthrough(pConverter, pFramesOut, pFramesIn, frameCount);
+    } else if (pConverter->isSimpleShuffle) {
+        return ma_channel_converter_process_pcm_frames__simple_shuffle(pConverter, pFramesOut, pFramesIn, frameCount);
+    } else if (pConverter->isSimpleMonoExpansion) {
+        return ma_channel_converter_process_pcm_frames__simple_mono_expansion(pConverter, pFramesOut, pFramesIn,
frameCount); + } else if (pConverter->isStereoToMono) { + return ma_channel_converter_process_pcm_frames__stereo_to_mono(pConverter, pFramesOut, pFramesIn, frameCount); + } else { + return ma_channel_converter_process_pcm_frames__weights(pConverter, pFramesOut, pFramesIn, frameCount); + } +} + + +/************************************************************************************************************************************************************** + +Data Conversion + +**************************************************************************************************************************************************************/ +MA_API ma_data_converter_config ma_data_converter_config_init_default() +{ + ma_data_converter_config config; + MA_ZERO_OBJECT(&config); + + config.ditherMode = ma_dither_mode_none; + config.resampling.algorithm = ma_resample_algorithm_linear; + config.resampling.allowDynamicSampleRate = MA_FALSE; /* Disable dynamic sample rates by default because dynamic rate adjustments should be quite rare and it allows an optimization for cases when the in and out sample rates are the same. */ + + /* Linear resampling defaults. */ + config.resampling.linear.lpfOrder = 1; + config.resampling.linear.lpfNyquistFactor = 1; + + /* Speex resampling defaults. */ + config.resampling.speex.quality = 3; + + return config; +} + +MA_API ma_data_converter_config ma_data_converter_config_init(ma_format formatIn, ma_format formatOut, ma_uint32 channelsIn, ma_uint32 channelsOut, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut) +{ + ma_data_converter_config config = ma_data_converter_config_init_default(); + config.formatIn = formatIn; + config.formatOut = formatOut; + config.channelsIn = channelsIn; + config.channelsOut = channelsOut; + config.sampleRateIn = sampleRateIn; + config.sampleRateOut = sampleRateOut; + + return config; +} + +MA_API ma_result ma_data_converter_init(const ma_data_converter_config* pConfig, ma_data_converter* pConverter) +{ + ma_result result; + ma_format midFormat; + + if (pConverter == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pConverter); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + pConverter->config = *pConfig; + + /* + We want to avoid as much data conversion as possible. The channel converter and resampler both support s16 and f32 natively. We need to decide + on the format to use for this stage. We call this the mid format because it's used in the middle stage of the conversion pipeline. If the output + format is either s16 or f32 we use that one. If that is not the case it will do the same thing for the input format. If it's neither we just + use f32. + */ + /* */ if (pConverter->config.formatOut == ma_format_s16 || pConverter->config.formatOut == ma_format_f32) { + midFormat = pConverter->config.formatOut; + } else if (pConverter->config.formatIn == ma_format_s16 || pConverter->config.formatIn == ma_format_f32) { + midFormat = pConverter->config.formatIn; + } else { + midFormat = ma_format_f32; + } + + /* Channel converter. We always initialize this, but we check if it configures itself as a passthrough to determine whether or not it's needed. */ + { + ma_uint32 iChannelIn; + ma_uint32 iChannelOut; + ma_channel_converter_config channelConverterConfig; + + channelConverterConfig = ma_channel_converter_config_init(midFormat, pConverter->config.channelsIn, pConverter->config.channelMapIn, pConverter->config.channelsOut, pConverter->config.channelMapOut, pConverter->config.channelMixMode); + + /* Channel weights. 
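+
+        As an illustrative sketch (the field names are the ones used by this config, but the channel counts and weight values are made up):
+        custom weights are supplied on the data converter config before initialization, e.g.
+
+            ma_data_converter_config cfg = ma_data_converter_config_init(ma_format_f32, ma_format_f32, 6, 2, 48000, 48000);
+            cfg.channelMixMode = ma_channel_mix_mode_custom_weights;
+            cfg.channelWeights[4][0] = 0.5f;
+            cfg.channelWeights[5][1] = 0.5f;
+
+        and are simply copied into the channel converter's config by the loop below.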
*/ + for (iChannelIn = 0; iChannelIn < pConverter->config.channelsIn; iChannelIn += 1) { + for (iChannelOut = 0; iChannelOut < pConverter->config.channelsOut; iChannelOut += 1) { + channelConverterConfig.weights[iChannelIn][iChannelOut] = pConverter->config.channelWeights[iChannelIn][iChannelOut]; + } + } + + result = ma_channel_converter_init(&channelConverterConfig, &pConverter->channelConverter); + if (result != MA_SUCCESS) { + return result; + } + + /* If the channel converter is not a passthrough we need to enable it. Otherwise we can skip it. */ + if (pConverter->channelConverter.isPassthrough == MA_FALSE) { + pConverter->hasChannelConverter = MA_TRUE; + } + } + + + /* Always enable dynamic sample rates if the input sample rate is different because we're always going to need a resampler in this case anyway. */ + if (pConverter->config.resampling.allowDynamicSampleRate == MA_FALSE) { + pConverter->config.resampling.allowDynamicSampleRate = pConverter->config.sampleRateIn != pConverter->config.sampleRateOut; + } + + /* Resampler. */ + if (pConverter->config.resampling.allowDynamicSampleRate) { + ma_resampler_config resamplerConfig; + ma_uint32 resamplerChannels; + + /* The resampler is the most expensive part of the conversion process, so we need to do it at the stage where the channel count is at it's lowest. */ + if (pConverter->config.channelsIn < pConverter->config.channelsOut) { + resamplerChannels = pConverter->config.channelsIn; + } else { + resamplerChannels = pConverter->config.channelsOut; + } + + resamplerConfig = ma_resampler_config_init(midFormat, resamplerChannels, pConverter->config.sampleRateIn, pConverter->config.sampleRateOut, pConverter->config.resampling.algorithm); + resamplerConfig.linear.lpfOrder = pConverter->config.resampling.linear.lpfOrder; + resamplerConfig.linear.lpfNyquistFactor = pConverter->config.resampling.linear.lpfNyquistFactor; + resamplerConfig.speex.quality = pConverter->config.resampling.speex.quality; + + result = ma_resampler_init(&resamplerConfig, &pConverter->resampler); + if (result != MA_SUCCESS) { + return result; + } + + pConverter->hasResampler = MA_TRUE; + } + + + /* We can simplify pre- and post-format conversion if we have neither channel conversion nor resampling. */ + if (pConverter->hasChannelConverter == MA_FALSE && pConverter->hasResampler == MA_FALSE) { + /* We have neither channel conversion nor resampling so we'll only need one of pre- or post-format conversion, or none if the input and output formats are the same. */ + if (pConverter->config.formatIn == pConverter->config.formatOut) { + /* The formats are the same so we can just pass through. */ + pConverter->hasPreFormatConversion = MA_FALSE; + pConverter->hasPostFormatConversion = MA_FALSE; + } else { + /* The formats are different so we need to do either pre- or post-format conversion. It doesn't matter which. */ + pConverter->hasPreFormatConversion = MA_FALSE; + pConverter->hasPostFormatConversion = MA_TRUE; + } + } else { + /* We have a channel converter and/or resampler so we'll need channel conversion based on the mid format. */ + if (pConverter->config.formatIn != midFormat) { + pConverter->hasPreFormatConversion = MA_TRUE; + } + if (pConverter->config.formatOut != midFormat) { + pConverter->hasPostFormatConversion = MA_TRUE; + } + } + + /* We can enable passthrough optimizations if applicable. Note that we'll only be able to do this if the sample rate is static. 
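+
+    For illustration (assuming default/blank channel maps): a converter created with
+
+        ma_data_converter_config_init(ma_format_f32, ma_format_f32, 2, 2, 48000, 48000)
+
+    needs no format conversion, no channel conversion and no resampling, so none of the four flags checked below are set and the
+    converter reduces to a straight passthrough copy.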
*/ + if (pConverter->hasPreFormatConversion == MA_FALSE && + pConverter->hasPostFormatConversion == MA_FALSE && + pConverter->hasChannelConverter == MA_FALSE && + pConverter->hasResampler == MA_FALSE) { + pConverter->isPassthrough = MA_TRUE; + } + + return MA_SUCCESS; +} + +MA_API void ma_data_converter_uninit(ma_data_converter* pConverter) +{ + if (pConverter == NULL) { + return; + } + + if (pConverter->hasResampler) { + ma_resampler_uninit(&pConverter->resampler); + } +} + +static ma_result ma_data_converter_process_pcm_frames__passthrough(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 frameCount; + + MA_ASSERT(pConverter != NULL); + + frameCountIn = 0; + if (pFrameCountIn != NULL) { + frameCountIn = *pFrameCountIn; + } + + frameCountOut = 0; + if (pFrameCountOut != NULL) { + frameCountOut = *pFrameCountOut; + } + + frameCount = ma_min(frameCountIn, frameCountOut); + + if (pFramesOut != NULL) { + if (pFramesIn != NULL) { + ma_copy_memory_64(pFramesOut, pFramesIn, frameCount * ma_get_bytes_per_frame(pConverter->config.formatOut, pConverter->config.channelsOut)); + } else { + ma_zero_memory_64(pFramesOut, frameCount * ma_get_bytes_per_frame(pConverter->config.formatOut, pConverter->config.channelsOut)); + } + } + + if (pFrameCountIn != NULL) { + *pFrameCountIn = frameCount; + } + if (pFrameCountOut != NULL) { + *pFrameCountOut = frameCount; + } + + return MA_SUCCESS; +} + +static ma_result ma_data_converter_process_pcm_frames__format_only(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 frameCount; + + MA_ASSERT(pConverter != NULL); + + frameCountIn = 0; + if (pFrameCountIn != NULL) { + frameCountIn = *pFrameCountIn; + } + + frameCountOut = 0; + if (pFrameCountOut != NULL) { + frameCountOut = *pFrameCountOut; + } + + frameCount = ma_min(frameCountIn, frameCountOut); + + if (pFramesOut != NULL) { + if (pFramesIn != NULL) { + ma_convert_pcm_frames_format(pFramesOut, pConverter->config.formatOut, pFramesIn, pConverter->config.formatIn, frameCount, pConverter->config.channelsIn, pConverter->config.ditherMode); + } else { + ma_zero_memory_64(pFramesOut, frameCount * ma_get_bytes_per_frame(pConverter->config.formatOut, pConverter->config.channelsOut)); + } + } + + if (pFrameCountIn != NULL) { + *pFrameCountIn = frameCount; + } + if (pFrameCountOut != NULL) { + *pFrameCountOut = frameCount; + } + + return MA_SUCCESS; +} + + +static ma_result ma_data_converter_process_pcm_frames__resample_with_format_conversion(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + ma_result result = MA_SUCCESS; + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 framesProcessedIn; + ma_uint64 framesProcessedOut; + + MA_ASSERT(pConverter != NULL); + + frameCountIn = 0; + if (pFrameCountIn != NULL) { + frameCountIn = *pFrameCountIn; + } + + frameCountOut = 0; + if (pFrameCountOut != NULL) { + frameCountOut = *pFrameCountOut; + } + + framesProcessedIn = 0; + framesProcessedOut = 0; + + while (framesProcessedOut < frameCountOut) { + ma_uint8 pTempBufferOut[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + const ma_uint32 tempBufferOutCap = sizeof(pTempBufferOut) / ma_get_bytes_per_frame(pConverter->resampler.config.format, 
pConverter->resampler.config.channels); + const void* pFramesInThisIteration; + /* */ void* pFramesOutThisIteration; + ma_uint64 frameCountInThisIteration; + ma_uint64 frameCountOutThisIteration; + + if (pFramesIn != NULL) { + pFramesInThisIteration = ma_offset_ptr(pFramesIn, framesProcessedIn * ma_get_bytes_per_frame(pConverter->config.formatIn, pConverter->config.channelsIn)); + } else { + pFramesInThisIteration = NULL; + } + + if (pFramesOut != NULL) { + pFramesOutThisIteration = ma_offset_ptr(pFramesOut, framesProcessedOut * ma_get_bytes_per_frame(pConverter->config.formatOut, pConverter->config.channelsOut)); + } else { + pFramesOutThisIteration = NULL; + } + + /* Do a pre format conversion if necessary. */ + if (pConverter->hasPreFormatConversion) { + ma_uint8 pTempBufferIn[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + const ma_uint32 tempBufferInCap = sizeof(pTempBufferIn) / ma_get_bytes_per_frame(pConverter->resampler.config.format, pConverter->resampler.config.channels); + + frameCountInThisIteration = (frameCountIn - framesProcessedIn); + if (frameCountInThisIteration > tempBufferInCap) { + frameCountInThisIteration = tempBufferInCap; + } + + if (pConverter->hasPostFormatConversion) { + if (frameCountInThisIteration > tempBufferOutCap) { + frameCountInThisIteration = tempBufferOutCap; + } + } + + if (pFramesInThisIteration != NULL) { + ma_convert_pcm_frames_format(pTempBufferIn, pConverter->resampler.config.format, pFramesInThisIteration, pConverter->config.formatIn, frameCountInThisIteration, pConverter->config.channelsIn, pConverter->config.ditherMode); + } else { + MA_ZERO_MEMORY(pTempBufferIn, sizeof(pTempBufferIn)); + } + + frameCountOutThisIteration = (frameCountOut - framesProcessedOut); + + if (pConverter->hasPostFormatConversion) { + /* Both input and output conversion required. Output to the temp buffer. */ + if (frameCountOutThisIteration > tempBufferOutCap) { + frameCountOutThisIteration = tempBufferOutCap; + } + + result = ma_resampler_process_pcm_frames(&pConverter->resampler, pTempBufferIn, &frameCountInThisIteration, pTempBufferOut, &frameCountOutThisIteration); + } else { + /* Only pre-format required. Output straight to the output buffer. */ + result = ma_resampler_process_pcm_frames(&pConverter->resampler, pTempBufferIn, &frameCountInThisIteration, pFramesOutThisIteration, &frameCountOutThisIteration); + } + + if (result != MA_SUCCESS) { + break; + } + } else { + /* No pre-format required. Just read straight from the input buffer. */ + MA_ASSERT(pConverter->hasPostFormatConversion == MA_TRUE); + + frameCountInThisIteration = (frameCountIn - framesProcessedIn); + frameCountOutThisIteration = (frameCountOut - framesProcessedOut); + if (frameCountOutThisIteration > tempBufferOutCap) { + frameCountOutThisIteration = tempBufferOutCap; + } + + result = ma_resampler_process_pcm_frames(&pConverter->resampler, pFramesInThisIteration, &frameCountInThisIteration, pTempBufferOut, &frameCountOutThisIteration); + if (result != MA_SUCCESS) { + break; + } + } + + /* If we are doing a post format conversion we need to do that now. 
*/ + if (pConverter->hasPostFormatConversion) { + if (pFramesOutThisIteration != NULL) { + ma_convert_pcm_frames_format(pFramesOutThisIteration, pConverter->config.formatOut, pTempBufferOut, pConverter->resampler.config.format, frameCountOutThisIteration, pConverter->resampler.config.channels, pConverter->config.ditherMode); + } + } + + framesProcessedIn += frameCountInThisIteration; + framesProcessedOut += frameCountOutThisIteration; + + MA_ASSERT(framesProcessedIn <= frameCountIn); + MA_ASSERT(framesProcessedOut <= frameCountOut); + + if (frameCountOutThisIteration == 0) { + break; /* Consumed all of our input data. */ + } + } + + if (pFrameCountIn != NULL) { + *pFrameCountIn = framesProcessedIn; + } + if (pFrameCountOut != NULL) { + *pFrameCountOut = framesProcessedOut; + } + + return result; +} + +static ma_result ma_data_converter_process_pcm_frames__resample_only(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + MA_ASSERT(pConverter != NULL); + + if (pConverter->hasPreFormatConversion == MA_FALSE && pConverter->hasPostFormatConversion == MA_FALSE) { + /* Neither pre- nor post-format required. This is simple case where only resampling is required. */ + return ma_resampler_process_pcm_frames(&pConverter->resampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } else { + /* Format conversion required. */ + return ma_data_converter_process_pcm_frames__resample_with_format_conversion(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } +} + +static ma_result ma_data_converter_process_pcm_frames__channels_only(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + ma_result result; + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 frameCount; + + MA_ASSERT(pConverter != NULL); + + frameCountIn = 0; + if (pFrameCountIn != NULL) { + frameCountIn = *pFrameCountIn; + } + + frameCountOut = 0; + if (pFrameCountOut != NULL) { + frameCountOut = *pFrameCountOut; + } + + frameCount = ma_min(frameCountIn, frameCountOut); + + if (pConverter->hasPreFormatConversion == MA_FALSE && pConverter->hasPostFormatConversion == MA_FALSE) { + /* No format conversion required. */ + result = ma_channel_converter_process_pcm_frames(&pConverter->channelConverter, pFramesOut, pFramesIn, frameCount); + if (result != MA_SUCCESS) { + return result; + } + } else { + /* Format conversion required. */ + ma_uint64 framesProcessed = 0; + + while (framesProcessed < frameCount) { + ma_uint8 pTempBufferOut[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + const ma_uint32 tempBufferOutCap = sizeof(pTempBufferOut) / ma_get_bytes_per_frame(pConverter->channelConverter.format, pConverter->channelConverter.channelsOut); + const void* pFramesInThisIteration; + /* */ void* pFramesOutThisIteration; + ma_uint64 frameCountThisIteration; + + if (pFramesIn != NULL) { + pFramesInThisIteration = ma_offset_ptr(pFramesIn, framesProcessed * ma_get_bytes_per_frame(pConverter->config.formatIn, pConverter->config.channelsIn)); + } else { + pFramesInThisIteration = NULL; + } + + if (pFramesOut != NULL) { + pFramesOutThisIteration = ma_offset_ptr(pFramesOut, framesProcessed * ma_get_bytes_per_frame(pConverter->config.formatOut, pConverter->config.channelsOut)); + } else { + pFramesOutThisIteration = NULL; + } + + /* Do a pre format conversion if necessary. 
*/ + if (pConverter->hasPreFormatConversion) { + ma_uint8 pTempBufferIn[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + const ma_uint32 tempBufferInCap = sizeof(pTempBufferIn) / ma_get_bytes_per_frame(pConverter->channelConverter.format, pConverter->channelConverter.channelsIn); + + frameCountThisIteration = (frameCount - framesProcessed); + if (frameCountThisIteration > tempBufferInCap) { + frameCountThisIteration = tempBufferInCap; + } + + if (pConverter->hasPostFormatConversion) { + if (frameCountThisIteration > tempBufferOutCap) { + frameCountThisIteration = tempBufferOutCap; + } + } + + if (pFramesInThisIteration != NULL) { + ma_convert_pcm_frames_format(pTempBufferIn, pConverter->channelConverter.format, pFramesInThisIteration, pConverter->config.formatIn, frameCountThisIteration, pConverter->config.channelsIn, pConverter->config.ditherMode); + } else { + MA_ZERO_MEMORY(pTempBufferIn, sizeof(pTempBufferIn)); + } + + if (pConverter->hasPostFormatConversion) { + /* Both input and output conversion required. Output to the temp buffer. */ + result = ma_channel_converter_process_pcm_frames(&pConverter->channelConverter, pTempBufferOut, pTempBufferIn, frameCountThisIteration); + } else { + /* Only pre-format required. Output straight to the output buffer. */ + result = ma_channel_converter_process_pcm_frames(&pConverter->channelConverter, pFramesOutThisIteration, pTempBufferIn, frameCountThisIteration); + } + + if (result != MA_SUCCESS) { + break; + } + } else { + /* No pre-format required. Just read straight from the input buffer. */ + MA_ASSERT(pConverter->hasPostFormatConversion == MA_TRUE); + + frameCountThisIteration = (frameCount - framesProcessed); + if (frameCountThisIteration > tempBufferOutCap) { + frameCountThisIteration = tempBufferOutCap; + } + + result = ma_channel_converter_process_pcm_frames(&pConverter->channelConverter, pTempBufferOut, pFramesInThisIteration, frameCountThisIteration); + if (result != MA_SUCCESS) { + break; + } + } + + /* If we are doing a post format conversion we need to do that now. */ + if (pConverter->hasPostFormatConversion) { + if (pFramesOutThisIteration != NULL) { + ma_convert_pcm_frames_format(pFramesOutThisIteration, pConverter->config.formatOut, pTempBufferOut, pConverter->channelConverter.format, frameCountThisIteration, pConverter->channelConverter.channelsOut, pConverter->config.ditherMode); + } + } + + framesProcessed += frameCountThisIteration; + } + } + + if (pFrameCountIn != NULL) { + *pFrameCountIn = frameCount; + } + if (pFrameCountOut != NULL) { + *pFrameCountOut = frameCount; + } + + return MA_SUCCESS; +} + +static ma_result ma_data_converter_process_pcm_frames__resampling_first(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + ma_result result; + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 framesProcessedIn; + ma_uint64 framesProcessedOut; + ma_uint8 pTempBufferIn[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In resampler format. */ + ma_uint64 tempBufferInCap; + ma_uint8 pTempBufferMid[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In resampler format, channel converter input format. */ + ma_uint64 tempBufferMidCap; + ma_uint8 pTempBufferOut[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In channel converter output format. 
*/ + ma_uint64 tempBufferOutCap; + + MA_ASSERT(pConverter != NULL); + MA_ASSERT(pConverter->resampler.config.format == pConverter->channelConverter.format); + MA_ASSERT(pConverter->resampler.config.channels == pConverter->channelConverter.channelsIn); + MA_ASSERT(pConverter->resampler.config.channels < pConverter->channelConverter.channelsOut); + + frameCountIn = 0; + if (pFrameCountIn != NULL) { + frameCountIn = *pFrameCountIn; + } + + frameCountOut = 0; + if (pFrameCountOut != NULL) { + frameCountOut = *pFrameCountOut; + } + + framesProcessedIn = 0; + framesProcessedOut = 0; + + tempBufferInCap = sizeof(pTempBufferIn) / ma_get_bytes_per_frame(pConverter->resampler.config.format, pConverter->resampler.config.channels); + tempBufferMidCap = sizeof(pTempBufferIn) / ma_get_bytes_per_frame(pConverter->resampler.config.format, pConverter->resampler.config.channels); + tempBufferOutCap = sizeof(pTempBufferOut) / ma_get_bytes_per_frame(pConverter->channelConverter.format, pConverter->channelConverter.channelsOut); + + while (framesProcessedOut < frameCountOut) { + ma_uint64 frameCountInThisIteration; + ma_uint64 frameCountOutThisIteration; + const void* pRunningFramesIn = NULL; + void* pRunningFramesOut = NULL; + const void* pResampleBufferIn; + void* pChannelsBufferOut; + + if (pFramesIn != NULL) { + pRunningFramesIn = ma_offset_ptr(pFramesIn, framesProcessedIn * ma_get_bytes_per_frame(pConverter->config.formatIn, pConverter->config.channelsIn)); + } + if (pFramesOut != NULL) { + pRunningFramesOut = ma_offset_ptr(pFramesOut, framesProcessedOut * ma_get_bytes_per_frame(pConverter->config.formatOut, pConverter->config.channelsOut)); + } + + /* Run input data through the resampler and output it to the temporary buffer. */ + frameCountInThisIteration = (frameCountIn - framesProcessedIn); + + if (pConverter->hasPreFormatConversion) { + if (frameCountInThisIteration > tempBufferInCap) { + frameCountInThisIteration = tempBufferInCap; + } + } + + frameCountOutThisIteration = (frameCountOut - framesProcessedOut); + if (frameCountOutThisIteration > tempBufferMidCap) { + frameCountOutThisIteration = tempBufferMidCap; + } + + /* We can't read more frames than can fit in the output buffer. */ + if (pConverter->hasPostFormatConversion) { + if (frameCountOutThisIteration > tempBufferOutCap) { + frameCountOutThisIteration = tempBufferOutCap; + } + } + + /* We need to ensure we don't try to process too many input frames that we run out of room in the output buffer. If this happens we'll end up glitching. */ + { + ma_uint64 requiredInputFrameCount = ma_resampler_get_required_input_frame_count(&pConverter->resampler, frameCountOutThisIteration); + if (frameCountInThisIteration > requiredInputFrameCount) { + frameCountInThisIteration = requiredInputFrameCount; + } + } + + if (pConverter->hasPreFormatConversion) { + if (pFramesIn != NULL) { + ma_convert_pcm_frames_format(pTempBufferIn, pConverter->resampler.config.format, pRunningFramesIn, pConverter->config.formatIn, frameCountInThisIteration, pConverter->config.channelsIn, pConverter->config.ditherMode); + pResampleBufferIn = pTempBufferIn; + } else { + pResampleBufferIn = NULL; + } + } else { + pResampleBufferIn = pRunningFramesIn; + } + + result = ma_resampler_process_pcm_frames(&pConverter->resampler, pResampleBufferIn, &frameCountInThisIteration, pTempBufferMid, &frameCountOutThisIteration); + if (result != MA_SUCCESS) { + return result; + } + + + /* + The input data has been resampled so now we need to run it through the channel converter. 
The input data is always contained in pTempBufferMid. We only need to do + this part if we have an output buffer. + */ + if (pFramesOut != NULL) { + if (pConverter->hasPostFormatConversion) { + pChannelsBufferOut = pTempBufferOut; + } else { + pChannelsBufferOut = pRunningFramesOut; + } + + result = ma_channel_converter_process_pcm_frames(&pConverter->channelConverter, pChannelsBufferOut, pTempBufferMid, frameCountOutThisIteration); + if (result != MA_SUCCESS) { + return result; + } + + /* Finally we do post format conversion. */ + if (pConverter->hasPostFormatConversion) { + ma_convert_pcm_frames_format(pRunningFramesOut, pConverter->config.formatOut, pChannelsBufferOut, pConverter->channelConverter.format, frameCountOutThisIteration, pConverter->channelConverter.channelsOut, pConverter->config.ditherMode); + } + } + + + framesProcessedIn += frameCountInThisIteration; + framesProcessedOut += frameCountOutThisIteration; + + MA_ASSERT(framesProcessedIn <= frameCountIn); + MA_ASSERT(framesProcessedOut <= frameCountOut); + + if (frameCountOutThisIteration == 0) { + break; /* Consumed all of our input data. */ + } + } + + if (pFrameCountIn != NULL) { + *pFrameCountIn = framesProcessedIn; + } + if (pFrameCountOut != NULL) { + *pFrameCountOut = framesProcessedOut; + } + + return MA_SUCCESS; +} + +static ma_result ma_data_converter_process_pcm_frames__channels_first(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + ma_result result; + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 framesProcessedIn; + ma_uint64 framesProcessedOut; + ma_uint8 pTempBufferIn[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In resampler format. */ + ma_uint64 tempBufferInCap; + ma_uint8 pTempBufferMid[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In resampler format, channel converter input format. */ + ma_uint64 tempBufferMidCap; + ma_uint8 pTempBufferOut[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In channel converter output format. 
*/ + ma_uint64 tempBufferOutCap; + + MA_ASSERT(pConverter != NULL); + MA_ASSERT(pConverter->resampler.config.format == pConverter->channelConverter.format); + MA_ASSERT(pConverter->resampler.config.channels == pConverter->channelConverter.channelsOut); + MA_ASSERT(pConverter->resampler.config.channels < pConverter->channelConverter.channelsIn); + + frameCountIn = 0; + if (pFrameCountIn != NULL) { + frameCountIn = *pFrameCountIn; + } + + frameCountOut = 0; + if (pFrameCountOut != NULL) { + frameCountOut = *pFrameCountOut; + } + + framesProcessedIn = 0; + framesProcessedOut = 0; + + tempBufferInCap = sizeof(pTempBufferIn) / ma_get_bytes_per_frame(pConverter->channelConverter.format, pConverter->channelConverter.channelsIn); + tempBufferMidCap = sizeof(pTempBufferIn) / ma_get_bytes_per_frame(pConverter->channelConverter.format, pConverter->channelConverter.channelsOut); + tempBufferOutCap = sizeof(pTempBufferOut) / ma_get_bytes_per_frame(pConverter->resampler.config.format, pConverter->resampler.config.channels); + + while (framesProcessedOut < frameCountOut) { + ma_uint64 frameCountInThisIteration; + ma_uint64 frameCountOutThisIteration; + const void* pRunningFramesIn = NULL; + void* pRunningFramesOut = NULL; + const void* pChannelsBufferIn; + void* pResampleBufferOut; + + if (pFramesIn != NULL) { + pRunningFramesIn = ma_offset_ptr(pFramesIn, framesProcessedIn * ma_get_bytes_per_frame(pConverter->config.formatIn, pConverter->config.channelsIn)); + } + if (pFramesOut != NULL) { + pRunningFramesOut = ma_offset_ptr(pFramesOut, framesProcessedOut * ma_get_bytes_per_frame(pConverter->config.formatOut, pConverter->config.channelsOut)); + } + + /* Run input data through the channel converter and output it to the temporary buffer. */ + frameCountInThisIteration = (frameCountIn - framesProcessedIn); + + if (pConverter->hasPreFormatConversion) { + if (frameCountInThisIteration > tempBufferInCap) { + frameCountInThisIteration = tempBufferInCap; + } + + if (pRunningFramesIn != NULL) { + ma_convert_pcm_frames_format(pTempBufferIn, pConverter->channelConverter.format, pRunningFramesIn, pConverter->config.formatIn, frameCountInThisIteration, pConverter->config.channelsIn, pConverter->config.ditherMode); + pChannelsBufferIn = pTempBufferIn; + } else { + pChannelsBufferIn = NULL; + } + } else { + pChannelsBufferIn = pRunningFramesIn; + } + + /* + We can't convert more frames than will fit in the output buffer. We shouldn't actually need to do this check because the channel count is always reduced + in this case which means we should always have capacity, but I'm leaving it here just for safety for future maintenance. + */ + if (frameCountInThisIteration > tempBufferMidCap) { + frameCountInThisIteration = tempBufferMidCap; + } + + /* + Make sure we don't read any more input frames than we need to fill the output frame count. If we do this we will end up in a situation where we lose some + input samples and will end up glitching. 
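+
+        As a rough numeric illustration (the rates are arbitrary): converting 48000 -> 24000 consumes roughly two input frames per
+        output frame, so producing 512 output frames should only be fed about 1024 input frames - which is what the clamp against
+        ma_resampler_get_required_input_frame_count() below is for.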
+ */ + frameCountOutThisIteration = (frameCountOut - framesProcessedOut); + if (frameCountOutThisIteration > tempBufferMidCap) { + frameCountOutThisIteration = tempBufferMidCap; + } + + if (pConverter->hasPostFormatConversion) { + ma_uint64 requiredInputFrameCount; + + if (frameCountOutThisIteration > tempBufferOutCap) { + frameCountOutThisIteration = tempBufferOutCap; + } + + requiredInputFrameCount = ma_resampler_get_required_input_frame_count(&pConverter->resampler, frameCountOutThisIteration); + if (frameCountInThisIteration > requiredInputFrameCount) { + frameCountInThisIteration = requiredInputFrameCount; + } + } + + result = ma_channel_converter_process_pcm_frames(&pConverter->channelConverter, pTempBufferMid, pChannelsBufferIn, frameCountInThisIteration); + if (result != MA_SUCCESS) { + return result; + } + + + /* At this point we have converted the channels to the output channel count which we now need to resample. */ + if (pConverter->hasPostFormatConversion) { + pResampleBufferOut = pTempBufferOut; + } else { + pResampleBufferOut = pRunningFramesOut; + } + + result = ma_resampler_process_pcm_frames(&pConverter->resampler, pTempBufferMid, &frameCountInThisIteration, pResampleBufferOut, &frameCountOutThisIteration); + if (result != MA_SUCCESS) { + return result; + } + + /* Finally we can do the post format conversion. */ + if (pConverter->hasPostFormatConversion) { + if (pRunningFramesOut != NULL) { + ma_convert_pcm_frames_format(pRunningFramesOut, pConverter->config.formatOut, pResampleBufferOut, pConverter->resampler.config.format, frameCountOutThisIteration, pConverter->config.channelsOut, pConverter->config.ditherMode); + } + } + + framesProcessedIn += frameCountInThisIteration; + framesProcessedOut += frameCountOutThisIteration; + + MA_ASSERT(framesProcessedIn <= frameCountIn); + MA_ASSERT(framesProcessedOut <= frameCountOut); + + if (frameCountOutThisIteration == 0) { + break; /* Consumed all of our input data. */ + } + } + + if (pFrameCountIn != NULL) { + *pFrameCountIn = framesProcessedIn; + } + if (pFrameCountOut != NULL) { + *pFrameCountOut = framesProcessedOut; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_data_converter_process_pcm_frames(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + if (pConverter == NULL) { + return MA_INVALID_ARGS; + } + + if (pConverter->isPassthrough) { + return ma_data_converter_process_pcm_frames__passthrough(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } + + /* + Here is where the real work is done. Getting here means we're not using a passthrough and we need to move the data through each of the relevant stages. The order + of our stages depends on the input and output channel count. If the input channels is less than the output channels we want to do sample rate conversion first so + that it has less work (resampling is the most expensive part of format conversion). + */ + if (pConverter->config.channelsIn < pConverter->config.channelsOut) { + /* Do resampling first, if necessary. */ + MA_ASSERT(pConverter->hasChannelConverter == MA_TRUE); + + if (pConverter->hasResampler) { + /* Resampling first. */ + return ma_data_converter_process_pcm_frames__resampling_first(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } else { + /* Resampling not required. 
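+               Only the channel converter (plus any pre/post format conversion) needs to run; e.g. a mono to stereo conversion at a fixed sample rate lands here.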
*/ + return ma_data_converter_process_pcm_frames__channels_only(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } + } else { + /* Do channel conversion first, if necessary. */ + if (pConverter->hasChannelConverter) { + if (pConverter->hasResampler) { + /* Channel routing first. */ + return ma_data_converter_process_pcm_frames__channels_first(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } else { + /* Resampling not required. */ + return ma_data_converter_process_pcm_frames__channels_only(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } + } else { + /* Channel routing not required. */ + if (pConverter->hasResampler) { + /* Resampling only. */ + return ma_data_converter_process_pcm_frames__resample_only(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } else { + /* No channel routing nor resampling required. Just format conversion. */ + return ma_data_converter_process_pcm_frames__format_only(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } + } + } +} + +MA_API ma_result ma_data_converter_set_rate(ma_data_converter* pConverter, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut) +{ + if (pConverter == NULL) { + return MA_INVALID_ARGS; + } + + if (pConverter->hasResampler == MA_FALSE) { + return MA_INVALID_OPERATION; /* Dynamic resampling not enabled. */ + } + + return ma_resampler_set_rate(&pConverter->resampler, sampleRateIn, sampleRateOut); +} + +MA_API ma_result ma_data_converter_set_rate_ratio(ma_data_converter* pConverter, float ratioInOut) +{ + if (pConverter == NULL) { + return MA_INVALID_ARGS; + } + + if (pConverter->hasResampler == MA_FALSE) { + return MA_INVALID_OPERATION; /* Dynamic resampling not enabled. */ + } + + return ma_resampler_set_rate_ratio(&pConverter->resampler, ratioInOut); +} + +MA_API ma_uint64 ma_data_converter_get_required_input_frame_count(ma_data_converter* pConverter, ma_uint64 outputFrameCount) +{ + if (pConverter == NULL) { + return 0; + } + + if (pConverter->hasResampler) { + return ma_resampler_get_required_input_frame_count(&pConverter->resampler, outputFrameCount); + } else { + return outputFrameCount; /* 1:1 */ + } +} + +MA_API ma_uint64 ma_data_converter_get_expected_output_frame_count(ma_data_converter* pConverter, ma_uint64 inputFrameCount) +{ + if (pConverter == NULL) { + return 0; + } + + if (pConverter->hasResampler) { + return ma_resampler_get_expected_output_frame_count(&pConverter->resampler, inputFrameCount); + } else { + return inputFrameCount; /* 1:1 */ + } +} + +MA_API ma_uint64 ma_data_converter_get_input_latency(ma_data_converter* pConverter) +{ + if (pConverter == NULL) { + return 0; + } + + if (pConverter->hasResampler) { + return ma_resampler_get_input_latency(&pConverter->resampler); + } + + return 0; /* No latency without a resampler. */ +} + +MA_API ma_uint64 ma_data_converter_get_output_latency(ma_data_converter* pConverter) +{ + if (pConverter == NULL) { + return 0; + } + + if (pConverter->hasResampler) { + return ma_resampler_get_output_latency(&pConverter->resampler); + } + + return 0; /* No latency without a resampler. 
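+       Format and channel conversion are performed frame-by-frame and introduce no delay.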
*/ +} + + + +/************************************************************************************************************************************************************** + +Format Conversion + +**************************************************************************************************************************************************************/ + +static MA_INLINE ma_int16 ma_pcm_sample_f32_to_s16(float x) +{ + return (ma_int16)(x * 32767.0f); +} + +/* u8 */ +MA_API void ma_pcm_u8_to_u8(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + (void)ditherMode; + ma_copy_memory_64(dst, src, count * sizeof(ma_uint8)); +} + + +static MA_INLINE void ma_pcm_u8_to_s16__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_int16* dst_s16 = (ma_int16*)dst; + const ma_uint8* src_u8 = (const ma_uint8*)src; + + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int16 x = src_u8[i]; + x = x - 128; + x = x << 8; + dst_s16[i] = x; + } + + (void)ditherMode; +} + +static MA_INLINE void ma_pcm_u8_to_s16__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_u8_to_s16__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_u8_to_s16__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_u8_to_s16__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_u8_to_s16__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_u8_to_s16__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_u8_to_s16__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_u8_to_s16__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_u8_to_s16(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_u8_to_s16__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_u8_to_s16__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_u8_to_s16__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_u8_to_s16__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_u8_to_s16__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_u8_to_s24__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint8* dst_s24 = (ma_uint8*)dst; + const ma_uint8* src_u8 = (const ma_uint8*)src; + + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int16 x = src_u8[i]; + x = x - 128; + + dst_s24[i*3+0] = 0; + dst_s24[i*3+1] = 0; + dst_s24[i*3+2] = (ma_uint8)((ma_int8)x); + } + + (void)ditherMode; +} + +static MA_INLINE void ma_pcm_u8_to_s24__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_u8_to_s24__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_u8_to_s24__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_u8_to_s24__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_u8_to_s24__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode 
ditherMode) +{ + ma_pcm_u8_to_s24__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_u8_to_s24__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_u8_to_s24__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_u8_to_s24(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_u8_to_s24__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_u8_to_s24__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_u8_to_s24__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_u8_to_s24__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_u8_to_s24__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_u8_to_s32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_int32* dst_s32 = (ma_int32*)dst; + const ma_uint8* src_u8 = (const ma_uint8*)src; + + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int32 x = src_u8[i]; + x = x - 128; + x = x << 24; + dst_s32[i] = x; + } + + (void)ditherMode; +} + +static MA_INLINE void ma_pcm_u8_to_s32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_u8_to_s32__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_u8_to_s32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_u8_to_s32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_u8_to_s32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_u8_to_s32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_u8_to_s32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_u8_to_s32__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_u8_to_s32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_u8_to_s32__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_u8_to_s32__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_u8_to_s32__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_u8_to_s32__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_u8_to_s32__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_u8_to_f32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + float* dst_f32 = (float*)dst; + const ma_uint8* src_u8 = (const ma_uint8*)src; + + ma_uint64 i; + for (i = 0; i < count; i += 1) { + float x = (float)src_u8[i]; + x = x * 0.00784313725490196078f; /* 0..255 to 0..2 */ + x = x - 1; /* 0..2 to -1..1 */ + + dst_f32[i] = x; + } + + (void)ditherMode; +} + +static MA_INLINE void ma_pcm_u8_to_f32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_u8_to_f32__reference(dst, src, count, ditherMode); +} + +#if 
defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_u8_to_f32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_u8_to_f32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_u8_to_f32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_u8_to_f32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_u8_to_f32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_u8_to_f32__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_u8_to_f32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_u8_to_f32__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_u8_to_f32__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_u8_to_f32__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_u8_to_f32__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_u8_to_f32__optimized(dst, src, count, ditherMode); + } +#endif +} + + +#ifdef MA_USE_REFERENCE_CONVERSION_APIS +static MA_INLINE void ma_pcm_interleave_u8__reference(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_uint8* dst_u8 = (ma_uint8*)dst; + const ma_uint8** src_u8 = (const ma_uint8**)src; + + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst_u8[iFrame*channels + iChannel] = src_u8[iChannel][iFrame]; + } + } +} +#else +static MA_INLINE void ma_pcm_interleave_u8__optimized(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_uint8* dst_u8 = (ma_uint8*)dst; + const ma_uint8** src_u8 = (const ma_uint8**)src; + + if (channels == 1) { + ma_copy_memory_64(dst, src[0], frameCount * sizeof(ma_uint8)); + } else if (channels == 2) { + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + dst_u8[iFrame*2 + 0] = src_u8[0][iFrame]; + dst_u8[iFrame*2 + 1] = src_u8[1][iFrame]; + } + } else { + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst_u8[iFrame*channels + iChannel] = src_u8[iChannel][iFrame]; + } + } + } +} +#endif + +MA_API void ma_pcm_interleave_u8(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_interleave_u8__reference(dst, src, frameCount, channels); +#else + ma_pcm_interleave_u8__optimized(dst, src, frameCount, channels); +#endif +} + + +static MA_INLINE void ma_pcm_deinterleave_u8__reference(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_uint8** dst_u8 = (ma_uint8**)dst; + const ma_uint8* src_u8 = (const ma_uint8*)src; + + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst_u8[iChannel][iFrame] = src_u8[iFrame*channels + iChannel]; + } + } +} + +static MA_INLINE void ma_pcm_deinterleave_u8__optimized(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_pcm_deinterleave_u8__reference(dst, src, 
frameCount, channels); +} + +MA_API void ma_pcm_deinterleave_u8(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_deinterleave_u8__reference(dst, src, frameCount, channels); +#else + ma_pcm_deinterleave_u8__optimized(dst, src, frameCount, channels); +#endif +} + + +/* s16 */ +static MA_INLINE void ma_pcm_s16_to_u8__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint8* dst_u8 = (ma_uint8*)dst; + const ma_int16* src_s16 = (const ma_int16*)src; + + if (ditherMode == ma_dither_mode_none) { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int16 x = src_s16[i]; + x = x >> 8; + x = x + 128; + dst_u8[i] = (ma_uint8)x; + } + } else { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int16 x = src_s16[i]; + + /* Dither. Don't overflow. */ + ma_int32 dither = ma_dither_s32(ditherMode, -0x80, 0x7F); + if ((x + dither) <= 0x7FFF) { + x = (ma_int16)(x + dither); + } else { + x = 0x7FFF; + } + + x = x >> 8; + x = x + 128; + dst_u8[i] = (ma_uint8)x; + } + } +} + +static MA_INLINE void ma_pcm_s16_to_u8__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_u8__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s16_to_u8__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_u8__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_s16_to_u8__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_u8__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s16_to_u8__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_u8__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_s16_to_u8(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s16_to_u8__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_s16_to_u8__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_s16_to_u8__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_s16_to_u8__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_s16_to_u8__optimized(dst, src, count, ditherMode); + } +#endif +} + + +MA_API void ma_pcm_s16_to_s16(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + (void)ditherMode; + ma_copy_memory_64(dst, src, count * sizeof(ma_int16)); +} + + +static MA_INLINE void ma_pcm_s16_to_s24__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint8* dst_s24 = (ma_uint8*)dst; + const ma_int16* src_s16 = (const ma_int16*)src; + + ma_uint64 i; + for (i = 0; i < count; i += 1) { + dst_s24[i*3+0] = 0; + dst_s24[i*3+1] = (ma_uint8)(src_s16[i] & 0xFF); + dst_s24[i*3+2] = (ma_uint8)(src_s16[i] >> 8); + } + + (void)ditherMode; +} + +static MA_INLINE void ma_pcm_s16_to_s24__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_s24__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s16_to_s24__sse2(void* dst, const void* src, ma_uint64 
count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_s24__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_s16_to_s24__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_s24__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s16_to_s24__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_s24__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_s16_to_s24(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s16_to_s24__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_s16_to_s24__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_s16_to_s24__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_s16_to_s24__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_s16_to_s24__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_s16_to_s32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_int32* dst_s32 = (ma_int32*)dst; + const ma_int16* src_s16 = (const ma_int16*)src; + + ma_uint64 i; + for (i = 0; i < count; i += 1) { + dst_s32[i] = src_s16[i] << 16; + } + + (void)ditherMode; +} + +static MA_INLINE void ma_pcm_s16_to_s32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_s32__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s16_to_s32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_s32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_s16_to_s32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_s32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s16_to_s32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_s32__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_s16_to_s32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s16_to_s32__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_s16_to_s32__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_s16_to_s32__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_s16_to_s32__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_s16_to_s32__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_s16_to_f32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + float* dst_f32 = (float*)dst; + const ma_int16* src_s16 = (const ma_int16*)src; + + ma_uint64 i; + for (i = 0; i < count; i += 1) { + float x = (float)src_s16[i]; + +#if 0 + /* The accurate way. 
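+           This maps the full -32768..32767 range onto -1..1 exactly, at the cost of an extra add and subtract per sample.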
*/ + x = x + 32768.0f; /* -32768..32767 to 0..65535 */ + x = x * 0.00003051804379339284f; /* 0..65535 to 0..2 */ + x = x - 1; /* 0..2 to -1..1 */ +#else + /* The fast way. */ + x = x * 0.000030517578125f; /* -32768..32767 to -1..0.999969482421875 */ +#endif + + dst_f32[i] = x; + } + + (void)ditherMode; +} + +static MA_INLINE void ma_pcm_s16_to_f32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_f32__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s16_to_f32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_f32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_s16_to_f32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_f32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s16_to_f32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_f32__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_s16_to_f32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s16_to_f32__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_s16_to_f32__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_s16_to_f32__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_s16_to_f32__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_s16_to_f32__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_interleave_s16__reference(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_int16* dst_s16 = (ma_int16*)dst; + const ma_int16** src_s16 = (const ma_int16**)src; + + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst_s16[iFrame*channels + iChannel] = src_s16[iChannel][iFrame]; + } + } +} + +static MA_INLINE void ma_pcm_interleave_s16__optimized(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_pcm_interleave_s16__reference(dst, src, frameCount, channels); +} + +MA_API void ma_pcm_interleave_s16(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_interleave_s16__reference(dst, src, frameCount, channels); +#else + ma_pcm_interleave_s16__optimized(dst, src, frameCount, channels); +#endif +} + + +static MA_INLINE void ma_pcm_deinterleave_s16__reference(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_int16** dst_s16 = (ma_int16**)dst; + const ma_int16* src_s16 = (const ma_int16*)src; + + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst_s16[iChannel][iFrame] = src_s16[iFrame*channels + iChannel]; + } + } +} + +static MA_INLINE void ma_pcm_deinterleave_s16__optimized(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_pcm_deinterleave_s16__reference(dst, src, frameCount, channels); +} + +MA_API void ma_pcm_deinterleave_s16(void** 
dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_deinterleave_s16__reference(dst, src, frameCount, channels); +#else + ma_pcm_deinterleave_s16__optimized(dst, src, frameCount, channels); +#endif +} + + +/* s24 */ +static MA_INLINE void ma_pcm_s24_to_u8__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint8* dst_u8 = (ma_uint8*)dst; + const ma_uint8* src_s24 = (const ma_uint8*)src; + + if (ditherMode == ma_dither_mode_none) { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int8 x = (ma_int8)src_s24[i*3 + 2] + 128; + dst_u8[i] = (ma_uint8)x; + } + } else { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int32 x = (ma_int32)(((ma_uint32)(src_s24[i*3+0]) << 8) | ((ma_uint32)(src_s24[i*3+1]) << 16) | ((ma_uint32)(src_s24[i*3+2])) << 24); + + /* Dither. Don't overflow. */ + ma_int32 dither = ma_dither_s32(ditherMode, -0x800000, 0x7FFFFF); + if ((ma_int64)x + dither <= 0x7FFFFFFF) { + x = x + dither; + } else { + x = 0x7FFFFFFF; + } + + x = x >> 24; + x = x + 128; + dst_u8[i] = (ma_uint8)x; + } + } +} + +static MA_INLINE void ma_pcm_s24_to_u8__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_u8__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s24_to_u8__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_u8__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_s24_to_u8__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_u8__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s24_to_u8__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_u8__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_s24_to_u8(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s24_to_u8__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_s24_to_u8__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_s24_to_u8__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_s24_to_u8__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_s24_to_u8__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_s24_to_s16__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_int16* dst_s16 = (ma_int16*)dst; + const ma_uint8* src_s24 = (const ma_uint8*)src; + + if (ditherMode == ma_dither_mode_none) { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_uint16 dst_lo = ((ma_uint16)src_s24[i*3 + 1]); + ma_uint16 dst_hi = ((ma_uint16)src_s24[i*3 + 2]) << 8; + dst_s16[i] = (ma_int16)dst_lo | dst_hi; + } + } else { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int32 x = (ma_int32)(((ma_uint32)(src_s24[i*3+0]) << 8) | ((ma_uint32)(src_s24[i*3+1]) << 16) | ((ma_uint32)(src_s24[i*3+2])) << 24); + + /* Dither. Don't overflow. 
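+               The dithered value is clamped so it still fits in a signed 32-bit integer before being shifted down to 16 bits.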
*/ + ma_int32 dither = ma_dither_s32(ditherMode, -0x8000, 0x7FFF); + if ((ma_int64)x + dither <= 0x7FFFFFFF) { + x = x + dither; + } else { + x = 0x7FFFFFFF; + } + + x = x >> 16; + dst_s16[i] = (ma_int16)x; + } + } +} + +static MA_INLINE void ma_pcm_s24_to_s16__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_s16__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s24_to_s16__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_s16__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_s24_to_s16__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_s16__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s24_to_s16__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_s16__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_s24_to_s16(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s24_to_s16__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_s24_to_s16__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_s24_to_s16__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_s24_to_s16__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_s24_to_s16__optimized(dst, src, count, ditherMode); + } +#endif +} + + +MA_API void ma_pcm_s24_to_s24(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + (void)ditherMode; + + ma_copy_memory_64(dst, src, count * 3); +} + + +static MA_INLINE void ma_pcm_s24_to_s32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_int32* dst_s32 = (ma_int32*)dst; + const ma_uint8* src_s24 = (const ma_uint8*)src; + + ma_uint64 i; + for (i = 0; i < count; i += 1) { + dst_s32[i] = (ma_int32)(((ma_uint32)(src_s24[i*3+0]) << 8) | ((ma_uint32)(src_s24[i*3+1]) << 16) | ((ma_uint32)(src_s24[i*3+2])) << 24); + } + + (void)ditherMode; +} + +static MA_INLINE void ma_pcm_s24_to_s32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_s32__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s24_to_s32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_s32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_s24_to_s32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_s32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s24_to_s32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_s32__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_s24_to_s32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s24_to_s32__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + 
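+        /* The dedicated AVX2 variant currently just forwards to the generic implementation. */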
ma_pcm_s24_to_s32__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_s24_to_s32__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_s24_to_s32__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_s24_to_s32__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_s24_to_f32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + float* dst_f32 = (float*)dst; + const ma_uint8* src_s24 = (const ma_uint8*)src; + + ma_uint64 i; + for (i = 0; i < count; i += 1) { + float x = (float)(((ma_int32)(((ma_uint32)(src_s24[i*3+0]) << 8) | ((ma_uint32)(src_s24[i*3+1]) << 16) | ((ma_uint32)(src_s24[i*3+2])) << 24)) >> 8); + +#if 0 + /* The accurate way. */ + x = x + 8388608.0f; /* -8388608..8388607 to 0..16777215 */ + x = x * 0.00000011920929665621f; /* 0..16777215 to 0..2 */ + x = x - 1; /* 0..2 to -1..1 */ +#else + /* The fast way. */ + x = x * 0.00000011920928955078125f; /* -8388608..8388607 to -1..0.999969482421875 */ +#endif + + dst_f32[i] = x; + } + + (void)ditherMode; +} + +static MA_INLINE void ma_pcm_s24_to_f32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_f32__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s24_to_f32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_f32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_s24_to_f32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_f32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s24_to_f32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_f32__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_s24_to_f32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s24_to_f32__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_s24_to_f32__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_s24_to_f32__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_s24_to_f32__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_s24_to_f32__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_interleave_s24__reference(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_uint8* dst8 = (ma_uint8*)dst; + const ma_uint8** src8 = (const ma_uint8**)src; + + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst8[iFrame*3*channels + iChannel*3 + 0] = src8[iChannel][iFrame*3 + 0]; + dst8[iFrame*3*channels + iChannel*3 + 1] = src8[iChannel][iFrame*3 + 1]; + dst8[iFrame*3*channels + iChannel*3 + 2] = src8[iChannel][iFrame*3 + 2]; + } + } +} + +static MA_INLINE void ma_pcm_interleave_s24__optimized(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_pcm_interleave_s24__reference(dst, src, frameCount, 
channels); +} + +MA_API void ma_pcm_interleave_s24(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_interleave_s24__reference(dst, src, frameCount, channels); +#else + ma_pcm_interleave_s24__optimized(dst, src, frameCount, channels); +#endif +} + + +static MA_INLINE void ma_pcm_deinterleave_s24__reference(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_uint8** dst8 = (ma_uint8**)dst; + const ma_uint8* src8 = (const ma_uint8*)src; + + ma_uint32 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst8[iChannel][iFrame*3 + 0] = src8[iFrame*3*channels + iChannel*3 + 0]; + dst8[iChannel][iFrame*3 + 1] = src8[iFrame*3*channels + iChannel*3 + 1]; + dst8[iChannel][iFrame*3 + 2] = src8[iFrame*3*channels + iChannel*3 + 2]; + } + } +} + +static MA_INLINE void ma_pcm_deinterleave_s24__optimized(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_pcm_deinterleave_s24__reference(dst, src, frameCount, channels); +} + +MA_API void ma_pcm_deinterleave_s24(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_deinterleave_s24__reference(dst, src, frameCount, channels); +#else + ma_pcm_deinterleave_s24__optimized(dst, src, frameCount, channels); +#endif +} + + + +/* s32 */ +static MA_INLINE void ma_pcm_s32_to_u8__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint8* dst_u8 = (ma_uint8*)dst; + const ma_int32* src_s32 = (const ma_int32*)src; + + if (ditherMode == ma_dither_mode_none) { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int32 x = src_s32[i]; + x = x >> 24; + x = x + 128; + dst_u8[i] = (ma_uint8)x; + } + } else { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int32 x = src_s32[i]; + + /* Dither. Don't overflow. 
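+               The dither range (-0x800000..0x7FFFFF) is plus/minus half of an 8-bit LSB expressed at 32-bit scale; clamping prevents signed overflow before the shift down to 8 bits.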
*/ + ma_int32 dither = ma_dither_s32(ditherMode, -0x800000, 0x7FFFFF); + if ((ma_int64)x + dither <= 0x7FFFFFFF) { + x = x + dither; + } else { + x = 0x7FFFFFFF; + } + + x = x >> 24; + x = x + 128; + dst_u8[i] = (ma_uint8)x; + } + } +} + +static MA_INLINE void ma_pcm_s32_to_u8__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_u8__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s32_to_u8__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_u8__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_s32_to_u8__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_u8__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s32_to_u8__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_u8__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_s32_to_u8(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s32_to_u8__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_s32_to_u8__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_s32_to_u8__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_s32_to_u8__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_s32_to_u8__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_s32_to_s16__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_int16* dst_s16 = (ma_int16*)dst; + const ma_int32* src_s32 = (const ma_int32*)src; + + if (ditherMode == ma_dither_mode_none) { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int32 x = src_s32[i]; + x = x >> 16; + dst_s16[i] = (ma_int16)x; + } + } else { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int32 x = src_s32[i]; + + /* Dither. Don't overflow. 
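+               Here the dither range (-0x8000..0x7FFF) is plus/minus half of a 16-bit LSB at 32-bit scale; the same clamp protects against signed overflow before the shift down to 16 bits.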
*/ + ma_int32 dither = ma_dither_s32(ditherMode, -0x8000, 0x7FFF); + if ((ma_int64)x + dither <= 0x7FFFFFFF) { + x = x + dither; + } else { + x = 0x7FFFFFFF; + } + + x = x >> 16; + dst_s16[i] = (ma_int16)x; + } + } +} + +static MA_INLINE void ma_pcm_s32_to_s16__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_s16__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s32_to_s16__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_s16__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_s32_to_s16__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_s16__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s32_to_s16__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_s16__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_s32_to_s16(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s32_to_s16__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_s32_to_s16__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_s32_to_s16__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_s32_to_s16__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_s32_to_s16__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_s32_to_s24__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint8* dst_s24 = (ma_uint8*)dst; + const ma_int32* src_s32 = (const ma_int32*)src; + + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_uint32 x = (ma_uint32)src_s32[i]; + dst_s24[i*3+0] = (ma_uint8)((x & 0x0000FF00) >> 8); + dst_s24[i*3+1] = (ma_uint8)((x & 0x00FF0000) >> 16); + dst_s24[i*3+2] = (ma_uint8)((x & 0xFF000000) >> 24); + } + + (void)ditherMode; /* No dithering for s32 -> s24. 
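+                       The low 8 bits of each sample are simply truncated.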
*/ +} + +static MA_INLINE void ma_pcm_s32_to_s24__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_s24__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s32_to_s24__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_s24__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_s32_to_s24__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_s24__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s32_to_s24__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_s24__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_s32_to_s24(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s32_to_s24__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_s32_to_s24__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_s32_to_s24__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_s32_to_s24__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_s32_to_s24__optimized(dst, src, count, ditherMode); + } +#endif +} + + +MA_API void ma_pcm_s32_to_s32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + (void)ditherMode; + + ma_copy_memory_64(dst, src, count * sizeof(ma_int32)); +} + + +static MA_INLINE void ma_pcm_s32_to_f32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + float* dst_f32 = (float*)dst; + const ma_int32* src_s32 = (const ma_int32*)src; + + ma_uint64 i; + for (i = 0; i < count; i += 1) { + double x = src_s32[i]; + +#if 0 + x = x + 2147483648.0; + x = x * 0.0000000004656612873077392578125; + x = x - 1; +#else + x = x / 2147483648.0; +#endif + + dst_f32[i] = (float)x; + } + + (void)ditherMode; /* No dithering for s32 -> f32. 
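+                       Dither is only relevant when reducing to a lower-resolution integer format.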
*/ +} + +static MA_INLINE void ma_pcm_s32_to_f32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_f32__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s32_to_f32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_f32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_s32_to_f32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_f32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s32_to_f32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_f32__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_s32_to_f32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s32_to_f32__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_s32_to_f32__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_s32_to_f32__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_s32_to_f32__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_s32_to_f32__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_interleave_s32__reference(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_int32* dst_s32 = (ma_int32*)dst; + const ma_int32** src_s32 = (const ma_int32**)src; + + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst_s32[iFrame*channels + iChannel] = src_s32[iChannel][iFrame]; + } + } +} + +static MA_INLINE void ma_pcm_interleave_s32__optimized(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_pcm_interleave_s32__reference(dst, src, frameCount, channels); +} + +MA_API void ma_pcm_interleave_s32(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_interleave_s32__reference(dst, src, frameCount, channels); +#else + ma_pcm_interleave_s32__optimized(dst, src, frameCount, channels); +#endif +} + + +static MA_INLINE void ma_pcm_deinterleave_s32__reference(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_int32** dst_s32 = (ma_int32**)dst; + const ma_int32* src_s32 = (const ma_int32*)src; + + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst_s32[iChannel][iFrame] = src_s32[iFrame*channels + iChannel]; + } + } +} + +static MA_INLINE void ma_pcm_deinterleave_s32__optimized(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_pcm_deinterleave_s32__reference(dst, src, frameCount, channels); +} + +MA_API void ma_pcm_deinterleave_s32(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_deinterleave_s32__reference(dst, src, frameCount, channels); +#else + ma_pcm_deinterleave_s32__optimized(dst, src, frameCount, channels); +#endif +} + + +/* f32 */ +static MA_INLINE 
void ma_pcm_f32_to_u8__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint64 i; + + ma_uint8* dst_u8 = (ma_uint8*)dst; + const float* src_f32 = (const float*)src; + + float ditherMin = 0; + float ditherMax = 0; + if (ditherMode != ma_dither_mode_none) { + ditherMin = 1.0f / -128; + ditherMax = 1.0f / 127; + } + + for (i = 0; i < count; i += 1) { + float x = src_f32[i]; + x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax); + x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */ + x = x + 1; /* -1..1 to 0..2 */ + x = x * 127.5f; /* 0..2 to 0..255 */ + + dst_u8[i] = (ma_uint8)x; + } +} + +static MA_INLINE void ma_pcm_f32_to_u8__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_f32_to_u8__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_f32_to_u8__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_f32_to_u8__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_f32_to_u8__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_f32_to_u8__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_f32_to_u8__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_f32_to_u8__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_f32_to_u8(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_f32_to_u8__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_f32_to_u8__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_f32_to_u8__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_f32_to_u8__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_f32_to_u8__optimized(dst, src, count, ditherMode); + } +#endif +} + +#ifdef MA_USE_REFERENCE_CONVERSION_APIS +static MA_INLINE void ma_pcm_f32_to_s16__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint64 i; + + ma_int16* dst_s16 = (ma_int16*)dst; + const float* src_f32 = (const float*)src; + + float ditherMin = 0; + float ditherMax = 0; + if (ditherMode != ma_dither_mode_none) { + ditherMin = 1.0f / -32768; + ditherMax = 1.0f / 32767; + } + + for (i = 0; i < count; i += 1) { + float x = src_f32[i]; + x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax); + x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */ + +#if 0 + /* The accurate way. */ + x = x + 1; /* -1..1 to 0..2 */ + x = x * 32767.5f; /* 0..2 to 0..65535 */ + x = x - 32768.0f; /* 0...65535 to -32768..32767 */ +#else + /* The fast way. */ + x = x * 32767.0f; /* -1..1 to -32767..32767 */ +#endif + + dst_s16[i] = (ma_int16)x; + } +} +#else +static MA_INLINE void ma_pcm_f32_to_s16__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint64 i; + ma_uint64 i4; + ma_uint64 count4; + + ma_int16* dst_s16 = (ma_int16*)dst; + const float* src_f32 = (const float*)src; + + float ditherMin = 0; + float ditherMax = 0; + if (ditherMode != ma_dither_mode_none) { + ditherMin = 1.0f / -32768; + ditherMax = 1.0f / 32767; + } + + /* Unrolled. 
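+       Four samples per iteration; the remainder is handled by the scalar loop below.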
*/ + i = 0; + count4 = count >> 2; + for (i4 = 0; i4 < count4; i4 += 1) { + float d0 = ma_dither_f32(ditherMode, ditherMin, ditherMax); + float d1 = ma_dither_f32(ditherMode, ditherMin, ditherMax); + float d2 = ma_dither_f32(ditherMode, ditherMin, ditherMax); + float d3 = ma_dither_f32(ditherMode, ditherMin, ditherMax); + + float x0 = src_f32[i+0]; + float x1 = src_f32[i+1]; + float x2 = src_f32[i+2]; + float x3 = src_f32[i+3]; + + x0 = x0 + d0; + x1 = x1 + d1; + x2 = x2 + d2; + x3 = x3 + d3; + + x0 = ((x0 < -1) ? -1 : ((x0 > 1) ? 1 : x0)); + x1 = ((x1 < -1) ? -1 : ((x1 > 1) ? 1 : x1)); + x2 = ((x2 < -1) ? -1 : ((x2 > 1) ? 1 : x2)); + x3 = ((x3 < -1) ? -1 : ((x3 > 1) ? 1 : x3)); + + x0 = x0 * 32767.0f; + x1 = x1 * 32767.0f; + x2 = x2 * 32767.0f; + x3 = x3 * 32767.0f; + + dst_s16[i+0] = (ma_int16)x0; + dst_s16[i+1] = (ma_int16)x1; + dst_s16[i+2] = (ma_int16)x2; + dst_s16[i+3] = (ma_int16)x3; + + i += 4; + } + + /* Leftover. */ + for (; i < count; i += 1) { + float x = src_f32[i]; + x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax); + x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */ + x = x * 32767.0f; /* -1..1 to -32767..32767 */ + + dst_s16[i] = (ma_int16)x; + } +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_f32_to_s16__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint64 i; + ma_uint64 i8; + ma_uint64 count8; + ma_int16* dst_s16; + const float* src_f32; + float ditherMin; + float ditherMax; + + /* Both the input and output buffers need to be aligned to 16 bytes. */ + if ((((ma_uintptr)dst & 15) != 0) || (((ma_uintptr)src & 15) != 0)) { + ma_pcm_f32_to_s16__optimized(dst, src, count, ditherMode); + return; + } + + dst_s16 = (ma_int16*)dst; + src_f32 = (const float*)src; + + ditherMin = 0; + ditherMax = 0; + if (ditherMode != ma_dither_mode_none) { + ditherMin = 1.0f / -32768; + ditherMax = 1.0f / 32767; + } + + i = 0; + + /* SSE2. SSE allows us to output 8 s16's at a time which means our loop is unrolled 8 times. */ + count8 = count >> 3; + for (i8 = 0; i8 < count8; i8 += 1) { + __m128 d0; + __m128 d1; + __m128 x0; + __m128 x1; + + if (ditherMode == ma_dither_mode_none) { + d0 = _mm_set1_ps(0); + d1 = _mm_set1_ps(0); + } else if (ditherMode == ma_dither_mode_rectangle) { + d0 = _mm_set_ps( + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax) + ); + d1 = _mm_set_ps( + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax) + ); + } else { + d0 = _mm_set_ps( + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax) + ); + d1 = _mm_set_ps( + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax) + ); + } + + x0 = *((__m128*)(src_f32 + i) + 0); + x1 = *((__m128*)(src_f32 + i) + 1); + + x0 = _mm_add_ps(x0, d0); + x1 = _mm_add_ps(x1, d1); + + x0 = _mm_mul_ps(x0, _mm_set1_ps(32767.0f)); + x1 = _mm_mul_ps(x1, _mm_set1_ps(32767.0f)); + + _mm_stream_si128(((__m128i*)(dst_s16 + i)), _mm_packs_epi32(_mm_cvttps_epi32(x0), _mm_cvttps_epi32(x1))); + + i += 8; + } + + + /* Leftover. 
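+       Scalar fallback for the final count % 8 samples that do not fill a full pair of SSE registers.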
*/ + for (; i < count; i += 1) { + float x = src_f32[i]; + x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax); + x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */ + x = x * 32767.0f; /* -1..1 to -32767..32767 */ + + dst_s16[i] = (ma_int16)x; + } +} +#endif /* SSE2 */ + +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_f32_to_s16__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint64 i; + ma_uint64 i16; + ma_uint64 count16; + ma_int16* dst_s16; + const float* src_f32; + float ditherMin; + float ditherMax; + + /* Both the input and output buffers need to be aligned to 32 bytes. */ + if ((((ma_uintptr)dst & 31) != 0) || (((ma_uintptr)src & 31) != 0)) { + ma_pcm_f32_to_s16__optimized(dst, src, count, ditherMode); + return; + } + + dst_s16 = (ma_int16*)dst; + src_f32 = (const float*)src; + + ditherMin = 0; + ditherMax = 0; + if (ditherMode != ma_dither_mode_none) { + ditherMin = 1.0f / -32768; + ditherMax = 1.0f / 32767; + } + + i = 0; + + /* AVX2. AVX2 allows us to output 16 s16's at a time which means our loop is unrolled 16 times. */ + count16 = count >> 4; + for (i16 = 0; i16 < count16; i16 += 1) { + __m256 d0; + __m256 d1; + __m256 x0; + __m256 x1; + __m256i i0; + __m256i i1; + __m256i p0; + __m256i p1; + __m256i r; + + if (ditherMode == ma_dither_mode_none) { + d0 = _mm256_set1_ps(0); + d1 = _mm256_set1_ps(0); + } else if (ditherMode == ma_dither_mode_rectangle) { + d0 = _mm256_set_ps( + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax) + ); + d1 = _mm256_set_ps( + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax) + ); + } else { + d0 = _mm256_set_ps( + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax) + ); + d1 = _mm256_set_ps( + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax) + ); + } + + x0 = *((__m256*)(src_f32 + i) + 0); + x1 = *((__m256*)(src_f32 + i) + 1); + + x0 = _mm256_add_ps(x0, d0); + x1 = _mm256_add_ps(x1, d1); + + x0 = _mm256_mul_ps(x0, _mm256_set1_ps(32767.0f)); + x1 = _mm256_mul_ps(x1, _mm256_set1_ps(32767.0f)); + + /* Computing the final result is a little more complicated for AVX2 than SSE2. 
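+        _mm256_packs_epi32 packs within each 128-bit lane rather than across the whole register, so the two converted vectors are first rearranged with _mm256_permute2x128_si256 to keep the 16 samples in their original order before the saturating pack and the streaming store.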
*/ + i0 = _mm256_cvttps_epi32(x0); + i1 = _mm256_cvttps_epi32(x1); + p0 = _mm256_permute2x128_si256(i0, i1, 0 | 32); + p1 = _mm256_permute2x128_si256(i0, i1, 1 | 48); + r = _mm256_packs_epi32(p0, p1); + + _mm256_stream_si256(((__m256i*)(dst_s16 + i)), r); + + i += 16; + } + + + /* Leftover. */ + for (; i < count; i += 1) { + float x = src_f32[i]; + x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax); + x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */ + x = x * 32767.0f; /* -1..1 to -32767..32767 */ + + dst_s16[i] = (ma_int16)x; + } +} +#endif /* AVX2 */ + +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_f32_to_s16__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint64 i; + ma_uint64 i8; + ma_uint64 count8; + ma_int16* dst_s16; + const float* src_f32; + float ditherMin; + float ditherMax; + + if (!ma_has_neon()) { + return ma_pcm_f32_to_s16__optimized(dst, src, count, ditherMode); + } + + /* Both the input and output buffers need to be aligned to 16 bytes. */ + if ((((ma_uintptr)dst & 15) != 0) || (((ma_uintptr)src & 15) != 0)) { + ma_pcm_f32_to_s16__optimized(dst, src, count, ditherMode); + return; + } + + dst_s16 = (ma_int16*)dst; + src_f32 = (const float*)src; + + ditherMin = 0; + ditherMax = 0; + if (ditherMode != ma_dither_mode_none) { + ditherMin = 1.0f / -32768; + ditherMax = 1.0f / 32767; + } + + i = 0; + + /* NEON. NEON allows us to output 8 s16's at a time which means our loop is unrolled 8 times. */ + count8 = count >> 3; + for (i8 = 0; i8 < count8; i8 += 1) { + float32x4_t d0; + float32x4_t d1; + float32x4_t x0; + float32x4_t x1; + int32x4_t i0; + int32x4_t i1; + + if (ditherMode == ma_dither_mode_none) { + d0 = vmovq_n_f32(0); + d1 = vmovq_n_f32(0); + } else if (ditherMode == ma_dither_mode_rectangle) { + float d0v[4]; + d0v[0] = ma_dither_f32_rectangle(ditherMin, ditherMax); + d0v[1] = ma_dither_f32_rectangle(ditherMin, ditherMax); + d0v[2] = ma_dither_f32_rectangle(ditherMin, ditherMax); + d0v[3] = ma_dither_f32_rectangle(ditherMin, ditherMax); + d0 = vld1q_f32(d0v); + + float d1v[4]; + d1v[0] = ma_dither_f32_rectangle(ditherMin, ditherMax); + d1v[1] = ma_dither_f32_rectangle(ditherMin, ditherMax); + d1v[2] = ma_dither_f32_rectangle(ditherMin, ditherMax); + d1v[3] = ma_dither_f32_rectangle(ditherMin, ditherMax); + d1 = vld1q_f32(d1v); + } else { + float d0v[4]; + d0v[0] = ma_dither_f32_triangle(ditherMin, ditherMax); + d0v[1] = ma_dither_f32_triangle(ditherMin, ditherMax); + d0v[2] = ma_dither_f32_triangle(ditherMin, ditherMax); + d0v[3] = ma_dither_f32_triangle(ditherMin, ditherMax); + d0 = vld1q_f32(d0v); + + float d1v[4]; + d1v[0] = ma_dither_f32_triangle(ditherMin, ditherMax); + d1v[1] = ma_dither_f32_triangle(ditherMin, ditherMax); + d1v[2] = ma_dither_f32_triangle(ditherMin, ditherMax); + d1v[3] = ma_dither_f32_triangle(ditherMin, ditherMax); + d1 = vld1q_f32(d1v); + } + + x0 = *((float32x4_t*)(src_f32 + i) + 0); + x1 = *((float32x4_t*)(src_f32 + i) + 1); + + x0 = vaddq_f32(x0, d0); + x1 = vaddq_f32(x1, d1); + + x0 = vmulq_n_f32(x0, 32767.0f); + x1 = vmulq_n_f32(x1, 32767.0f); + + i0 = vcvtq_s32_f32(x0); + i1 = vcvtq_s32_f32(x1); + *((int16x8_t*)(dst_s16 + i)) = vcombine_s16(vqmovn_s32(i0), vqmovn_s32(i1)); + + i += 8; + } + + + /* Leftover. */ + for (; i < count; i += 1) { + float x = src_f32[i]; + x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax); + x = ((x < -1) ? -1 : ((x > 1) ? 
1 : x)); /* clip */ + x = x * 32767.0f; /* -1..1 to -32767..32767 */ + + dst_s16[i] = (ma_int16)x; + } +} +#endif /* Neon */ +#endif /* MA_USE_REFERENCE_CONVERSION_APIS */ + +MA_API void ma_pcm_f32_to_s16(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_f32_to_s16__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_f32_to_s16__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_f32_to_s16__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_f32_to_s16__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_f32_to_s16__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_f32_to_s24__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint8* dst_s24 = (ma_uint8*)dst; + const float* src_f32 = (const float*)src; + + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int32 r; + float x = src_f32[i]; + x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */ + +#if 0 + /* The accurate way. */ + x = x + 1; /* -1..1 to 0..2 */ + x = x * 8388607.5f; /* 0..2 to 0..16777215 */ + x = x - 8388608.0f; /* 0..16777215 to -8388608..8388607 */ +#else + /* The fast way. */ + x = x * 8388607.0f; /* -1..1 to -8388607..8388607 */ +#endif + + r = (ma_int32)x; + dst_s24[(i*3)+0] = (ma_uint8)((r & 0x0000FF) >> 0); + dst_s24[(i*3)+1] = (ma_uint8)((r & 0x00FF00) >> 8); + dst_s24[(i*3)+2] = (ma_uint8)((r & 0xFF0000) >> 16); + } + + (void)ditherMode; /* No dithering for f32 -> s24. */ +} + +static MA_INLINE void ma_pcm_f32_to_s24__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_f32_to_s24__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_f32_to_s24__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_f32_to_s24__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_f32_to_s24__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_f32_to_s24__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_f32_to_s24__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_f32_to_s24__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_f32_to_s24(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_f32_to_s24__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_f32_to_s24__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_f32_to_s24__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_f32_to_s24__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_f32_to_s24__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_f32_to_s32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_int32* dst_s32 = (ma_int32*)dst; + const float* src_f32 = (const float*)src; + + ma_uint32 i; + for (i = 0; i < 
count; i += 1) { + double x = src_f32[i]; + x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */ + +#if 0 + /* The accurate way. */ + x = x + 1; /* -1..1 to 0..2 */ + x = x * 2147483647.5; /* 0..2 to 0..4294967295 */ + x = x - 2147483648.0; /* 0...4294967295 to -2147483648..2147483647 */ +#else + /* The fast way. */ + x = x * 2147483647.0; /* -1..1 to -2147483647..2147483647 */ +#endif + + dst_s32[i] = (ma_int32)x; + } + + (void)ditherMode; /* No dithering for f32 -> s32. */ +} + +static MA_INLINE void ma_pcm_f32_to_s32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_f32_to_s32__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_f32_to_s32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_f32_to_s32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_f32_to_s32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_f32_to_s32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_f32_to_s32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_f32_to_s32__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_f32_to_s32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_f32_to_s32__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_f32_to_s32__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_f32_to_s32__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_f32_to_s32__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_f32_to_s32__optimized(dst, src, count, ditherMode); + } +#endif +} + + +MA_API void ma_pcm_f32_to_f32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + (void)ditherMode; + + ma_copy_memory_64(dst, src, count * sizeof(float)); +} + + +static void ma_pcm_interleave_f32__reference(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ + float* dst_f32 = (float*)dst; + const float** src_f32 = (const float**)src; + + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst_f32[iFrame*channels + iChannel] = src_f32[iChannel][iFrame]; + } + } +} + +static void ma_pcm_interleave_f32__optimized(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_pcm_interleave_f32__reference(dst, src, frameCount, channels); +} + +MA_API void ma_pcm_interleave_f32(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_interleave_f32__reference(dst, src, frameCount, channels); +#else + ma_pcm_interleave_f32__optimized(dst, src, frameCount, channels); +#endif +} + + +static void ma_pcm_deinterleave_f32__reference(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ + float** dst_f32 = (float**)dst; + const float* src_f32 = (const float*)src; + + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + 
dst_f32[iChannel][iFrame] = src_f32[iFrame*channels + iChannel]; + } + } +} + +static void ma_pcm_deinterleave_f32__optimized(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_pcm_deinterleave_f32__reference(dst, src, frameCount, channels); +} + +MA_API void ma_pcm_deinterleave_f32(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_deinterleave_f32__reference(dst, src, frameCount, channels); +#else + ma_pcm_deinterleave_f32__optimized(dst, src, frameCount, channels); +#endif +} + + +MA_API void ma_pcm_convert(void* pOut, ma_format formatOut, const void* pIn, ma_format formatIn, ma_uint64 sampleCount, ma_dither_mode ditherMode) +{ + if (formatOut == formatIn) { + ma_copy_memory_64(pOut, pIn, sampleCount * ma_get_bytes_per_sample(formatOut)); + return; + } + + switch (formatIn) + { + case ma_format_u8: + { + switch (formatOut) + { + case ma_format_s16: ma_pcm_u8_to_s16(pOut, pIn, sampleCount, ditherMode); return; + case ma_format_s24: ma_pcm_u8_to_s24(pOut, pIn, sampleCount, ditherMode); return; + case ma_format_s32: ma_pcm_u8_to_s32(pOut, pIn, sampleCount, ditherMode); return; + case ma_format_f32: ma_pcm_u8_to_f32(pOut, pIn, sampleCount, ditherMode); return; + default: break; + } + } break; + + case ma_format_s16: + { + switch (formatOut) + { + case ma_format_u8: ma_pcm_s16_to_u8( pOut, pIn, sampleCount, ditherMode); return; + case ma_format_s24: ma_pcm_s16_to_s24(pOut, pIn, sampleCount, ditherMode); return; + case ma_format_s32: ma_pcm_s16_to_s32(pOut, pIn, sampleCount, ditherMode); return; + case ma_format_f32: ma_pcm_s16_to_f32(pOut, pIn, sampleCount, ditherMode); return; + default: break; + } + } break; + + case ma_format_s24: + { + switch (formatOut) + { + case ma_format_u8: ma_pcm_s24_to_u8( pOut, pIn, sampleCount, ditherMode); return; + case ma_format_s16: ma_pcm_s24_to_s16(pOut, pIn, sampleCount, ditherMode); return; + case ma_format_s32: ma_pcm_s24_to_s32(pOut, pIn, sampleCount, ditherMode); return; + case ma_format_f32: ma_pcm_s24_to_f32(pOut, pIn, sampleCount, ditherMode); return; + default: break; + } + } break; + + case ma_format_s32: + { + switch (formatOut) + { + case ma_format_u8: ma_pcm_s32_to_u8( pOut, pIn, sampleCount, ditherMode); return; + case ma_format_s16: ma_pcm_s32_to_s16(pOut, pIn, sampleCount, ditherMode); return; + case ma_format_s24: ma_pcm_s32_to_s24(pOut, pIn, sampleCount, ditherMode); return; + case ma_format_f32: ma_pcm_s32_to_f32(pOut, pIn, sampleCount, ditherMode); return; + default: break; + } + } break; + + case ma_format_f32: + { + switch (formatOut) + { + case ma_format_u8: ma_pcm_f32_to_u8( pOut, pIn, sampleCount, ditherMode); return; + case ma_format_s16: ma_pcm_f32_to_s16(pOut, pIn, sampleCount, ditherMode); return; + case ma_format_s24: ma_pcm_f32_to_s24(pOut, pIn, sampleCount, ditherMode); return; + case ma_format_s32: ma_pcm_f32_to_s32(pOut, pIn, sampleCount, ditherMode); return; + default: break; + } + } break; + + default: break; + } +} + +MA_API void ma_convert_pcm_frames_format(void* pOut, ma_format formatOut, const void* pIn, ma_format formatIn, ma_uint64 frameCount, ma_uint32 channels, ma_dither_mode ditherMode) +{ + ma_pcm_convert(pOut, formatOut, pIn, formatIn, frameCount * channels, ditherMode); +} + +MA_API void ma_deinterleave_pcm_frames(ma_format format, ma_uint32 channels, ma_uint64 frameCount, const void* pInterleavedPCMFrames, void** ppDeinterleavedPCMFrames) +{ + if (pInterleavedPCMFrames == NULL || ppDeinterleavedPCMFrames == 
NULL) { + return; /* Invalid args. */ + } + + /* For efficiency we do this per format. */ + switch (format) { + case ma_format_s16: + { + const ma_int16* pSrcS16 = (const ma_int16*)pInterleavedPCMFrames; + ma_uint64 iPCMFrame; + for (iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; ++iChannel) { + ma_int16* pDstS16 = (ma_int16*)ppDeinterleavedPCMFrames[iChannel]; + pDstS16[iPCMFrame] = pSrcS16[iPCMFrame*channels+iChannel]; + } + } + } break; + + case ma_format_f32: + { + const float* pSrcF32 = (const float*)pInterleavedPCMFrames; + ma_uint64 iPCMFrame; + for (iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; ++iChannel) { + float* pDstF32 = (float*)ppDeinterleavedPCMFrames[iChannel]; + pDstF32[iPCMFrame] = pSrcF32[iPCMFrame*channels+iChannel]; + } + } + } break; + + default: + { + ma_uint32 sampleSizeInBytes = ma_get_bytes_per_sample(format); + ma_uint64 iPCMFrame; + for (iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; ++iChannel) { + void* pDst = ma_offset_ptr(ppDeinterleavedPCMFrames[iChannel], iPCMFrame*sampleSizeInBytes); + const void* pSrc = ma_offset_ptr(pInterleavedPCMFrames, (iPCMFrame*channels+iChannel)*sampleSizeInBytes); + memcpy(pDst, pSrc, sampleSizeInBytes); + } + } + } break; + } +} + +MA_API void ma_interleave_pcm_frames(ma_format format, ma_uint32 channels, ma_uint64 frameCount, const void** ppDeinterleavedPCMFrames, void* pInterleavedPCMFrames) +{ + switch (format) + { + case ma_format_s16: + { + ma_int16* pDstS16 = (ma_int16*)pInterleavedPCMFrames; + ma_uint64 iPCMFrame; + for (iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; ++iChannel) { + const ma_int16* pSrcS16 = (const ma_int16*)ppDeinterleavedPCMFrames[iChannel]; + pDstS16[iPCMFrame*channels+iChannel] = pSrcS16[iPCMFrame]; + } + } + } break; + + case ma_format_f32: + { + float* pDstF32 = (float*)pInterleavedPCMFrames; + ma_uint64 iPCMFrame; + for (iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; ++iChannel) { + const float* pSrcF32 = (const float*)ppDeinterleavedPCMFrames[iChannel]; + pDstF32[iPCMFrame*channels+iChannel] = pSrcF32[iPCMFrame]; + } + } + } break; + + default: + { + ma_uint32 sampleSizeInBytes = ma_get_bytes_per_sample(format); + ma_uint64 iPCMFrame; + for (iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; ++iChannel) { + void* pDst = ma_offset_ptr(pInterleavedPCMFrames, (iPCMFrame*channels+iChannel)*sampleSizeInBytes); + const void* pSrc = ma_offset_ptr(ppDeinterleavedPCMFrames[iChannel], iPCMFrame*sampleSizeInBytes); + memcpy(pDst, pSrc, sampleSizeInBytes); + } + } + } break; + } +} + + + +/************************************************************************************************************************************************************** + +Channel Maps + +**************************************************************************************************************************************************************/ +static void ma_get_standard_channel_map_microsoft(ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS]) +{ + /* Based off the speaker configurations mentioned here: 
https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/content/ksmedia/ns-ksmedia-ksaudio_channel_config */ + switch (channels) + { + case 1: + { + channelMap[0] = MA_CHANNEL_MONO; + } break; + + case 2: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + } break; + + case 3: /* Not defined, but best guess. */ + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_FRONT_CENTER; + } break; + + case 4: + { +#ifndef MA_USE_QUAD_MICROSOFT_CHANNEL_MAP + /* Surround. Using the Surround profile has the advantage of the 3rd channel (MA_CHANNEL_FRONT_CENTER) mapping nicely with higher channel counts. */ + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_FRONT_CENTER; + channelMap[3] = MA_CHANNEL_BACK_CENTER; +#else + /* Quad. */ + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_BACK_LEFT; + channelMap[3] = MA_CHANNEL_BACK_RIGHT; +#endif + } break; + + case 5: /* Not defined, but best guess. */ + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_FRONT_CENTER; + channelMap[3] = MA_CHANNEL_BACK_LEFT; + channelMap[4] = MA_CHANNEL_BACK_RIGHT; + } break; + + case 6: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_FRONT_CENTER; + channelMap[3] = MA_CHANNEL_LFE; + channelMap[4] = MA_CHANNEL_SIDE_LEFT; + channelMap[5] = MA_CHANNEL_SIDE_RIGHT; + } break; + + case 7: /* Not defined, but best guess. */ + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_FRONT_CENTER; + channelMap[3] = MA_CHANNEL_LFE; + channelMap[4] = MA_CHANNEL_BACK_CENTER; + channelMap[5] = MA_CHANNEL_SIDE_LEFT; + channelMap[6] = MA_CHANNEL_SIDE_RIGHT; + } break; + + case 8: + default: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_FRONT_CENTER; + channelMap[3] = MA_CHANNEL_LFE; + channelMap[4] = MA_CHANNEL_BACK_LEFT; + channelMap[5] = MA_CHANNEL_BACK_RIGHT; + channelMap[6] = MA_CHANNEL_SIDE_LEFT; + channelMap[7] = MA_CHANNEL_SIDE_RIGHT; + } break; + } + + /* Remainder. 
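+    Channels beyond the eight positions defined above are assigned the generic auxiliary positions starting at MA_CHANNEL_AUX_0.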
*/ + if (channels > 8) { + ma_uint32 iChannel; + for (iChannel = 8; iChannel < MA_MAX_CHANNELS; ++iChannel) { + channelMap[iChannel] = (ma_channel)(MA_CHANNEL_AUX_0 + (iChannel-8)); + } + } +} + +static void ma_get_standard_channel_map_alsa(ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS]) +{ + switch (channels) + { + case 1: + { + channelMap[0] = MA_CHANNEL_MONO; + } break; + + case 2: + { + channelMap[0] = MA_CHANNEL_LEFT; + channelMap[1] = MA_CHANNEL_RIGHT; + } break; + + case 3: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_FRONT_CENTER; + } break; + + case 4: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_BACK_LEFT; + channelMap[3] = MA_CHANNEL_BACK_RIGHT; + } break; + + case 5: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_BACK_LEFT; + channelMap[3] = MA_CHANNEL_BACK_RIGHT; + channelMap[4] = MA_CHANNEL_FRONT_CENTER; + } break; + + case 6: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_BACK_LEFT; + channelMap[3] = MA_CHANNEL_BACK_RIGHT; + channelMap[4] = MA_CHANNEL_FRONT_CENTER; + channelMap[5] = MA_CHANNEL_LFE; + } break; + + case 7: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_BACK_LEFT; + channelMap[3] = MA_CHANNEL_BACK_RIGHT; + channelMap[4] = MA_CHANNEL_FRONT_CENTER; + channelMap[5] = MA_CHANNEL_LFE; + channelMap[6] = MA_CHANNEL_BACK_CENTER; + } break; + + case 8: + default: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_BACK_LEFT; + channelMap[3] = MA_CHANNEL_BACK_RIGHT; + channelMap[4] = MA_CHANNEL_FRONT_CENTER; + channelMap[5] = MA_CHANNEL_LFE; + channelMap[6] = MA_CHANNEL_SIDE_LEFT; + channelMap[7] = MA_CHANNEL_SIDE_RIGHT; + } break; + } + + /* Remainder. */ + if (channels > 8) { + ma_uint32 iChannel; + for (iChannel = 8; iChannel < MA_MAX_CHANNELS; ++iChannel) { + channelMap[iChannel] = (ma_channel)(MA_CHANNEL_AUX_0 + (iChannel-8)); + } + } +} + +static void ma_get_standard_channel_map_rfc3551(ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS]) +{ + switch (channels) + { + case 1: + { + channelMap[0] = MA_CHANNEL_MONO; + } break; + + case 2: + { + channelMap[0] = MA_CHANNEL_LEFT; + channelMap[1] = MA_CHANNEL_RIGHT; + } break; + + case 3: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_FRONT_CENTER; + } break; + + case 4: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_CENTER; + channelMap[2] = MA_CHANNEL_FRONT_RIGHT; + channelMap[3] = MA_CHANNEL_BACK_CENTER; + } break; + + case 5: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_FRONT_CENTER; + channelMap[3] = MA_CHANNEL_BACK_LEFT; + channelMap[4] = MA_CHANNEL_BACK_RIGHT; + } break; + + case 6: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_SIDE_LEFT; + channelMap[2] = MA_CHANNEL_FRONT_CENTER; + channelMap[3] = MA_CHANNEL_FRONT_RIGHT; + channelMap[4] = MA_CHANNEL_SIDE_RIGHT; + channelMap[5] = MA_CHANNEL_BACK_CENTER; + } break; + } + + /* Remainder. 
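+    RFC 3551 only defines speaker orderings for up to six channels.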
*/ + if (channels > 8) { + ma_uint32 iChannel; + for (iChannel = 6; iChannel < MA_MAX_CHANNELS; ++iChannel) { + channelMap[iChannel] = (ma_channel)(MA_CHANNEL_AUX_0 + (iChannel-6)); + } + } +} + +static void ma_get_standard_channel_map_flac(ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS]) +{ + switch (channels) + { + case 1: + { + channelMap[0] = MA_CHANNEL_MONO; + } break; + + case 2: + { + channelMap[0] = MA_CHANNEL_LEFT; + channelMap[1] = MA_CHANNEL_RIGHT; + } break; + + case 3: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_FRONT_CENTER; + } break; + + case 4: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_BACK_LEFT; + channelMap[3] = MA_CHANNEL_BACK_RIGHT; + } break; + + case 5: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_FRONT_CENTER; + channelMap[3] = MA_CHANNEL_BACK_LEFT; + channelMap[4] = MA_CHANNEL_BACK_RIGHT; + } break; + + case 6: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_FRONT_CENTER; + channelMap[3] = MA_CHANNEL_LFE; + channelMap[4] = MA_CHANNEL_BACK_LEFT; + channelMap[5] = MA_CHANNEL_BACK_RIGHT; + } break; + + case 7: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_FRONT_CENTER; + channelMap[3] = MA_CHANNEL_LFE; + channelMap[4] = MA_CHANNEL_BACK_CENTER; + channelMap[5] = MA_CHANNEL_SIDE_LEFT; + channelMap[6] = MA_CHANNEL_SIDE_RIGHT; + } break; + + case 8: + default: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_FRONT_CENTER; + channelMap[3] = MA_CHANNEL_LFE; + channelMap[4] = MA_CHANNEL_BACK_LEFT; + channelMap[5] = MA_CHANNEL_BACK_RIGHT; + channelMap[6] = MA_CHANNEL_SIDE_LEFT; + channelMap[7] = MA_CHANNEL_SIDE_RIGHT; + } break; + } + + /* Remainder. */ + if (channels > 8) { + ma_uint32 iChannel; + for (iChannel = 8; iChannel < MA_MAX_CHANNELS; ++iChannel) { + channelMap[iChannel] = (ma_channel)(MA_CHANNEL_AUX_0 + (iChannel-8)); + } + } +} + +static void ma_get_standard_channel_map_vorbis(ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS]) +{ + /* In Vorbis' type 0 channel mapping, the first two channels are not always the standard left/right - it will have the center speaker where the right usually goes. Why?! 
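+    (The Vorbis I specification orders the front speakers left, center, right, which is why the center position comes second in the maps below whenever a front center channel is present.)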
*/ + switch (channels) + { + case 1: + { + channelMap[0] = MA_CHANNEL_MONO; + } break; + + case 2: + { + channelMap[0] = MA_CHANNEL_LEFT; + channelMap[1] = MA_CHANNEL_RIGHT; + } break; + + case 3: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_CENTER; + channelMap[2] = MA_CHANNEL_FRONT_RIGHT; + } break; + + case 4: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_BACK_LEFT; + channelMap[3] = MA_CHANNEL_BACK_RIGHT; + } break; + + case 5: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_CENTER; + channelMap[2] = MA_CHANNEL_FRONT_RIGHT; + channelMap[3] = MA_CHANNEL_BACK_LEFT; + channelMap[4] = MA_CHANNEL_BACK_RIGHT; + } break; + + case 6: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_CENTER; + channelMap[2] = MA_CHANNEL_FRONT_RIGHT; + channelMap[3] = MA_CHANNEL_BACK_LEFT; + channelMap[4] = MA_CHANNEL_BACK_RIGHT; + channelMap[5] = MA_CHANNEL_LFE; + } break; + + case 7: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_CENTER; + channelMap[2] = MA_CHANNEL_FRONT_RIGHT; + channelMap[3] = MA_CHANNEL_SIDE_LEFT; + channelMap[4] = MA_CHANNEL_SIDE_RIGHT; + channelMap[5] = MA_CHANNEL_BACK_CENTER; + channelMap[6] = MA_CHANNEL_LFE; + } break; + + case 8: + default: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_CENTER; + channelMap[2] = MA_CHANNEL_FRONT_RIGHT; + channelMap[3] = MA_CHANNEL_SIDE_LEFT; + channelMap[4] = MA_CHANNEL_SIDE_RIGHT; + channelMap[5] = MA_CHANNEL_BACK_LEFT; + channelMap[6] = MA_CHANNEL_BACK_RIGHT; + channelMap[7] = MA_CHANNEL_LFE; + } break; + } + + /* Remainder. */ + if (channels > 8) { + ma_uint32 iChannel; + for (iChannel = 8; iChannel < MA_MAX_CHANNELS; ++iChannel) { + channelMap[iChannel] = (ma_channel)(MA_CHANNEL_AUX_0 + (iChannel-8)); + } + } +} + +static void ma_get_standard_channel_map_sound4(ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS]) +{ + switch (channels) + { + case 1: + { + channelMap[0] = MA_CHANNEL_MONO; + } break; + + case 2: + { + channelMap[0] = MA_CHANNEL_LEFT; + channelMap[1] = MA_CHANNEL_RIGHT; + } break; + + case 3: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_BACK_CENTER; + } break; + + case 4: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_BACK_LEFT; + channelMap[3] = MA_CHANNEL_BACK_RIGHT; + } break; + + case 5: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_BACK_LEFT; + channelMap[3] = MA_CHANNEL_BACK_RIGHT; + channelMap[4] = MA_CHANNEL_FRONT_CENTER; + } break; + + case 6: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_BACK_LEFT; + channelMap[3] = MA_CHANNEL_BACK_RIGHT; + channelMap[4] = MA_CHANNEL_FRONT_CENTER; + channelMap[5] = MA_CHANNEL_LFE; + } break; + + case 7: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_BACK_LEFT; + channelMap[3] = MA_CHANNEL_BACK_RIGHT; + channelMap[4] = MA_CHANNEL_FRONT_CENTER; + channelMap[5] = MA_CHANNEL_BACK_CENTER; + channelMap[6] = MA_CHANNEL_LFE; + } break; + + case 8: + default: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_BACK_LEFT; + channelMap[3] = 
MA_CHANNEL_BACK_RIGHT; + channelMap[4] = MA_CHANNEL_FRONT_CENTER; + channelMap[5] = MA_CHANNEL_LFE; + channelMap[6] = MA_CHANNEL_SIDE_LEFT; + channelMap[7] = MA_CHANNEL_SIDE_RIGHT; + } break; + } + + /* Remainder. */ + if (channels > 8) { + ma_uint32 iChannel; + for (iChannel = 8; iChannel < MA_MAX_CHANNELS; ++iChannel) { + channelMap[iChannel] = (ma_channel)(MA_CHANNEL_AUX_0 + (iChannel-8)); + } + } +} + +static void ma_get_standard_channel_map_sndio(ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS]) +{ + switch (channels) + { + case 1: + { + channelMap[0] = MA_CHANNEL_MONO; + } break; + + case 2: + { + channelMap[0] = MA_CHANNEL_LEFT; + channelMap[1] = MA_CHANNEL_RIGHT; + } break; + + case 3: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_FRONT_CENTER; + } break; + + case 4: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_BACK_LEFT; + channelMap[3] = MA_CHANNEL_BACK_RIGHT; + } break; + + case 5: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_BACK_LEFT; + channelMap[3] = MA_CHANNEL_BACK_RIGHT; + channelMap[4] = MA_CHANNEL_FRONT_CENTER; + } break; + + case 6: + default: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_BACK_LEFT; + channelMap[3] = MA_CHANNEL_BACK_RIGHT; + channelMap[4] = MA_CHANNEL_FRONT_CENTER; + channelMap[5] = MA_CHANNEL_LFE; + } break; + } + + /* Remainder. */ + if (channels > 6) { + ma_uint32 iChannel; + for (iChannel = 6; iChannel < MA_MAX_CHANNELS; ++iChannel) { + channelMap[iChannel] = (ma_channel)(MA_CHANNEL_AUX_0 + (iChannel-6)); + } + } +} + +MA_API void ma_get_standard_channel_map(ma_standard_channel_map standardChannelMap, ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS]) +{ + switch (standardChannelMap) + { + case ma_standard_channel_map_alsa: + { + ma_get_standard_channel_map_alsa(channels, channelMap); + } break; + + case ma_standard_channel_map_rfc3551: + { + ma_get_standard_channel_map_rfc3551(channels, channelMap); + } break; + + case ma_standard_channel_map_flac: + { + ma_get_standard_channel_map_flac(channels, channelMap); + } break; + + case ma_standard_channel_map_vorbis: + { + ma_get_standard_channel_map_vorbis(channels, channelMap); + } break; + + case ma_standard_channel_map_sound4: + { + ma_get_standard_channel_map_sound4(channels, channelMap); + } break; + + case ma_standard_channel_map_sndio: + { + ma_get_standard_channel_map_sndio(channels, channelMap); + } break; + + case ma_standard_channel_map_microsoft: + default: + { + ma_get_standard_channel_map_microsoft(channels, channelMap); + } break; + } +} + +MA_API void ma_channel_map_copy(ma_channel* pOut, const ma_channel* pIn, ma_uint32 channels) +{ + if (pOut != NULL && pIn != NULL && channels > 0) { + MA_COPY_MEMORY(pOut, pIn, sizeof(*pOut) * channels); + } +} + +MA_API ma_bool32 ma_channel_map_valid(ma_uint32 channels, const ma_channel channelMap[MA_MAX_CHANNELS]) +{ + if (channelMap == NULL) { + return MA_FALSE; + } + + /* A channel count of 0 is invalid. */ + if (channels == 0) { + return MA_FALSE; + } + + /* It does not make sense to have a mono channel when there is more than 1 channel. 
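+    A channel map that mixes MA_CHANNEL_MONO with other speaker positions is therefore rejected.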
*/ + if (channels > 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; ++iChannel) { + if (channelMap[iChannel] == MA_CHANNEL_MONO) { + return MA_FALSE; + } + } + } + + return MA_TRUE; +} + +MA_API ma_bool32 ma_channel_map_equal(ma_uint32 channels, const ma_channel channelMapA[MA_MAX_CHANNELS], const ma_channel channelMapB[MA_MAX_CHANNELS]) +{ + ma_uint32 iChannel; + + if (channelMapA == channelMapB) { + return MA_FALSE; + } + + if (channels == 0 || channels > MA_MAX_CHANNELS) { + return MA_FALSE; + } + + for (iChannel = 0; iChannel < channels; ++iChannel) { + if (channelMapA[iChannel] != channelMapB[iChannel]) { + return MA_FALSE; + } + } + + return MA_TRUE; +} + +MA_API ma_bool32 ma_channel_map_blank(ma_uint32 channels, const ma_channel channelMap[MA_MAX_CHANNELS]) +{ + ma_uint32 iChannel; + + for (iChannel = 0; iChannel < channels; ++iChannel) { + if (channelMap[iChannel] != MA_CHANNEL_NONE) { + return MA_FALSE; + } + } + + return MA_TRUE; +} + +MA_API ma_bool32 ma_channel_map_contains_channel_position(ma_uint32 channels, const ma_channel channelMap[MA_MAX_CHANNELS], ma_channel channelPosition) +{ + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; ++iChannel) { + if (channelMap[iChannel] == channelPosition) { + return MA_TRUE; + } + } + + return MA_FALSE; +} + + + +/************************************************************************************************************************************************************** + +Conversion Helpers + +**************************************************************************************************************************************************************/ +MA_API ma_uint64 ma_convert_frames(void* pOut, ma_uint64 frameCountOut, ma_format formatOut, ma_uint32 channelsOut, ma_uint32 sampleRateOut, const void* pIn, ma_uint64 frameCountIn, ma_format formatIn, ma_uint32 channelsIn, ma_uint32 sampleRateIn) +{ + ma_data_converter_config config; + + config = ma_data_converter_config_init(formatIn, formatOut, channelsIn, channelsOut, sampleRateIn, sampleRateOut); + ma_get_standard_channel_map(ma_standard_channel_map_default, channelsOut, config.channelMapOut); + ma_get_standard_channel_map(ma_standard_channel_map_default, channelsIn, config.channelMapIn); + config.resampling.linear.lpfOrder = ma_min(MA_DEFAULT_RESAMPLER_LPF_ORDER, MA_MAX_FILTER_ORDER); + + return ma_convert_frames_ex(pOut, frameCountOut, pIn, frameCountIn, &config); +} + +MA_API ma_uint64 ma_convert_frames_ex(void* pOut, ma_uint64 frameCountOut, const void* pIn, ma_uint64 frameCountIn, const ma_data_converter_config* pConfig) +{ + ma_result result; + ma_data_converter converter; + + if (frameCountIn == 0 || pConfig == NULL) { + return 0; + } + + result = ma_data_converter_init(pConfig, &converter); + if (result != MA_SUCCESS) { + return 0; /* Failed to initialize the data converter. 
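+        The converter is a local object here, so there is nothing to uninitialize on this failure path.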
*/ + } + + if (pOut == NULL) { + frameCountOut = ma_data_converter_get_expected_output_frame_count(&converter, frameCountIn); + } else { + result = ma_data_converter_process_pcm_frames(&converter, pIn, &frameCountIn, pOut, &frameCountOut); + if (result != MA_SUCCESS) { + frameCountOut = 0; + } + } + + ma_data_converter_uninit(&converter); + return frameCountOut; +} + + +/************************************************************************************************************************************************************** + +Ring Buffer + +**************************************************************************************************************************************************************/ +static MA_INLINE ma_uint32 ma_rb__extract_offset_in_bytes(ma_uint32 encodedOffset) +{ + return encodedOffset & 0x7FFFFFFF; +} + +static MA_INLINE ma_uint32 ma_rb__extract_offset_loop_flag(ma_uint32 encodedOffset) +{ + return encodedOffset & 0x80000000; +} + +static MA_INLINE void* ma_rb__get_read_ptr(ma_rb* pRB) +{ + MA_ASSERT(pRB != NULL); + return ma_offset_ptr(pRB->pBuffer, ma_rb__extract_offset_in_bytes(pRB->encodedReadOffset)); +} + +static MA_INLINE void* ma_rb__get_write_ptr(ma_rb* pRB) +{ + MA_ASSERT(pRB != NULL); + return ma_offset_ptr(pRB->pBuffer, ma_rb__extract_offset_in_bytes(pRB->encodedWriteOffset)); +} + +static MA_INLINE ma_uint32 ma_rb__construct_offset(ma_uint32 offsetInBytes, ma_uint32 offsetLoopFlag) +{ + return offsetLoopFlag | offsetInBytes; +} + +static MA_INLINE void ma_rb__deconstruct_offset(ma_uint32 encodedOffset, ma_uint32* pOffsetInBytes, ma_uint32* pOffsetLoopFlag) +{ + MA_ASSERT(pOffsetInBytes != NULL); + MA_ASSERT(pOffsetLoopFlag != NULL); + + *pOffsetInBytes = ma_rb__extract_offset_in_bytes(encodedOffset); + *pOffsetLoopFlag = ma_rb__extract_offset_loop_flag(encodedOffset); +} + + +MA_API ma_result ma_rb_init_ex(size_t subbufferSizeInBytes, size_t subbufferCount, size_t subbufferStrideInBytes, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_rb* pRB) +{ + ma_result result; + const ma_uint32 maxSubBufferSize = 0x7FFFFFFF - (MA_SIMD_ALIGNMENT-1); + + if (pRB == NULL) { + return MA_INVALID_ARGS; + } + + if (subbufferSizeInBytes == 0 || subbufferCount == 0) { + return MA_INVALID_ARGS; + } + + if (subbufferSizeInBytes > maxSubBufferSize) { + return MA_INVALID_ARGS; /* Maximum buffer size is ~2GB. The most significant bit is a flag for use internally. */ + } + + + MA_ZERO_OBJECT(pRB); + + result = ma_allocation_callbacks_init_copy(&pRB->allocationCallbacks, pAllocationCallbacks); + if (result != MA_SUCCESS) { + return result; + } + + pRB->subbufferSizeInBytes = (ma_uint32)subbufferSizeInBytes; + pRB->subbufferCount = (ma_uint32)subbufferCount; + + if (pOptionalPreallocatedBuffer != NULL) { + pRB->subbufferStrideInBytes = (ma_uint32)subbufferStrideInBytes; + pRB->pBuffer = pOptionalPreallocatedBuffer; + } else { + size_t bufferSizeInBytes; + + /* + Here is where we allocate our own buffer. We always want to align this to MA_SIMD_ALIGNMENT for future SIMD optimization opportunity. To do this + we need to make sure the stride is a multiple of MA_SIMD_ALIGNMENT. 
+ */ + pRB->subbufferStrideInBytes = (pRB->subbufferSizeInBytes + (MA_SIMD_ALIGNMENT-1)) & ~MA_SIMD_ALIGNMENT; + + bufferSizeInBytes = (size_t)pRB->subbufferCount*pRB->subbufferStrideInBytes; + pRB->pBuffer = ma_aligned_malloc(bufferSizeInBytes, MA_SIMD_ALIGNMENT, &pRB->allocationCallbacks); + if (pRB->pBuffer == NULL) { + return MA_OUT_OF_MEMORY; + } + + MA_ZERO_MEMORY(pRB->pBuffer, bufferSizeInBytes); + pRB->ownsBuffer = MA_TRUE; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_rb_init(size_t bufferSizeInBytes, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_rb* pRB) +{ + return ma_rb_init_ex(bufferSizeInBytes, 1, 0, pOptionalPreallocatedBuffer, pAllocationCallbacks, pRB); +} + +MA_API void ma_rb_uninit(ma_rb* pRB) +{ + if (pRB == NULL) { + return; + } + + if (pRB->ownsBuffer) { + ma_aligned_free(pRB->pBuffer, &pRB->allocationCallbacks); + } +} + +MA_API void ma_rb_reset(ma_rb* pRB) +{ + if (pRB == NULL) { + return; + } + + pRB->encodedReadOffset = 0; + pRB->encodedWriteOffset = 0; +} + +MA_API ma_result ma_rb_acquire_read(ma_rb* pRB, size_t* pSizeInBytes, void** ppBufferOut) +{ + ma_uint32 writeOffset; + ma_uint32 writeOffsetInBytes; + ma_uint32 writeOffsetLoopFlag; + ma_uint32 readOffset; + ma_uint32 readOffsetInBytes; + ma_uint32 readOffsetLoopFlag; + size_t bytesAvailable; + size_t bytesRequested; + + if (pRB == NULL || pSizeInBytes == NULL || ppBufferOut == NULL) { + return MA_INVALID_ARGS; + } + + /* The returned buffer should never move ahead of the write pointer. */ + writeOffset = pRB->encodedWriteOffset; + ma_rb__deconstruct_offset(writeOffset, &writeOffsetInBytes, &writeOffsetLoopFlag); + + readOffset = pRB->encodedReadOffset; + ma_rb__deconstruct_offset(readOffset, &readOffsetInBytes, &readOffsetLoopFlag); + + /* + The number of bytes available depends on whether or not the read and write pointers are on the same loop iteration. If so, we + can only read up to the write pointer. If not, we can only read up to the end of the buffer. + */ + if (readOffsetLoopFlag == writeOffsetLoopFlag) { + bytesAvailable = writeOffsetInBytes - readOffsetInBytes; + } else { + bytesAvailable = pRB->subbufferSizeInBytes - readOffsetInBytes; + } + + bytesRequested = *pSizeInBytes; + if (bytesRequested > bytesAvailable) { + bytesRequested = bytesAvailable; + } + + *pSizeInBytes = bytesRequested; + (*ppBufferOut) = ma_rb__get_read_ptr(pRB); + + return MA_SUCCESS; +} + +MA_API ma_result ma_rb_commit_read(ma_rb* pRB, size_t sizeInBytes, void* pBufferOut) +{ + ma_uint32 readOffset; + ma_uint32 readOffsetInBytes; + ma_uint32 readOffsetLoopFlag; + ma_uint32 newReadOffsetInBytes; + ma_uint32 newReadOffsetLoopFlag; + + if (pRB == NULL) { + return MA_INVALID_ARGS; + } + + /* Validate the buffer. */ + if (pBufferOut != ma_rb__get_read_ptr(pRB)) { + return MA_INVALID_ARGS; + } + + readOffset = pRB->encodedReadOffset; + ma_rb__deconstruct_offset(readOffset, &readOffsetInBytes, &readOffsetLoopFlag); + + /* Check that sizeInBytes is correct. It should never go beyond the end of the buffer. */ + newReadOffsetInBytes = (ma_uint32)(readOffsetInBytes + sizeInBytes); + if (newReadOffsetInBytes > pRB->subbufferSizeInBytes) { + return MA_INVALID_ARGS; /* <-- sizeInBytes will cause the read offset to overflow. */ + } + + /* Move the read pointer back to the start if necessary. 
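+    Wrapping toggles the loop flag kept in the most significant bit of the encoded offset, which is how the reader and writer can tell whether they are on the same pass around the buffer.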
*/ + newReadOffsetLoopFlag = readOffsetLoopFlag; + if (newReadOffsetInBytes == pRB->subbufferSizeInBytes) { + newReadOffsetInBytes = 0; + newReadOffsetLoopFlag ^= 0x80000000; + } + + ma_atomic_exchange_32(&pRB->encodedReadOffset, ma_rb__construct_offset(newReadOffsetLoopFlag, newReadOffsetInBytes)); + return MA_SUCCESS; +} + +MA_API ma_result ma_rb_acquire_write(ma_rb* pRB, size_t* pSizeInBytes, void** ppBufferOut) +{ + ma_uint32 readOffset; + ma_uint32 readOffsetInBytes; + ma_uint32 readOffsetLoopFlag; + ma_uint32 writeOffset; + ma_uint32 writeOffsetInBytes; + ma_uint32 writeOffsetLoopFlag; + size_t bytesAvailable; + size_t bytesRequested; + + if (pRB == NULL || pSizeInBytes == NULL || ppBufferOut == NULL) { + return MA_INVALID_ARGS; + } + + /* The returned buffer should never overtake the read buffer. */ + readOffset = pRB->encodedReadOffset; + ma_rb__deconstruct_offset(readOffset, &readOffsetInBytes, &readOffsetLoopFlag); + + writeOffset = pRB->encodedWriteOffset; + ma_rb__deconstruct_offset(writeOffset, &writeOffsetInBytes, &writeOffsetLoopFlag); + + /* + In the case of writing, if the write pointer and the read pointer are on the same loop iteration we can only + write up to the end of the buffer. Otherwise we can only write up to the read pointer. The write pointer should + never overtake the read pointer. + */ + if (writeOffsetLoopFlag == readOffsetLoopFlag) { + bytesAvailable = pRB->subbufferSizeInBytes - writeOffsetInBytes; + } else { + bytesAvailable = readOffsetInBytes - writeOffsetInBytes; + } + + bytesRequested = *pSizeInBytes; + if (bytesRequested > bytesAvailable) { + bytesRequested = bytesAvailable; + } + + *pSizeInBytes = bytesRequested; + *ppBufferOut = ma_rb__get_write_ptr(pRB); + + /* Clear the buffer if desired. */ + if (pRB->clearOnWriteAcquire) { + MA_ZERO_MEMORY(*ppBufferOut, *pSizeInBytes); + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_rb_commit_write(ma_rb* pRB, size_t sizeInBytes, void* pBufferOut) +{ + ma_uint32 writeOffset; + ma_uint32 writeOffsetInBytes; + ma_uint32 writeOffsetLoopFlag; + ma_uint32 newWriteOffsetInBytes; + ma_uint32 newWriteOffsetLoopFlag; + + if (pRB == NULL) { + return MA_INVALID_ARGS; + } + + /* Validate the buffer. */ + if (pBufferOut != ma_rb__get_write_ptr(pRB)) { + return MA_INVALID_ARGS; + } + + writeOffset = pRB->encodedWriteOffset; + ma_rb__deconstruct_offset(writeOffset, &writeOffsetInBytes, &writeOffsetLoopFlag); + + /* Check that sizeInBytes is correct. It should never go beyond the end of the buffer. */ + newWriteOffsetInBytes = (ma_uint32)(writeOffsetInBytes + sizeInBytes); + if (newWriteOffsetInBytes > pRB->subbufferSizeInBytes) { + return MA_INVALID_ARGS; /* <-- sizeInBytes will cause the read offset to overflow. */ + } + + /* Move the read pointer back to the start if necessary. 
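+    (In this function it is the write offset that wraps; the logic mirrors ma_rb_commit_read above.)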
*/ + newWriteOffsetLoopFlag = writeOffsetLoopFlag; + if (newWriteOffsetInBytes == pRB->subbufferSizeInBytes) { + newWriteOffsetInBytes = 0; + newWriteOffsetLoopFlag ^= 0x80000000; + } + + ma_atomic_exchange_32(&pRB->encodedWriteOffset, ma_rb__construct_offset(newWriteOffsetLoopFlag, newWriteOffsetInBytes)); + return MA_SUCCESS; +} + +MA_API ma_result ma_rb_seek_read(ma_rb* pRB, size_t offsetInBytes) +{ + ma_uint32 readOffset; + ma_uint32 readOffsetInBytes; + ma_uint32 readOffsetLoopFlag; + ma_uint32 writeOffset; + ma_uint32 writeOffsetInBytes; + ma_uint32 writeOffsetLoopFlag; + ma_uint32 newReadOffsetInBytes; + ma_uint32 newReadOffsetLoopFlag; + + if (pRB == NULL || offsetInBytes > pRB->subbufferSizeInBytes) { + return MA_INVALID_ARGS; + } + + readOffset = pRB->encodedReadOffset; + ma_rb__deconstruct_offset(readOffset, &readOffsetInBytes, &readOffsetLoopFlag); + + writeOffset = pRB->encodedWriteOffset; + ma_rb__deconstruct_offset(writeOffset, &writeOffsetInBytes, &writeOffsetLoopFlag); + + newReadOffsetInBytes = readOffsetInBytes; + newReadOffsetLoopFlag = readOffsetLoopFlag; + + /* We cannot go past the write buffer. */ + if (readOffsetLoopFlag == writeOffsetLoopFlag) { + if ((readOffsetInBytes + offsetInBytes) > writeOffsetInBytes) { + newReadOffsetInBytes = writeOffsetInBytes; + } else { + newReadOffsetInBytes = (ma_uint32)(readOffsetInBytes + offsetInBytes); + } + } else { + /* May end up looping. */ + if ((readOffsetInBytes + offsetInBytes) >= pRB->subbufferSizeInBytes) { + newReadOffsetInBytes = (ma_uint32)(readOffsetInBytes + offsetInBytes) - pRB->subbufferSizeInBytes; + newReadOffsetLoopFlag ^= 0x80000000; /* <-- Looped. */ + } else { + newReadOffsetInBytes = (ma_uint32)(readOffsetInBytes + offsetInBytes); + } + } + + ma_atomic_exchange_32(&pRB->encodedReadOffset, ma_rb__construct_offset(newReadOffsetInBytes, newReadOffsetLoopFlag)); + return MA_SUCCESS; +} + +MA_API ma_result ma_rb_seek_write(ma_rb* pRB, size_t offsetInBytes) +{ + ma_uint32 readOffset; + ma_uint32 readOffsetInBytes; + ma_uint32 readOffsetLoopFlag; + ma_uint32 writeOffset; + ma_uint32 writeOffsetInBytes; + ma_uint32 writeOffsetLoopFlag; + ma_uint32 newWriteOffsetInBytes; + ma_uint32 newWriteOffsetLoopFlag; + + if (pRB == NULL) { + return MA_INVALID_ARGS; + } + + readOffset = pRB->encodedReadOffset; + ma_rb__deconstruct_offset(readOffset, &readOffsetInBytes, &readOffsetLoopFlag); + + writeOffset = pRB->encodedWriteOffset; + ma_rb__deconstruct_offset(writeOffset, &writeOffsetInBytes, &writeOffsetLoopFlag); + + newWriteOffsetInBytes = writeOffsetInBytes; + newWriteOffsetLoopFlag = writeOffsetLoopFlag; + + /* We cannot go past the write buffer. */ + if (readOffsetLoopFlag == writeOffsetLoopFlag) { + /* May end up looping. */ + if ((writeOffsetInBytes + offsetInBytes) >= pRB->subbufferSizeInBytes) { + newWriteOffsetInBytes = (ma_uint32)(writeOffsetInBytes + offsetInBytes) - pRB->subbufferSizeInBytes; + newWriteOffsetLoopFlag ^= 0x80000000; /* <-- Looped. 
*/ + } else { + newWriteOffsetInBytes = (ma_uint32)(writeOffsetInBytes + offsetInBytes); + } + } else { + if ((writeOffsetInBytes + offsetInBytes) > readOffsetInBytes) { + newWriteOffsetInBytes = readOffsetInBytes; + } else { + newWriteOffsetInBytes = (ma_uint32)(writeOffsetInBytes + offsetInBytes); + } + } + + ma_atomic_exchange_32(&pRB->encodedWriteOffset, ma_rb__construct_offset(newWriteOffsetInBytes, newWriteOffsetLoopFlag)); + return MA_SUCCESS; +} + +MA_API ma_int32 ma_rb_pointer_distance(ma_rb* pRB) +{ + ma_uint32 readOffset; + ma_uint32 readOffsetInBytes; + ma_uint32 readOffsetLoopFlag; + ma_uint32 writeOffset; + ma_uint32 writeOffsetInBytes; + ma_uint32 writeOffsetLoopFlag; + + if (pRB == NULL) { + return 0; + } + + readOffset = pRB->encodedReadOffset; + ma_rb__deconstruct_offset(readOffset, &readOffsetInBytes, &readOffsetLoopFlag); + + writeOffset = pRB->encodedWriteOffset; + ma_rb__deconstruct_offset(writeOffset, &writeOffsetInBytes, &writeOffsetLoopFlag); + + if (readOffsetLoopFlag == writeOffsetLoopFlag) { + return writeOffsetInBytes - readOffsetInBytes; + } else { + return writeOffsetInBytes + (pRB->subbufferSizeInBytes - readOffsetInBytes); + } +} + +MA_API ma_uint32 ma_rb_available_read(ma_rb* pRB) +{ + ma_int32 dist; + + if (pRB == NULL) { + return 0; + } + + dist = ma_rb_pointer_distance(pRB); + if (dist < 0) { + return 0; + } + + return dist; +} + +MA_API ma_uint32 ma_rb_available_write(ma_rb* pRB) +{ + if (pRB == NULL) { + return 0; + } + + return (ma_uint32)(ma_rb_get_subbuffer_size(pRB) - ma_rb_pointer_distance(pRB)); +} + +MA_API size_t ma_rb_get_subbuffer_size(ma_rb* pRB) +{ + if (pRB == NULL) { + return 0; + } + + return pRB->subbufferSizeInBytes; +} + +MA_API size_t ma_rb_get_subbuffer_stride(ma_rb* pRB) +{ + if (pRB == NULL) { + return 0; + } + + if (pRB->subbufferStrideInBytes == 0) { + return (size_t)pRB->subbufferSizeInBytes; + } + + return (size_t)pRB->subbufferStrideInBytes; +} + +MA_API size_t ma_rb_get_subbuffer_offset(ma_rb* pRB, size_t subbufferIndex) +{ + if (pRB == NULL) { + return 0; + } + + return subbufferIndex * ma_rb_get_subbuffer_stride(pRB); +} + +MA_API void* ma_rb_get_subbuffer_ptr(ma_rb* pRB, size_t subbufferIndex, void* pBuffer) +{ + if (pRB == NULL) { + return NULL; + } + + return ma_offset_ptr(pBuffer, ma_rb_get_subbuffer_offset(pRB, subbufferIndex)); +} + + +static MA_INLINE ma_uint32 ma_pcm_rb_get_bpf(ma_pcm_rb* pRB) +{ + MA_ASSERT(pRB != NULL); + + return ma_get_bytes_per_frame(pRB->format, pRB->channels); +} + +MA_API ma_result ma_pcm_rb_init_ex(ma_format format, ma_uint32 channels, ma_uint32 subbufferSizeInFrames, ma_uint32 subbufferCount, ma_uint32 subbufferStrideInFrames, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_pcm_rb* pRB) +{ + ma_uint32 bpf; + ma_result result; + + if (pRB == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pRB); + + bpf = ma_get_bytes_per_frame(format, channels); + if (bpf == 0) { + return MA_INVALID_ARGS; + } + + result = ma_rb_init_ex(subbufferSizeInFrames*bpf, subbufferCount, subbufferStrideInFrames*bpf, pOptionalPreallocatedBuffer, pAllocationCallbacks, &pRB->rb); + if (result != MA_SUCCESS) { + return result; + } + + pRB->format = format; + pRB->channels = channels; + + return MA_SUCCESS; +} + +MA_API ma_result ma_pcm_rb_init(ma_format format, ma_uint32 channels, ma_uint32 bufferSizeInFrames, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_pcm_rb* pRB) +{ + return ma_pcm_rb_init_ex(format, channels, 
bufferSizeInFrames, 1, 0, pOptionalPreallocatedBuffer, pAllocationCallbacks, pRB); +} + +MA_API void ma_pcm_rb_uninit(ma_pcm_rb* pRB) +{ + if (pRB == NULL) { + return; + } + + ma_rb_uninit(&pRB->rb); +} + +MA_API void ma_pcm_rb_reset(ma_pcm_rb* pRB) +{ + if (pRB == NULL) { + return; + } + + ma_rb_reset(&pRB->rb); +} + +MA_API ma_result ma_pcm_rb_acquire_read(ma_pcm_rb* pRB, ma_uint32* pSizeInFrames, void** ppBufferOut) +{ + size_t sizeInBytes; + ma_result result; + + if (pRB == NULL || pSizeInFrames == NULL) { + return MA_INVALID_ARGS; + } + + sizeInBytes = *pSizeInFrames * ma_pcm_rb_get_bpf(pRB); + + result = ma_rb_acquire_read(&pRB->rb, &sizeInBytes, ppBufferOut); + if (result != MA_SUCCESS) { + return result; + } + + *pSizeInFrames = (ma_uint32)(sizeInBytes / (size_t)ma_pcm_rb_get_bpf(pRB)); + return MA_SUCCESS; +} + +MA_API ma_result ma_pcm_rb_commit_read(ma_pcm_rb* pRB, ma_uint32 sizeInFrames, void* pBufferOut) +{ + if (pRB == NULL) { + return MA_INVALID_ARGS; + } + + return ma_rb_commit_read(&pRB->rb, sizeInFrames * ma_pcm_rb_get_bpf(pRB), pBufferOut); +} + +MA_API ma_result ma_pcm_rb_acquire_write(ma_pcm_rb* pRB, ma_uint32* pSizeInFrames, void** ppBufferOut) +{ + size_t sizeInBytes; + ma_result result; + + if (pRB == NULL) { + return MA_INVALID_ARGS; + } + + sizeInBytes = *pSizeInFrames * ma_pcm_rb_get_bpf(pRB); + + result = ma_rb_acquire_write(&pRB->rb, &sizeInBytes, ppBufferOut); + if (result != MA_SUCCESS) { + return result; + } + + *pSizeInFrames = (ma_uint32)(sizeInBytes / ma_pcm_rb_get_bpf(pRB)); + return MA_SUCCESS; +} + +MA_API ma_result ma_pcm_rb_commit_write(ma_pcm_rb* pRB, ma_uint32 sizeInFrames, void* pBufferOut) +{ + if (pRB == NULL) { + return MA_INVALID_ARGS; + } + + return ma_rb_commit_write(&pRB->rb, sizeInFrames * ma_pcm_rb_get_bpf(pRB), pBufferOut); +} + +MA_API ma_result ma_pcm_rb_seek_read(ma_pcm_rb* pRB, ma_uint32 offsetInFrames) +{ + if (pRB == NULL) { + return MA_INVALID_ARGS; + } + + return ma_rb_seek_read(&pRB->rb, offsetInFrames * ma_pcm_rb_get_bpf(pRB)); +} + +MA_API ma_result ma_pcm_rb_seek_write(ma_pcm_rb* pRB, ma_uint32 offsetInFrames) +{ + if (pRB == NULL) { + return MA_INVALID_ARGS; + } + + return ma_rb_seek_write(&pRB->rb, offsetInFrames * ma_pcm_rb_get_bpf(pRB)); +} + +MA_API ma_int32 ma_pcm_rb_pointer_distance(ma_pcm_rb* pRB) +{ + if (pRB == NULL) { + return 0; + } + + return ma_rb_pointer_distance(&pRB->rb) / ma_pcm_rb_get_bpf(pRB); +} + +MA_API ma_uint32 ma_pcm_rb_available_read(ma_pcm_rb* pRB) +{ + if (pRB == NULL) { + return 0; + } + + return ma_rb_available_read(&pRB->rb) / ma_pcm_rb_get_bpf(pRB); +} + +MA_API ma_uint32 ma_pcm_rb_available_write(ma_pcm_rb* pRB) +{ + if (pRB == NULL) { + return 0; + } + + return ma_rb_available_write(&pRB->rb) / ma_pcm_rb_get_bpf(pRB); +} + +MA_API ma_uint32 ma_pcm_rb_get_subbuffer_size(ma_pcm_rb* pRB) +{ + if (pRB == NULL) { + return 0; + } + + return (ma_uint32)(ma_rb_get_subbuffer_size(&pRB->rb) / ma_pcm_rb_get_bpf(pRB)); +} + +MA_API ma_uint32 ma_pcm_rb_get_subbuffer_stride(ma_pcm_rb* pRB) +{ + if (pRB == NULL) { + return 0; + } + + return (ma_uint32)(ma_rb_get_subbuffer_stride(&pRB->rb) / ma_pcm_rb_get_bpf(pRB)); +} + +MA_API ma_uint32 ma_pcm_rb_get_subbuffer_offset(ma_pcm_rb* pRB, ma_uint32 subbufferIndex) +{ + if (pRB == NULL) { + return 0; + } + + return (ma_uint32)(ma_rb_get_subbuffer_offset(&pRB->rb, subbufferIndex) / ma_pcm_rb_get_bpf(pRB)); +} + +MA_API void* ma_pcm_rb_get_subbuffer_ptr(ma_pcm_rb* pRB, ma_uint32 subbufferIndex, void* pBuffer) +{ + if (pRB == NULL) { + return NULL; + } + + return 
ma_rb_get_subbuffer_ptr(&pRB->rb, subbufferIndex, pBuffer); +} + + + +/************************************************************************************************************************************************************** + +Miscellaneous Helpers + +**************************************************************************************************************************************************************/ +MA_API const char* ma_result_description(ma_result result) +{ + switch (result) + { + case MA_SUCCESS: return "No error"; + case MA_ERROR: return "Unknown error"; + case MA_INVALID_ARGS: return "Invalid argument"; + case MA_INVALID_OPERATION: return "Invalid operation"; + case MA_OUT_OF_MEMORY: return "Out of memory"; + case MA_OUT_OF_RANGE: return "Out of range"; + case MA_ACCESS_DENIED: return "Permission denied"; + case MA_DOES_NOT_EXIST: return "Resource does not exist"; + case MA_ALREADY_EXISTS: return "Resource already exists"; + case MA_TOO_MANY_OPEN_FILES: return "Too many open files"; + case MA_INVALID_FILE: return "Invalid file"; + case MA_TOO_BIG: return "Too large"; + case MA_PATH_TOO_LONG: return "Path too long"; + case MA_NAME_TOO_LONG: return "Name too long"; + case MA_NOT_DIRECTORY: return "Not a directory"; + case MA_IS_DIRECTORY: return "Is a directory"; + case MA_DIRECTORY_NOT_EMPTY: return "Directory not empty"; + case MA_END_OF_FILE: return "End of file"; + case MA_NO_SPACE: return "No space available"; + case MA_BUSY: return "Device or resource busy"; + case MA_IO_ERROR: return "Input/output error"; + case MA_INTERRUPT: return "Interrupted"; + case MA_UNAVAILABLE: return "Resource unavailable"; + case MA_ALREADY_IN_USE: return "Resource already in use"; + case MA_BAD_ADDRESS: return "Bad address"; + case MA_BAD_SEEK: return "Illegal seek"; + case MA_BAD_PIPE: return "Broken pipe"; + case MA_DEADLOCK: return "Deadlock"; + case MA_TOO_MANY_LINKS: return "Too many links"; + case MA_NOT_IMPLEMENTED: return "Not implemented"; + case MA_NO_MESSAGE: return "No message of desired type"; + case MA_BAD_MESSAGE: return "Invalid message"; + case MA_NO_DATA_AVAILABLE: return "No data available"; + case MA_INVALID_DATA: return "Invalid data"; + case MA_TIMEOUT: return "Timeout"; + case MA_NO_NETWORK: return "Network unavailable"; + case MA_NOT_UNIQUE: return "Not unique"; + case MA_NOT_SOCKET: return "Socket operation on non-socket"; + case MA_NO_ADDRESS: return "Destination address required"; + case MA_BAD_PROTOCOL: return "Protocol wrong type for socket"; + case MA_PROTOCOL_UNAVAILABLE: return "Protocol not available"; + case MA_PROTOCOL_NOT_SUPPORTED: return "Protocol not supported"; + case MA_PROTOCOL_FAMILY_NOT_SUPPORTED: return "Protocol family not supported"; + case MA_ADDRESS_FAMILY_NOT_SUPPORTED: return "Address family not supported"; + case MA_SOCKET_NOT_SUPPORTED: return "Socket type not supported"; + case MA_CONNECTION_RESET: return "Connection reset"; + case MA_ALREADY_CONNECTED: return "Already connected"; + case MA_NOT_CONNECTED: return "Not connected"; + case MA_CONNECTION_REFUSED: return "Connection refused"; + case MA_NO_HOST: return "No host"; + case MA_IN_PROGRESS: return "Operation in progress"; + case MA_CANCELLED: return "Operation cancelled"; + case MA_MEMORY_ALREADY_MAPPED: return "Memory already mapped"; + case MA_AT_END: return "Reached end of collection"; + + case MA_FORMAT_NOT_SUPPORTED: return "Format not supported"; + case MA_DEVICE_TYPE_NOT_SUPPORTED: return "Device type not supported"; + case MA_SHARE_MODE_NOT_SUPPORTED: return "Share 
mode not supported"; + case MA_NO_BACKEND: return "No backend"; + case MA_NO_DEVICE: return "No device"; + case MA_API_NOT_FOUND: return "API not found"; + case MA_INVALID_DEVICE_CONFIG: return "Invalid device config"; + + case MA_DEVICE_NOT_INITIALIZED: return "Device not initialized"; + case MA_DEVICE_NOT_STARTED: return "Device not started"; + + case MA_FAILED_TO_INIT_BACKEND: return "Failed to initialize backend"; + case MA_FAILED_TO_OPEN_BACKEND_DEVICE: return "Failed to open backend device"; + case MA_FAILED_TO_START_BACKEND_DEVICE: return "Failed to start backend device"; + case MA_FAILED_TO_STOP_BACKEND_DEVICE: return "Failed to stop backend device"; + + default: return "Unknown error"; + } +} + +MA_API void* ma_malloc(size_t sz, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks != NULL) { + return ma__malloc_from_callbacks(sz, pAllocationCallbacks); + } else { + return ma__malloc_default(sz, NULL); + } +} + +MA_API void* ma_realloc(void* p, size_t sz, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks != NULL) { + if (pAllocationCallbacks->onRealloc != NULL) { + return pAllocationCallbacks->onRealloc(p, sz, pAllocationCallbacks->pUserData); + } else { + return NULL; /* This requires a native implementation of realloc(). */ + } + } else { + return ma__realloc_default(p, sz, NULL); + } +} + +MA_API void ma_free(void* p, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks != NULL) { + ma__free_from_callbacks(p, pAllocationCallbacks); + } else { + ma__free_default(p, NULL); + } +} + +MA_API void* ma_aligned_malloc(size_t sz, size_t alignment, const ma_allocation_callbacks* pAllocationCallbacks) +{ + size_t extraBytes; + void* pUnaligned; + void* pAligned; + + if (alignment == 0) { + return 0; + } + + extraBytes = alignment-1 + sizeof(void*); + + pUnaligned = ma_malloc(sz + extraBytes, pAllocationCallbacks); + if (pUnaligned == NULL) { + return NULL; + } + + pAligned = (void*)(((ma_uintptr)pUnaligned + extraBytes) & ~((ma_uintptr)(alignment-1))); + ((void**)pAligned)[-1] = pUnaligned; + + return pAligned; +} + +MA_API void ma_aligned_free(void* p, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_free(((void**)p)[-1], pAllocationCallbacks); +} + +MA_API const char* ma_get_format_name(ma_format format) +{ + switch (format) + { + case ma_format_unknown: return "Unknown"; + case ma_format_u8: return "8-bit Unsigned Integer"; + case ma_format_s16: return "16-bit Signed Integer"; + case ma_format_s24: return "24-bit Signed Integer (Tightly Packed)"; + case ma_format_s32: return "32-bit Signed Integer"; + case ma_format_f32: return "32-bit IEEE Floating Point"; + default: return "Invalid"; + } +} + +MA_API void ma_blend_f32(float* pOut, float* pInA, float* pInB, float factor, ma_uint32 channels) +{ + ma_uint32 i; + for (i = 0; i < channels; ++i) { + pOut[i] = ma_mix_f32(pInA[i], pInB[i], factor); + } +} + + +MA_API ma_uint32 ma_get_bytes_per_sample(ma_format format) +{ + ma_uint32 sizes[] = { + 0, /* unknown */ + 1, /* u8 */ + 2, /* s16 */ + 3, /* s24 */ + 4, /* s32 */ + 4, /* f32 */ + }; + return sizes[format]; +} + + +/************************************************************************************************************************************************************** + +Decoding + +**************************************************************************************************************************************************************/ +#ifndef MA_NO_DECODING + +static 
size_t ma_decoder_read_bytes(ma_decoder* pDecoder, void* pBufferOut, size_t bytesToRead) +{ + size_t bytesRead; + + MA_ASSERT(pDecoder != NULL); + MA_ASSERT(pBufferOut != NULL); + + bytesRead = pDecoder->onRead(pDecoder, pBufferOut, bytesToRead); + pDecoder->readPointer += bytesRead; + + return bytesRead; +} + +static ma_bool32 ma_decoder_seek_bytes(ma_decoder* pDecoder, int byteOffset, ma_seek_origin origin) +{ + ma_bool32 wasSuccessful; + + MA_ASSERT(pDecoder != NULL); + + wasSuccessful = pDecoder->onSeek(pDecoder, byteOffset, origin); + if (wasSuccessful) { + if (origin == ma_seek_origin_start) { + pDecoder->readPointer = (ma_uint64)byteOffset; + } else { + pDecoder->readPointer += byteOffset; + } + } + + return wasSuccessful; +} + + +MA_API ma_decoder_config ma_decoder_config_init(ma_format outputFormat, ma_uint32 outputChannels, ma_uint32 outputSampleRate) +{ + ma_decoder_config config; + MA_ZERO_OBJECT(&config); + config.format = outputFormat; + config.channels = outputChannels; + config.sampleRate = outputSampleRate; + config.resampling.algorithm = ma_resample_algorithm_linear; + config.resampling.linear.lpfOrder = ma_min(MA_DEFAULT_RESAMPLER_LPF_ORDER, MA_MAX_FILTER_ORDER); + config.resampling.speex.quality = 3; + + /* Note that we are intentionally leaving the channel map empty here which will cause the default channel map to be used. */ + + return config; +} + +MA_API ma_decoder_config ma_decoder_config_init_copy(const ma_decoder_config* pConfig) +{ + ma_decoder_config config; + if (pConfig != NULL) { + config = *pConfig; + } else { + MA_ZERO_OBJECT(&config); + } + + return config; +} + +static ma_result ma_decoder__init_data_converter(ma_decoder* pDecoder, const ma_decoder_config* pConfig) +{ + ma_data_converter_config converterConfig; + + MA_ASSERT(pDecoder != NULL); + + /* Output format. */ + if (pConfig->format == ma_format_unknown) { + pDecoder->outputFormat = pDecoder->internalFormat; + } else { + pDecoder->outputFormat = pConfig->format; + } + + if (pConfig->channels == 0) { + pDecoder->outputChannels = pDecoder->internalChannels; + } else { + pDecoder->outputChannels = pConfig->channels; + } + + if (pConfig->sampleRate == 0) { + pDecoder->outputSampleRate = pDecoder->internalSampleRate; + } else { + pDecoder->outputSampleRate = pConfig->sampleRate; + } + + if (ma_channel_map_blank(pDecoder->outputChannels, pConfig->channelMap)) { + ma_get_standard_channel_map(ma_standard_channel_map_default, pDecoder->outputChannels, pDecoder->outputChannelMap); + } else { + MA_COPY_MEMORY(pDecoder->outputChannelMap, pConfig->channelMap, sizeof(pConfig->channelMap)); + } + + + converterConfig = ma_data_converter_config_init( + pDecoder->internalFormat, pDecoder->outputFormat, + pDecoder->internalChannels, pDecoder->outputChannels, + pDecoder->internalSampleRate, pDecoder->outputSampleRate + ); + ma_channel_map_copy(converterConfig.channelMapIn, pDecoder->internalChannelMap, pDecoder->internalChannels); + ma_channel_map_copy(converterConfig.channelMapOut, pDecoder->outputChannelMap, pDecoder->outputChannels); + converterConfig.channelMixMode = pConfig->channelMixMode; + converterConfig.ditherMode = pConfig->ditherMode; + converterConfig.resampling.allowDynamicSampleRate = MA_FALSE; /* Never allow dynamic sample rate conversion. Setting this to true will disable passthrough optimizations. 
*/ + converterConfig.resampling.algorithm = pConfig->resampling.algorithm; + converterConfig.resampling.linear.lpfOrder = pConfig->resampling.linear.lpfOrder; + converterConfig.resampling.speex.quality = pConfig->resampling.speex.quality; + + return ma_data_converter_init(&converterConfig, &pDecoder->converter); +} + +/* WAV */ +#ifdef dr_wav_h +#define MA_HAS_WAV + +static size_t ma_decoder_internal_on_read__wav(void* pUserData, void* pBufferOut, size_t bytesToRead) +{ + ma_decoder* pDecoder = (ma_decoder*)pUserData; + MA_ASSERT(pDecoder != NULL); + + return ma_decoder_read_bytes(pDecoder, pBufferOut, bytesToRead); +} + +static drwav_bool32 ma_decoder_internal_on_seek__wav(void* pUserData, int offset, drwav_seek_origin origin) +{ + ma_decoder* pDecoder = (ma_decoder*)pUserData; + MA_ASSERT(pDecoder != NULL); + + return ma_decoder_seek_bytes(pDecoder, offset, (origin == drwav_seek_origin_start) ? ma_seek_origin_start : ma_seek_origin_current); +} + +static ma_uint64 ma_decoder_internal_on_read_pcm_frames__wav(ma_decoder* pDecoder, void* pFramesOut, ma_uint64 frameCount) +{ + drwav* pWav; + + MA_ASSERT(pDecoder != NULL); + MA_ASSERT(pFramesOut != NULL); + + pWav = (drwav*)pDecoder->pInternalDecoder; + MA_ASSERT(pWav != NULL); + + switch (pDecoder->internalFormat) { + case ma_format_s16: return drwav_read_pcm_frames_s16(pWav, frameCount, (drwav_int16*)pFramesOut); + case ma_format_s32: return drwav_read_pcm_frames_s32(pWav, frameCount, (drwav_int32*)pFramesOut); + case ma_format_f32: return drwav_read_pcm_frames_f32(pWav, frameCount, (float*)pFramesOut); + default: break; + } + + /* Should never get here. If we do, it means the internal format was not set correctly at initialization time. */ + MA_ASSERT(MA_FALSE); + return 0; +} + +static ma_result ma_decoder_internal_on_seek_to_pcm_frame__wav(ma_decoder* pDecoder, ma_uint64 frameIndex) +{ + drwav* pWav; + drwav_bool32 result; + + pWav = (drwav*)pDecoder->pInternalDecoder; + MA_ASSERT(pWav != NULL); + + result = drwav_seek_to_pcm_frame(pWav, frameIndex); + if (result) { + return MA_SUCCESS; + } else { + return MA_ERROR; + } +} + +static ma_result ma_decoder_internal_on_uninit__wav(ma_decoder* pDecoder) +{ + drwav_uninit((drwav*)pDecoder->pInternalDecoder); + ma__free_from_callbacks(pDecoder->pInternalDecoder, &pDecoder->allocationCallbacks); + return MA_SUCCESS; +} + +static ma_uint64 ma_decoder_internal_on_get_length_in_pcm_frames__wav(ma_decoder* pDecoder) +{ + return ((drwav*)pDecoder->pInternalDecoder)->totalPCMFrameCount; +} + +static ma_result ma_decoder_init_wav__internal(const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + drwav* pWav; + drwav_allocation_callbacks allocationCallbacks; + + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDecoder != NULL); + + pWav = (drwav*)ma__malloc_from_callbacks(sizeof(*pWav), &pDecoder->allocationCallbacks); + if (pWav == NULL) { + return MA_OUT_OF_MEMORY; + } + + allocationCallbacks.pUserData = pDecoder->allocationCallbacks.pUserData; + allocationCallbacks.onMalloc = pDecoder->allocationCallbacks.onMalloc; + allocationCallbacks.onRealloc = pDecoder->allocationCallbacks.onRealloc; + allocationCallbacks.onFree = pDecoder->allocationCallbacks.onFree; + + /* Try opening the decoder first. */ + if (!drwav_init(pWav, ma_decoder_internal_on_read__wav, ma_decoder_internal_on_seek__wav, pDecoder, &allocationCallbacks)) { + ma__free_from_callbacks(pWav, &pDecoder->allocationCallbacks); + return MA_ERROR; + } + + /* If we get here it means we successfully initialized the WAV decoder. 
We can now initialize the rest of the ma_decoder. */ + pDecoder->onReadPCMFrames = ma_decoder_internal_on_read_pcm_frames__wav; + pDecoder->onSeekToPCMFrame = ma_decoder_internal_on_seek_to_pcm_frame__wav; + pDecoder->onUninit = ma_decoder_internal_on_uninit__wav; + pDecoder->onGetLengthInPCMFrames = ma_decoder_internal_on_get_length_in_pcm_frames__wav; + pDecoder->pInternalDecoder = pWav; + + /* Try to be as optimal as possible for the internal format. If miniaudio does not support a format we will fall back to f32. */ + pDecoder->internalFormat = ma_format_unknown; + switch (pWav->translatedFormatTag) { + case DR_WAVE_FORMAT_PCM: + { + if (pWav->bitsPerSample == 8) { + pDecoder->internalFormat = ma_format_s16; + } else if (pWav->bitsPerSample == 16) { + pDecoder->internalFormat = ma_format_s16; + } else if (pWav->bitsPerSample == 32) { + pDecoder->internalFormat = ma_format_s32; + } + } break; + + case DR_WAVE_FORMAT_IEEE_FLOAT: + { + if (pWav->bitsPerSample == 32) { + pDecoder->internalFormat = ma_format_f32; + } + } break; + + case DR_WAVE_FORMAT_ALAW: + case DR_WAVE_FORMAT_MULAW: + case DR_WAVE_FORMAT_ADPCM: + case DR_WAVE_FORMAT_DVI_ADPCM: + { + pDecoder->internalFormat = ma_format_s16; + } break; + } + + if (pDecoder->internalFormat == ma_format_unknown) { + pDecoder->internalFormat = ma_format_f32; + } + + pDecoder->internalChannels = pWav->channels; + pDecoder->internalSampleRate = pWav->sampleRate; + ma_get_standard_channel_map(ma_standard_channel_map_microsoft, pDecoder->internalChannels, pDecoder->internalChannelMap); + + return MA_SUCCESS; +} +#endif /* dr_wav_h */ + +/* FLAC */ +#ifdef dr_flac_h +#define MA_HAS_FLAC + +static size_t ma_decoder_internal_on_read__flac(void* pUserData, void* pBufferOut, size_t bytesToRead) +{ + ma_decoder* pDecoder = (ma_decoder*)pUserData; + MA_ASSERT(pDecoder != NULL); + + return ma_decoder_read_bytes(pDecoder, pBufferOut, bytesToRead); +} + +static drflac_bool32 ma_decoder_internal_on_seek__flac(void* pUserData, int offset, drflac_seek_origin origin) +{ + ma_decoder* pDecoder = (ma_decoder*)pUserData; + MA_ASSERT(pDecoder != NULL); + + return ma_decoder_seek_bytes(pDecoder, offset, (origin == drflac_seek_origin_start) ? ma_seek_origin_start : ma_seek_origin_current); +} + +static ma_uint64 ma_decoder_internal_on_read_pcm_frames__flac(ma_decoder* pDecoder, void* pFramesOut, ma_uint64 frameCount) +{ + drflac* pFlac; + + MA_ASSERT(pDecoder != NULL); + MA_ASSERT(pFramesOut != NULL); + + pFlac = (drflac*)pDecoder->pInternalDecoder; + MA_ASSERT(pFlac != NULL); + + switch (pDecoder->internalFormat) { + case ma_format_s16: return drflac_read_pcm_frames_s16(pFlac, frameCount, (drflac_int16*)pFramesOut); + case ma_format_s32: return drflac_read_pcm_frames_s32(pFlac, frameCount, (drflac_int32*)pFramesOut); + case ma_format_f32: return drflac_read_pcm_frames_f32(pFlac, frameCount, (float*)pFramesOut); + default: break; + } + + /* Should never get here. If we do, it means the internal format was not set correctly at initialization time. 
*/ + MA_ASSERT(MA_FALSE); + return 0; +} + +static ma_result ma_decoder_internal_on_seek_to_pcm_frame__flac(ma_decoder* pDecoder, ma_uint64 frameIndex) +{ + drflac* pFlac; + drflac_bool32 result; + + pFlac = (drflac*)pDecoder->pInternalDecoder; + MA_ASSERT(pFlac != NULL); + + result = drflac_seek_to_pcm_frame(pFlac, frameIndex); + if (result) { + return MA_SUCCESS; + } else { + return MA_ERROR; + } +} + +static ma_result ma_decoder_internal_on_uninit__flac(ma_decoder* pDecoder) +{ + drflac_close((drflac*)pDecoder->pInternalDecoder); + return MA_SUCCESS; +} + +static ma_uint64 ma_decoder_internal_on_get_length_in_pcm_frames__flac(ma_decoder* pDecoder) +{ + return ((drflac*)pDecoder->pInternalDecoder)->totalPCMFrameCount; +} + +static ma_result ma_decoder_init_flac__internal(const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + drflac* pFlac; + drflac_allocation_callbacks allocationCallbacks; + + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDecoder != NULL); + + allocationCallbacks.pUserData = pDecoder->allocationCallbacks.pUserData; + allocationCallbacks.onMalloc = pDecoder->allocationCallbacks.onMalloc; + allocationCallbacks.onRealloc = pDecoder->allocationCallbacks.onRealloc; + allocationCallbacks.onFree = pDecoder->allocationCallbacks.onFree; + + /* Try opening the decoder first. */ + pFlac = drflac_open(ma_decoder_internal_on_read__flac, ma_decoder_internal_on_seek__flac, pDecoder, &allocationCallbacks); + if (pFlac == NULL) { + return MA_ERROR; + } + + /* If we get here it means we successfully initialized the FLAC decoder. We can now initialize the rest of the ma_decoder. */ + pDecoder->onReadPCMFrames = ma_decoder_internal_on_read_pcm_frames__flac; + pDecoder->onSeekToPCMFrame = ma_decoder_internal_on_seek_to_pcm_frame__flac; + pDecoder->onUninit = ma_decoder_internal_on_uninit__flac; + pDecoder->onGetLengthInPCMFrames = ma_decoder_internal_on_get_length_in_pcm_frames__flac; + pDecoder->pInternalDecoder = pFlac; + + /* + dr_flac supports reading as s32, s16 and f32. Try to do a one-to-one mapping if possible, but fall back to s32 if not. s32 is the "native" FLAC format + since it's the only one that's truly lossless. + */ + pDecoder->internalFormat = ma_format_s32; + if (pConfig->format == ma_format_s16) { + pDecoder->internalFormat = ma_format_s16; + } else if (pConfig->format == ma_format_f32) { + pDecoder->internalFormat = ma_format_f32; + } + + pDecoder->internalChannels = pFlac->channels; + pDecoder->internalSampleRate = pFlac->sampleRate; + ma_get_standard_channel_map(ma_standard_channel_map_flac, pDecoder->internalChannels, pDecoder->internalChannelMap); + + return MA_SUCCESS; +} +#endif /* dr_flac_h */ + +/* Vorbis */ +#ifdef STB_VORBIS_INCLUDE_STB_VORBIS_H +#define MA_HAS_VORBIS + +/* The size in bytes of each chunk of data to read from the Vorbis stream. */ +#define MA_VORBIS_DATA_CHUNK_SIZE 4096 + +typedef struct +{ + stb_vorbis* pInternalVorbis; + ma_uint8* pData; + size_t dataSize; + size_t dataCapacity; + ma_uint32 framesConsumed; /* The number of frames consumed in ppPacketData. */ + ma_uint32 framesRemaining; /* The number of frames remaining in ppPacketData. 
*/
+    float** ppPacketData;
+} ma_vorbis_decoder;
+
+static ma_uint64 ma_vorbis_decoder_read_pcm_frames(ma_vorbis_decoder* pVorbis, ma_decoder* pDecoder, void* pFramesOut, ma_uint64 frameCount)
+{
+    float* pFramesOutF;
+    ma_uint64 totalFramesRead;
+
+    MA_ASSERT(pVorbis != NULL);
+    MA_ASSERT(pDecoder != NULL);
+
+    pFramesOutF = (float*)pFramesOut;
+
+    totalFramesRead = 0;
+    while (frameCount > 0) {
+        /* Read from the in-memory buffer first. */
+        while (pVorbis->framesRemaining > 0 && frameCount > 0) {
+            ma_uint32 iChannel;
+            for (iChannel = 0; iChannel < pDecoder->internalChannels; ++iChannel) {
+                pFramesOutF[0] = pVorbis->ppPacketData[iChannel][pVorbis->framesConsumed];
+                pFramesOutF += 1;
+            }
+
+            pVorbis->framesConsumed  += 1;
+            pVorbis->framesRemaining -= 1;
+            frameCount -= 1;
+            totalFramesRead += 1;
+        }
+
+        if (frameCount == 0) {
+            break;
+        }
+
+        MA_ASSERT(pVorbis->framesRemaining == 0);
+
+        /* We've run out of cached frames, so decode the next packet and continue iteration. */
+        do
+        {
+            int samplesRead;
+            int consumedDataSize;
+
+            if (pVorbis->dataSize > INT_MAX) {
+                break;  /* Too big. */
+            }
+
+            samplesRead = 0;
+            consumedDataSize = stb_vorbis_decode_frame_pushdata(pVorbis->pInternalVorbis, pVorbis->pData, (int)pVorbis->dataSize, NULL, (float***)&pVorbis->ppPacketData, &samplesRead);
+            if (consumedDataSize != 0) {
+                size_t leftoverDataSize = (pVorbis->dataSize - (size_t)consumedDataSize);
+                size_t i;
+                for (i = 0; i < leftoverDataSize; ++i) {
+                    pVorbis->pData[i] = pVorbis->pData[i + consumedDataSize];
+                }
+
+                pVorbis->dataSize = leftoverDataSize;
+                pVorbis->framesConsumed = 0;
+                pVorbis->framesRemaining = samplesRead;
+                break;
+            } else {
+                /* Need more data. If there's any room in the existing buffer allocation, fill that first. Otherwise expand. */
+                size_t bytesRead;
+                if (pVorbis->dataCapacity == pVorbis->dataSize) {
+                    /* No room. Expand. */
+                    size_t oldCap = pVorbis->dataCapacity;
+                    size_t newCap = pVorbis->dataCapacity + MA_VORBIS_DATA_CHUNK_SIZE;
+                    ma_uint8* pNewData;
+
+                    pNewData = (ma_uint8*)ma__realloc_from_callbacks(pVorbis->pData, newCap, oldCap, &pDecoder->allocationCallbacks);
+                    if (pNewData == NULL) {
+                        return totalFramesRead; /* Out of memory. */
+                    }
+
+                    pVorbis->pData = pNewData;
+                    pVorbis->dataCapacity = newCap;
+                }
+
+                /* Fill in a chunk. */
+                bytesRead = ma_decoder_read_bytes(pDecoder, pVorbis->pData + pVorbis->dataSize, (pVorbis->dataCapacity - pVorbis->dataSize));
+                if (bytesRead == 0) {
+                    return totalFramesRead; /* Error reading more data. */
+                }
+
+                pVorbis->dataSize += bytesRead;
+            }
+        } while (MA_TRUE);
+    }
+
+    return totalFramesRead;
+}
+
+static ma_result ma_vorbis_decoder_seek_to_pcm_frame(ma_vorbis_decoder* pVorbis, ma_decoder* pDecoder, ma_uint64 frameIndex)
+{
+    float buffer[4096];
+
+    MA_ASSERT(pVorbis != NULL);
+    MA_ASSERT(pDecoder != NULL);
+
+    /*
+    This is terribly inefficient because stb_vorbis does not have a good seeking solution with its push API. Currently this just performs
+    a full decode right from the start of the stream. Later on I'll need to write a layer that goes through all of the Ogg pages until we
+    find the one containing the sample we need. Then we know exactly where to seek for stb_vorbis.
+ */ + if (!ma_decoder_seek_bytes(pDecoder, 0, ma_seek_origin_start)) { + return MA_ERROR; + } + + stb_vorbis_flush_pushdata(pVorbis->pInternalVorbis); + pVorbis->framesConsumed = 0; + pVorbis->framesRemaining = 0; + pVorbis->dataSize = 0; + + while (frameIndex > 0) { + ma_uint32 framesRead; + ma_uint32 framesToRead = ma_countof(buffer)/pDecoder->internalChannels; + if (framesToRead > frameIndex) { + framesToRead = (ma_uint32)frameIndex; + } + + framesRead = (ma_uint32)ma_vorbis_decoder_read_pcm_frames(pVorbis, pDecoder, buffer, framesToRead); + if (framesRead == 0) { + return MA_ERROR; + } + + frameIndex -= framesRead; + } + + return MA_SUCCESS; +} + + +static ma_result ma_decoder_internal_on_seek_to_pcm_frame__vorbis(ma_decoder* pDecoder, ma_uint64 frameIndex) +{ + ma_vorbis_decoder* pVorbis = (ma_vorbis_decoder*)pDecoder->pInternalDecoder; + MA_ASSERT(pVorbis != NULL); + + return ma_vorbis_decoder_seek_to_pcm_frame(pVorbis, pDecoder, frameIndex); +} + +static ma_result ma_decoder_internal_on_uninit__vorbis(ma_decoder* pDecoder) +{ + ma_vorbis_decoder* pVorbis = (ma_vorbis_decoder*)pDecoder->pInternalDecoder; + MA_ASSERT(pVorbis != NULL); + + stb_vorbis_close(pVorbis->pInternalVorbis); + ma__free_from_callbacks(pVorbis->pData, &pDecoder->allocationCallbacks); + ma__free_from_callbacks(pVorbis, &pDecoder->allocationCallbacks); + + return MA_SUCCESS; +} + +static ma_uint64 ma_decoder_internal_on_read_pcm_frames__vorbis(ma_decoder* pDecoder, void* pFramesOut, ma_uint64 frameCount) +{ + ma_vorbis_decoder* pVorbis; + + MA_ASSERT(pDecoder != NULL); + MA_ASSERT(pFramesOut != NULL); + MA_ASSERT(pDecoder->internalFormat == ma_format_f32); + + pVorbis = (ma_vorbis_decoder*)pDecoder->pInternalDecoder; + MA_ASSERT(pVorbis != NULL); + + return ma_vorbis_decoder_read_pcm_frames(pVorbis, pDecoder, pFramesOut, frameCount); +} + +static ma_uint64 ma_decoder_internal_on_get_length_in_pcm_frames__vorbis(ma_decoder* pDecoder) +{ + /* No good way to do this with Vorbis. */ + (void)pDecoder; + return 0; +} + +static ma_result ma_decoder_init_vorbis__internal(const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + stb_vorbis* pInternalVorbis = NULL; + size_t dataSize = 0; + size_t dataCapacity = 0; + ma_uint8* pData = NULL; + stb_vorbis_info vorbisInfo; + size_t vorbisDataSize; + ma_vorbis_decoder* pVorbis; + + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDecoder != NULL); + + /* We grow the buffer in chunks. */ + do + { + /* Allocate memory for a new chunk. */ + ma_uint8* pNewData; + size_t bytesRead; + int vorbisError = 0; + int consumedDataSize = 0; + size_t oldCapacity = dataCapacity; + + dataCapacity += MA_VORBIS_DATA_CHUNK_SIZE; + pNewData = (ma_uint8*)ma__realloc_from_callbacks(pData, dataCapacity, oldCapacity, &pDecoder->allocationCallbacks); + if (pNewData == NULL) { + ma__free_from_callbacks(pData, &pDecoder->allocationCallbacks); + return MA_OUT_OF_MEMORY; + } + + pData = pNewData; + + /* Fill in a chunk. */ + bytesRead = ma_decoder_read_bytes(pDecoder, pData + dataSize, (dataCapacity - dataSize)); + if (bytesRead == 0) { + return MA_ERROR; + } + + dataSize += bytesRead; + if (dataSize > INT_MAX) { + return MA_ERROR; /* Too big. */ + } + + pInternalVorbis = stb_vorbis_open_pushdata(pData, (int)dataSize, &consumedDataSize, &vorbisError, NULL); + if (pInternalVorbis != NULL) { + /* + If we get here it means we were able to open the stb_vorbis decoder. 
There may be some leftover bytes in our buffer, so + we need to move those bytes down to the front of the buffer since they'll be needed for future decoding. + */ + size_t leftoverDataSize = (dataSize - (size_t)consumedDataSize); + size_t i; + for (i = 0; i < leftoverDataSize; ++i) { + pData[i] = pData[i + consumedDataSize]; + } + + dataSize = leftoverDataSize; + break; /* Success. */ + } else { + if (vorbisError == VORBIS_need_more_data) { + continue; + } else { + return MA_ERROR; /* Failed to open the stb_vorbis decoder. */ + } + } + } while (MA_TRUE); + + + /* If we get here it means we successfully opened the Vorbis decoder. */ + vorbisInfo = stb_vorbis_get_info(pInternalVorbis); + + /* Don't allow more than MA_MAX_CHANNELS channels. */ + if (vorbisInfo.channels > MA_MAX_CHANNELS) { + stb_vorbis_close(pInternalVorbis); + ma__free_from_callbacks(pData, &pDecoder->allocationCallbacks); + return MA_ERROR; /* Too many channels. */ + } + + vorbisDataSize = sizeof(ma_vorbis_decoder) + sizeof(float)*vorbisInfo.max_frame_size; + pVorbis = (ma_vorbis_decoder*)ma__malloc_from_callbacks(vorbisDataSize, &pDecoder->allocationCallbacks); + if (pVorbis == NULL) { + stb_vorbis_close(pInternalVorbis); + ma__free_from_callbacks(pData, &pDecoder->allocationCallbacks); + return MA_OUT_OF_MEMORY; + } + + MA_ZERO_MEMORY(pVorbis, vorbisDataSize); + pVorbis->pInternalVorbis = pInternalVorbis; + pVorbis->pData = pData; + pVorbis->dataSize = dataSize; + pVorbis->dataCapacity = dataCapacity; + + pDecoder->onReadPCMFrames = ma_decoder_internal_on_read_pcm_frames__vorbis; + pDecoder->onSeekToPCMFrame = ma_decoder_internal_on_seek_to_pcm_frame__vorbis; + pDecoder->onUninit = ma_decoder_internal_on_uninit__vorbis; + pDecoder->onGetLengthInPCMFrames = ma_decoder_internal_on_get_length_in_pcm_frames__vorbis; + pDecoder->pInternalDecoder = pVorbis; + + /* The internal format is always f32. */ + pDecoder->internalFormat = ma_format_f32; + pDecoder->internalChannels = vorbisInfo.channels; + pDecoder->internalSampleRate = vorbisInfo.sample_rate; + ma_get_standard_channel_map(ma_standard_channel_map_vorbis, pDecoder->internalChannels, pDecoder->internalChannelMap); + + return MA_SUCCESS; +} +#endif /* STB_VORBIS_INCLUDE_STB_VORBIS_H */ + +/* MP3 */ +#ifdef dr_mp3_h +#define MA_HAS_MP3 + +static size_t ma_decoder_internal_on_read__mp3(void* pUserData, void* pBufferOut, size_t bytesToRead) +{ + ma_decoder* pDecoder = (ma_decoder*)pUserData; + MA_ASSERT(pDecoder != NULL); + + return ma_decoder_read_bytes(pDecoder, pBufferOut, bytesToRead); +} + +static drmp3_bool32 ma_decoder_internal_on_seek__mp3(void* pUserData, int offset, drmp3_seek_origin origin) +{ + ma_decoder* pDecoder = (ma_decoder*)pUserData; + MA_ASSERT(pDecoder != NULL); + + return ma_decoder_seek_bytes(pDecoder, offset, (origin == drmp3_seek_origin_start) ? 
ma_seek_origin_start : ma_seek_origin_current); +} + +static ma_uint64 ma_decoder_internal_on_read_pcm_frames__mp3(ma_decoder* pDecoder, void* pFramesOut, ma_uint64 frameCount) +{ + drmp3* pMP3; + + MA_ASSERT(pDecoder != NULL); + MA_ASSERT(pFramesOut != NULL); + + pMP3 = (drmp3*)pDecoder->pInternalDecoder; + MA_ASSERT(pMP3 != NULL); + +#if defined(DR_MP3_FLOAT_OUTPUT) + MA_ASSERT(pDecoder->internalFormat == ma_format_f32); + return drmp3_read_pcm_frames_f32(pMP3, frameCount, (float*)pFramesOut); +#else + MA_ASSERT(pDecoder->internalFormat == ma_format_s16); + return drmp3_read_pcm_frames_s16(pMP3, frameCount, (drmp3_int16*)pFramesOut); +#endif +} + +static ma_result ma_decoder_internal_on_seek_to_pcm_frame__mp3(ma_decoder* pDecoder, ma_uint64 frameIndex) +{ + drmp3* pMP3; + drmp3_bool32 result; + + pMP3 = (drmp3*)pDecoder->pInternalDecoder; + MA_ASSERT(pMP3 != NULL); + + result = drmp3_seek_to_pcm_frame(pMP3, frameIndex); + if (result) { + return MA_SUCCESS; + } else { + return MA_ERROR; + } +} + +static ma_result ma_decoder_internal_on_uninit__mp3(ma_decoder* pDecoder) +{ + drmp3_uninit((drmp3*)pDecoder->pInternalDecoder); + ma__free_from_callbacks(pDecoder->pInternalDecoder, &pDecoder->allocationCallbacks); + return MA_SUCCESS; +} + +static ma_uint64 ma_decoder_internal_on_get_length_in_pcm_frames__mp3(ma_decoder* pDecoder) +{ + return drmp3_get_pcm_frame_count((drmp3*)pDecoder->pInternalDecoder); +} + +static ma_result ma_decoder_init_mp3__internal(const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + drmp3* pMP3; + drmp3_allocation_callbacks allocationCallbacks; + + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDecoder != NULL); + + pMP3 = (drmp3*)ma__malloc_from_callbacks(sizeof(*pMP3), &pDecoder->allocationCallbacks); + if (pMP3 == NULL) { + return MA_OUT_OF_MEMORY; + } + + allocationCallbacks.pUserData = pDecoder->allocationCallbacks.pUserData; + allocationCallbacks.onMalloc = pDecoder->allocationCallbacks.onMalloc; + allocationCallbacks.onRealloc = pDecoder->allocationCallbacks.onRealloc; + allocationCallbacks.onFree = pDecoder->allocationCallbacks.onFree; + + /* + Try opening the decoder first. We always use whatever dr_mp3 reports for channel count and sample rate. The format is determined by + the presence of DR_MP3_FLOAT_OUTPUT. + */ + if (!drmp3_init(pMP3, ma_decoder_internal_on_read__mp3, ma_decoder_internal_on_seek__mp3, pDecoder, &allocationCallbacks)) { + ma__free_from_callbacks(pMP3, &pDecoder->allocationCallbacks); + return MA_ERROR; + } + + /* If we get here it means we successfully initialized the MP3 decoder. We can now initialize the rest of the ma_decoder. */ + pDecoder->onReadPCMFrames = ma_decoder_internal_on_read_pcm_frames__mp3; + pDecoder->onSeekToPCMFrame = ma_decoder_internal_on_seek_to_pcm_frame__mp3; + pDecoder->onUninit = ma_decoder_internal_on_uninit__mp3; + pDecoder->onGetLengthInPCMFrames = ma_decoder_internal_on_get_length_in_pcm_frames__mp3; + pDecoder->pInternalDecoder = pMP3; + + /* Internal format. 
*/ +#if defined(DR_MP3_FLOAT_OUTPUT) + pDecoder->internalFormat = ma_format_f32; +#else + pDecoder->internalFormat = ma_format_s16; +#endif + pDecoder->internalChannels = pMP3->channels; + pDecoder->internalSampleRate = pMP3->sampleRate; + ma_get_standard_channel_map(ma_standard_channel_map_default, pDecoder->internalChannels, pDecoder->internalChannelMap); + + return MA_SUCCESS; +} +#endif /* dr_mp3_h */ + +/* Raw */ +static ma_uint64 ma_decoder_internal_on_read_pcm_frames__raw(ma_decoder* pDecoder, void* pFramesOut, ma_uint64 frameCount) +{ + ma_uint32 bpf; + ma_uint64 totalFramesRead; + void* pRunningFramesOut; + + + MA_ASSERT(pDecoder != NULL); + MA_ASSERT(pFramesOut != NULL); + + /* For raw decoding we just read directly from the decoder's callbacks. */ + bpf = ma_get_bytes_per_frame(pDecoder->internalFormat, pDecoder->internalChannels); + + totalFramesRead = 0; + pRunningFramesOut = pFramesOut; + + while (totalFramesRead < frameCount) { + ma_uint64 framesReadThisIteration; + ma_uint64 framesToReadThisIteration = (frameCount - totalFramesRead); + if (framesToReadThisIteration > MA_SIZE_MAX) { + framesToReadThisIteration = MA_SIZE_MAX; + } + + framesReadThisIteration = ma_decoder_read_bytes(pDecoder, pRunningFramesOut, (size_t)framesToReadThisIteration * bpf) / bpf; /* Safe cast to size_t. */ + + totalFramesRead += framesReadThisIteration; + pRunningFramesOut = ma_offset_ptr(pRunningFramesOut, framesReadThisIteration * bpf); + + if (framesReadThisIteration < framesToReadThisIteration) { + break; /* Done. */ + } + } + + return totalFramesRead; +} + +static ma_result ma_decoder_internal_on_seek_to_pcm_frame__raw(ma_decoder* pDecoder, ma_uint64 frameIndex) +{ + ma_bool32 result = MA_FALSE; + ma_uint64 totalBytesToSeek; + + MA_ASSERT(pDecoder != NULL); + + if (pDecoder->onSeek == NULL) { + return MA_ERROR; + } + + /* The callback uses a 32 bit integer whereas we use a 64 bit unsigned integer. We just need to continuously seek until we're at the correct position. */ + totalBytesToSeek = frameIndex * ma_get_bytes_per_frame(pDecoder->internalFormat, pDecoder->internalChannels); + if (totalBytesToSeek < 0x7FFFFFFF) { + /* Simple case. */ + result = ma_decoder_seek_bytes(pDecoder, (int)(frameIndex * ma_get_bytes_per_frame(pDecoder->internalFormat, pDecoder->internalChannels)), ma_seek_origin_start); + } else { + /* Complex case. Start by doing a seek relative to the start. Then keep looping using offset seeking. 
*/ + result = ma_decoder_seek_bytes(pDecoder, 0x7FFFFFFF, ma_seek_origin_start); + if (result == MA_TRUE) { + totalBytesToSeek -= 0x7FFFFFFF; + + while (totalBytesToSeek > 0) { + ma_uint64 bytesToSeekThisIteration = totalBytesToSeek; + if (bytesToSeekThisIteration > 0x7FFFFFFF) { + bytesToSeekThisIteration = 0x7FFFFFFF; + } + + result = ma_decoder_seek_bytes(pDecoder, (int)bytesToSeekThisIteration, ma_seek_origin_current); + if (result != MA_TRUE) { + break; + } + + totalBytesToSeek -= bytesToSeekThisIteration; + } + } + } + + if (result) { + return MA_SUCCESS; + } else { + return MA_ERROR; + } +} + +static ma_result ma_decoder_internal_on_uninit__raw(ma_decoder* pDecoder) +{ + (void)pDecoder; + return MA_SUCCESS; +} + +static ma_uint64 ma_decoder_internal_on_get_length_in_pcm_frames__raw(ma_decoder* pDecoder) +{ + (void)pDecoder; + return 0; +} + +static ma_result ma_decoder_init_raw__internal(const ma_decoder_config* pConfigIn, const ma_decoder_config* pConfigOut, ma_decoder* pDecoder) +{ + MA_ASSERT(pConfigIn != NULL); + MA_ASSERT(pConfigOut != NULL); + MA_ASSERT(pDecoder != NULL); + + pDecoder->onReadPCMFrames = ma_decoder_internal_on_read_pcm_frames__raw; + pDecoder->onSeekToPCMFrame = ma_decoder_internal_on_seek_to_pcm_frame__raw; + pDecoder->onUninit = ma_decoder_internal_on_uninit__raw; + pDecoder->onGetLengthInPCMFrames = ma_decoder_internal_on_get_length_in_pcm_frames__raw; + + /* Internal format. */ + pDecoder->internalFormat = pConfigIn->format; + pDecoder->internalChannels = pConfigIn->channels; + pDecoder->internalSampleRate = pConfigIn->sampleRate; + ma_channel_map_copy(pDecoder->internalChannelMap, pConfigIn->channelMap, pConfigIn->channels); + + return MA_SUCCESS; +} + +static ma_result ma_decoder__init_allocation_callbacks(const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + MA_ASSERT(pDecoder != NULL); + + if (pConfig != NULL) { + return ma_allocation_callbacks_init_copy(&pDecoder->allocationCallbacks, &pConfig->allocationCallbacks); + } else { + pDecoder->allocationCallbacks = ma_allocation_callbacks_init_default(); + return MA_SUCCESS; + } +} + +static ma_result ma_decoder__preinit(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result; + + MA_ASSERT(pConfig != NULL); + + if (pDecoder == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pDecoder); + + if (onRead == NULL || onSeek == NULL) { + return MA_INVALID_ARGS; + } + + pDecoder->onRead = onRead; + pDecoder->onSeek = onSeek; + pDecoder->pUserData = pUserData; + + result = ma_decoder__init_allocation_callbacks(pConfig, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +static ma_result ma_decoder__postinit(const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result; + + result = ma_decoder__init_data_converter(pDecoder, pConfig); + if (result != MA_SUCCESS) { + return result; + } + + return result; +} + +MA_API ma_result ma_decoder_init_wav(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_decoder_config config; + ma_result result; + + config = ma_decoder_config_init_copy(pConfig); + + result = ma_decoder__preinit(onRead, onSeek, pUserData, &config, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + +#ifdef MA_HAS_WAV + result = ma_decoder_init_wav__internal(&config, pDecoder); +#else + result = MA_NO_BACKEND; +#endif + if (result != MA_SUCCESS) { 
+ return result; + } + + return ma_decoder__postinit(&config, pDecoder); +} + +MA_API ma_result ma_decoder_init_flac(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_decoder_config config; + ma_result result; + + config = ma_decoder_config_init_copy(pConfig); + + result = ma_decoder__preinit(onRead, onSeek, pUserData, &config, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + +#ifdef MA_HAS_FLAC + result = ma_decoder_init_flac__internal(&config, pDecoder); +#else + result = MA_NO_BACKEND; +#endif + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder__postinit(&config, pDecoder); +} + +MA_API ma_result ma_decoder_init_vorbis(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_decoder_config config; + ma_result result; + + config = ma_decoder_config_init_copy(pConfig); + + result = ma_decoder__preinit(onRead, onSeek, pUserData, &config, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + +#ifdef MA_HAS_VORBIS + result = ma_decoder_init_vorbis__internal(&config, pDecoder); +#else + result = MA_NO_BACKEND; +#endif + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder__postinit(&config, pDecoder); +} + +MA_API ma_result ma_decoder_init_mp3(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_decoder_config config; + ma_result result; + + config = ma_decoder_config_init_copy(pConfig); + + result = ma_decoder__preinit(onRead, onSeek, pUserData, &config, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + +#ifdef MA_HAS_MP3 + result = ma_decoder_init_mp3__internal(&config, pDecoder); +#else + result = MA_NO_BACKEND; +#endif + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder__postinit(&config, pDecoder); +} + +MA_API ma_result ma_decoder_init_raw(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfigIn, const ma_decoder_config* pConfigOut, ma_decoder* pDecoder) +{ + ma_decoder_config config; + ma_result result; + + config = ma_decoder_config_init_copy(pConfigOut); + + result = ma_decoder__preinit(onRead, onSeek, pUserData, &config, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_decoder_init_raw__internal(pConfigIn, &config, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder__postinit(&config, pDecoder); +} + +static ma_result ma_decoder_init__internal(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result = MA_NO_BACKEND; + + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDecoder != NULL); + + /* Silence some warnings in the case that we don't have any decoder backends enabled. */ + (void)onRead; + (void)onSeek; + (void)pUserData; + (void)pConfig; + (void)pDecoder; + + /* We use trial and error to open a decoder. 
*/ + +#ifdef MA_HAS_WAV + if (result != MA_SUCCESS) { + result = ma_decoder_init_wav__internal(pConfig, pDecoder); + if (result != MA_SUCCESS) { + onSeek(pDecoder, 0, ma_seek_origin_start); + } + } +#endif +#ifdef MA_HAS_FLAC + if (result != MA_SUCCESS) { + result = ma_decoder_init_flac__internal(pConfig, pDecoder); + if (result != MA_SUCCESS) { + onSeek(pDecoder, 0, ma_seek_origin_start); + } + } +#endif +#ifdef MA_HAS_VORBIS + if (result != MA_SUCCESS) { + result = ma_decoder_init_vorbis__internal(pConfig, pDecoder); + if (result != MA_SUCCESS) { + onSeek(pDecoder, 0, ma_seek_origin_start); + } + } +#endif +#ifdef MA_HAS_MP3 + if (result != MA_SUCCESS) { + result = ma_decoder_init_mp3__internal(pConfig, pDecoder); + if (result != MA_SUCCESS) { + onSeek(pDecoder, 0, ma_seek_origin_start); + } + } +#endif + + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder__postinit(pConfig, pDecoder); +} + +MA_API ma_result ma_decoder_init(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_decoder_config config; + ma_result result; + + config = ma_decoder_config_init_copy(pConfig); + + result = ma_decoder__preinit(onRead, onSeek, pUserData, &config, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder_init__internal(onRead, onSeek, pUserData, &config, pDecoder); +} + + +static size_t ma_decoder__on_read_memory(ma_decoder* pDecoder, void* pBufferOut, size_t bytesToRead) +{ + size_t bytesRemaining; + + MA_ASSERT(pDecoder->memory.dataSize >= pDecoder->memory.currentReadPos); + + bytesRemaining = pDecoder->memory.dataSize - pDecoder->memory.currentReadPos; + if (bytesToRead > bytesRemaining) { + bytesToRead = bytesRemaining; + } + + if (bytesToRead > 0) { + MA_COPY_MEMORY(pBufferOut, pDecoder->memory.pData + pDecoder->memory.currentReadPos, bytesToRead); + pDecoder->memory.currentReadPos += bytesToRead; + } + + return bytesToRead; +} + +static ma_bool32 ma_decoder__on_seek_memory(ma_decoder* pDecoder, int byteOffset, ma_seek_origin origin) +{ + if (origin == ma_seek_origin_current) { + if (byteOffset > 0) { + if (pDecoder->memory.currentReadPos + byteOffset > pDecoder->memory.dataSize) { + byteOffset = (int)(pDecoder->memory.dataSize - pDecoder->memory.currentReadPos); /* Trying to seek too far forward. */ + } + } else { + if (pDecoder->memory.currentReadPos < (size_t)-byteOffset) { + byteOffset = -(int)pDecoder->memory.currentReadPos; /* Trying to seek too far backwards. */ + } + } + + /* This will never underflow thanks to the clamps above. */ + pDecoder->memory.currentReadPos += byteOffset; + } else { + if ((ma_uint32)byteOffset <= pDecoder->memory.dataSize) { + pDecoder->memory.currentReadPos = byteOffset; + } else { + pDecoder->memory.currentReadPos = pDecoder->memory.dataSize; /* Trying to seek too far forward. 
*/ + } + } + + return MA_TRUE; +} + +static ma_result ma_decoder__preinit_memory(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result = ma_decoder__preinit(ma_decoder__on_read_memory, ma_decoder__on_seek_memory, NULL, pConfig, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + if (pData == NULL || dataSize == 0) { + return MA_INVALID_ARGS; + } + + pDecoder->memory.pData = (const ma_uint8*)pData; + pDecoder->memory.dataSize = dataSize; + pDecoder->memory.currentReadPos = 0; + + (void)pConfig; + return MA_SUCCESS; +} + +MA_API ma_result ma_decoder_init_memory(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_decoder_config config; + ma_result result; + + config = ma_decoder_config_init_copy(pConfig); /* Make sure the config is not NULL. */ + + result = ma_decoder__preinit_memory(pData, dataSize, &config, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder_init__internal(ma_decoder__on_read_memory, ma_decoder__on_seek_memory, NULL, &config, pDecoder); +} + +MA_API ma_result ma_decoder_init_memory_wav(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_decoder_config config; + ma_result result; + + config = ma_decoder_config_init_copy(pConfig); /* Make sure the config is not NULL. */ + + result = ma_decoder__preinit_memory(pData, dataSize, &config, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + +#ifdef MA_HAS_WAV + result = ma_decoder_init_wav__internal(&config, pDecoder); +#else + result = MA_NO_BACKEND; +#endif + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder__postinit(&config, pDecoder); +} + +MA_API ma_result ma_decoder_init_memory_flac(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_decoder_config config; + ma_result result; + + config = ma_decoder_config_init_copy(pConfig); /* Make sure the config is not NULL. */ + + result = ma_decoder__preinit_memory(pData, dataSize, &config, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + +#ifdef MA_HAS_FLAC + result = ma_decoder_init_flac__internal(&config, pDecoder); +#else + result = MA_NO_BACKEND; +#endif + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder__postinit(&config, pDecoder); +} + +MA_API ma_result ma_decoder_init_memory_vorbis(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_decoder_config config; + ma_result result; + + config = ma_decoder_config_init_copy(pConfig); /* Make sure the config is not NULL. */ + + result = ma_decoder__preinit_memory(pData, dataSize, &config, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + +#ifdef MA_HAS_VORBIS + result = ma_decoder_init_vorbis__internal(&config, pDecoder); +#else + result = MA_NO_BACKEND; +#endif + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder__postinit(&config, pDecoder); +} + +MA_API ma_result ma_decoder_init_memory_mp3(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_decoder_config config; + ma_result result; + + config = ma_decoder_config_init_copy(pConfig); /* Make sure the config is not NULL. 
*/ + + result = ma_decoder__preinit_memory(pData, dataSize, &config, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + +#ifdef MA_HAS_MP3 + result = ma_decoder_init_mp3__internal(&config, pDecoder); +#else + result = MA_NO_BACKEND; +#endif + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder__postinit(&config, pDecoder); +} + +MA_API ma_result ma_decoder_init_memory_raw(const void* pData, size_t dataSize, const ma_decoder_config* pConfigIn, const ma_decoder_config* pConfigOut, ma_decoder* pDecoder) +{ + ma_decoder_config config; + ma_result result; + + config = ma_decoder_config_init_copy(pConfigOut); /* Make sure the config is not NULL. */ + + result = ma_decoder__preinit_memory(pData, dataSize, &config, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_decoder_init_raw__internal(pConfigIn, &config, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder__postinit(&config, pDecoder); +} + +static const char* ma_path_file_name(const char* path) +{ + const char* fileName; + + if (path == NULL) { + return NULL; + } + + fileName = path; + + /* We just loop through the path until we find the last slash. */ + while (path[0] != '\0') { + if (path[0] == '/' || path[0] == '\\') { + fileName = path; + } + + path += 1; + } + + /* At this point the file name is sitting on a slash, so just move forward. */ + while (fileName[0] != '\0' && (fileName[0] == '/' || fileName[0] == '\\')) { + fileName += 1; + } + + return fileName; +} + +static const wchar_t* ma_path_file_name_w(const wchar_t* path) +{ + const wchar_t* fileName; + + if (path == NULL) { + return NULL; + } + + fileName = path; + + /* We just loop through the path until we find the last slash. */ + while (path[0] != '\0') { + if (path[0] == '/' || path[0] == '\\') { + fileName = path; + } + + path += 1; + } + + /* At this point the file name is sitting on a slash, so just move forward. */ + while (fileName[0] != '\0' && (fileName[0] == '/' || fileName[0] == '\\')) { + fileName += 1; + } + + return fileName; +} + + +static const char* ma_path_extension(const char* path) +{ + const char* extension; + const char* lastOccurance; + + if (path == NULL) { + path = ""; + } + + extension = ma_path_file_name(path); + lastOccurance = NULL; + + /* Just find the last '.' and return. */ + while (extension[0] != '\0') { + if (extension[0] == '.') { + extension += 1; + lastOccurance = extension; + } + + extension += 1; + } + + return (lastOccurance != NULL) ? lastOccurance : extension; +} + +static const wchar_t* ma_path_extension_w(const wchar_t* path) +{ + const wchar_t* extension; + const wchar_t* lastOccurance; + + if (path == NULL) { + path = L""; + } + + extension = ma_path_file_name_w(path); + lastOccurance = NULL; + + /* Just find the last '.' and return. */ + while (extension[0] != '\0') { + if (extension[0] == '.') { + extension += 1; + lastOccurance = extension; + } + + extension += 1; + } + + return (lastOccurance != NULL) ? 
lastOccurance : extension; +} + + +static ma_bool32 ma_path_extension_equal(const char* path, const char* extension) +{ + const char* ext1; + const char* ext2; + + if (path == NULL || extension == NULL) { + return MA_FALSE; + } + + ext1 = extension; + ext2 = ma_path_extension(path); + +#if defined(_MSC_VER) || defined(__DMC__) + return _stricmp(ext1, ext2) == 0; +#else + return strcasecmp(ext1, ext2) == 0; +#endif +} + +static ma_bool32 ma_path_extension_equal_w(const wchar_t* path, const wchar_t* extension) +{ + const wchar_t* ext1; + const wchar_t* ext2; + + if (path == NULL || extension == NULL) { + return MA_FALSE; + } + + ext1 = extension; + ext2 = ma_path_extension_w(path); + +#if defined(_MSC_VER) || defined(__DMC__) + return _wcsicmp(ext1, ext2) == 0; +#else + /* + I'm not aware of a wide character version of strcasecmp(). I'm therefore converting the extensions to multibyte strings and comparing those. This + isn't the most efficient way to do it, but it should work OK. + */ + { + char ext1MB[4096]; + char ext2MB[4096]; + const wchar_t* pext1 = ext1; + const wchar_t* pext2 = ext2; + mbstate_t mbs1; + mbstate_t mbs2; + + MA_ZERO_OBJECT(&mbs1); + MA_ZERO_OBJECT(&mbs2); + + if (wcsrtombs(ext1MB, &pext1, sizeof(ext1MB), &mbs1) == (size_t)-1) { + return MA_FALSE; + } + if (wcsrtombs(ext2MB, &pext2, sizeof(ext2MB), &mbs2) == (size_t)-1) { + return MA_FALSE; + } + + return strcasecmp(ext1MB, ext2MB) == 0; + } +#endif +} + + +static size_t ma_decoder__on_read_stdio(ma_decoder* pDecoder, void* pBufferOut, size_t bytesToRead) +{ + return fread(pBufferOut, 1, bytesToRead, (FILE*)pDecoder->pUserData); +} + +static ma_bool32 ma_decoder__on_seek_stdio(ma_decoder* pDecoder, int byteOffset, ma_seek_origin origin) +{ + return fseek((FILE*)pDecoder->pUserData, byteOffset, (origin == ma_seek_origin_current) ? SEEK_CUR : SEEK_SET) == 0; +} + +static ma_result ma_decoder__preinit_file(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result; + FILE* pFile; + + if (pDecoder == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pDecoder); + + if (pFilePath == NULL || pFilePath[0] == '\0') { + return MA_INVALID_ARGS; + } + + result = ma_decoder__init_allocation_callbacks(pConfig, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_fopen(&pFile, pFilePath, "rb"); + if (pFile == NULL) { + return result; + } + + /* We need to manually set the user data so the calls to ma_decoder__on_seek_stdio() succeed. */ + pDecoder->pUserData = pFile; + + return MA_SUCCESS; +} + +static ma_result ma_decoder__preinit_file_w(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result; + FILE* pFile; + + if (pDecoder == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pDecoder); + + if (pFilePath == NULL || pFilePath[0] == '\0') { + return MA_INVALID_ARGS; + } + + result = ma_decoder__init_allocation_callbacks(pConfig, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_wfopen(&pFile, pFilePath, L"rb", &pDecoder->allocationCallbacks); + if (pFile == NULL) { + return result; + } + + /* We need to manually set the user data so the calls to ma_decoder__on_seek_stdio() succeed. 
*/ + pDecoder->pUserData = pFile; + + (void)pConfig; + return MA_SUCCESS; +} + +MA_API ma_result ma_decoder_init_file(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result = ma_decoder__preinit_file(pFilePath, pConfig, pDecoder); /* This sets pDecoder->pUserData to a FILE*. */ + if (result != MA_SUCCESS) { + return result; + } + + /* WAV */ + if (ma_path_extension_equal(pFilePath, "wav")) { + result = ma_decoder_init_wav(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder); + if (result == MA_SUCCESS) { + return MA_SUCCESS; + } + + ma_decoder__on_seek_stdio(pDecoder, 0, ma_seek_origin_start); + } + + /* FLAC */ + if (ma_path_extension_equal(pFilePath, "flac")) { + result = ma_decoder_init_flac(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder); + if (result == MA_SUCCESS) { + return MA_SUCCESS; + } + + ma_decoder__on_seek_stdio(pDecoder, 0, ma_seek_origin_start); + } + + /* MP3 */ + if (ma_path_extension_equal(pFilePath, "mp3")) { + result = ma_decoder_init_mp3(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder); + if (result == MA_SUCCESS) { + return MA_SUCCESS; + } + + ma_decoder__on_seek_stdio(pDecoder, 0, ma_seek_origin_start); + } + + /* Trial and error. */ + return ma_decoder_init(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder); +} + +MA_API ma_result ma_decoder_init_file_wav(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result = ma_decoder__preinit_file(pFilePath, pConfig, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder_init_wav(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder); +} + +MA_API ma_result ma_decoder_init_file_flac(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result = ma_decoder__preinit_file(pFilePath, pConfig, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder_init_flac(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder); +} + +MA_API ma_result ma_decoder_init_file_vorbis(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result = ma_decoder__preinit_file(pFilePath, pConfig, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder_init_vorbis(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder); +} + +MA_API ma_result ma_decoder_init_file_mp3(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result = ma_decoder__preinit_file(pFilePath, pConfig, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder_init_mp3(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder); +} + + +MA_API ma_result ma_decoder_init_file_w(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result = ma_decoder__preinit_file_w(pFilePath, pConfig, pDecoder); /* This sets pDecoder->pUserData to a FILE*. 
*/ + if (result != MA_SUCCESS) { + return result; + } + + /* WAV */ + if (ma_path_extension_equal_w(pFilePath, L"wav")) { + result = ma_decoder_init_wav(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder); + if (result == MA_SUCCESS) { + return MA_SUCCESS; + } + + ma_decoder__on_seek_stdio(pDecoder, 0, ma_seek_origin_start); + } + + /* FLAC */ + if (ma_path_extension_equal_w(pFilePath, L"flac")) { + result = ma_decoder_init_flac(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder); + if (result == MA_SUCCESS) { + return MA_SUCCESS; + } + + ma_decoder__on_seek_stdio(pDecoder, 0, ma_seek_origin_start); + } + + /* MP3 */ + if (ma_path_extension_equal_w(pFilePath, L"mp3")) { + result = ma_decoder_init_mp3(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder); + if (result == MA_SUCCESS) { + return MA_SUCCESS; + } + + ma_decoder__on_seek_stdio(pDecoder, 0, ma_seek_origin_start); + } + + /* Trial and error. */ + return ma_decoder_init(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder); +} + +MA_API ma_result ma_decoder_init_file_wav_w(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result = ma_decoder__preinit_file_w(pFilePath, pConfig, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder_init_wav(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder); +} + +MA_API ma_result ma_decoder_init_file_flac_w(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result = ma_decoder__preinit_file_w(pFilePath, pConfig, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder_init_flac(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder); +} + +MA_API ma_result ma_decoder_init_file_vorbis_w(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result = ma_decoder__preinit_file_w(pFilePath, pConfig, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder_init_vorbis(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder); +} + +MA_API ma_result ma_decoder_init_file_mp3_w(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result = ma_decoder__preinit_file_w(pFilePath, pConfig, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder_init_mp3(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder); +} + +MA_API ma_result ma_decoder_uninit(ma_decoder* pDecoder) +{ + if (pDecoder == NULL) { + return MA_INVALID_ARGS; + } + + if (pDecoder->onUninit) { + pDecoder->onUninit(pDecoder); + } + + /* If we have a file handle, close it. 
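+       Only decoders created through the ma_decoder_init_file*() family wire onRead up to
+       ma_decoder__on_read_stdio() and stash the FILE pointer in pUserData, so comparing the callback is how we
+       know that pUserData really is a FILE handle we own.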
*/ + if (pDecoder->onRead == ma_decoder__on_read_stdio) { + fclose((FILE*)pDecoder->pUserData); + } + + ma_data_converter_uninit(&pDecoder->converter); + + return MA_SUCCESS; +} + +MA_API ma_uint64 ma_decoder_get_length_in_pcm_frames(ma_decoder* pDecoder) +{ + if (pDecoder == NULL) { + return 0; + } + + if (pDecoder->onGetLengthInPCMFrames) { + ma_uint64 nativeLengthInPCMFrames = pDecoder->onGetLengthInPCMFrames(pDecoder); + if (pDecoder->internalSampleRate == pDecoder->outputSampleRate) { + return nativeLengthInPCMFrames; + } else { + return ma_calculate_frame_count_after_resampling(pDecoder->outputSampleRate, pDecoder->internalSampleRate, nativeLengthInPCMFrames); + } + } + + return 0; +} + +MA_API ma_uint64 ma_decoder_read_pcm_frames(ma_decoder* pDecoder, void* pFramesOut, ma_uint64 frameCount) +{ + ma_result result; + ma_uint64 totalFramesReadOut; + ma_uint64 totalFramesReadIn; + void* pRunningFramesOut; + + if (pDecoder == NULL) { + return 0; + } + + if (pDecoder->onReadPCMFrames == NULL) { + return 0; + } + + /* Fast path. */ + if (pDecoder->converter.isPassthrough) { + return pDecoder->onReadPCMFrames(pDecoder, pFramesOut, frameCount); + } + + /* Getting here means we need to do data conversion. */ + totalFramesReadOut = 0; + totalFramesReadIn = 0; + pRunningFramesOut = pFramesOut; + + while (totalFramesReadOut < frameCount) { + ma_uint8 pIntermediaryBuffer[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In internal format. */ + ma_uint64 intermediaryBufferCap = sizeof(pIntermediaryBuffer) / ma_get_bytes_per_frame(pDecoder->internalFormat, pDecoder->internalChannels); + ma_uint64 framesToReadThisIterationIn; + ma_uint64 framesReadThisIterationIn; + ma_uint64 framesToReadThisIterationOut; + ma_uint64 framesReadThisIterationOut; + ma_uint64 requiredInputFrameCount; + + framesToReadThisIterationOut = (frameCount - totalFramesReadOut); + framesToReadThisIterationIn = framesToReadThisIterationOut; + if (framesToReadThisIterationIn > intermediaryBufferCap) { + framesToReadThisIterationIn = intermediaryBufferCap; + } + + requiredInputFrameCount = ma_data_converter_get_required_input_frame_count(&pDecoder->converter, framesToReadThisIterationOut); + if (framesToReadThisIterationIn > requiredInputFrameCount) { + framesToReadThisIterationIn = requiredInputFrameCount; + } + + if (requiredInputFrameCount > 0) { + framesReadThisIterationIn = pDecoder->onReadPCMFrames(pDecoder, pIntermediaryBuffer, framesToReadThisIterationIn); + totalFramesReadIn += framesReadThisIterationIn; + } + + /* + At this point we have our decoded data in input format and now we need to convert to output format. Note that even if we didn't read any + input frames, we still want to try processing frames because there may some output frames generated from cached input data. + */ + framesReadThisIterationOut = framesToReadThisIterationOut; + result = ma_data_converter_process_pcm_frames(&pDecoder->converter, pIntermediaryBuffer, &framesReadThisIterationIn, pRunningFramesOut, &framesReadThisIterationOut); + if (result != MA_SUCCESS) { + break; + } + + totalFramesReadOut += framesReadThisIterationOut; + pRunningFramesOut = ma_offset_ptr(pRunningFramesOut, framesReadThisIterationOut * ma_get_bytes_per_frame(pDecoder->outputFormat, pDecoder->outputChannels)); + + if (framesReadThisIterationIn == 0 && framesReadThisIterationOut == 0) { + break; /* We're done. 
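+                          Nothing was consumed from the backend and nothing was produced by the converter this
+                          iteration, so the source is exhausted and the converter has been fully drained; breaking
+                          here avoids spinning forever.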
*/ + } + } + + return totalFramesReadOut; +} + +MA_API ma_result ma_decoder_seek_to_pcm_frame(ma_decoder* pDecoder, ma_uint64 frameIndex) +{ + if (pDecoder == NULL) { + return 0; + } + + if (pDecoder->onSeekToPCMFrame) { + return pDecoder->onSeekToPCMFrame(pDecoder, frameIndex); + } + + /* Should never get here, but if we do it means onSeekToPCMFrame was not set by the backend. */ + return MA_INVALID_ARGS; +} + + +static ma_result ma_decoder__full_decode_and_uninit(ma_decoder* pDecoder, ma_decoder_config* pConfigOut, ma_uint64* pFrameCountOut, void** ppPCMFramesOut) +{ + ma_uint64 totalFrameCount; + ma_uint64 bpf; + ma_uint64 dataCapInFrames; + void* pPCMFramesOut; + + MA_ASSERT(pDecoder != NULL); + + totalFrameCount = 0; + bpf = ma_get_bytes_per_frame(pDecoder->outputFormat, pDecoder->outputChannels); + + /* The frame count is unknown until we try reading. Thus, we just run in a loop. */ + dataCapInFrames = 0; + pPCMFramesOut = NULL; + for (;;) { + ma_uint64 frameCountToTryReading; + ma_uint64 framesJustRead; + + /* Make room if there's not enough. */ + if (totalFrameCount == dataCapInFrames) { + void* pNewPCMFramesOut; + ma_uint64 oldDataCapInFrames = dataCapInFrames; + ma_uint64 newDataCapInFrames = dataCapInFrames*2; + if (newDataCapInFrames == 0) { + newDataCapInFrames = 4096; + } + + if ((newDataCapInFrames * bpf) > MA_SIZE_MAX) { + ma__free_from_callbacks(pPCMFramesOut, &pDecoder->allocationCallbacks); + return MA_TOO_BIG; + } + + + pNewPCMFramesOut = (void*)ma__realloc_from_callbacks(pPCMFramesOut, (size_t)(newDataCapInFrames * bpf), (size_t)(oldDataCapInFrames * bpf), &pDecoder->allocationCallbacks); + if (pNewPCMFramesOut == NULL) { + ma__free_from_callbacks(pPCMFramesOut, &pDecoder->allocationCallbacks); + return MA_OUT_OF_MEMORY; + } + + dataCapInFrames = newDataCapInFrames; + pPCMFramesOut = pNewPCMFramesOut; + } + + frameCountToTryReading = dataCapInFrames - totalFrameCount; + MA_ASSERT(frameCountToTryReading > 0); + + framesJustRead = ma_decoder_read_pcm_frames(pDecoder, (ma_uint8*)pPCMFramesOut + (totalFrameCount * bpf), frameCountToTryReading); + totalFrameCount += framesJustRead; + + if (framesJustRead < frameCountToTryReading) { + break; + } + } + + + if (pConfigOut != NULL) { + pConfigOut->format = pDecoder->outputFormat; + pConfigOut->channels = pDecoder->outputChannels; + pConfigOut->sampleRate = pDecoder->outputSampleRate; + ma_channel_map_copy(pConfigOut->channelMap, pDecoder->outputChannelMap, pDecoder->outputChannels); + } + + if (ppPCMFramesOut != NULL) { + *ppPCMFramesOut = pPCMFramesOut; + } else { + ma__free_from_callbacks(pPCMFramesOut, &pDecoder->allocationCallbacks); + } + + if (pFrameCountOut != NULL) { + *pFrameCountOut = totalFrameCount; + } + + ma_decoder_uninit(pDecoder); + return MA_SUCCESS; +} + +MA_API ma_result ma_decode_file(const char* pFilePath, ma_decoder_config* pConfig, ma_uint64* pFrameCountOut, void** ppPCMFramesOut) +{ + ma_decoder_config config; + ma_decoder decoder; + ma_result result; + + if (pFrameCountOut != NULL) { + *pFrameCountOut = 0; + } + if (ppPCMFramesOut != NULL) { + *ppPCMFramesOut = NULL; + } + + if (pFilePath == NULL) { + return MA_INVALID_ARGS; + } + + config = ma_decoder_config_init_copy(pConfig); + + result = ma_decoder_init_file(pFilePath, &config, &decoder); + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder__full_decode_and_uninit(&decoder, pConfig, pFrameCountOut, ppPCMFramesOut); +} + +MA_API ma_result ma_decode_memory(const void* pData, size_t dataSize, ma_decoder_config* pConfig, ma_uint64* 
pFrameCountOut, void** ppPCMFramesOut) +{ + ma_decoder_config config; + ma_decoder decoder; + ma_result result; + + if (pFrameCountOut != NULL) { + *pFrameCountOut = 0; + } + if (ppPCMFramesOut != NULL) { + *ppPCMFramesOut = NULL; + } + + if (pData == NULL || dataSize == 0) { + return MA_INVALID_ARGS; + } + + config = ma_decoder_config_init_copy(pConfig); + + result = ma_decoder_init_memory(pData, dataSize, &config, &decoder); + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder__full_decode_and_uninit(&decoder, pConfig, pFrameCountOut, ppPCMFramesOut); +} +#endif /* MA_NO_DECODING */ + + +#ifndef MA_NO_ENCODING + +#if defined(MA_HAS_WAV) +static size_t ma_encoder__internal_on_write_wav(void* pUserData, const void* pData, size_t bytesToWrite) +{ + ma_encoder* pEncoder = (ma_encoder*)pUserData; + MA_ASSERT(pEncoder != NULL); + + return pEncoder->onWrite(pEncoder, pData, bytesToWrite); +} + +static drwav_bool32 ma_encoder__internal_on_seek_wav(void* pUserData, int offset, drwav_seek_origin origin) +{ + ma_encoder* pEncoder = (ma_encoder*)pUserData; + MA_ASSERT(pEncoder != NULL); + + return pEncoder->onSeek(pEncoder, offset, (origin == drwav_seek_origin_start) ? ma_seek_origin_start : ma_seek_origin_current); +} + +static ma_result ma_encoder__on_init_wav(ma_encoder* pEncoder) +{ + drwav_data_format wavFormat; + drwav_allocation_callbacks allocationCallbacks; + drwav* pWav; + + MA_ASSERT(pEncoder != NULL); + + pWav = (drwav*)ma__malloc_from_callbacks(sizeof(*pWav), &pEncoder->config.allocationCallbacks); + if (pWav == NULL) { + return MA_OUT_OF_MEMORY; + } + + wavFormat.container = drwav_container_riff; + wavFormat.channels = pEncoder->config.channels; + wavFormat.sampleRate = pEncoder->config.sampleRate; + wavFormat.bitsPerSample = ma_get_bytes_per_sample(pEncoder->config.format) * 8; + if (pEncoder->config.format == ma_format_f32) { + wavFormat.format = DR_WAVE_FORMAT_IEEE_FLOAT; + } else { + wavFormat.format = DR_WAVE_FORMAT_PCM; + } + + allocationCallbacks.pUserData = pEncoder->config.allocationCallbacks.pUserData; + allocationCallbacks.onMalloc = pEncoder->config.allocationCallbacks.onMalloc; + allocationCallbacks.onRealloc = pEncoder->config.allocationCallbacks.onRealloc; + allocationCallbacks.onFree = pEncoder->config.allocationCallbacks.onFree; + + if (!drwav_init_write(pWav, &wavFormat, ma_encoder__internal_on_write_wav, ma_encoder__internal_on_seek_wav, pEncoder, &allocationCallbacks)) { + return MA_ERROR; + } + + pEncoder->pInternalEncoder = pWav; + + return MA_SUCCESS; +} + +static void ma_encoder__on_uninit_wav(ma_encoder* pEncoder) +{ + drwav* pWav; + + MA_ASSERT(pEncoder != NULL); + + pWav = (drwav*)pEncoder->pInternalEncoder; + MA_ASSERT(pWav != NULL); + + drwav_uninit(pWav); + ma__free_from_callbacks(pWav, &pEncoder->config.allocationCallbacks); +} + +static ma_uint64 ma_encoder__on_write_pcm_frames_wav(ma_encoder* pEncoder, const void* pFramesIn, ma_uint64 frameCount) +{ + drwav* pWav; + + MA_ASSERT(pEncoder != NULL); + + pWav = (drwav*)pEncoder->pInternalEncoder; + MA_ASSERT(pWav != NULL); + + return drwav_write_pcm_frames(pWav, frameCount, pFramesIn); +} +#endif + +MA_API ma_encoder_config ma_encoder_config_init(ma_resource_format resourceFormat, ma_format format, ma_uint32 channels, ma_uint32 sampleRate) +{ + ma_encoder_config config; + + MA_ZERO_OBJECT(&config); + config.resourceFormat = resourceFormat; + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + + return config; +} + +MA_API ma_result 
ma_encoder_preinit(const ma_encoder_config* pConfig, ma_encoder* pEncoder) +{ + ma_result result; + + if (pEncoder == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pEncoder); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->format == ma_format_unknown || pConfig->channels == 0 || pConfig->sampleRate == 0) { + return MA_INVALID_ARGS; + } + + pEncoder->config = *pConfig; + + result = ma_allocation_callbacks_init_copy(&pEncoder->config.allocationCallbacks, &pConfig->allocationCallbacks); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_encoder_init__internal(ma_encoder_write_proc onWrite, ma_encoder_seek_proc onSeek, void* pUserData, ma_encoder* pEncoder) +{ + ma_result result = MA_SUCCESS; + + /* This assumes ma_encoder_preinit() has been called prior. */ + MA_ASSERT(pEncoder != NULL); + + if (onWrite == NULL || onSeek == NULL) { + return MA_INVALID_ARGS; + } + + pEncoder->onWrite = onWrite; + pEncoder->onSeek = onSeek; + pEncoder->pUserData = pUserData; + + switch (pEncoder->config.resourceFormat) + { + case ma_resource_format_wav: + { + #if defined(MA_HAS_WAV) + pEncoder->onInit = ma_encoder__on_init_wav; + pEncoder->onUninit = ma_encoder__on_uninit_wav; + pEncoder->onWritePCMFrames = ma_encoder__on_write_pcm_frames_wav; + #else + result = MA_NO_BACKEND; + #endif + } break; + + default: + { + result = MA_INVALID_ARGS; + } break; + } + + /* Getting here means we should have our backend callbacks set up. */ + if (result == MA_SUCCESS) { + result = pEncoder->onInit(pEncoder); + if (result != MA_SUCCESS) { + return result; + } + } + + return MA_SUCCESS; +} + +MA_API size_t ma_encoder__on_write_stdio(ma_encoder* pEncoder, const void* pBufferIn, size_t bytesToWrite) +{ + return fwrite(pBufferIn, 1, bytesToWrite, (FILE*)pEncoder->pFile); +} + +MA_API ma_bool32 ma_encoder__on_seek_stdio(ma_encoder* pEncoder, int byteOffset, ma_seek_origin origin) +{ + return fseek((FILE*)pEncoder->pFile, byteOffset, (origin == ma_seek_origin_current) ? SEEK_CUR : SEEK_SET) == 0; +} + +MA_API ma_result ma_encoder_init_file(const char* pFilePath, const ma_encoder_config* pConfig, ma_encoder* pEncoder) +{ + ma_result result; + FILE* pFile; + + result = ma_encoder_preinit(pConfig, pEncoder); + if (result != MA_SUCCESS) { + return result; + } + + /* Now open the file. If this fails we don't need to uninitialize the encoder. */ + result = ma_fopen(&pFile, pFilePath, "wb"); + if (pFile == NULL) { + return result; + } + + pEncoder->pFile = pFile; + + return ma_encoder_init__internal(ma_encoder__on_write_stdio, ma_encoder__on_seek_stdio, NULL, pEncoder); +} + +MA_API ma_result ma_encoder_init_file_w(const wchar_t* pFilePath, const ma_encoder_config* pConfig, ma_encoder* pEncoder) +{ + ma_result result; + FILE* pFile; + + result = ma_encoder_preinit(pConfig, pEncoder); + if (result != MA_SUCCESS) { + return result; + } + + /* Now open the file. If this fails we don't need to uninitialize the encoder. 
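+       For reference, a minimal caller-side sketch of the encoder API defined in this section (hypothetical file
+       name; pFrames and frameCount are placeholders; error handling elided):
+
+           ma_encoder encoder;
+           ma_encoder_config cfg = ma_encoder_config_init(ma_resource_format_wav, ma_format_f32, 2, 44100);
+           if (ma_encoder_init_file("recording.wav", &cfg, &encoder) == MA_SUCCESS) {
+               ma_encoder_write_pcm_frames(&encoder, pFrames, frameCount);
+               ma_encoder_uninit(&encoder);
+           }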
*/
+    result = ma_wfopen(&pFile, pFilePath, L"wb", &pEncoder->config.allocationCallbacks);
+    if (pFile == NULL) {
+        return result;
+    }
+
+    pEncoder->pFile = pFile;
+
+    return ma_encoder_init__internal(ma_encoder__on_write_stdio, ma_encoder__on_seek_stdio, NULL, pEncoder);
+}
+
+MA_API ma_result ma_encoder_init(ma_encoder_write_proc onWrite, ma_encoder_seek_proc onSeek, void* pUserData, const ma_encoder_config* pConfig, ma_encoder* pEncoder)
+{
+    ma_result result;
+
+    result = ma_encoder_preinit(pConfig, pEncoder);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    return ma_encoder_init__internal(onWrite, onSeek, pUserData, pEncoder);
+}
+
+
+MA_API void ma_encoder_uninit(ma_encoder* pEncoder)
+{
+    if (pEncoder == NULL) {
+        return;
+    }
+
+    if (pEncoder->onUninit) {
+        pEncoder->onUninit(pEncoder);
+    }
+
+    /* If we have a file handle, close it. */
+    if (pEncoder->onWrite == ma_encoder__on_write_stdio) {
+        fclose((FILE*)pEncoder->pFile);
+    }
+}
+
+
+MA_API ma_uint64 ma_encoder_write_pcm_frames(ma_encoder* pEncoder, const void* pFramesIn, ma_uint64 frameCount)
+{
+    if (pEncoder == NULL || pFramesIn == NULL) {
+        return 0;
+    }
+
+    return pEncoder->onWritePCMFrames(pEncoder, pFramesIn, frameCount);
+}
+#endif /* MA_NO_ENCODING */
+
+
+
+/**************************************************************************************************************************************************************
+
+Generation
+
+**************************************************************************************************************************************************************/
+MA_API ma_waveform_config ma_waveform_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, ma_waveform_type type, double amplitude, double frequency)
+{
+    ma_waveform_config config;
+
+    MA_ZERO_OBJECT(&config);
+    config.format = format;
+    config.channels = channels;
+    config.sampleRate = sampleRate;
+    config.type = type;
+    config.amplitude = amplitude;
+    config.frequency = frequency;
+
+    return config;
+}
+
+MA_API ma_result ma_waveform_init(const ma_waveform_config* pConfig, ma_waveform* pWaveform)
+{
+    if (pWaveform == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    MA_ZERO_OBJECT(pWaveform);
+    pWaveform->config = *pConfig;
+    pWaveform->advance = 1.0 / pWaveform->config.sampleRate;
+    pWaveform->time = 0;
+
+    return MA_SUCCESS;
+}
+
+MA_API ma_result ma_waveform_set_amplitude(ma_waveform* pWaveform, double amplitude)
+{
+    if (pWaveform == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    pWaveform->config.amplitude = amplitude;
+    return MA_SUCCESS;
+}
+
+MA_API ma_result ma_waveform_set_frequency(ma_waveform* pWaveform, double frequency)
+{
+    if (pWaveform == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    pWaveform->config.frequency = frequency;
+    return MA_SUCCESS;
+}
+
+MA_API ma_result ma_waveform_set_sample_rate(ma_waveform* pWaveform, ma_uint32 sampleRate)
+{
+    if (pWaveform == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    pWaveform->advance = 1.0 / sampleRate;
+    return MA_SUCCESS;
+}
+
+static float ma_waveform_sine_f32(double time, double frequency, double amplitude)
+{
+    return (float)(ma_sin(MA_TAU_D * time * frequency) * amplitude);
+}
+
+static ma_int16 ma_waveform_sine_s16(double time, double frequency, double amplitude)
+{
+    return ma_pcm_sample_f32_to_s16(ma_waveform_sine_f32(time, frequency, amplitude));
+}
+
+static float ma_waveform_square_f32(double time, double frequency, double amplitude)
+{
+    double t = time * frequency;
+    double f = t - (ma_int64)t;
+    double r;
+
+    if (f < 0.5) {
+        r = amplitude;
+    } else {
+        r
= -amplitude; + } + + return (float)r; +} + +static ma_int16 ma_waveform_square_s16(double time, double frequency, double amplitude) +{ + return ma_pcm_sample_f32_to_s16(ma_waveform_square_f32(time, frequency, amplitude)); +} + +static float ma_waveform_triangle_f32(double time, double frequency, double amplitude) +{ + double t = time * frequency; + double f = t - (ma_int64)t; + double r; + + r = 2 * ma_abs(2 * (f - 0.5)) - 1; + + return (float)(r * amplitude); +} + +static ma_int16 ma_waveform_triangle_s16(double time, double frequency, double amplitude) +{ + return ma_pcm_sample_f32_to_s16(ma_waveform_triangle_f32(time, frequency, amplitude)); +} + +static float ma_waveform_sawtooth_f32(double time, double frequency, double amplitude) +{ + double t = time * frequency; + double f = t - (ma_int64)t; + double r; + + r = 2 * (f - 0.5); + + return (float)(r * amplitude); +} + +static ma_int16 ma_waveform_sawtooth_s16(double time, double frequency, double amplitude) +{ + return ma_pcm_sample_f32_to_s16(ma_waveform_sawtooth_f32(time, frequency, amplitude)); +} + +static void ma_waveform_read_pcm_frames__sine(ma_waveform* pWaveform, void* pFramesOut, ma_uint64 frameCount) +{ + ma_uint64 iFrame; + ma_uint64 iChannel; + ma_uint32 bps = ma_get_bytes_per_sample(pWaveform->config.format); + ma_uint32 bpf = bps * pWaveform->config.channels; + + MA_ASSERT(pWaveform != NULL); + MA_ASSERT(pFramesOut != NULL); + + if (pWaveform->config.format == ma_format_f32) { + float* pFramesOutF32 = (float*)pFramesOut; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_waveform_sine_f32(pWaveform->time, pWaveform->config.frequency, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) { + pFramesOutF32[iFrame*pWaveform->config.channels + iChannel] = s; + } + } + } else if (pWaveform->config.format == ma_format_s16) { + ma_int16* pFramesOutS16 = (ma_int16*)pFramesOut; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_int16 s = ma_waveform_sine_s16(pWaveform->time, pWaveform->config.frequency, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) { + pFramesOutS16[iFrame*pWaveform->config.channels + iChannel] = s; + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_waveform_sine_f32(pWaveform->time, pWaveform->config.frequency, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) { + ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pWaveform->config.format, &s, ma_format_f32, 1, ma_dither_mode_none); + } + } + } +} + +static void ma_waveform_read_pcm_frames__square(ma_waveform* pWaveform, void* pFramesOut, ma_uint64 frameCount) +{ + ma_uint64 iFrame; + ma_uint64 iChannel; + ma_uint32 bps = ma_get_bytes_per_sample(pWaveform->config.format); + ma_uint32 bpf = bps * pWaveform->config.channels; + + MA_ASSERT(pWaveform != NULL); + MA_ASSERT(pFramesOut != NULL); + + if (pWaveform->config.format == ma_format_f32) { + float* pFramesOutF32 = (float*)pFramesOut; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_waveform_square_f32(pWaveform->time, pWaveform->config.frequency, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) { + 
pFramesOutF32[iFrame*pWaveform->config.channels + iChannel] = s; + } + } + } else if (pWaveform->config.format == ma_format_s16) { + ma_int16* pFramesOutS16 = (ma_int16*)pFramesOut; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_int16 s = ma_waveform_square_s16(pWaveform->time, pWaveform->config.frequency, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) { + pFramesOutS16[iFrame*pWaveform->config.channels + iChannel] = s; + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_waveform_square_f32(pWaveform->time, pWaveform->config.frequency, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) { + ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pWaveform->config.format, &s, ma_format_f32, 1, ma_dither_mode_none); + } + } + } +} + +static void ma_waveform_read_pcm_frames__triangle(ma_waveform* pWaveform, void* pFramesOut, ma_uint64 frameCount) +{ + ma_uint64 iFrame; + ma_uint64 iChannel; + ma_uint32 bps = ma_get_bytes_per_sample(pWaveform->config.format); + ma_uint32 bpf = bps * pWaveform->config.channels; + + MA_ASSERT(pWaveform != NULL); + MA_ASSERT(pFramesOut != NULL); + + if (pWaveform->config.format == ma_format_f32) { + float* pFramesOutF32 = (float*)pFramesOut; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_waveform_triangle_f32(pWaveform->time, pWaveform->config.frequency, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) { + pFramesOutF32[iFrame*pWaveform->config.channels + iChannel] = s; + } + } + } else if (pWaveform->config.format == ma_format_s16) { + ma_int16* pFramesOutS16 = (ma_int16*)pFramesOut; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_int16 s = ma_waveform_triangle_s16(pWaveform->time, pWaveform->config.frequency, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) { + pFramesOutS16[iFrame*pWaveform->config.channels + iChannel] = s; + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_waveform_triangle_f32(pWaveform->time, pWaveform->config.frequency, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) { + ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pWaveform->config.format, &s, ma_format_f32, 1, ma_dither_mode_none); + } + } + } +} + +static void ma_waveform_read_pcm_frames__sawtooth(ma_waveform* pWaveform, void* pFramesOut, ma_uint64 frameCount) +{ + ma_uint64 iFrame; + ma_uint64 iChannel; + ma_uint32 bps = ma_get_bytes_per_sample(pWaveform->config.format); + ma_uint32 bpf = bps * pWaveform->config.channels; + + MA_ASSERT(pWaveform != NULL); + MA_ASSERT(pFramesOut != NULL); + + if (pWaveform->config.format == ma_format_f32) { + float* pFramesOutF32 = (float*)pFramesOut; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_waveform_sawtooth_f32(pWaveform->time, pWaveform->config.frequency, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) { + pFramesOutF32[iFrame*pWaveform->config.channels + iChannel] = s; + } + } + } 
else if (pWaveform->config.format == ma_format_s16) { + ma_int16* pFramesOutS16 = (ma_int16*)pFramesOut; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_int16 s = ma_waveform_sawtooth_s16(pWaveform->time, pWaveform->config.frequency, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) { + pFramesOutS16[iFrame*pWaveform->config.channels + iChannel] = s; + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_waveform_sawtooth_f32(pWaveform->time, pWaveform->config.frequency, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) { + ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pWaveform->config.format, &s, ma_format_f32, 1, ma_dither_mode_none); + } + } + } +} + +MA_API ma_uint64 ma_waveform_read_pcm_frames(ma_waveform* pWaveform, void* pFramesOut, ma_uint64 frameCount) +{ + if (pWaveform == NULL) { + return 0; + } + + if (pFramesOut != NULL) { + switch (pWaveform->config.type) + { + case ma_waveform_type_sine: + { + ma_waveform_read_pcm_frames__sine(pWaveform, pFramesOut, frameCount); + } break; + + case ma_waveform_type_square: + { + ma_waveform_read_pcm_frames__square(pWaveform, pFramesOut, frameCount); + } break; + + case ma_waveform_type_triangle: + { + ma_waveform_read_pcm_frames__triangle(pWaveform, pFramesOut, frameCount); + } break; + + case ma_waveform_type_sawtooth: + { + ma_waveform_read_pcm_frames__sawtooth(pWaveform, pFramesOut, frameCount); + } break; + + default: return 0; + } + } else { + pWaveform->time += pWaveform->advance * (ma_int64)frameCount; /* Cast to int64 required for VC6. Won't affect anything in practice. 
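+       Passing NULL for pFramesOut therefore just advances the waveform's clock by frameCount frames without
+       generating any samples, which callers can use to skip ahead. As a minimal usage sketch of the waveform API
+       defined above (hypothetical parameter values; pBuffer and frameCount are placeholders; error handling elided):
+
+           ma_waveform wave;
+           ma_waveform_config cfg = ma_waveform_config_init(ma_format_f32, 2, 48000, ma_waveform_type_sine, 0.2, 220);
+           ma_waveform_init(&cfg, &wave);
+           ma_waveform_read_pcm_frames(&wave, pBuffer, frameCount);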
*/ + } + + return frameCount; +} + + +MA_API ma_noise_config ma_noise_config_init(ma_format format, ma_uint32 channels, ma_noise_type type, ma_int32 seed, double amplitude) +{ + ma_noise_config config; + MA_ZERO_OBJECT(&config); + + config.format = format; + config.channels = channels; + config.type = type; + config.seed = seed; + config.amplitude = amplitude; + + if (config.seed == 0) { + config.seed = MA_DEFAULT_LCG_SEED; + } + + return config; +} + +MA_API ma_result ma_noise_init(const ma_noise_config* pConfig, ma_noise* pNoise) +{ + if (pNoise == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pNoise); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + pNoise->config = *pConfig; + ma_lcg_seed(&pNoise->lcg, pConfig->seed); + + if (pNoise->config.type == ma_noise_type_pink) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < pConfig->channels; iChannel += 1) { + pNoise->state.pink.accumulation[iChannel] = 0; + pNoise->state.pink.counter[iChannel] = 1; + } + } + + if (pNoise->config.type == ma_noise_type_brownian) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < pConfig->channels; iChannel += 1) { + pNoise->state.brownian.accumulation[iChannel] = 0; + } + } + + return MA_SUCCESS; +} + +static MA_INLINE float ma_noise_f32_white(ma_noise* pNoise) +{ + return (float)(ma_lcg_rand_f64(&pNoise->lcg) * pNoise->config.amplitude); +} + +static MA_INLINE ma_int16 ma_noise_s16_white(ma_noise* pNoise) +{ + return ma_pcm_sample_f32_to_s16(ma_noise_f32_white(pNoise)); +} + +static MA_INLINE ma_uint64 ma_noise_read_pcm_frames__white(ma_noise* pNoise, void* pFramesOut, ma_uint64 frameCount) +{ + ma_uint64 iFrame; + ma_uint32 iChannel; + + if (pNoise->config.format == ma_format_f32) { + float* pFramesOutF32 = (float*)pFramesOut; + if (pNoise->config.duplicateChannels) { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_noise_f32_white(pNoise); + for (iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) { + pFramesOutF32[iFrame*pNoise->config.channels + iChannel] = s; + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) { + pFramesOutF32[iFrame*pNoise->config.channels + iChannel] = ma_noise_f32_white(pNoise); + } + } + } + } else if (pNoise->config.format == ma_format_s16) { + ma_int16* pFramesOutS16 = (ma_int16*)pFramesOut; + if (pNoise->config.duplicateChannels) { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_int16 s = ma_noise_s16_white(pNoise); + for (iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) { + pFramesOutS16[iFrame*pNoise->config.channels + iChannel] = s; + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) { + pFramesOutS16[iFrame*pNoise->config.channels + iChannel] = ma_noise_s16_white(pNoise); + } + } + } + } else { + ma_uint32 bps = ma_get_bytes_per_sample(pNoise->config.format); + ma_uint32 bpf = bps * pNoise->config.channels; + + if (pNoise->config.duplicateChannels) { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_noise_f32_white(pNoise); + for (iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) { + ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pNoise->config.format, &s, ma_format_f32, 1, ma_dither_mode_none); + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) { 
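+                    /* Generic formats: produce an independent white noise sample per channel in f32 and convert it
+                       to the requested output format. The duplicateChannels path above shares one sample per frame
+                       across all channels instead. */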
+ float s = ma_noise_f32_white(pNoise); + ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pNoise->config.format, &s, ma_format_f32, 1, ma_dither_mode_none); + } + } + } + } + + return frameCount; +} + + +static MA_INLINE unsigned int ma_tzcnt32(unsigned int x) +{ + unsigned int n; + + /* Special case for odd numbers since they should happen about half the time. */ + if (x & 0x1) { + return 0; + } + + if (x == 0) { + return sizeof(x) << 3; + } + + n = 1; + if ((x & 0x0000FFFF) == 0) { x >>= 16; n += 16; } + if ((x & 0x000000FF) == 0) { x >>= 8; n += 8; } + if ((x & 0x0000000F) == 0) { x >>= 4; n += 4; } + if ((x & 0x00000003) == 0) { x >>= 2; n += 2; } + n -= x & 0x00000001; + + return n; +} + +/* +Pink noise generation based on Tonic (public domain) with modifications. https://github.com/TonicAudio/Tonic/blob/master/src/Tonic/Noise.h + +This is basically _the_ reference for pink noise from what I've found: http://www.firstpr.com.au/dsp/pink-noise/ +*/ +static MA_INLINE float ma_noise_f32_pink(ma_noise* pNoise, ma_uint32 iChannel) +{ + double result; + double binPrev; + double binNext; + unsigned int ibin; + + ibin = ma_tzcnt32(pNoise->state.pink.counter[iChannel]) & (ma_countof(pNoise->state.pink.bin[0]) - 1); + + binPrev = pNoise->state.pink.bin[iChannel][ibin]; + binNext = ma_lcg_rand_f64(&pNoise->lcg); + pNoise->state.pink.bin[iChannel][ibin] = binNext; + + pNoise->state.pink.accumulation[iChannel] += (binNext - binPrev); + pNoise->state.pink.counter[iChannel] += 1; + + result = (ma_lcg_rand_f64(&pNoise->lcg) + pNoise->state.pink.accumulation[iChannel]); + result /= 10; + + return (float)(result * pNoise->config.amplitude); +} + +static MA_INLINE ma_int16 ma_noise_s16_pink(ma_noise* pNoise, ma_uint32 iChannel) +{ + return ma_pcm_sample_f32_to_s16(ma_noise_f32_pink(pNoise, iChannel)); +} + +static MA_INLINE ma_uint64 ma_noise_read_pcm_frames__pink(ma_noise* pNoise, void* pFramesOut, ma_uint64 frameCount) +{ + ma_uint64 iFrame; + ma_uint32 iChannel; + + if (pNoise->config.format == ma_format_f32) { + float* pFramesOutF32 = (float*)pFramesOut; + if (pNoise->config.duplicateChannels) { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_noise_f32_pink(pNoise, 0); + for (iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) { + pFramesOutF32[iFrame*pNoise->config.channels + iChannel] = s; + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) { + pFramesOutF32[iFrame*pNoise->config.channels + iChannel] = ma_noise_f32_pink(pNoise, iChannel); + } + } + } + } else if (pNoise->config.format == ma_format_s16) { + ma_int16* pFramesOutS16 = (ma_int16*)pFramesOut; + if (pNoise->config.duplicateChannels) { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_int16 s = ma_noise_s16_pink(pNoise, 0); + for (iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) { + pFramesOutS16[iFrame*pNoise->config.channels + iChannel] = s; + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) { + pFramesOutS16[iFrame*pNoise->config.channels + iChannel] = ma_noise_s16_pink(pNoise, iChannel); + } + } + } + } else { + ma_uint32 bps = ma_get_bytes_per_sample(pNoise->config.format); + ma_uint32 bpf = bps * pNoise->config.channels; + + if (pNoise->config.duplicateChannels) { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_noise_f32_pink(pNoise, 0); + for 
(iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) { + ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pNoise->config.format, &s, ma_format_f32, 1, ma_dither_mode_none); + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) { + float s = ma_noise_f32_pink(pNoise, iChannel); + ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pNoise->config.format, &s, ma_format_f32, 1, ma_dither_mode_none); + } + } + } + } + + return frameCount; +} + + +static MA_INLINE float ma_noise_f32_brownian(ma_noise* pNoise, ma_uint32 iChannel) +{ + double result; + + result = (ma_lcg_rand_f64(&pNoise->lcg) + pNoise->state.brownian.accumulation[iChannel]); + result /= 1.005; /* Don't escape the -1..1 range on average. */ + + pNoise->state.brownian.accumulation[iChannel] = result; + result /= 20; + + return (float)(result * pNoise->config.amplitude); +} + +static MA_INLINE ma_int16 ma_noise_s16_brownian(ma_noise* pNoise, ma_uint32 iChannel) +{ + return ma_pcm_sample_f32_to_s16(ma_noise_f32_brownian(pNoise, iChannel)); +} + +static MA_INLINE ma_uint64 ma_noise_read_pcm_frames__brownian(ma_noise* pNoise, void* pFramesOut, ma_uint64 frameCount) +{ + ma_uint64 iFrame; + ma_uint32 iChannel; + + if (pNoise->config.format == ma_format_f32) { + float* pFramesOutF32 = (float*)pFramesOut; + if (pNoise->config.duplicateChannels) { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_noise_f32_brownian(pNoise, 0); + for (iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) { + pFramesOutF32[iFrame*pNoise->config.channels + iChannel] = s; + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) { + pFramesOutF32[iFrame*pNoise->config.channels + iChannel] = ma_noise_f32_brownian(pNoise, iChannel); + } + } + } + } else if (pNoise->config.format == ma_format_s16) { + ma_int16* pFramesOutS16 = (ma_int16*)pFramesOut; + if (pNoise->config.duplicateChannels) { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_int16 s = ma_noise_s16_brownian(pNoise, 0); + for (iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) { + pFramesOutS16[iFrame*pNoise->config.channels + iChannel] = s; + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) { + pFramesOutS16[iFrame*pNoise->config.channels + iChannel] = ma_noise_s16_brownian(pNoise, iChannel); + } + } + } + } else { + ma_uint32 bps = ma_get_bytes_per_sample(pNoise->config.format); + ma_uint32 bpf = bps * pNoise->config.channels; + + if (pNoise->config.duplicateChannels) { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_noise_f32_brownian(pNoise, 0); + for (iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) { + ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pNoise->config.format, &s, ma_format_f32, 1, ma_dither_mode_none); + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) { + float s = ma_noise_f32_brownian(pNoise, iChannel); + ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pNoise->config.format, &s, ma_format_f32, 1, ma_dither_mode_none); + } + } + } + } + + return frameCount; +} + +MA_API ma_uint64 ma_noise_read_pcm_frames(ma_noise* pNoise, void* 
pFramesOut, ma_uint64 frameCount) +{ + if (pNoise == NULL) { + return 0; + } + + if (pNoise->config.type == ma_noise_type_white) { + return ma_noise_read_pcm_frames__white(pNoise, pFramesOut, frameCount); + } + + if (pNoise->config.type == ma_noise_type_pink) { + return ma_noise_read_pcm_frames__pink(pNoise, pFramesOut, frameCount); + } + + if (pNoise->config.type == ma_noise_type_brownian) { + return ma_noise_read_pcm_frames__brownian(pNoise, pFramesOut, frameCount); + } + + /* Should never get here. */ + MA_ASSERT(MA_FALSE); + return 0; +} + +/* End globally disabled warnings. */ +#if defined(_MSC_VER) + #pragma warning(pop) +#endif + +#endif /* MINIAUDIO_IMPLEMENTATION */ + +/* +MAJOR CHANGES IN VERSION 0.9 +============================ +Version 0.9 includes major API changes, centered mostly around full-duplex and the rebrand to "miniaudio". Before I go into +detail about the major changes I would like to apologize. I know it's annoying dealing with breaking API changes, but I think +it's best to get these changes out of the way now while the library is still relatively young and unknown. + +There's been a lot of refactoring with this release so there's a good chance a few bugs have been introduced. I apologize in +advance for this. You may want to hold off on upgrading for the short term if you're worried. If mini_al v0.8.14 works for +you, and you don't need full-duplex support, you can avoid upgrading (though you won't be getting future bug fixes). + + +Rebranding to "miniaudio" +------------------------- +The decision was made to rename mini_al to miniaudio. Don't worry, it's the same project. The reason for this is simple: + +1) Having the word "audio" in the title makes it immediately clear that the library is related to audio; and +2) I don't like the look of the underscore. + +This rebrand has necessitated a change in namespace from "mal" to "ma". I know this is annoying, and I apologize, but it's +better to get this out of the road now rather than later. Also, since there are necessary API changes for full-duplex support +I think it's better to just get the namespace change over and done with at the same time as the full-duplex changes. I'm hoping +this will be the last of the major API changes. Fingers crossed! + +The implementation define is now "#define MINIAUDIO_IMPLEMENTATION". You can also use "#define MA_IMPLEMENTATION" if that's +your preference. + + +Full-Duplex Support +------------------- +The major feature added to version 0.9 is full-duplex. This has necessitated a few API changes. + +1) The data callback has now changed. Previously there was one type of callback for playback and another for capture. I wanted + to avoid a third callback just for full-duplex so the decision was made to break this API and unify the callbacks. Now, + there is just one callback which is the same for all three modes (playback, capture, duplex). The new callback looks like + the following: + + void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount); + + This callback allows you to move data straight out of the input buffer and into the output buffer in full-duplex mode. In + playback-only mode, pInput will be null. Likewise, pOutput will be null in capture-only mode. The sample count is no longer + returned from the callback since it's not necessary for miniaudio anymore. + +2) The device config needed to change in order to support full-duplex. 
Full-duplex requires the ability to allow the client + to choose a different PCM format for the playback and capture sides. The old ma_device_config object simply did not allow + this and needed to change. With these changes you now specify the device ID, format, channels, channel map and share mode + on a per-playback and per-capture basis (see example below). The sample rate must be the same for playback and capture. + + Since the device config API has changed I have also decided to take the opportunity to simplify device initialization. Now, + the device ID, device type and callback user data are set in the config. ma_device_init() is now simplified down to taking + just the context, device config and a pointer to the device object being initialized. The rationale for this change is that + it just makes more sense to me that these are set as part of the config like everything else. + + Example device initialization: + + ma_device_config config = ma_device_config_init(ma_device_type_duplex); // Or ma_device_type_playback or ma_device_type_capture. + config.playback.pDeviceID = &myPlaybackDeviceID; // Or NULL for the default playback device. + config.playback.format = ma_format_f32; + config.playback.channels = 2; + config.capture.pDeviceID = &myCaptureDeviceID; // Or NULL for the default capture device. + config.capture.format = ma_format_s16; + config.capture.channels = 1; + config.sampleRate = 44100; + config.dataCallback = data_callback; + config.pUserData = &myUserData; + + result = ma_device_init(&myContext, &config, &device); + if (result != MA_SUCCESS) { + ... handle error ... + } + + Note that the "onDataCallback" member of ma_device_config has been renamed to "dataCallback". Also, "onStopCallback" has + been renamed to "stopCallback". + +This is the first pass for full-duplex and there is a known bug. You will hear crackling on the following backends when sample +rate conversion is required for the playback device: + - Core Audio + - JACK + - AAudio + - OpenSL + - WebAudio + +In addition to the above, not all platforms have been absolutely thoroughly tested simply because I lack the hardware for such +thorough testing. If you experience a bug, an issue report on GitHub or an email would be greatly appreciated (and a sample +program that reproduces the issue if possible). + + +Other API Changes +----------------- +In addition to the above, the following API changes have been made: + +- The log callback is no longer passed to ma_context_config_init(). Instead you need to set it manually after initialization. +- The onLogCallback member of ma_context_config has been renamed to "logCallback". +- The log callback now takes a logLevel parameter. The new callback looks like: void log_callback(ma_context* pContext, ma_device* pDevice, ma_uint32 logLevel, const char* message) + - You can use ma_log_level_to_string() to convert the logLevel to human readable text if you want to log it. +- Some APIs have been renamed: + - mal_decoder_read() -> ma_decoder_read_pcm_frames() + - mal_decoder_seek_to_frame() -> ma_decoder_seek_to_pcm_frame() + - mal_sine_wave_read() -> ma_sine_wave_read_f32() + - mal_sine_wave_read_ex() -> ma_sine_wave_read_f32_ex() +- Some APIs have been removed: + - mal_device_get_buffer_size_in_bytes() + - mal_device_set_recv_callback() + - mal_device_set_send_callback() + - mal_src_set_input_sample_rate() + - mal_src_set_output_sample_rate() +- Error codes have been rearranged. If you're a binding maintainer you will need to update. 
+- The ma_backend enums have been rearranged to priority order. The rationale for this is to simplify automatic backend selection
+  and to make it easier to see the priority. If you're a binding maintainer you will need to update.
+- ma_dsp has been renamed to ma_pcm_converter. The rationale for this change is that I'm expecting "ma_dsp" to conflict with
+  some future planned high-level APIs.
+- For functions that take a pointer/count combo, such as ma_decoder_read_pcm_frames(), the parameter order has changed so that
+  the pointer comes before the count. The rationale for this is to keep it consistent with things like memcpy().
+
+
+Miscellaneous Changes
+---------------------
+The following miscellaneous changes have also been made.
+
+- The AAudio backend has been added for Android 8 and above. This is Android's new "High-Performance Audio" API. (For the
+  record, this is one of the nicest audio APIs out there, just behind the BSD audio APIs).
+- The WebAudio backend has been added. This is based on ScriptProcessorNode. This removes the need for SDL.
+- The SDL and OpenAL backends have been removed. These were originally implemented to add support for platforms for which miniaudio
+  was not explicitly supported. These are no longer needed and have therefore been removed.
+- Device initialization now fails if the requested share mode is not supported. If you ask for exclusive mode, you either get an
+  exclusive mode device, or an error. The rationale for this change is to give the client more control over how to handle cases
+  when the desired share mode is unavailable.
+- A lock-free ring buffer API has been added. There are two variants of this. "ma_rb" operates on bytes, whereas "ma_pcm_rb"
+  operates on PCM frames.
+- The library is now licensed as a choice of Public Domain (Unlicense) _or_ MIT-0 (No Attribution) which is the same as MIT, but
+  removes the attribution requirement. The rationale for this is to support countries that don't recognize public domain.
+*/
+
+/*
+REVISION HISTORY
+================
+v0.10.4 - 2020-04-12
+  - Fix a data conversion bug when converting from the client format to the native device format.
+
+v0.10.3 - 2020-04-07
+  - Bring up to date with breaking changes to dr_mp3.
+  - Remove MA_NO_STDIO. This was causing compilation errors and the maintenance cost versus practical benefit is no longer worthwhile.
+  - Fix a bug with data conversion where it was unnecessarily converting to s16 or f32 and then straight back to the original format.
+  - Fix compilation errors and warnings with Visual Studio 2005.
+  - ALSA: Disable ALSA's automatic data conversion by default and add configuration options to the device config:
+    - alsa.noAutoFormat
+    - alsa.noAutoChannels
+    - alsa.noAutoResample
+  - WASAPI: Add some overrun recovery for ma_device_type_capture devices.
+
+v0.10.2 - 2020-03-22
+  - Decorate some APIs with MA_API which were missed in the previous version.
+  - Fix a bug in ma_linear_resampler_set_rate() and ma_linear_resampler_set_rate_ratio().
+
+v0.10.1 - 2020-03-17
+  - Add MA_API decoration. This can be customized by defining it before including miniaudio.h.
+  - Fix a bug where opening a file would return a success code when in fact it failed.
+  - Fix compilation errors with Visual Studio 6 and 2003.
+  - Fix warnings on macOS.
+
+v0.10.0 - 2020-03-07
+  - API CHANGE: Refactor data conversion APIs
+    - ma_format_converter has been removed. Use ma_convert_pcm_frames_format() instead.
+    - ma_channel_router has been replaced with ma_channel_converter.
+ - ma_src has been replaced with ma_resampler + - ma_pcm_converter has been replaced with ma_data_converter + - API CHANGE: Add support for custom memory allocation callbacks. The following APIs have been updated to take an extra parameter for the allocation + callbacks: + - ma_malloc() + - ma_realloc() + - ma_free() + - ma_aligned_malloc() + - ma_aligned_free() + - ma_rb_init() / ma_rb_init_ex() + - ma_pcm_rb_init() / ma_pcm_rb_init_ex() + - API CHANGE: Simplify latency specification in device configurations. The bufferSizeInFrames and bufferSizeInMilliseconds parameters have been replaced with + periodSizeInFrames and periodSizeInMilliseconds respectively. The previous variables defined the size of the entire buffer, whereas the new ones define the + size of a period. The following APIs have been removed since they are no longer relevant: + - ma_get_default_buffer_size_in_milliseconds() + - ma_get_default_buffer_size_in_frames() + - API CHANGE: ma_device_set_stop_callback() has been removed. If you require a stop callback, you must now set it via the device config just like the data + callback. + - API CHANGE: The ma_sine_wave API has been replaced with ma_waveform. The following APIs have been removed: + - ma_sine_wave_init() + - ma_sine_wave_read_f32() + - ma_sine_wave_read_f32_ex() + - API CHANGE: ma_convert_frames() has been updated to take an extra parameter which is the size of the output buffer in PCM frames. Parameters have also been + reordered. + - API CHANGE: ma_convert_frames_ex() has been changed to take a pointer to a ma_data_converter_config object to specify the input and output formats to + convert between. + - API CHANGE: ma_calculate_frame_count_after_src() has been renamed to ma_calculate_frame_count_after_resampling(). + - Add support for the following filters: + - Biquad (ma_biquad) + - First order low-pass (ma_lpf1) + - Second order low-pass (ma_lpf2) + - Low-pass with configurable order (ma_lpf) + - First order high-pass (ma_hpf1) + - Second order high-pass (ma_hpf2) + - High-pass with configurable order (ma_hpf) + - Second order band-pass (ma_bpf2) + - Band-pass with configurable order (ma_bpf) + - Second order peaking EQ (ma_peak2) + - Second order notching (ma_notch2) + - Second order low shelf (ma_loshelf2) + - Second order high shelf (ma_hishelf2) + - Add waveform generation API (ma_waveform) with support for the following: + - Sine + - Square + - Triangle + - Sawtooth + - Add noise generation API (ma_noise) with support for the following: + - White + - Pink + - Brownian + - Add encoding API (ma_encoder). This only supports outputting to WAV files via dr_wav. + - Add ma_result_description() which is used to retrieve a human readable description of a given result code. + - Result codes have been changed. Binding maintainers will need to update their result code constants. + - More meaningful result codes are now returned when a file fails to open. + - Internal functions have all been made static where possible. + - Fix potential crash when ma_device object's are not aligned to MA_SIMD_ALIGNMENT. + - Fix a bug in ma_decoder_get_length_in_pcm_frames() where it was returning the length based on the internal sample rate rather than the output sample rate. + - Fix bugs in some backends where the device is not drained properly in ma_device_stop(). + - Improvements to documentation. + +v0.9.10 - 2020-01-15 + - Fix compilation errors due to #if/#endif mismatches. 
+ - WASAPI: Fix a bug where automatic stream routing is being performed for devices that are initialized with an explicit device ID. + - iOS: Fix a crash on device uninitialization. + +v0.9.9 - 2020-01-09 + - Fix compilation errors with MinGW. + - Fix compilation errors when compiling on Apple platforms. + - WASAPI: Add support for disabling hardware offloading. + - WASAPI: Add support for disabling automatic stream routing. + - Core Audio: Fix bugs in the case where the internal device uses deinterleaved buffers. + - Core Audio: Add support for controlling the session category (AVAudioSessionCategory) and options (AVAudioSessionCategoryOptions). + - JACK: Fix bug where incorrect ports are connected. + +v0.9.8 - 2019-10-07 + - WASAPI: Fix a potential deadlock when starting a full-duplex device. + - WASAPI: Enable automatic resampling by default. Disable with config.wasapi.noAutoConvertSRC. + - Core Audio: Fix bugs with automatic stream routing. + - Add support for controlling whether or not the content of the output buffer passed in to the data callback is pre-initialized + to zero. By default it will be initialized to zero, but this can be changed by setting noPreZeroedOutputBuffer in the device + config. Setting noPreZeroedOutputBuffer to true will leave the contents undefined. + - Add support for clipping samples after the data callback has returned. This only applies when the playback sample format is + configured as ma_format_f32. If you are doing clipping yourself, you can disable this overhead by setting noClip to true in + the device config. + - Add support for master volume control for devices. + - Use ma_device_set_master_volume() to set the volume to a factor between 0 and 1, where 0 is silence and 1 is full volume. + - Use ma_device_set_master_gain_db() to set the volume in decibels where 0 is full volume and < 0 reduces the volume. + - Fix warnings emitted by GCC when `__inline__` is undefined or defined as nothing. + +v0.9.7 - 2019-08-28 + - Add support for loopback mode (WASAPI only). + - To use this, set the device type to ma_device_type_loopback, and then fill out the capture section of the device config. + - If you need to capture from a specific output device, set the capture device ID to that of a playback device. + - Fix a crash when an error is posted in ma_device_init(). + - Fix a compilation error when compiling for ARM architectures. + - Fix a bug with the audio(4) backend where the device is incorrectly being opened in non-blocking mode. + - Fix memory leaks in the Core Audio backend. + - Minor refactoring to the WinMM, ALSA, PulseAudio, OSS, audio(4), sndio and null backends. + +v0.9.6 - 2019-08-04 + - Add support for loading decoders using a wchar_t string for file paths. + - Don't trigger an assert when ma_device_start() is called on a device that is already started. This will now log a warning + and return MA_INVALID_OPERATION. The same applies for ma_device_stop(). + - Try fixing an issue with PulseAudio taking a long time to start playback. + - Fix a bug in ma_convert_frames() and ma_convert_frames_ex(). + - Fix memory leaks in the WASAPI backend. + - Fix a compilation error with Visual Studio 2010. + +v0.9.5 - 2019-05-21 + - Add logging to ma_dlopen() and ma_dlsym(). + - Add ma_decoder_get_length_in_pcm_frames(). + - Fix a bug with capture on the OpenSL|ES backend. + - Fix a bug with the ALSA backend where a device would not restart after being stopped. + +v0.9.4 - 2019-05-06 + - Add support for C89. 
With this change, miniaudio should compile clean with GCC/Clang with "-std=c89 -ansi -pedantic" and
+ Microsoft compilers back to VC6. Other compilers should also work, but have not been tested.
+
+v0.9.3 - 2019-04-19
+ - Fix compiler errors on GCC when compiling with -std=c99.
+
+v0.9.2 - 2019-04-08
+ - Add support for per-context user data.
+ - Fix a potential bug with context configs.
+ - Fix some bugs with PulseAudio.
+
+v0.9.1 - 2019-03-17
+ - Fix a bug where the output buffer is not getting zeroed out before calling the data callback. This happens when
+ the device is running in passthrough mode (not doing any data conversion).
+ - Fix an issue where the data callback is getting called too frequently on the WASAPI and DirectSound backends.
+ - Fix error on the UWP build.
+ - Fix a build error on Apple platforms.
+
+v0.9 - 2019-03-06
+ - Rebranded to "miniaudio". All namespaces have been renamed from "mal" to "ma".
+ - API CHANGE: ma_device_init() and ma_device_config_init() have changed significantly:
+ - The device type, device ID and user data pointer have moved from ma_device_init() to the config.
+ - All variations of ma_device_config_init_*() have been removed in favor of just ma_device_config_init().
+ - ma_device_config_init() now takes only one parameter which is the device type. All other properties need
+ to be set on the returned object directly.
+ - The onDataCallback and onStopCallback members of ma_device_config have been renamed to "dataCallback"
+ and "stopCallback".
+ - The ID of the physical device is now split into two: one for the playback device and the other for the
+ capture device. This is required for full-duplex. These are named "pPlaybackDeviceID" and "pCaptureDeviceID".
+ - API CHANGE: The data callback has changed. It now uses a unified callback for all device types rather than
+ being separate for each. It now takes two pointers - one containing input data and the other output data. This
+ design is required for full-duplex. The return value is now void instead of the number of frames written. The
+ new callback looks like the following:
+ void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount);
+ - API CHANGE: Remove the log callback parameter from ma_context_config_init(). With this change,
+ ma_context_config_init() now takes no parameters and the log callback is set via the structure directly. The
+ new policy for config initialization is that only mandatory settings are passed in to *_config_init(). The
+ "onLog" member of ma_context_config has been renamed to "logCallback".
+ - API CHANGE: Remove ma_device_get_buffer_size_in_bytes().
+ - API CHANGE: Rename decoding APIs to "pcm_frames" convention.
+ - mal_decoder_read() -> ma_decoder_read_pcm_frames()
+ - mal_decoder_seek_to_frame() -> ma_decoder_seek_to_pcm_frame()
+ - API CHANGE: Rename sine wave reading APIs to f32 convention.
+ - mal_sine_wave_read() -> ma_sine_wave_read_f32()
+ - mal_sine_wave_read_ex() -> ma_sine_wave_read_f32_ex()
+ - API CHANGE: Remove some deprecated APIs
+ - mal_device_set_recv_callback()
+ - mal_device_set_send_callback()
+ - mal_src_set_input_sample_rate()
+ - mal_src_set_output_sample_rate()
+ - API CHANGE: Add log level to the log callback. New signature:
+ - void on_log(ma_context* pContext, ma_device* pDevice, ma_uint32 logLevel, const char* message)
+ - API CHANGE: Changes to result codes. Constants have changed and unused codes have been removed.
If you're
+ a binding maintainer you will need to update your result code constants.
+ - API CHANGE: Change the order of the ma_backend enums to priority order. If you are a binding maintainer, you
+ will need to update.
+ - API CHANGE: Rename mal_dsp to ma_pcm_converter. All functions have been renamed from mal_dsp_*() to
+ ma_pcm_converter_*(). All structures have been renamed from mal_dsp* to ma_pcm_converter*.
+ - API CHANGE: Reorder parameters of ma_decoder_read_pcm_frames() to be consistent with the new parameter order scheme.
+ - The resampling algorithm has been changed from sinc to linear. The rationale for this is that the sinc implementation
+ is too inefficient right now. This will hopefully be improved at a later date.
+ - Device initialization will no longer fall back to shared mode if exclusive mode is requested but is unusable.
+ With this change, if you request a device in exclusive mode, but exclusive mode is not supported, it will not
+ automatically fall back to shared mode. The client will need to reinitialize the device in shared mode if that's
+ what they want.
+ - Add ring buffer API. This is ma_rb and ma_pcm_rb, the difference being that ma_rb operates on bytes and
+ ma_pcm_rb operates on PCM frames.
+ - Add Web Audio backend. This is used when compiling with Emscripten. The SDL backend, which was previously
+ used for web support, will be removed in a future version.
+ - Add AAudio backend (Android Audio). This is the new priority backend for Android. Support for AAudio starts
+ with Android 8. OpenSL|ES is used as a fallback for older versions of Android.
+ - Remove OpenAL and SDL backends.
+ - Fix a possible deadlock when rapidly stopping the device after it has started.
+ - Update documentation.
+ - Change licensing to a choice of public domain _or_ MIT-0 (No Attribution).
+
+v0.8.14 - 2018-12-16
+ - Core Audio: Fix a bug where the device state is not set correctly after stopping.
+ - Add support for custom weights to the channel router.
+ - Update decoders to use updated APIs in dr_flac, dr_mp3 and dr_wav.
+
+v0.8.13 - 2018-12-04
+ - Core Audio: Fix a bug with channel mapping.
+ - Fix a bug with channel routing where the back/left and back/right channels have the wrong weight.
+
+v0.8.12 - 2018-11-27
+ - Drop support for SDL 1.2. The Emscripten build now requires "-s USE_SDL=2".
+ - Fix a linking error with ALSA.
+ - Fix a bug on iOS where the device name is not set correctly.
+
+v0.8.11 - 2018-11-21
+ - iOS bug fixes.
+ - Minor tweaks to PulseAudio.
+
+v0.8.10 - 2018-10-21
+ - Core Audio: Fix a hang when uninitializing a device.
+ - Fix a bug where an incorrect value is returned from mal_device_stop().
+
+v0.8.9 - 2018-09-28
+ - Fix a bug with the SDL backend where device initialization fails.
+
+v0.8.8 - 2018-09-14
+ - Fix Linux build with the ALSA backend.
+ - Minor documentation fix.
+
+v0.8.7 - 2018-09-12
+ - Fix a bug with UWP detection.
+
+v0.8.6 - 2018-08-26
+ - Automatically switch the internal device when the default device is unplugged. Note that this is still in the
+ early stages and not all backends handle this the same way. As of this version, this will not detect a default
+ device switch when changed from the operating system's audio preferences (unless the backend itself handles
+ this automatically). This is not supported in exclusive mode.
+ - WASAPI and Core Audio: Add support for stream routing.
When the application is using a default device and the
+ user switches the default device via the operating system's audio preferences, miniaudio will automatically switch
+ the internal device to the new default. This is not supported in exclusive mode.
+ - WASAPI: Add support for hardware offloading via IAudioClient2. Only supported on Windows 8 and newer.
+ - WASAPI: Add support for low-latency shared mode via IAudioClient3. Only supported on Windows 10 and newer.
+ - Add support for compiling the UWP build as C.
+ - mal_device_set_recv_callback() and mal_device_set_send_callback() have been deprecated. You must now set this
+ when the device is initialized with mal_device_init*(). These will be removed in version 0.9.0.
+
+v0.8.5 - 2018-08-12
+ - Add support for specifying the size of a device's buffer in milliseconds. You can still set the buffer size in
+ frames if that suits you. When bufferSizeInFrames is 0, bufferSizeInMilliseconds will be used. If both are non-0
+ then bufferSizeInFrames will take priority. If both are set to 0 the default buffer size is used.
+ - Add support for the audio(4) backend to OpenBSD.
+ - Fix a bug with the ALSA backend that was causing problems on Raspberry Pi. This significantly improves the
+ Raspberry Pi experience.
+ - Fix a bug where an incorrect number of samples is returned from sinc resampling.
+ - Add support for setting the value to be passed to internal calls to CoInitializeEx().
+ - WASAPI and WinMM: Stop the device when it is unplugged.
+
+v0.8.4 - 2018-08-06
+ - Add sndio backend for OpenBSD.
+ - Add audio(4) backend for NetBSD.
+ - Drop support for the OSS backend on everything except FreeBSD and DragonFly BSD.
+ - Formats are now native-endian (were previously little-endian).
+ - Mark some APIs as deprecated:
+ - mal_src_set_input_sample_rate() and mal_src_set_output_sample_rate() are replaced with mal_src_set_sample_rate().
+ - mal_dsp_set_input_sample_rate() and mal_dsp_set_output_sample_rate() are replaced with mal_dsp_set_sample_rate().
+ - Fix a bug when capturing using the WASAPI backend.
+ - Fix some aliasing issues with resampling, specifically when increasing the sample rate.
+ - Fix warnings.
+
+v0.8.3 - 2018-07-15
+ - Fix a crackling bug when resampling in capture mode.
+ - Core Audio: Fix a bug where capture does not work.
+ - ALSA: Fix a bug where the worker thread can get stuck in an infinite loop.
+ - PulseAudio: Fix a bug where mal_context_init() succeeds when PulseAudio is unusable.
+ - JACK: Fix a bug where mal_context_init() succeeds when JACK is unusable.
+
+v0.8.2 - 2018-07-07
+ - Fix a bug on macOS with Core Audio where the internal callback is not called.
+
+v0.8.1 - 2018-07-06
+ - Fix compilation errors and warnings.
+
+v0.8 - 2018-07-05
+ - Changed MAL_IMPLEMENTATION to MINI_AL_IMPLEMENTATION for consistency with other libraries. The old
+ way is still supported for now, but you should update as it may be removed in the future.
+ - API CHANGE: Replace device enumeration APIs. mal_enumerate_devices() has been replaced with
+ mal_context_get_devices(). An additional low-level device enumeration API has been introduced called
+ mal_context_enumerate_devices() which uses a callback to report devices.
+ - API CHANGE: Rename mal_get_sample_size_in_bytes() to mal_get_bytes_per_sample() and add
+ mal_get_bytes_per_frame().
+ - API CHANGE: Replace mal_device_config.preferExclusiveMode with mal_device_config.shareMode.
+ - This new config can be set to mal_share_mode_shared (default) or mal_share_mode_exclusive.
+ - API CHANGE: Remove excludeNullDevice from mal_context_config.alsa.
+ - API CHANGE: Rename MAL_MAX_SAMPLE_SIZE_IN_BYTES to MAL_MAX_PCM_SAMPLE_SIZE_IN_BYTES.
+ - API CHANGE: Change the default channel mapping to the standard Microsoft mapping.
+ - API CHANGE: Remove backend-specific result codes.
+ - API CHANGE: Changes to the format conversion APIs (mal_pcm_f32_to_s16(), etc.)
+ - Add support for Core Audio (Apple).
+ - Add support for PulseAudio.
+ - This is the highest priority backend on Linux (higher priority than ALSA) since it is commonly
+ installed by default on many of the popular distros and offers more seamless integration on
+ platforms where PulseAudio is used. In addition, if PulseAudio is installed and running (which
+ is extremely common), it's better to just use PulseAudio directly rather than going through the
+ "pulse" ALSA plugin (which is what the "default" ALSA device is likely set to).
+ - Add support for JACK.
+ - Remove dependency on asound.h for the ALSA backend. This means the ALSA development packages are no
+ longer required to build miniaudio.
+ - Remove dependency on dsound.h for the DirectSound backend. This fixes build issues with some
+ distributions of MinGW.
+ - Remove dependency on audioclient.h for the WASAPI backend. This fixes build issues with some
+ distributions of MinGW.
+ - Add support for dithering to format conversion.
+ - Add support for configuring the priority of the worker thread.
+ - Add a sine wave generator.
+ - Improve efficiency of sample rate conversion.
+ - Introduce the notion of standard channel maps. Use mal_get_standard_channel_map().
+ - Introduce the notion of default device configurations. A default config uses the same configuration
+ as the backend's internal device, and as such results in a pass-through data transmission pipeline.
+ - Add support for passing in NULL for the device config in mal_device_init(), which uses a default
+ config. This requires manually calling mal_device_set_send/recv_callback().
+ - Add support for decoding from raw PCM data (mal_decoder_init_raw(), etc.)
+ - Make mal_device_init_ex() more robust.
+ - Make some APIs more const-correct.
+ - Fix errors with SDL detection on Apple platforms.
+ - Fix errors with OpenAL detection.
+ - Fix some memory leaks.
+ - Fix a bug with opening decoders from memory.
+ - Early work on SSE2, AVX2 and NEON optimizations.
+ - Miscellaneous bug fixes.
+ - Documentation updates.
+
+v0.7 - 2018-02-25
+ - API CHANGE: Change mal_src_read_frames() and mal_dsp_read_frames() to use 64-bit sample counts.
+ - Add decoder APIs for loading WAV, FLAC, Vorbis and MP3 files.
+ - Allow opening of devices without a context.
+ - In this case the context is created and managed internally by the device.
+ - Change the default channel mapping to the same as that used by FLAC.
+ - Fix build errors with macOS.
+
+v0.6c - 2018-02-12
+ - Fix build errors with BSD/OSS.
+
+v0.6b - 2018-02-03
+ - Fix some warnings when compiling with Visual C++.
+
+v0.6a - 2018-01-26
+ - Fix errors with channel mixing when increasing the channel count.
+ - Improvements to the build system for the OpenAL backend.
+ - Documentation fixes.
+
+v0.6 - 2017-12-08
+ - API CHANGE: Expose and improve mutex APIs. If you were using the mutex APIs before this version you'll
+ need to update.
+ - API CHANGE: SRC and DSP callbacks now take a pointer to a mal_src and mal_dsp object respectively.
+ - API CHANGE: Improvements to event and thread APIs. These changes make these APIs more consistent.
+ - Add support for SDL and Emscripten.
+ - Simplify the build system further for when development packages for various backends are not installed.
+ With this change, when the compiler supports __has_include, backends without the relevant development
+ packages installed will be ignored. This fixes the build for old versions of MinGW.
+ - Fixes to the Android build.
+ - Add mal_convert_frames(). This is a high-level helper API for performing a one-time, bulk conversion of
+ audio data to a different format.
+ - Improvements to f32 -> u8/s16/s24/s32 conversion routines.
+ - Fix a bug where the wrong value is returned from mal_device_start() for the OpenSL backend.
+ - Fixes and improvements for Raspberry Pi.
+ - Warning fixes.
+
+v0.5 - 2017-11-11
+ - API CHANGE: The mal_context_init() function now takes a pointer to a mal_context_config object for
+ configuring the context. This works in the same kind of way as the device config. The rationale for this
+ change is to give applications better control over context-level properties, add support for backend-
+ specific configurations, and support extensibility without breaking the API.
+ - API CHANGE: The alsa.preferPlugHW device config variable has been removed since it's not really useful for
+ anything anymore.
+ - ALSA: By default, device enumeration will now only enumerate over unique card/device pairs. Applications
+ can enable verbose device enumeration by setting the alsa.useVerboseDeviceEnumeration context config
+ variable.
+ - ALSA: When opening a device in shared mode (the default), the dmix/dsnoop plugin will be prioritized. If
+ this fails it will fall back to the hw plugin. With this change the preferExclusiveMode config is now
+ honored. Note that this does not happen when alsa.useVerboseDeviceEnumeration is set to true (see above)
+ which is by design.
+ - ALSA: Add support for excluding the "null" device using the alsa.excludeNullDevice context config variable.
+ - ALSA: Fix a bug with channel mapping which causes an assertion to fail.
+ - Fix errors with enumeration when pInfo is set to NULL.
+ - OSS: Fix a bug when starting a device when the client sends 0 samples for the initial buffer fill.
+
+v0.4 - 2017-11-05
+ - API CHANGE: The log callback is now per-context rather than per-device and is thus now passed to
+ mal_context_init(). The rationale for this change is that it allows applications to capture diagnostic
+ messages at the context level. Previously this was only available at the device level.
+ - API CHANGE: The device config passed to mal_device_init() is now const.
+ - Added support for OSS which enables support on BSD platforms.
+ - Added support for WinMM (waveOut/waveIn).
+ - Added support for UWP (Universal Windows Platform) applications. Currently C++ only.
+ - Added support for exclusive mode for selected backends. Currently supported on WASAPI.
+ - POSIX builds no longer require explicit linking to libpthread (-lpthread).
+ - ALSA: Explicit linking to libasound (-lasound) is no longer required.
+ - ALSA: Latency improvements.
+ - ALSA: Use MMAP mode where available. This can be disabled with the alsa.noMMap config.
+ - ALSA: Use "hw" devices instead of "plughw" devices by default. This can be disabled with the
+ alsa.preferPlugHW config.
+ - WASAPI is now the highest priority backend on Windows platforms.
+ - Fixed an error with sample rate conversion which was causing crackling when capturing.
+ - Improved error handling.
+ - Improved compiler support.
+ - Miscellaneous bug fixes.
+ +v0.3 - 2017-06-19 + - API CHANGE: Introduced the notion of a context. The context is the highest level object and is required for + enumerating and creating devices. Now, applications must first create a context, and then use that to + enumerate and create devices. The reason for this change is to ensure device enumeration and creation is + tied to the same backend. In addition, some backends are better suited to this design. + - API CHANGE: Removed the rewinding APIs because they're too inconsistent across the different backends, hard + to test and maintain, and just generally unreliable. + - Added helper APIs for initializing mal_device_config objects. + - Null Backend: Fixed a crash when recording. + - Fixed build for UWP. + - Added support for f32 formats to the OpenSL|ES backend. + - Added initial implementation of the WASAPI backend. + - Added initial implementation of the OpenAL backend. + - Added support for low quality linear sample rate conversion. + - Added early support for basic channel mapping. + +v0.2 - 2016-10-28 + - API CHANGE: Add user data pointer as the last parameter to mal_device_init(). The rationale for this + change is to ensure the logging callback has access to the user data during initialization. + - API CHANGE: Have device configuration properties be passed to mal_device_init() via a structure. Rationale: + 1) The number of parameters is just getting too much. + 2) It makes it a bit easier to add new configuration properties in the future. In particular, there's a + chance there will be support added for backend-specific properties. + - Dropped support for f64, A-law and Mu-law formats since they just aren't common enough to justify the + added maintenance cost. + - DirectSound: Increased the default buffer size for capture devices. + - Added initial implementation of the OpenSL|ES backend. + +v0.1 - 2016-10-21 + - Initial versioned release. +*/ + + +/* +This software is available as a choice of the following licenses. Choose +whichever you prefer. + +=============================================================================== +ALTERNATIVE 1 - Public Domain (www.unlicense.org) +=============================================================================== +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or distribute this +software, either in source code form or as a compiled binary, for any purpose, +commercial or non-commercial, and by any means. + +In jurisdictions that recognize copyright laws, the author or authors of this +software dedicate any and all copyright interest in the software to the public +domain. We make this dedication for the benefit of the public at large and to +the detriment of our heirs and successors. We intend this dedication to be an +overt act of relinquishment in perpetuity of all present and future rights to +this software under copyright law. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +For more information, please refer to + +=============================================================================== +ALTERNATIVE 2 - MIT No Attribution +=============================================================================== +Copyright 2020 David Reid + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ diff --git a/fishladder.c b/fishladder.c new file mode 100644 index 0000000..a9ce03a --- /dev/null +++ b/fishladder.c @@ -0,0 +1,29 @@ +// Copyright (C) 2021 Harry Godden (hgn) - All Rights Reserved + +//#define VG_STEAM +#include "vg/vg.h" + +int main( int argc, char *argv[] ) +{ + vg_init( argc, argv, "FishLadder" ); +} + +void vg_start(void) +{ + +} + +void vg_update(void) +{ + +} + +void vg_render(void) +{ + +} + +void vg_free(void) +{ + +} diff --git a/gl/KHR/khrplatform.h b/gl/KHR/khrplatform.h new file mode 100644 index 0000000..dd22d92 --- /dev/null +++ b/gl/KHR/khrplatform.h @@ -0,0 +1,290 @@ +#ifndef __khrplatform_h_ +#define __khrplatform_h_ + +/* +** Copyright (c) 2008-2018 The Khronos Group Inc. +** +** Permission is hereby granted, free of charge, to any person obtaining a +** copy of this software and/or associated documentation files (the +** "Materials"), to deal in the Materials without restriction, including +** without limitation the rights to use, copy, modify, merge, publish, +** distribute, sublicense, and/or sell copies of the Materials, and to +** permit persons to whom the Materials are furnished to do so, subject to +** the following conditions: +** +** The above copyright notice and this permission notice shall be included +** in all copies or substantial portions of the Materials. +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. +*/ + +/* Khronos platform-specific types and definitions. + * + * The master copy of khrplatform.h is maintained in the Khronos EGL + * Registry repository at https://github.com/KhronosGroup/EGL-Registry + * The last semantic modification to khrplatform.h was at commit ID: + * 67a3e0864c2d75ea5287b9f3d2eb74a745936692 + * + * Adopters may modify this file to suit their platform. Adopters are + * encouraged to submit platform specific modifications to the Khronos + * group so that they can be included in future versions of this file. 
+ * Please submit changes by filing pull requests or issues on + * the EGL Registry repository linked above. + * + * + * See the Implementer's Guidelines for information about where this file + * should be located on your system and for more details of its use: + * http://www.khronos.org/registry/implementers_guide.pdf + * + * This file should be included as + * #include + * by Khronos client API header files that use its types and defines. + * + * The types in khrplatform.h should only be used to define API-specific types. + * + * Types defined in khrplatform.h: + * khronos_int8_t signed 8 bit + * khronos_uint8_t unsigned 8 bit + * khronos_int16_t signed 16 bit + * khronos_uint16_t unsigned 16 bit + * khronos_int32_t signed 32 bit + * khronos_uint32_t unsigned 32 bit + * khronos_int64_t signed 64 bit + * khronos_uint64_t unsigned 64 bit + * khronos_intptr_t signed same number of bits as a pointer + * khronos_uintptr_t unsigned same number of bits as a pointer + * khronos_ssize_t signed size + * khronos_usize_t unsigned size + * khronos_float_t signed 32 bit floating point + * khronos_time_ns_t unsigned 64 bit time in nanoseconds + * khronos_utime_nanoseconds_t unsigned time interval or absolute time in + * nanoseconds + * khronos_stime_nanoseconds_t signed time interval in nanoseconds + * khronos_boolean_enum_t enumerated boolean type. This should + * only be used as a base type when a client API's boolean type is + * an enum. Client APIs which use an integer or other type for + * booleans cannot use this as the base type for their boolean. + * + * Tokens defined in khrplatform.h: + * + * KHRONOS_FALSE, KHRONOS_TRUE Enumerated boolean false/true values. + * + * KHRONOS_SUPPORT_INT64 is 1 if 64 bit integers are supported; otherwise 0. + * KHRONOS_SUPPORT_FLOAT is 1 if floats are supported; otherwise 0. + * + * Calling convention macros defined in this file: + * KHRONOS_APICALL + * KHRONOS_APIENTRY + * KHRONOS_APIATTRIBUTES + * + * These may be used in function prototypes as: + * + * KHRONOS_APICALL void KHRONOS_APIENTRY funcname( + * int arg1, + * int arg2) KHRONOS_APIATTRIBUTES; + */ + +#if defined(__SCITECH_SNAP__) && !defined(KHRONOS_STATIC) +# define KHRONOS_STATIC 1 +#endif + +/*------------------------------------------------------------------------- + * Definition of KHRONOS_APICALL + *------------------------------------------------------------------------- + * This precedes the return type of the function in the function prototype. + */ +#if defined(KHRONOS_STATIC) + /* If the preprocessor constant KHRONOS_STATIC is defined, make the + * header compatible with static linking. */ +# define KHRONOS_APICALL +#elif defined(_WIN32) +# define KHRONOS_APICALL __declspec(dllimport) +#elif defined (__SYMBIAN32__) +# define KHRONOS_APICALL IMPORT_C +#elif defined(__ANDROID__) +# define KHRONOS_APICALL __attribute__((visibility("default"))) +#else +# define KHRONOS_APICALL +#endif + +/*------------------------------------------------------------------------- + * Definition of KHRONOS_APIENTRY + *------------------------------------------------------------------------- + * This follows the return type of the function and precedes the function + * name in the function prototype. 
+ */ +#if defined(_WIN32) && !defined(_WIN32_WCE) && !defined(__SCITECH_SNAP__) + /* Win32 but not WinCE */ +# define KHRONOS_APIENTRY __stdcall +#else +# define KHRONOS_APIENTRY +#endif + +/*------------------------------------------------------------------------- + * Definition of KHRONOS_APIATTRIBUTES + *------------------------------------------------------------------------- + * This follows the closing parenthesis of the function prototype arguments. + */ +#if defined (__ARMCC_2__) +#define KHRONOS_APIATTRIBUTES __softfp +#else +#define KHRONOS_APIATTRIBUTES +#endif + +/*------------------------------------------------------------------------- + * basic type definitions + *-----------------------------------------------------------------------*/ +#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || defined(__GNUC__) || defined(__SCO__) || defined(__USLC__) + + +/* + * Using + */ +#include +typedef int32_t khronos_int32_t; +typedef uint32_t khronos_uint32_t; +typedef int64_t khronos_int64_t; +typedef uint64_t khronos_uint64_t; +#define KHRONOS_SUPPORT_INT64 1 +#define KHRONOS_SUPPORT_FLOAT 1 + +#elif defined(__VMS ) || defined(__sgi) + +/* + * Using + */ +#include +typedef int32_t khronos_int32_t; +typedef uint32_t khronos_uint32_t; +typedef int64_t khronos_int64_t; +typedef uint64_t khronos_uint64_t; +#define KHRONOS_SUPPORT_INT64 1 +#define KHRONOS_SUPPORT_FLOAT 1 + +#elif defined(_WIN32) && !defined(__SCITECH_SNAP__) + +/* + * Win32 + */ +typedef __int32 khronos_int32_t; +typedef unsigned __int32 khronos_uint32_t; +typedef __int64 khronos_int64_t; +typedef unsigned __int64 khronos_uint64_t; +#define KHRONOS_SUPPORT_INT64 1 +#define KHRONOS_SUPPORT_FLOAT 1 + +#elif defined(__sun__) || defined(__digital__) + +/* + * Sun or Digital + */ +typedef int khronos_int32_t; +typedef unsigned int khronos_uint32_t; +#if defined(__arch64__) || defined(_LP64) +typedef long int khronos_int64_t; +typedef unsigned long int khronos_uint64_t; +#else +typedef long long int khronos_int64_t; +typedef unsigned long long int khronos_uint64_t; +#endif /* __arch64__ */ +#define KHRONOS_SUPPORT_INT64 1 +#define KHRONOS_SUPPORT_FLOAT 1 + +#elif 0 + +/* + * Hypothetical platform with no float or int64 support + */ +typedef int khronos_int32_t; +typedef unsigned int khronos_uint32_t; +#define KHRONOS_SUPPORT_INT64 0 +#define KHRONOS_SUPPORT_FLOAT 0 + +#else + +/* + * Generic fallback + */ +#include +typedef int32_t khronos_int32_t; +typedef uint32_t khronos_uint32_t; +typedef int64_t khronos_int64_t; +typedef uint64_t khronos_uint64_t; +#define KHRONOS_SUPPORT_INT64 1 +#define KHRONOS_SUPPORT_FLOAT 1 + +#endif + + +/* + * Types that are (so far) the same on all platforms + */ +typedef signed char khronos_int8_t; +typedef unsigned char khronos_uint8_t; +typedef signed short int khronos_int16_t; +typedef unsigned short int khronos_uint16_t; + +/* + * Types that differ between LLP64 and LP64 architectures - in LLP64, + * pointers are 64 bits, but 'long' is still 32 bits. Win64 appears + * to be the only LLP64 architecture in current use. 
+ */ +#ifdef _WIN64 +typedef signed long long int khronos_intptr_t; +typedef unsigned long long int khronos_uintptr_t; +typedef signed long long int khronos_ssize_t; +typedef unsigned long long int khronos_usize_t; +#else +typedef signed long int khronos_intptr_t; +typedef unsigned long int khronos_uintptr_t; +typedef signed long int khronos_ssize_t; +typedef unsigned long int khronos_usize_t; +#endif + +#if KHRONOS_SUPPORT_FLOAT +/* + * Float type + */ +typedef float khronos_float_t; +#endif + +#if KHRONOS_SUPPORT_INT64 +/* Time types + * + * These types can be used to represent a time interval in nanoseconds or + * an absolute Unadjusted System Time. Unadjusted System Time is the number + * of nanoseconds since some arbitrary system event (e.g. since the last + * time the system booted). The Unadjusted System Time is an unsigned + * 64 bit value that wraps back to 0 every 584 years. Time intervals + * may be either signed or unsigned. + */ +typedef khronos_uint64_t khronos_utime_nanoseconds_t; +typedef khronos_int64_t khronos_stime_nanoseconds_t; +#endif + +/* + * Dummy value used to pad enum types to 32 bits. + */ +#ifndef KHRONOS_MAX_ENUM +#define KHRONOS_MAX_ENUM 0x7FFFFFFF +#endif + +/* + * Enumerated boolean type + * + * Values other than zero should be considered to be true. Therefore + * comparisons should not be made against KHRONOS_TRUE. + */ +typedef enum { + KHRONOS_FALSE = 0, + KHRONOS_TRUE = 1, + KHRONOS_BOOLEAN_ENUM_FORCE_SIZE = KHRONOS_MAX_ENUM +} khronos_boolean_enum_t; + +#endif /* __khrplatform_h_ */ diff --git a/gl/glad.c b/gl/glad.c new file mode 100644 index 0000000..6495d19 --- /dev/null +++ b/gl/glad.c @@ -0,0 +1,3814 @@ +/* + + OpenGL loader generated by glad 0.1.34 on Sun Jul 18 08:24:23 2021. + + Language/Generator: C/C++ Debug + Specification: gl + APIs: gl=3.3 + Profile: core + Extensions: + + Loader: True + Local files: False + Omit khrplatform: False + Reproducible: False + + Commandline: + --profile="core" --api="gl=3.3" --generator="c-debug" --spec="gl" --extensions="" + Online: + https://glad.dav1d.de/#profile=core&language=c-debug&specification=gl&loader=on&api=gl%3D3.3 +*/ + +#include +#include +#include +#include "gl/glad/glad.h" + +void _pre_call_callback_default(const char *name, void *funcptr, int len_args, ...) { + (void) name; + (void) funcptr; + (void) len_args; +} +void _post_call_callback_default(const char *name, void *funcptr, int len_args, ...) 
{ + GLenum error_code; + + (void) funcptr; + (void) len_args; + + error_code = glad_glGetError(); + + if (error_code != GL_NO_ERROR) { + fprintf(stderr, "ERROR %d in %s\n", error_code, name); + } +} + +static GLADcallback _pre_call_callback = _pre_call_callback_default; +void glad_set_pre_callback(GLADcallback cb) { + _pre_call_callback = cb; +} + +static GLADcallback _post_call_callback = _post_call_callback_default; +void glad_set_post_callback(GLADcallback cb) { + _post_call_callback = cb; +} + +static void* get_proc(const char *namez); + +#if defined(_WIN32) || defined(__CYGWIN__) +#ifndef _WINDOWS_ +#undef APIENTRY +#endif +#include +static HMODULE libGL; + +typedef void* (APIENTRYP PFNWGLGETPROCADDRESSPROC_PRIVATE)(const char*); +static PFNWGLGETPROCADDRESSPROC_PRIVATE gladGetProcAddressPtr; + +#ifdef _MSC_VER +#ifdef __has_include + #if __has_include() + #define HAVE_WINAPIFAMILY 1 + #endif +#elif _MSC_VER >= 1700 && !_USING_V110_SDK71_ + #define HAVE_WINAPIFAMILY 1 +#endif +#endif + +#ifdef HAVE_WINAPIFAMILY + #include + #if !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) && WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) + #define IS_UWP 1 + #endif +#endif + +static +int open_gl(void) { +#ifndef IS_UWP + libGL = LoadLibraryW(L"opengl32.dll"); + if(libGL != NULL) { + void (* tmp)(void); + tmp = (void(*)(void)) GetProcAddress(libGL, "wglGetProcAddress"); + gladGetProcAddressPtr = (PFNWGLGETPROCADDRESSPROC_PRIVATE) tmp; + return gladGetProcAddressPtr != NULL; + } +#endif + + return 0; +} + +static +void close_gl(void) { + if(libGL != NULL) { + FreeLibrary((HMODULE) libGL); + libGL = NULL; + } +} +#else +#include +static void* libGL; + +#if !defined(__APPLE__) && !defined(__HAIKU__) +typedef void* (APIENTRYP PFNGLXGETPROCADDRESSPROC_PRIVATE)(const char*); +static PFNGLXGETPROCADDRESSPROC_PRIVATE gladGetProcAddressPtr; +#endif + +static +int open_gl(void) { +#ifdef __APPLE__ + static const char *NAMES[] = { + "../Frameworks/OpenGL.framework/OpenGL", + "/Library/Frameworks/OpenGL.framework/OpenGL", + "/System/Library/Frameworks/OpenGL.framework/OpenGL", + "/System/Library/Frameworks/OpenGL.framework/Versions/Current/OpenGL" + }; +#else + static const char *NAMES[] = {"libGL.so.1", "libGL.so"}; +#endif + + unsigned int index = 0; + for(index = 0; index < (sizeof(NAMES) / sizeof(NAMES[0])); index++) { + libGL = dlopen(NAMES[index], RTLD_NOW | RTLD_GLOBAL); + + if(libGL != NULL) { +#if defined(__APPLE__) || defined(__HAIKU__) + return 1; +#else + gladGetProcAddressPtr = (PFNGLXGETPROCADDRESSPROC_PRIVATE)dlsym(libGL, + "glXGetProcAddressARB"); + return gladGetProcAddressPtr != NULL; +#endif + } + } + + return 0; +} + +static +void close_gl(void) { + if(libGL != NULL) { + dlclose(libGL); + libGL = NULL; + } +} +#endif + +static +void* get_proc(const char *namez) { + void* result = NULL; + if(libGL == NULL) return NULL; + +#if !defined(__APPLE__) && !defined(__HAIKU__) + if(gladGetProcAddressPtr != NULL) { + result = gladGetProcAddressPtr(namez); + } +#endif + if(result == NULL) { +#if defined(_WIN32) || defined(__CYGWIN__) + result = (void*)GetProcAddress((HMODULE) libGL, namez); +#else + result = dlsym(libGL, namez); +#endif + } + + return result; +} + +int gladLoadGL(void) { + int status = 0; + + if(open_gl()) { + status = gladLoadGLLoader(&get_proc); + close_gl(); + } + + return status; +} + +struct gladGLversionStruct GLVersion = { 0, 0 }; + +#if defined(GL_ES_VERSION_3_0) || defined(GL_VERSION_3_0) +#define _GLAD_IS_SOME_NEW_VERSION 1 +#endif + +static int max_loaded_major; +static int 
max_loaded_minor; + +static const char *exts = NULL; +static int num_exts_i = 0; +static char **exts_i = NULL; + +static int get_exts(void) { +#ifdef _GLAD_IS_SOME_NEW_VERSION + if(max_loaded_major < 3) { +#endif + exts = (const char *)glGetString(GL_EXTENSIONS); +#ifdef _GLAD_IS_SOME_NEW_VERSION + } else { + unsigned int index; + + num_exts_i = 0; + glGetIntegerv(GL_NUM_EXTENSIONS, &num_exts_i); + if (num_exts_i > 0) { + exts_i = (char **)malloc((size_t)num_exts_i * (sizeof *exts_i)); + } + + if (exts_i == NULL) { + return 0; + } + + for(index = 0; index < (unsigned)num_exts_i; index++) { + const char *gl_str_tmp = (const char*)glGetStringi(GL_EXTENSIONS, index); + size_t len = strlen(gl_str_tmp); + + char *local_str = (char*)malloc((len+1) * sizeof(char)); + if(local_str != NULL) { + memcpy(local_str, gl_str_tmp, (len+1) * sizeof(char)); + } + exts_i[index] = local_str; + } + } +#endif + return 1; +} + +static void free_exts(void) { + if (exts_i != NULL) { + int index; + for(index = 0; index < num_exts_i; index++) { + free((char *)exts_i[index]); + } + free((void *)exts_i); + exts_i = NULL; + } +} + +static int has_ext(const char *ext) { +#ifdef _GLAD_IS_SOME_NEW_VERSION + if(max_loaded_major < 3) { +#endif + const char *extensions; + const char *loc; + const char *terminator; + extensions = exts; + if(extensions == NULL || ext == NULL) { + return 0; + } + + while(1) { + loc = strstr(extensions, ext); + if(loc == NULL) { + return 0; + } + + terminator = loc + strlen(ext); + if((loc == extensions || *(loc - 1) == ' ') && + (*terminator == ' ' || *terminator == '\0')) { + return 1; + } + extensions = terminator; + } +#ifdef _GLAD_IS_SOME_NEW_VERSION + } else { + int index; + if(exts_i == NULL) return 0; + for(index = 0; index < num_exts_i; index++) { + const char *e = exts_i[index]; + + if(exts_i[index] != NULL && strcmp(e, ext) == 0) { + return 1; + } + } + } +#endif + + return 0; +} +int GLAD_GL_VERSION_1_0 = 0; +int GLAD_GL_VERSION_1_1 = 0; +int GLAD_GL_VERSION_1_2 = 0; +int GLAD_GL_VERSION_1_3 = 0; +int GLAD_GL_VERSION_1_4 = 0; +int GLAD_GL_VERSION_1_5 = 0; +int GLAD_GL_VERSION_2_0 = 0; +int GLAD_GL_VERSION_2_1 = 0; +int GLAD_GL_VERSION_3_0 = 0; +int GLAD_GL_VERSION_3_1 = 0; +int GLAD_GL_VERSION_3_2 = 0; +int GLAD_GL_VERSION_3_3 = 0; +PFNGLACTIVETEXTUREPROC glad_glActiveTexture; +void APIENTRY glad_debug_impl_glActiveTexture(GLenum arg0) { + _pre_call_callback("glActiveTexture", (void*)glActiveTexture, 1, arg0); + glad_glActiveTexture(arg0); + _post_call_callback("glActiveTexture", (void*)glActiveTexture, 1, arg0); + +} +PFNGLACTIVETEXTUREPROC glad_debug_glActiveTexture = glad_debug_impl_glActiveTexture; +PFNGLATTACHSHADERPROC glad_glAttachShader; +void APIENTRY glad_debug_impl_glAttachShader(GLuint arg0, GLuint arg1) { + _pre_call_callback("glAttachShader", (void*)glAttachShader, 2, arg0, arg1); + glad_glAttachShader(arg0, arg1); + _post_call_callback("glAttachShader", (void*)glAttachShader, 2, arg0, arg1); + +} +PFNGLATTACHSHADERPROC glad_debug_glAttachShader = glad_debug_impl_glAttachShader; +PFNGLBEGINCONDITIONALRENDERPROC glad_glBeginConditionalRender; +void APIENTRY glad_debug_impl_glBeginConditionalRender(GLuint arg0, GLenum arg1) { + _pre_call_callback("glBeginConditionalRender", (void*)glBeginConditionalRender, 2, arg0, arg1); + glad_glBeginConditionalRender(arg0, arg1); + _post_call_callback("glBeginConditionalRender", (void*)glBeginConditionalRender, 2, arg0, arg1); + +} +PFNGLBEGINCONDITIONALRENDERPROC glad_debug_glBeginConditionalRender = 
glad_debug_impl_glBeginConditionalRender; +PFNGLBEGINQUERYPROC glad_glBeginQuery; +void APIENTRY glad_debug_impl_glBeginQuery(GLenum arg0, GLuint arg1) { + _pre_call_callback("glBeginQuery", (void*)glBeginQuery, 2, arg0, arg1); + glad_glBeginQuery(arg0, arg1); + _post_call_callback("glBeginQuery", (void*)glBeginQuery, 2, arg0, arg1); + +} +PFNGLBEGINQUERYPROC glad_debug_glBeginQuery = glad_debug_impl_glBeginQuery; +PFNGLBEGINTRANSFORMFEEDBACKPROC glad_glBeginTransformFeedback; +void APIENTRY glad_debug_impl_glBeginTransformFeedback(GLenum arg0) { + _pre_call_callback("glBeginTransformFeedback", (void*)glBeginTransformFeedback, 1, arg0); + glad_glBeginTransformFeedback(arg0); + _post_call_callback("glBeginTransformFeedback", (void*)glBeginTransformFeedback, 1, arg0); + +} +PFNGLBEGINTRANSFORMFEEDBACKPROC glad_debug_glBeginTransformFeedback = glad_debug_impl_glBeginTransformFeedback; +PFNGLBINDATTRIBLOCATIONPROC glad_glBindAttribLocation; +void APIENTRY glad_debug_impl_glBindAttribLocation(GLuint arg0, GLuint arg1, const GLchar * arg2) { + _pre_call_callback("glBindAttribLocation", (void*)glBindAttribLocation, 3, arg0, arg1, arg2); + glad_glBindAttribLocation(arg0, arg1, arg2); + _post_call_callback("glBindAttribLocation", (void*)glBindAttribLocation, 3, arg0, arg1, arg2); + +} +PFNGLBINDATTRIBLOCATIONPROC glad_debug_glBindAttribLocation = glad_debug_impl_glBindAttribLocation; +PFNGLBINDBUFFERPROC glad_glBindBuffer; +void APIENTRY glad_debug_impl_glBindBuffer(GLenum arg0, GLuint arg1) { + _pre_call_callback("glBindBuffer", (void*)glBindBuffer, 2, arg0, arg1); + glad_glBindBuffer(arg0, arg1); + _post_call_callback("glBindBuffer", (void*)glBindBuffer, 2, arg0, arg1); + +} +PFNGLBINDBUFFERPROC glad_debug_glBindBuffer = glad_debug_impl_glBindBuffer; +PFNGLBINDBUFFERBASEPROC glad_glBindBufferBase; +void APIENTRY glad_debug_impl_glBindBufferBase(GLenum arg0, GLuint arg1, GLuint arg2) { + _pre_call_callback("glBindBufferBase", (void*)glBindBufferBase, 3, arg0, arg1, arg2); + glad_glBindBufferBase(arg0, arg1, arg2); + _post_call_callback("glBindBufferBase", (void*)glBindBufferBase, 3, arg0, arg1, arg2); + +} +PFNGLBINDBUFFERBASEPROC glad_debug_glBindBufferBase = glad_debug_impl_glBindBufferBase; +PFNGLBINDBUFFERRANGEPROC glad_glBindBufferRange; +void APIENTRY glad_debug_impl_glBindBufferRange(GLenum arg0, GLuint arg1, GLuint arg2, GLintptr arg3, GLsizeiptr arg4) { + _pre_call_callback("glBindBufferRange", (void*)glBindBufferRange, 5, arg0, arg1, arg2, arg3, arg4); + glad_glBindBufferRange(arg0, arg1, arg2, arg3, arg4); + _post_call_callback("glBindBufferRange", (void*)glBindBufferRange, 5, arg0, arg1, arg2, arg3, arg4); + +} +PFNGLBINDBUFFERRANGEPROC glad_debug_glBindBufferRange = glad_debug_impl_glBindBufferRange; +PFNGLBINDFRAGDATALOCATIONPROC glad_glBindFragDataLocation; +void APIENTRY glad_debug_impl_glBindFragDataLocation(GLuint arg0, GLuint arg1, const GLchar * arg2) { + _pre_call_callback("glBindFragDataLocation", (void*)glBindFragDataLocation, 3, arg0, arg1, arg2); + glad_glBindFragDataLocation(arg0, arg1, arg2); + _post_call_callback("glBindFragDataLocation", (void*)glBindFragDataLocation, 3, arg0, arg1, arg2); + +} +PFNGLBINDFRAGDATALOCATIONPROC glad_debug_glBindFragDataLocation = glad_debug_impl_glBindFragDataLocation; +PFNGLBINDFRAGDATALOCATIONINDEXEDPROC glad_glBindFragDataLocationIndexed; +void APIENTRY glad_debug_impl_glBindFragDataLocationIndexed(GLuint arg0, GLuint arg1, GLuint arg2, const GLchar * arg3) { + _pre_call_callback("glBindFragDataLocationIndexed", 
(void*)glBindFragDataLocationIndexed, 4, arg0, arg1, arg2, arg3); + glad_glBindFragDataLocationIndexed(arg0, arg1, arg2, arg3); + _post_call_callback("glBindFragDataLocationIndexed", (void*)glBindFragDataLocationIndexed, 4, arg0, arg1, arg2, arg3); + +} +PFNGLBINDFRAGDATALOCATIONINDEXEDPROC glad_debug_glBindFragDataLocationIndexed = glad_debug_impl_glBindFragDataLocationIndexed; +PFNGLBINDFRAMEBUFFERPROC glad_glBindFramebuffer; +void APIENTRY glad_debug_impl_glBindFramebuffer(GLenum arg0, GLuint arg1) { + _pre_call_callback("glBindFramebuffer", (void*)glBindFramebuffer, 2, arg0, arg1); + glad_glBindFramebuffer(arg0, arg1); + _post_call_callback("glBindFramebuffer", (void*)glBindFramebuffer, 2, arg0, arg1); + +} +PFNGLBINDFRAMEBUFFERPROC glad_debug_glBindFramebuffer = glad_debug_impl_glBindFramebuffer; +PFNGLBINDRENDERBUFFERPROC glad_glBindRenderbuffer; +void APIENTRY glad_debug_impl_glBindRenderbuffer(GLenum arg0, GLuint arg1) { + _pre_call_callback("glBindRenderbuffer", (void*)glBindRenderbuffer, 2, arg0, arg1); + glad_glBindRenderbuffer(arg0, arg1); + _post_call_callback("glBindRenderbuffer", (void*)glBindRenderbuffer, 2, arg0, arg1); + +} +PFNGLBINDRENDERBUFFERPROC glad_debug_glBindRenderbuffer = glad_debug_impl_glBindRenderbuffer; +PFNGLBINDSAMPLERPROC glad_glBindSampler; +void APIENTRY glad_debug_impl_glBindSampler(GLuint arg0, GLuint arg1) { + _pre_call_callback("glBindSampler", (void*)glBindSampler, 2, arg0, arg1); + glad_glBindSampler(arg0, arg1); + _post_call_callback("glBindSampler", (void*)glBindSampler, 2, arg0, arg1); + +} +PFNGLBINDSAMPLERPROC glad_debug_glBindSampler = glad_debug_impl_glBindSampler; +PFNGLBINDTEXTUREPROC glad_glBindTexture; +void APIENTRY glad_debug_impl_glBindTexture(GLenum arg0, GLuint arg1) { + _pre_call_callback("glBindTexture", (void*)glBindTexture, 2, arg0, arg1); + glad_glBindTexture(arg0, arg1); + _post_call_callback("glBindTexture", (void*)glBindTexture, 2, arg0, arg1); + +} +PFNGLBINDTEXTUREPROC glad_debug_glBindTexture = glad_debug_impl_glBindTexture; +PFNGLBINDVERTEXARRAYPROC glad_glBindVertexArray; +void APIENTRY glad_debug_impl_glBindVertexArray(GLuint arg0) { + _pre_call_callback("glBindVertexArray", (void*)glBindVertexArray, 1, arg0); + glad_glBindVertexArray(arg0); + _post_call_callback("glBindVertexArray", (void*)glBindVertexArray, 1, arg0); + +} +PFNGLBINDVERTEXARRAYPROC glad_debug_glBindVertexArray = glad_debug_impl_glBindVertexArray; +PFNGLBLENDCOLORPROC glad_glBlendColor; +void APIENTRY glad_debug_impl_glBlendColor(GLfloat arg0, GLfloat arg1, GLfloat arg2, GLfloat arg3) { + _pre_call_callback("glBlendColor", (void*)glBlendColor, 4, arg0, arg1, arg2, arg3); + glad_glBlendColor(arg0, arg1, arg2, arg3); + _post_call_callback("glBlendColor", (void*)glBlendColor, 4, arg0, arg1, arg2, arg3); + +} +PFNGLBLENDCOLORPROC glad_debug_glBlendColor = glad_debug_impl_glBlendColor; +PFNGLBLENDEQUATIONPROC glad_glBlendEquation; +void APIENTRY glad_debug_impl_glBlendEquation(GLenum arg0) { + _pre_call_callback("glBlendEquation", (void*)glBlendEquation, 1, arg0); + glad_glBlendEquation(arg0); + _post_call_callback("glBlendEquation", (void*)glBlendEquation, 1, arg0); + +} +PFNGLBLENDEQUATIONPROC glad_debug_glBlendEquation = glad_debug_impl_glBlendEquation; +PFNGLBLENDEQUATIONSEPARATEPROC glad_glBlendEquationSeparate; +void APIENTRY glad_debug_impl_glBlendEquationSeparate(GLenum arg0, GLenum arg1) { + _pre_call_callback("glBlendEquationSeparate", (void*)glBlendEquationSeparate, 2, arg0, arg1); + glad_glBlendEquationSeparate(arg0, arg1); + 
_post_call_callback("glBlendEquationSeparate", (void*)glBlendEquationSeparate, 2, arg0, arg1); + +} +PFNGLBLENDEQUATIONSEPARATEPROC glad_debug_glBlendEquationSeparate = glad_debug_impl_glBlendEquationSeparate; +PFNGLBLENDFUNCPROC glad_glBlendFunc; +void APIENTRY glad_debug_impl_glBlendFunc(GLenum arg0, GLenum arg1) { + _pre_call_callback("glBlendFunc", (void*)glBlendFunc, 2, arg0, arg1); + glad_glBlendFunc(arg0, arg1); + _post_call_callback("glBlendFunc", (void*)glBlendFunc, 2, arg0, arg1); + +} +PFNGLBLENDFUNCPROC glad_debug_glBlendFunc = glad_debug_impl_glBlendFunc; +PFNGLBLENDFUNCSEPARATEPROC glad_glBlendFuncSeparate; +void APIENTRY glad_debug_impl_glBlendFuncSeparate(GLenum arg0, GLenum arg1, GLenum arg2, GLenum arg3) { + _pre_call_callback("glBlendFuncSeparate", (void*)glBlendFuncSeparate, 4, arg0, arg1, arg2, arg3); + glad_glBlendFuncSeparate(arg0, arg1, arg2, arg3); + _post_call_callback("glBlendFuncSeparate", (void*)glBlendFuncSeparate, 4, arg0, arg1, arg2, arg3); + +} +PFNGLBLENDFUNCSEPARATEPROC glad_debug_glBlendFuncSeparate = glad_debug_impl_glBlendFuncSeparate; +PFNGLBLITFRAMEBUFFERPROC glad_glBlitFramebuffer; +void APIENTRY glad_debug_impl_glBlitFramebuffer(GLint arg0, GLint arg1, GLint arg2, GLint arg3, GLint arg4, GLint arg5, GLint arg6, GLint arg7, GLbitfield arg8, GLenum arg9) { + _pre_call_callback("glBlitFramebuffer", (void*)glBlitFramebuffer, 10, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9); + glad_glBlitFramebuffer(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9); + _post_call_callback("glBlitFramebuffer", (void*)glBlitFramebuffer, 10, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9); + +} +PFNGLBLITFRAMEBUFFERPROC glad_debug_glBlitFramebuffer = glad_debug_impl_glBlitFramebuffer; +PFNGLBUFFERDATAPROC glad_glBufferData; +void APIENTRY glad_debug_impl_glBufferData(GLenum arg0, GLsizeiptr arg1, const void * arg2, GLenum arg3) { + _pre_call_callback("glBufferData", (void*)glBufferData, 4, arg0, arg1, arg2, arg3); + glad_glBufferData(arg0, arg1, arg2, arg3); + _post_call_callback("glBufferData", (void*)glBufferData, 4, arg0, arg1, arg2, arg3); + +} +PFNGLBUFFERDATAPROC glad_debug_glBufferData = glad_debug_impl_glBufferData; +PFNGLBUFFERSUBDATAPROC glad_glBufferSubData; +void APIENTRY glad_debug_impl_glBufferSubData(GLenum arg0, GLintptr arg1, GLsizeiptr arg2, const void * arg3) { + _pre_call_callback("glBufferSubData", (void*)glBufferSubData, 4, arg0, arg1, arg2, arg3); + glad_glBufferSubData(arg0, arg1, arg2, arg3); + _post_call_callback("glBufferSubData", (void*)glBufferSubData, 4, arg0, arg1, arg2, arg3); + +} +PFNGLBUFFERSUBDATAPROC glad_debug_glBufferSubData = glad_debug_impl_glBufferSubData; +PFNGLCHECKFRAMEBUFFERSTATUSPROC glad_glCheckFramebufferStatus; +GLenum APIENTRY glad_debug_impl_glCheckFramebufferStatus(GLenum arg0) { + GLenum ret; + _pre_call_callback("glCheckFramebufferStatus", (void*)glCheckFramebufferStatus, 1, arg0); + ret = glad_glCheckFramebufferStatus(arg0); + _post_call_callback("glCheckFramebufferStatus", (void*)glCheckFramebufferStatus, 1, arg0); + return ret; +} +PFNGLCHECKFRAMEBUFFERSTATUSPROC glad_debug_glCheckFramebufferStatus = glad_debug_impl_glCheckFramebufferStatus; +PFNGLCLAMPCOLORPROC glad_glClampColor; +void APIENTRY glad_debug_impl_glClampColor(GLenum arg0, GLenum arg1) { + _pre_call_callback("glClampColor", (void*)glClampColor, 2, arg0, arg1); + glad_glClampColor(arg0, arg1); + _post_call_callback("glClampColor", (void*)glClampColor, 2, arg0, arg1); + +} +PFNGLCLAMPCOLORPROC glad_debug_glClampColor = 
glad_debug_impl_glClampColor; +PFNGLCLEARPROC glad_glClear; +void APIENTRY glad_debug_impl_glClear(GLbitfield arg0) { + _pre_call_callback("glClear", (void*)glClear, 1, arg0); + glad_glClear(arg0); + _post_call_callback("glClear", (void*)glClear, 1, arg0); + +} +PFNGLCLEARPROC glad_debug_glClear = glad_debug_impl_glClear; +PFNGLCLEARBUFFERFIPROC glad_glClearBufferfi; +void APIENTRY glad_debug_impl_glClearBufferfi(GLenum arg0, GLint arg1, GLfloat arg2, GLint arg3) { + _pre_call_callback("glClearBufferfi", (void*)glClearBufferfi, 4, arg0, arg1, arg2, arg3); + glad_glClearBufferfi(arg0, arg1, arg2, arg3); + _post_call_callback("glClearBufferfi", (void*)glClearBufferfi, 4, arg0, arg1, arg2, arg3); + +} +PFNGLCLEARBUFFERFIPROC glad_debug_glClearBufferfi = glad_debug_impl_glClearBufferfi; +PFNGLCLEARBUFFERFVPROC glad_glClearBufferfv; +void APIENTRY glad_debug_impl_glClearBufferfv(GLenum arg0, GLint arg1, const GLfloat * arg2) { + _pre_call_callback("glClearBufferfv", (void*)glClearBufferfv, 3, arg0, arg1, arg2); + glad_glClearBufferfv(arg0, arg1, arg2); + _post_call_callback("glClearBufferfv", (void*)glClearBufferfv, 3, arg0, arg1, arg2); + +} +PFNGLCLEARBUFFERFVPROC glad_debug_glClearBufferfv = glad_debug_impl_glClearBufferfv; +PFNGLCLEARBUFFERIVPROC glad_glClearBufferiv; +void APIENTRY glad_debug_impl_glClearBufferiv(GLenum arg0, GLint arg1, const GLint * arg2) { + _pre_call_callback("glClearBufferiv", (void*)glClearBufferiv, 3, arg0, arg1, arg2); + glad_glClearBufferiv(arg0, arg1, arg2); + _post_call_callback("glClearBufferiv", (void*)glClearBufferiv, 3, arg0, arg1, arg2); + +} +PFNGLCLEARBUFFERIVPROC glad_debug_glClearBufferiv = glad_debug_impl_glClearBufferiv; +PFNGLCLEARBUFFERUIVPROC glad_glClearBufferuiv; +void APIENTRY glad_debug_impl_glClearBufferuiv(GLenum arg0, GLint arg1, const GLuint * arg2) { + _pre_call_callback("glClearBufferuiv", (void*)glClearBufferuiv, 3, arg0, arg1, arg2); + glad_glClearBufferuiv(arg0, arg1, arg2); + _post_call_callback("glClearBufferuiv", (void*)glClearBufferuiv, 3, arg0, arg1, arg2); + +} +PFNGLCLEARBUFFERUIVPROC glad_debug_glClearBufferuiv = glad_debug_impl_glClearBufferuiv; +PFNGLCLEARCOLORPROC glad_glClearColor; +void APIENTRY glad_debug_impl_glClearColor(GLfloat arg0, GLfloat arg1, GLfloat arg2, GLfloat arg3) { + _pre_call_callback("glClearColor", (void*)glClearColor, 4, arg0, arg1, arg2, arg3); + glad_glClearColor(arg0, arg1, arg2, arg3); + _post_call_callback("glClearColor", (void*)glClearColor, 4, arg0, arg1, arg2, arg3); + +} +PFNGLCLEARCOLORPROC glad_debug_glClearColor = glad_debug_impl_glClearColor; +PFNGLCLEARDEPTHPROC glad_glClearDepth; +void APIENTRY glad_debug_impl_glClearDepth(GLdouble arg0) { + _pre_call_callback("glClearDepth", (void*)glClearDepth, 1, arg0); + glad_glClearDepth(arg0); + _post_call_callback("glClearDepth", (void*)glClearDepth, 1, arg0); + +} +PFNGLCLEARDEPTHPROC glad_debug_glClearDepth = glad_debug_impl_glClearDepth; +PFNGLCLEARSTENCILPROC glad_glClearStencil; +void APIENTRY glad_debug_impl_glClearStencil(GLint arg0) { + _pre_call_callback("glClearStencil", (void*)glClearStencil, 1, arg0); + glad_glClearStencil(arg0); + _post_call_callback("glClearStencil", (void*)glClearStencil, 1, arg0); + +} +PFNGLCLEARSTENCILPROC glad_debug_glClearStencil = glad_debug_impl_glClearStencil; +PFNGLCLIENTWAITSYNCPROC glad_glClientWaitSync; +GLenum APIENTRY glad_debug_impl_glClientWaitSync(GLsync arg0, GLbitfield arg1, GLuint64 arg2) { + GLenum ret; + _pre_call_callback("glClientWaitSync", (void*)glClientWaitSync, 3, arg0, arg1, arg2); + ret 
= glad_glClientWaitSync(arg0, arg1, arg2); + _post_call_callback("glClientWaitSync", (void*)glClientWaitSync, 3, arg0, arg1, arg2); + return ret; +} +PFNGLCLIENTWAITSYNCPROC glad_debug_glClientWaitSync = glad_debug_impl_glClientWaitSync; +PFNGLCOLORMASKPROC glad_glColorMask; +void APIENTRY glad_debug_impl_glColorMask(GLboolean arg0, GLboolean arg1, GLboolean arg2, GLboolean arg3) { + _pre_call_callback("glColorMask", (void*)glColorMask, 4, arg0, arg1, arg2, arg3); + glad_glColorMask(arg0, arg1, arg2, arg3); + _post_call_callback("glColorMask", (void*)glColorMask, 4, arg0, arg1, arg2, arg3); + +} +PFNGLCOLORMASKPROC glad_debug_glColorMask = glad_debug_impl_glColorMask; +PFNGLCOLORMASKIPROC glad_glColorMaski; +void APIENTRY glad_debug_impl_glColorMaski(GLuint arg0, GLboolean arg1, GLboolean arg2, GLboolean arg3, GLboolean arg4) { + _pre_call_callback("glColorMaski", (void*)glColorMaski, 5, arg0, arg1, arg2, arg3, arg4); + glad_glColorMaski(arg0, arg1, arg2, arg3, arg4); + _post_call_callback("glColorMaski", (void*)glColorMaski, 5, arg0, arg1, arg2, arg3, arg4); + +} +PFNGLCOLORMASKIPROC glad_debug_glColorMaski = glad_debug_impl_glColorMaski; +PFNGLCOLORP3UIPROC glad_glColorP3ui; +void APIENTRY glad_debug_impl_glColorP3ui(GLenum arg0, GLuint arg1) { + _pre_call_callback("glColorP3ui", (void*)glColorP3ui, 2, arg0, arg1); + glad_glColorP3ui(arg0, arg1); + _post_call_callback("glColorP3ui", (void*)glColorP3ui, 2, arg0, arg1); + +} +PFNGLCOLORP3UIPROC glad_debug_glColorP3ui = glad_debug_impl_glColorP3ui; +PFNGLCOLORP3UIVPROC glad_glColorP3uiv; +void APIENTRY glad_debug_impl_glColorP3uiv(GLenum arg0, const GLuint * arg1) { + _pre_call_callback("glColorP3uiv", (void*)glColorP3uiv, 2, arg0, arg1); + glad_glColorP3uiv(arg0, arg1); + _post_call_callback("glColorP3uiv", (void*)glColorP3uiv, 2, arg0, arg1); + +} +PFNGLCOLORP3UIVPROC glad_debug_glColorP3uiv = glad_debug_impl_glColorP3uiv; +PFNGLCOLORP4UIPROC glad_glColorP4ui; +void APIENTRY glad_debug_impl_glColorP4ui(GLenum arg0, GLuint arg1) { + _pre_call_callback("glColorP4ui", (void*)glColorP4ui, 2, arg0, arg1); + glad_glColorP4ui(arg0, arg1); + _post_call_callback("glColorP4ui", (void*)glColorP4ui, 2, arg0, arg1); + +} +PFNGLCOLORP4UIPROC glad_debug_glColorP4ui = glad_debug_impl_glColorP4ui; +PFNGLCOLORP4UIVPROC glad_glColorP4uiv; +void APIENTRY glad_debug_impl_glColorP4uiv(GLenum arg0, const GLuint * arg1) { + _pre_call_callback("glColorP4uiv", (void*)glColorP4uiv, 2, arg0, arg1); + glad_glColorP4uiv(arg0, arg1); + _post_call_callback("glColorP4uiv", (void*)glColorP4uiv, 2, arg0, arg1); + +} +PFNGLCOLORP4UIVPROC glad_debug_glColorP4uiv = glad_debug_impl_glColorP4uiv; +PFNGLCOMPILESHADERPROC glad_glCompileShader; +void APIENTRY glad_debug_impl_glCompileShader(GLuint arg0) { + _pre_call_callback("glCompileShader", (void*)glCompileShader, 1, arg0); + glad_glCompileShader(arg0); + _post_call_callback("glCompileShader", (void*)glCompileShader, 1, arg0); + +} +PFNGLCOMPILESHADERPROC glad_debug_glCompileShader = glad_debug_impl_glCompileShader; +PFNGLCOMPRESSEDTEXIMAGE1DPROC glad_glCompressedTexImage1D; +void APIENTRY glad_debug_impl_glCompressedTexImage1D(GLenum arg0, GLint arg1, GLenum arg2, GLsizei arg3, GLint arg4, GLsizei arg5, const void * arg6) { + _pre_call_callback("glCompressedTexImage1D", (void*)glCompressedTexImage1D, 7, arg0, arg1, arg2, arg3, arg4, arg5, arg6); + glad_glCompressedTexImage1D(arg0, arg1, arg2, arg3, arg4, arg5, arg6); + _post_call_callback("glCompressedTexImage1D", (void*)glCompressedTexImage1D, 7, arg0, arg1, arg2, arg3, 
arg4, arg5, arg6); + +} +PFNGLCOMPRESSEDTEXIMAGE1DPROC glad_debug_glCompressedTexImage1D = glad_debug_impl_glCompressedTexImage1D; +PFNGLCOMPRESSEDTEXIMAGE2DPROC glad_glCompressedTexImage2D; +void APIENTRY glad_debug_impl_glCompressedTexImage2D(GLenum arg0, GLint arg1, GLenum arg2, GLsizei arg3, GLsizei arg4, GLint arg5, GLsizei arg6, const void * arg7) { + _pre_call_callback("glCompressedTexImage2D", (void*)glCompressedTexImage2D, 8, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7); + glad_glCompressedTexImage2D(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7); + _post_call_callback("glCompressedTexImage2D", (void*)glCompressedTexImage2D, 8, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7); + +} +PFNGLCOMPRESSEDTEXIMAGE2DPROC glad_debug_glCompressedTexImage2D = glad_debug_impl_glCompressedTexImage2D; +PFNGLCOMPRESSEDTEXIMAGE3DPROC glad_glCompressedTexImage3D; +void APIENTRY glad_debug_impl_glCompressedTexImage3D(GLenum arg0, GLint arg1, GLenum arg2, GLsizei arg3, GLsizei arg4, GLsizei arg5, GLint arg6, GLsizei arg7, const void * arg8) { + _pre_call_callback("glCompressedTexImage3D", (void*)glCompressedTexImage3D, 9, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8); + glad_glCompressedTexImage3D(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8); + _post_call_callback("glCompressedTexImage3D", (void*)glCompressedTexImage3D, 9, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8); + +} +PFNGLCOMPRESSEDTEXIMAGE3DPROC glad_debug_glCompressedTexImage3D = glad_debug_impl_glCompressedTexImage3D; +PFNGLCOMPRESSEDTEXSUBIMAGE1DPROC glad_glCompressedTexSubImage1D; +void APIENTRY glad_debug_impl_glCompressedTexSubImage1D(GLenum arg0, GLint arg1, GLint arg2, GLsizei arg3, GLenum arg4, GLsizei arg5, const void * arg6) { + _pre_call_callback("glCompressedTexSubImage1D", (void*)glCompressedTexSubImage1D, 7, arg0, arg1, arg2, arg3, arg4, arg5, arg6); + glad_glCompressedTexSubImage1D(arg0, arg1, arg2, arg3, arg4, arg5, arg6); + _post_call_callback("glCompressedTexSubImage1D", (void*)glCompressedTexSubImage1D, 7, arg0, arg1, arg2, arg3, arg4, arg5, arg6); + +} +PFNGLCOMPRESSEDTEXSUBIMAGE1DPROC glad_debug_glCompressedTexSubImage1D = glad_debug_impl_glCompressedTexSubImage1D; +PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC glad_glCompressedTexSubImage2D; +void APIENTRY glad_debug_impl_glCompressedTexSubImage2D(GLenum arg0, GLint arg1, GLint arg2, GLint arg3, GLsizei arg4, GLsizei arg5, GLenum arg6, GLsizei arg7, const void * arg8) { + _pre_call_callback("glCompressedTexSubImage2D", (void*)glCompressedTexSubImage2D, 9, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8); + glad_glCompressedTexSubImage2D(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8); + _post_call_callback("glCompressedTexSubImage2D", (void*)glCompressedTexSubImage2D, 9, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8); + +} +PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC glad_debug_glCompressedTexSubImage2D = glad_debug_impl_glCompressedTexSubImage2D; +PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC glad_glCompressedTexSubImage3D; +void APIENTRY glad_debug_impl_glCompressedTexSubImage3D(GLenum arg0, GLint arg1, GLint arg2, GLint arg3, GLint arg4, GLsizei arg5, GLsizei arg6, GLsizei arg7, GLenum arg8, GLsizei arg9, const void * arg10) { + _pre_call_callback("glCompressedTexSubImage3D", (void*)glCompressedTexSubImage3D, 11, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10); + glad_glCompressedTexSubImage3D(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10); + _post_call_callback("glCompressedTexSubImage3D", 
(void*)glCompressedTexSubImage3D, 11, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10); + +} +PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC glad_debug_glCompressedTexSubImage3D = glad_debug_impl_glCompressedTexSubImage3D; +PFNGLCOPYBUFFERSUBDATAPROC glad_glCopyBufferSubData; +void APIENTRY glad_debug_impl_glCopyBufferSubData(GLenum arg0, GLenum arg1, GLintptr arg2, GLintptr arg3, GLsizeiptr arg4) { + _pre_call_callback("glCopyBufferSubData", (void*)glCopyBufferSubData, 5, arg0, arg1, arg2, arg3, arg4); + glad_glCopyBufferSubData(arg0, arg1, arg2, arg3, arg4); + _post_call_callback("glCopyBufferSubData", (void*)glCopyBufferSubData, 5, arg0, arg1, arg2, arg3, arg4); + +} +PFNGLCOPYBUFFERSUBDATAPROC glad_debug_glCopyBufferSubData = glad_debug_impl_glCopyBufferSubData; +PFNGLCOPYTEXIMAGE1DPROC glad_glCopyTexImage1D; +void APIENTRY glad_debug_impl_glCopyTexImage1D(GLenum arg0, GLint arg1, GLenum arg2, GLint arg3, GLint arg4, GLsizei arg5, GLint arg6) { + _pre_call_callback("glCopyTexImage1D", (void*)glCopyTexImage1D, 7, arg0, arg1, arg2, arg3, arg4, arg5, arg6); + glad_glCopyTexImage1D(arg0, arg1, arg2, arg3, arg4, arg5, arg6); + _post_call_callback("glCopyTexImage1D", (void*)glCopyTexImage1D, 7, arg0, arg1, arg2, arg3, arg4, arg5, arg6); + +} +PFNGLCOPYTEXIMAGE1DPROC glad_debug_glCopyTexImage1D = glad_debug_impl_glCopyTexImage1D; +PFNGLCOPYTEXIMAGE2DPROC glad_glCopyTexImage2D; +void APIENTRY glad_debug_impl_glCopyTexImage2D(GLenum arg0, GLint arg1, GLenum arg2, GLint arg3, GLint arg4, GLsizei arg5, GLsizei arg6, GLint arg7) { + _pre_call_callback("glCopyTexImage2D", (void*)glCopyTexImage2D, 8, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7); + glad_glCopyTexImage2D(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7); + _post_call_callback("glCopyTexImage2D", (void*)glCopyTexImage2D, 8, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7); + +} +PFNGLCOPYTEXIMAGE2DPROC glad_debug_glCopyTexImage2D = glad_debug_impl_glCopyTexImage2D; +PFNGLCOPYTEXSUBIMAGE1DPROC glad_glCopyTexSubImage1D; +void APIENTRY glad_debug_impl_glCopyTexSubImage1D(GLenum arg0, GLint arg1, GLint arg2, GLint arg3, GLint arg4, GLsizei arg5) { + _pre_call_callback("glCopyTexSubImage1D", (void*)glCopyTexSubImage1D, 6, arg0, arg1, arg2, arg3, arg4, arg5); + glad_glCopyTexSubImage1D(arg0, arg1, arg2, arg3, arg4, arg5); + _post_call_callback("glCopyTexSubImage1D", (void*)glCopyTexSubImage1D, 6, arg0, arg1, arg2, arg3, arg4, arg5); + +} +PFNGLCOPYTEXSUBIMAGE1DPROC glad_debug_glCopyTexSubImage1D = glad_debug_impl_glCopyTexSubImage1D; +PFNGLCOPYTEXSUBIMAGE2DPROC glad_glCopyTexSubImage2D; +void APIENTRY glad_debug_impl_glCopyTexSubImage2D(GLenum arg0, GLint arg1, GLint arg2, GLint arg3, GLint arg4, GLint arg5, GLsizei arg6, GLsizei arg7) { + _pre_call_callback("glCopyTexSubImage2D", (void*)glCopyTexSubImage2D, 8, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7); + glad_glCopyTexSubImage2D(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7); + _post_call_callback("glCopyTexSubImage2D", (void*)glCopyTexSubImage2D, 8, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7); + +} +PFNGLCOPYTEXSUBIMAGE2DPROC glad_debug_glCopyTexSubImage2D = glad_debug_impl_glCopyTexSubImage2D; +PFNGLCOPYTEXSUBIMAGE3DPROC glad_glCopyTexSubImage3D; +void APIENTRY glad_debug_impl_glCopyTexSubImage3D(GLenum arg0, GLint arg1, GLint arg2, GLint arg3, GLint arg4, GLint arg5, GLint arg6, GLsizei arg7, GLsizei arg8) { + _pre_call_callback("glCopyTexSubImage3D", (void*)glCopyTexSubImage3D, 9, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8); + glad_glCopyTexSubImage3D(arg0, 
arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8); + _post_call_callback("glCopyTexSubImage3D", (void*)glCopyTexSubImage3D, 9, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8); + +} +PFNGLCOPYTEXSUBIMAGE3DPROC glad_debug_glCopyTexSubImage3D = glad_debug_impl_glCopyTexSubImage3D; +PFNGLCREATEPROGRAMPROC glad_glCreateProgram; +GLuint APIENTRY glad_debug_impl_glCreateProgram(void) { + GLuint ret; + _pre_call_callback("glCreateProgram", (void*)glCreateProgram, 0); + ret = glad_glCreateProgram(); + _post_call_callback("glCreateProgram", (void*)glCreateProgram, 0); + return ret; +} +PFNGLCREATEPROGRAMPROC glad_debug_glCreateProgram = glad_debug_impl_glCreateProgram; +PFNGLCREATESHADERPROC glad_glCreateShader; +GLuint APIENTRY glad_debug_impl_glCreateShader(GLenum arg0) { + GLuint ret; + _pre_call_callback("glCreateShader", (void*)glCreateShader, 1, arg0); + ret = glad_glCreateShader(arg0); + _post_call_callback("glCreateShader", (void*)glCreateShader, 1, arg0); + return ret; +} +PFNGLCREATESHADERPROC glad_debug_glCreateShader = glad_debug_impl_glCreateShader; +PFNGLCULLFACEPROC glad_glCullFace; +void APIENTRY glad_debug_impl_glCullFace(GLenum arg0) { + _pre_call_callback("glCullFace", (void*)glCullFace, 1, arg0); + glad_glCullFace(arg0); + _post_call_callback("glCullFace", (void*)glCullFace, 1, arg0); + +} +PFNGLCULLFACEPROC glad_debug_glCullFace = glad_debug_impl_glCullFace; +PFNGLDELETEBUFFERSPROC glad_glDeleteBuffers; +void APIENTRY glad_debug_impl_glDeleteBuffers(GLsizei arg0, const GLuint * arg1) { + _pre_call_callback("glDeleteBuffers", (void*)glDeleteBuffers, 2, arg0, arg1); + glad_glDeleteBuffers(arg0, arg1); + _post_call_callback("glDeleteBuffers", (void*)glDeleteBuffers, 2, arg0, arg1); + +} +PFNGLDELETEBUFFERSPROC glad_debug_glDeleteBuffers = glad_debug_impl_glDeleteBuffers; +PFNGLDELETEFRAMEBUFFERSPROC glad_glDeleteFramebuffers; +void APIENTRY glad_debug_impl_glDeleteFramebuffers(GLsizei arg0, const GLuint * arg1) { + _pre_call_callback("glDeleteFramebuffers", (void*)glDeleteFramebuffers, 2, arg0, arg1); + glad_glDeleteFramebuffers(arg0, arg1); + _post_call_callback("glDeleteFramebuffers", (void*)glDeleteFramebuffers, 2, arg0, arg1); + +} +PFNGLDELETEFRAMEBUFFERSPROC glad_debug_glDeleteFramebuffers = glad_debug_impl_glDeleteFramebuffers; +PFNGLDELETEPROGRAMPROC glad_glDeleteProgram; +void APIENTRY glad_debug_impl_glDeleteProgram(GLuint arg0) { + _pre_call_callback("glDeleteProgram", (void*)glDeleteProgram, 1, arg0); + glad_glDeleteProgram(arg0); + _post_call_callback("glDeleteProgram", (void*)glDeleteProgram, 1, arg0); + +} +PFNGLDELETEPROGRAMPROC glad_debug_glDeleteProgram = glad_debug_impl_glDeleteProgram; +PFNGLDELETEQUERIESPROC glad_glDeleteQueries; +void APIENTRY glad_debug_impl_glDeleteQueries(GLsizei arg0, const GLuint * arg1) { + _pre_call_callback("glDeleteQueries", (void*)glDeleteQueries, 2, arg0, arg1); + glad_glDeleteQueries(arg0, arg1); + _post_call_callback("glDeleteQueries", (void*)glDeleteQueries, 2, arg0, arg1); + +} +PFNGLDELETEQUERIESPROC glad_debug_glDeleteQueries = glad_debug_impl_glDeleteQueries; +PFNGLDELETERENDERBUFFERSPROC glad_glDeleteRenderbuffers; +void APIENTRY glad_debug_impl_glDeleteRenderbuffers(GLsizei arg0, const GLuint * arg1) { + _pre_call_callback("glDeleteRenderbuffers", (void*)glDeleteRenderbuffers, 2, arg0, arg1); + glad_glDeleteRenderbuffers(arg0, arg1); + _post_call_callback("glDeleteRenderbuffers", (void*)glDeleteRenderbuffers, 2, arg0, arg1); + +} +PFNGLDELETERENDERBUFFERSPROC glad_debug_glDeleteRenderbuffers = 
glad_debug_impl_glDeleteRenderbuffers; +PFNGLDELETESAMPLERSPROC glad_glDeleteSamplers; +void APIENTRY glad_debug_impl_glDeleteSamplers(GLsizei arg0, const GLuint * arg1) { + _pre_call_callback("glDeleteSamplers", (void*)glDeleteSamplers, 2, arg0, arg1); + glad_glDeleteSamplers(arg0, arg1); + _post_call_callback("glDeleteSamplers", (void*)glDeleteSamplers, 2, arg0, arg1); + +} +PFNGLDELETESAMPLERSPROC glad_debug_glDeleteSamplers = glad_debug_impl_glDeleteSamplers; +PFNGLDELETESHADERPROC glad_glDeleteShader; +void APIENTRY glad_debug_impl_glDeleteShader(GLuint arg0) { + _pre_call_callback("glDeleteShader", (void*)glDeleteShader, 1, arg0); + glad_glDeleteShader(arg0); + _post_call_callback("glDeleteShader", (void*)glDeleteShader, 1, arg0); + +} +PFNGLDELETESHADERPROC glad_debug_glDeleteShader = glad_debug_impl_glDeleteShader; +PFNGLDELETESYNCPROC glad_glDeleteSync; +void APIENTRY glad_debug_impl_glDeleteSync(GLsync arg0) { + _pre_call_callback("glDeleteSync", (void*)glDeleteSync, 1, arg0); + glad_glDeleteSync(arg0); + _post_call_callback("glDeleteSync", (void*)glDeleteSync, 1, arg0); + +} +PFNGLDELETESYNCPROC glad_debug_glDeleteSync = glad_debug_impl_glDeleteSync; +PFNGLDELETETEXTURESPROC glad_glDeleteTextures; +void APIENTRY glad_debug_impl_glDeleteTextures(GLsizei arg0, const GLuint * arg1) { + _pre_call_callback("glDeleteTextures", (void*)glDeleteTextures, 2, arg0, arg1); + glad_glDeleteTextures(arg0, arg1); + _post_call_callback("glDeleteTextures", (void*)glDeleteTextures, 2, arg0, arg1); + +} +PFNGLDELETETEXTURESPROC glad_debug_glDeleteTextures = glad_debug_impl_glDeleteTextures; +PFNGLDELETEVERTEXARRAYSPROC glad_glDeleteVertexArrays; +void APIENTRY glad_debug_impl_glDeleteVertexArrays(GLsizei arg0, const GLuint * arg1) { + _pre_call_callback("glDeleteVertexArrays", (void*)glDeleteVertexArrays, 2, arg0, arg1); + glad_glDeleteVertexArrays(arg0, arg1); + _post_call_callback("glDeleteVertexArrays", (void*)glDeleteVertexArrays, 2, arg0, arg1); + +} +PFNGLDELETEVERTEXARRAYSPROC glad_debug_glDeleteVertexArrays = glad_debug_impl_glDeleteVertexArrays; +PFNGLDEPTHFUNCPROC glad_glDepthFunc; +void APIENTRY glad_debug_impl_glDepthFunc(GLenum arg0) { + _pre_call_callback("glDepthFunc", (void*)glDepthFunc, 1, arg0); + glad_glDepthFunc(arg0); + _post_call_callback("glDepthFunc", (void*)glDepthFunc, 1, arg0); + +} +PFNGLDEPTHFUNCPROC glad_debug_glDepthFunc = glad_debug_impl_glDepthFunc; +PFNGLDEPTHMASKPROC glad_glDepthMask; +void APIENTRY glad_debug_impl_glDepthMask(GLboolean arg0) { + _pre_call_callback("glDepthMask", (void*)glDepthMask, 1, arg0); + glad_glDepthMask(arg0); + _post_call_callback("glDepthMask", (void*)glDepthMask, 1, arg0); + +} +PFNGLDEPTHMASKPROC glad_debug_glDepthMask = glad_debug_impl_glDepthMask; +PFNGLDEPTHRANGEPROC glad_glDepthRange; +void APIENTRY glad_debug_impl_glDepthRange(GLdouble arg0, GLdouble arg1) { + _pre_call_callback("glDepthRange", (void*)glDepthRange, 2, arg0, arg1); + glad_glDepthRange(arg0, arg1); + _post_call_callback("glDepthRange", (void*)glDepthRange, 2, arg0, arg1); + +} +PFNGLDEPTHRANGEPROC glad_debug_glDepthRange = glad_debug_impl_glDepthRange; +PFNGLDETACHSHADERPROC glad_glDetachShader; +void APIENTRY glad_debug_impl_glDetachShader(GLuint arg0, GLuint arg1) { + _pre_call_callback("glDetachShader", (void*)glDetachShader, 2, arg0, arg1); + glad_glDetachShader(arg0, arg1); + _post_call_callback("glDetachShader", (void*)glDetachShader, 2, arg0, arg1); + +} +PFNGLDETACHSHADERPROC glad_debug_glDetachShader = glad_debug_impl_glDetachShader; +PFNGLDISABLEPROC 
glad_glDisable; +void APIENTRY glad_debug_impl_glDisable(GLenum arg0) { + _pre_call_callback("glDisable", (void*)glDisable, 1, arg0); + glad_glDisable(arg0); + _post_call_callback("glDisable", (void*)glDisable, 1, arg0); + +} +PFNGLDISABLEPROC glad_debug_glDisable = glad_debug_impl_glDisable; +PFNGLDISABLEVERTEXATTRIBARRAYPROC glad_glDisableVertexAttribArray; +void APIENTRY glad_debug_impl_glDisableVertexAttribArray(GLuint arg0) { + _pre_call_callback("glDisableVertexAttribArray", (void*)glDisableVertexAttribArray, 1, arg0); + glad_glDisableVertexAttribArray(arg0); + _post_call_callback("glDisableVertexAttribArray", (void*)glDisableVertexAttribArray, 1, arg0); + +} +PFNGLDISABLEVERTEXATTRIBARRAYPROC glad_debug_glDisableVertexAttribArray = glad_debug_impl_glDisableVertexAttribArray; +PFNGLDISABLEIPROC glad_glDisablei; +void APIENTRY glad_debug_impl_glDisablei(GLenum arg0, GLuint arg1) { + _pre_call_callback("glDisablei", (void*)glDisablei, 2, arg0, arg1); + glad_glDisablei(arg0, arg1); + _post_call_callback("glDisablei", (void*)glDisablei, 2, arg0, arg1); + +} +PFNGLDISABLEIPROC glad_debug_glDisablei = glad_debug_impl_glDisablei; +PFNGLDRAWARRAYSPROC glad_glDrawArrays; +void APIENTRY glad_debug_impl_glDrawArrays(GLenum arg0, GLint arg1, GLsizei arg2) { + _pre_call_callback("glDrawArrays", (void*)glDrawArrays, 3, arg0, arg1, arg2); + glad_glDrawArrays(arg0, arg1, arg2); + _post_call_callback("glDrawArrays", (void*)glDrawArrays, 3, arg0, arg1, arg2); + +} +PFNGLDRAWARRAYSPROC glad_debug_glDrawArrays = glad_debug_impl_glDrawArrays; +PFNGLDRAWARRAYSINSTANCEDPROC glad_glDrawArraysInstanced; +void APIENTRY glad_debug_impl_glDrawArraysInstanced(GLenum arg0, GLint arg1, GLsizei arg2, GLsizei arg3) { + _pre_call_callback("glDrawArraysInstanced", (void*)glDrawArraysInstanced, 4, arg0, arg1, arg2, arg3); + glad_glDrawArraysInstanced(arg0, arg1, arg2, arg3); + _post_call_callback("glDrawArraysInstanced", (void*)glDrawArraysInstanced, 4, arg0, arg1, arg2, arg3); + +} +PFNGLDRAWARRAYSINSTANCEDPROC glad_debug_glDrawArraysInstanced = glad_debug_impl_glDrawArraysInstanced; +PFNGLDRAWBUFFERPROC glad_glDrawBuffer; +void APIENTRY glad_debug_impl_glDrawBuffer(GLenum arg0) { + _pre_call_callback("glDrawBuffer", (void*)glDrawBuffer, 1, arg0); + glad_glDrawBuffer(arg0); + _post_call_callback("glDrawBuffer", (void*)glDrawBuffer, 1, arg0); + +} +PFNGLDRAWBUFFERPROC glad_debug_glDrawBuffer = glad_debug_impl_glDrawBuffer; +PFNGLDRAWBUFFERSPROC glad_glDrawBuffers; +void APIENTRY glad_debug_impl_glDrawBuffers(GLsizei arg0, const GLenum * arg1) { + _pre_call_callback("glDrawBuffers", (void*)glDrawBuffers, 2, arg0, arg1); + glad_glDrawBuffers(arg0, arg1); + _post_call_callback("glDrawBuffers", (void*)glDrawBuffers, 2, arg0, arg1); + +} +PFNGLDRAWBUFFERSPROC glad_debug_glDrawBuffers = glad_debug_impl_glDrawBuffers; +PFNGLDRAWELEMENTSPROC glad_glDrawElements; +void APIENTRY glad_debug_impl_glDrawElements(GLenum arg0, GLsizei arg1, GLenum arg2, const void * arg3) { + _pre_call_callback("glDrawElements", (void*)glDrawElements, 4, arg0, arg1, arg2, arg3); + glad_glDrawElements(arg0, arg1, arg2, arg3); + _post_call_callback("glDrawElements", (void*)glDrawElements, 4, arg0, arg1, arg2, arg3); + +} +PFNGLDRAWELEMENTSPROC glad_debug_glDrawElements = glad_debug_impl_glDrawElements; +PFNGLDRAWELEMENTSBASEVERTEXPROC glad_glDrawElementsBaseVertex; +void APIENTRY glad_debug_impl_glDrawElementsBaseVertex(GLenum arg0, GLsizei arg1, GLenum arg2, const void * arg3, GLint arg4) { + _pre_call_callback("glDrawElementsBaseVertex", 
(void*)glDrawElementsBaseVertex, 5, arg0, arg1, arg2, arg3, arg4); + glad_glDrawElementsBaseVertex(arg0, arg1, arg2, arg3, arg4); + _post_call_callback("glDrawElementsBaseVertex", (void*)glDrawElementsBaseVertex, 5, arg0, arg1, arg2, arg3, arg4); + +} +PFNGLDRAWELEMENTSBASEVERTEXPROC glad_debug_glDrawElementsBaseVertex = glad_debug_impl_glDrawElementsBaseVertex; +PFNGLDRAWELEMENTSINSTANCEDPROC glad_glDrawElementsInstanced; +void APIENTRY glad_debug_impl_glDrawElementsInstanced(GLenum arg0, GLsizei arg1, GLenum arg2, const void * arg3, GLsizei arg4) { + _pre_call_callback("glDrawElementsInstanced", (void*)glDrawElementsInstanced, 5, arg0, arg1, arg2, arg3, arg4); + glad_glDrawElementsInstanced(arg0, arg1, arg2, arg3, arg4); + _post_call_callback("glDrawElementsInstanced", (void*)glDrawElementsInstanced, 5, arg0, arg1, arg2, arg3, arg4); + +} +PFNGLDRAWELEMENTSINSTANCEDPROC glad_debug_glDrawElementsInstanced = glad_debug_impl_glDrawElementsInstanced; +PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXPROC glad_glDrawElementsInstancedBaseVertex; +void APIENTRY glad_debug_impl_glDrawElementsInstancedBaseVertex(GLenum arg0, GLsizei arg1, GLenum arg2, const void * arg3, GLsizei arg4, GLint arg5) { + _pre_call_callback("glDrawElementsInstancedBaseVertex", (void*)glDrawElementsInstancedBaseVertex, 6, arg0, arg1, arg2, arg3, arg4, arg5); + glad_glDrawElementsInstancedBaseVertex(arg0, arg1, arg2, arg3, arg4, arg5); + _post_call_callback("glDrawElementsInstancedBaseVertex", (void*)glDrawElementsInstancedBaseVertex, 6, arg0, arg1, arg2, arg3, arg4, arg5); + +} +PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXPROC glad_debug_glDrawElementsInstancedBaseVertex = glad_debug_impl_glDrawElementsInstancedBaseVertex; +PFNGLDRAWRANGEELEMENTSPROC glad_glDrawRangeElements; +void APIENTRY glad_debug_impl_glDrawRangeElements(GLenum arg0, GLuint arg1, GLuint arg2, GLsizei arg3, GLenum arg4, const void * arg5) { + _pre_call_callback("glDrawRangeElements", (void*)glDrawRangeElements, 6, arg0, arg1, arg2, arg3, arg4, arg5); + glad_glDrawRangeElements(arg0, arg1, arg2, arg3, arg4, arg5); + _post_call_callback("glDrawRangeElements", (void*)glDrawRangeElements, 6, arg0, arg1, arg2, arg3, arg4, arg5); + +} +PFNGLDRAWRANGEELEMENTSPROC glad_debug_glDrawRangeElements = glad_debug_impl_glDrawRangeElements; +PFNGLDRAWRANGEELEMENTSBASEVERTEXPROC glad_glDrawRangeElementsBaseVertex; +void APIENTRY glad_debug_impl_glDrawRangeElementsBaseVertex(GLenum arg0, GLuint arg1, GLuint arg2, GLsizei arg3, GLenum arg4, const void * arg5, GLint arg6) { + _pre_call_callback("glDrawRangeElementsBaseVertex", (void*)glDrawRangeElementsBaseVertex, 7, arg0, arg1, arg2, arg3, arg4, arg5, arg6); + glad_glDrawRangeElementsBaseVertex(arg0, arg1, arg2, arg3, arg4, arg5, arg6); + _post_call_callback("glDrawRangeElementsBaseVertex", (void*)glDrawRangeElementsBaseVertex, 7, arg0, arg1, arg2, arg3, arg4, arg5, arg6); + +} +PFNGLDRAWRANGEELEMENTSBASEVERTEXPROC glad_debug_glDrawRangeElementsBaseVertex = glad_debug_impl_glDrawRangeElementsBaseVertex; +PFNGLENABLEPROC glad_glEnable; +void APIENTRY glad_debug_impl_glEnable(GLenum arg0) { + _pre_call_callback("glEnable", (void*)glEnable, 1, arg0); + glad_glEnable(arg0); + _post_call_callback("glEnable", (void*)glEnable, 1, arg0); + +} +PFNGLENABLEPROC glad_debug_glEnable = glad_debug_impl_glEnable; +PFNGLENABLEVERTEXATTRIBARRAYPROC glad_glEnableVertexAttribArray; +void APIENTRY glad_debug_impl_glEnableVertexAttribArray(GLuint arg0) { + _pre_call_callback("glEnableVertexAttribArray", (void*)glEnableVertexAttribArray, 1, arg0); + 
glad_glEnableVertexAttribArray(arg0); + _post_call_callback("glEnableVertexAttribArray", (void*)glEnableVertexAttribArray, 1, arg0); + +} +PFNGLENABLEVERTEXATTRIBARRAYPROC glad_debug_glEnableVertexAttribArray = glad_debug_impl_glEnableVertexAttribArray; +PFNGLENABLEIPROC glad_glEnablei; +void APIENTRY glad_debug_impl_glEnablei(GLenum arg0, GLuint arg1) { + _pre_call_callback("glEnablei", (void*)glEnablei, 2, arg0, arg1); + glad_glEnablei(arg0, arg1); + _post_call_callback("glEnablei", (void*)glEnablei, 2, arg0, arg1); + +} +PFNGLENABLEIPROC glad_debug_glEnablei = glad_debug_impl_glEnablei; +PFNGLENDCONDITIONALRENDERPROC glad_glEndConditionalRender; +void APIENTRY glad_debug_impl_glEndConditionalRender(void) { + _pre_call_callback("glEndConditionalRender", (void*)glEndConditionalRender, 0); + glad_glEndConditionalRender(); + _post_call_callback("glEndConditionalRender", (void*)glEndConditionalRender, 0); + +} +PFNGLENDCONDITIONALRENDERPROC glad_debug_glEndConditionalRender = glad_debug_impl_glEndConditionalRender; +PFNGLENDQUERYPROC glad_glEndQuery; +void APIENTRY glad_debug_impl_glEndQuery(GLenum arg0) { + _pre_call_callback("glEndQuery", (void*)glEndQuery, 1, arg0); + glad_glEndQuery(arg0); + _post_call_callback("glEndQuery", (void*)glEndQuery, 1, arg0); + +} +PFNGLENDQUERYPROC glad_debug_glEndQuery = glad_debug_impl_glEndQuery; +PFNGLENDTRANSFORMFEEDBACKPROC glad_glEndTransformFeedback; +void APIENTRY glad_debug_impl_glEndTransformFeedback(void) { + _pre_call_callback("glEndTransformFeedback", (void*)glEndTransformFeedback, 0); + glad_glEndTransformFeedback(); + _post_call_callback("glEndTransformFeedback", (void*)glEndTransformFeedback, 0); + +} +PFNGLENDTRANSFORMFEEDBACKPROC glad_debug_glEndTransformFeedback = glad_debug_impl_glEndTransformFeedback; +PFNGLFENCESYNCPROC glad_glFenceSync; +GLsync APIENTRY glad_debug_impl_glFenceSync(GLenum arg0, GLbitfield arg1) { + GLsync ret; + _pre_call_callback("glFenceSync", (void*)glFenceSync, 2, arg0, arg1); + ret = glad_glFenceSync(arg0, arg1); + _post_call_callback("glFenceSync", (void*)glFenceSync, 2, arg0, arg1); + return ret; +} +PFNGLFENCESYNCPROC glad_debug_glFenceSync = glad_debug_impl_glFenceSync; +PFNGLFINISHPROC glad_glFinish; +void APIENTRY glad_debug_impl_glFinish(void) { + _pre_call_callback("glFinish", (void*)glFinish, 0); + glad_glFinish(); + _post_call_callback("glFinish", (void*)glFinish, 0); + +} +PFNGLFINISHPROC glad_debug_glFinish = glad_debug_impl_glFinish; +PFNGLFLUSHPROC glad_glFlush; +void APIENTRY glad_debug_impl_glFlush(void) { + _pre_call_callback("glFlush", (void*)glFlush, 0); + glad_glFlush(); + _post_call_callback("glFlush", (void*)glFlush, 0); + +} +PFNGLFLUSHPROC glad_debug_glFlush = glad_debug_impl_glFlush; +PFNGLFLUSHMAPPEDBUFFERRANGEPROC glad_glFlushMappedBufferRange; +void APIENTRY glad_debug_impl_glFlushMappedBufferRange(GLenum arg0, GLintptr arg1, GLsizeiptr arg2) { + _pre_call_callback("glFlushMappedBufferRange", (void*)glFlushMappedBufferRange, 3, arg0, arg1, arg2); + glad_glFlushMappedBufferRange(arg0, arg1, arg2); + _post_call_callback("glFlushMappedBufferRange", (void*)glFlushMappedBufferRange, 3, arg0, arg1, arg2); + +} +PFNGLFLUSHMAPPEDBUFFERRANGEPROC glad_debug_glFlushMappedBufferRange = glad_debug_impl_glFlushMappedBufferRange; +PFNGLFRAMEBUFFERRENDERBUFFERPROC glad_glFramebufferRenderbuffer; +void APIENTRY glad_debug_impl_glFramebufferRenderbuffer(GLenum arg0, GLenum arg1, GLenum arg2, GLuint arg3) { + _pre_call_callback("glFramebufferRenderbuffer", (void*)glFramebufferRenderbuffer, 4, arg0, arg1, 
arg2, arg3); + glad_glFramebufferRenderbuffer(arg0, arg1, arg2, arg3); + _post_call_callback("glFramebufferRenderbuffer", (void*)glFramebufferRenderbuffer, 4, arg0, arg1, arg2, arg3); + +} +PFNGLFRAMEBUFFERRENDERBUFFERPROC glad_debug_glFramebufferRenderbuffer = glad_debug_impl_glFramebufferRenderbuffer; +PFNGLFRAMEBUFFERTEXTUREPROC glad_glFramebufferTexture; +void APIENTRY glad_debug_impl_glFramebufferTexture(GLenum arg0, GLenum arg1, GLuint arg2, GLint arg3) { + _pre_call_callback("glFramebufferTexture", (void*)glFramebufferTexture, 4, arg0, arg1, arg2, arg3); + glad_glFramebufferTexture(arg0, arg1, arg2, arg3); + _post_call_callback("glFramebufferTexture", (void*)glFramebufferTexture, 4, arg0, arg1, arg2, arg3); + +} +PFNGLFRAMEBUFFERTEXTUREPROC glad_debug_glFramebufferTexture = glad_debug_impl_glFramebufferTexture; +PFNGLFRAMEBUFFERTEXTURE1DPROC glad_glFramebufferTexture1D; +void APIENTRY glad_debug_impl_glFramebufferTexture1D(GLenum arg0, GLenum arg1, GLenum arg2, GLuint arg3, GLint arg4) { + _pre_call_callback("glFramebufferTexture1D", (void*)glFramebufferTexture1D, 5, arg0, arg1, arg2, arg3, arg4); + glad_glFramebufferTexture1D(arg0, arg1, arg2, arg3, arg4); + _post_call_callback("glFramebufferTexture1D", (void*)glFramebufferTexture1D, 5, arg0, arg1, arg2, arg3, arg4); + +} +PFNGLFRAMEBUFFERTEXTURE1DPROC glad_debug_glFramebufferTexture1D = glad_debug_impl_glFramebufferTexture1D; +PFNGLFRAMEBUFFERTEXTURE2DPROC glad_glFramebufferTexture2D; +void APIENTRY glad_debug_impl_glFramebufferTexture2D(GLenum arg0, GLenum arg1, GLenum arg2, GLuint arg3, GLint arg4) { + _pre_call_callback("glFramebufferTexture2D", (void*)glFramebufferTexture2D, 5, arg0, arg1, arg2, arg3, arg4); + glad_glFramebufferTexture2D(arg0, arg1, arg2, arg3, arg4); + _post_call_callback("glFramebufferTexture2D", (void*)glFramebufferTexture2D, 5, arg0, arg1, arg2, arg3, arg4); + +} +PFNGLFRAMEBUFFERTEXTURE2DPROC glad_debug_glFramebufferTexture2D = glad_debug_impl_glFramebufferTexture2D; +PFNGLFRAMEBUFFERTEXTURE3DPROC glad_glFramebufferTexture3D; +void APIENTRY glad_debug_impl_glFramebufferTexture3D(GLenum arg0, GLenum arg1, GLenum arg2, GLuint arg3, GLint arg4, GLint arg5) { + _pre_call_callback("glFramebufferTexture3D", (void*)glFramebufferTexture3D, 6, arg0, arg1, arg2, arg3, arg4, arg5); + glad_glFramebufferTexture3D(arg0, arg1, arg2, arg3, arg4, arg5); + _post_call_callback("glFramebufferTexture3D", (void*)glFramebufferTexture3D, 6, arg0, arg1, arg2, arg3, arg4, arg5); + +} +PFNGLFRAMEBUFFERTEXTURE3DPROC glad_debug_glFramebufferTexture3D = glad_debug_impl_glFramebufferTexture3D; +PFNGLFRAMEBUFFERTEXTURELAYERPROC glad_glFramebufferTextureLayer; +void APIENTRY glad_debug_impl_glFramebufferTextureLayer(GLenum arg0, GLenum arg1, GLuint arg2, GLint arg3, GLint arg4) { + _pre_call_callback("glFramebufferTextureLayer", (void*)glFramebufferTextureLayer, 5, arg0, arg1, arg2, arg3, arg4); + glad_glFramebufferTextureLayer(arg0, arg1, arg2, arg3, arg4); + _post_call_callback("glFramebufferTextureLayer", (void*)glFramebufferTextureLayer, 5, arg0, arg1, arg2, arg3, arg4); + +} +PFNGLFRAMEBUFFERTEXTURELAYERPROC glad_debug_glFramebufferTextureLayer = glad_debug_impl_glFramebufferTextureLayer; +PFNGLFRONTFACEPROC glad_glFrontFace; +void APIENTRY glad_debug_impl_glFrontFace(GLenum arg0) { + _pre_call_callback("glFrontFace", (void*)glFrontFace, 1, arg0); + glad_glFrontFace(arg0); + _post_call_callback("glFrontFace", (void*)glFrontFace, 1, arg0); + +} +PFNGLFRONTFACEPROC glad_debug_glFrontFace = glad_debug_impl_glFrontFace; 
+PFNGLGENBUFFERSPROC glad_glGenBuffers; +void APIENTRY glad_debug_impl_glGenBuffers(GLsizei arg0, GLuint * arg1) { + _pre_call_callback("glGenBuffers", (void*)glGenBuffers, 2, arg0, arg1); + glad_glGenBuffers(arg0, arg1); + _post_call_callback("glGenBuffers", (void*)glGenBuffers, 2, arg0, arg1); + +} +PFNGLGENBUFFERSPROC glad_debug_glGenBuffers = glad_debug_impl_glGenBuffers; +PFNGLGENFRAMEBUFFERSPROC glad_glGenFramebuffers; +void APIENTRY glad_debug_impl_glGenFramebuffers(GLsizei arg0, GLuint * arg1) { + _pre_call_callback("glGenFramebuffers", (void*)glGenFramebuffers, 2, arg0, arg1); + glad_glGenFramebuffers(arg0, arg1); + _post_call_callback("glGenFramebuffers", (void*)glGenFramebuffers, 2, arg0, arg1); + +} +PFNGLGENFRAMEBUFFERSPROC glad_debug_glGenFramebuffers = glad_debug_impl_glGenFramebuffers; +PFNGLGENQUERIESPROC glad_glGenQueries; +void APIENTRY glad_debug_impl_glGenQueries(GLsizei arg0, GLuint * arg1) { + _pre_call_callback("glGenQueries", (void*)glGenQueries, 2, arg0, arg1); + glad_glGenQueries(arg0, arg1); + _post_call_callback("glGenQueries", (void*)glGenQueries, 2, arg0, arg1); + +} +PFNGLGENQUERIESPROC glad_debug_glGenQueries = glad_debug_impl_glGenQueries; +PFNGLGENRENDERBUFFERSPROC glad_glGenRenderbuffers; +void APIENTRY glad_debug_impl_glGenRenderbuffers(GLsizei arg0, GLuint * arg1) { + _pre_call_callback("glGenRenderbuffers", (void*)glGenRenderbuffers, 2, arg0, arg1); + glad_glGenRenderbuffers(arg0, arg1); + _post_call_callback("glGenRenderbuffers", (void*)glGenRenderbuffers, 2, arg0, arg1); + +} +PFNGLGENRENDERBUFFERSPROC glad_debug_glGenRenderbuffers = glad_debug_impl_glGenRenderbuffers; +PFNGLGENSAMPLERSPROC glad_glGenSamplers; +void APIENTRY glad_debug_impl_glGenSamplers(GLsizei arg0, GLuint * arg1) { + _pre_call_callback("glGenSamplers", (void*)glGenSamplers, 2, arg0, arg1); + glad_glGenSamplers(arg0, arg1); + _post_call_callback("glGenSamplers", (void*)glGenSamplers, 2, arg0, arg1); + +} +PFNGLGENSAMPLERSPROC glad_debug_glGenSamplers = glad_debug_impl_glGenSamplers; +PFNGLGENTEXTURESPROC glad_glGenTextures; +void APIENTRY glad_debug_impl_glGenTextures(GLsizei arg0, GLuint * arg1) { + _pre_call_callback("glGenTextures", (void*)glGenTextures, 2, arg0, arg1); + glad_glGenTextures(arg0, arg1); + _post_call_callback("glGenTextures", (void*)glGenTextures, 2, arg0, arg1); + +} +PFNGLGENTEXTURESPROC glad_debug_glGenTextures = glad_debug_impl_glGenTextures; +PFNGLGENVERTEXARRAYSPROC glad_glGenVertexArrays; +void APIENTRY glad_debug_impl_glGenVertexArrays(GLsizei arg0, GLuint * arg1) { + _pre_call_callback("glGenVertexArrays", (void*)glGenVertexArrays, 2, arg0, arg1); + glad_glGenVertexArrays(arg0, arg1); + _post_call_callback("glGenVertexArrays", (void*)glGenVertexArrays, 2, arg0, arg1); + +} +PFNGLGENVERTEXARRAYSPROC glad_debug_glGenVertexArrays = glad_debug_impl_glGenVertexArrays; +PFNGLGENERATEMIPMAPPROC glad_glGenerateMipmap; +void APIENTRY glad_debug_impl_glGenerateMipmap(GLenum arg0) { + _pre_call_callback("glGenerateMipmap", (void*)glGenerateMipmap, 1, arg0); + glad_glGenerateMipmap(arg0); + _post_call_callback("glGenerateMipmap", (void*)glGenerateMipmap, 1, arg0); + +} +PFNGLGENERATEMIPMAPPROC glad_debug_glGenerateMipmap = glad_debug_impl_glGenerateMipmap; +PFNGLGETACTIVEATTRIBPROC glad_glGetActiveAttrib; +void APIENTRY glad_debug_impl_glGetActiveAttrib(GLuint arg0, GLuint arg1, GLsizei arg2, GLsizei * arg3, GLint * arg4, GLenum * arg5, GLchar * arg6) { + _pre_call_callback("glGetActiveAttrib", (void*)glGetActiveAttrib, 7, arg0, arg1, arg2, arg3, arg4, arg5, arg6); 
+ glad_glGetActiveAttrib(arg0, arg1, arg2, arg3, arg4, arg5, arg6); + _post_call_callback("glGetActiveAttrib", (void*)glGetActiveAttrib, 7, arg0, arg1, arg2, arg3, arg4, arg5, arg6); + +} +PFNGLGETACTIVEATTRIBPROC glad_debug_glGetActiveAttrib = glad_debug_impl_glGetActiveAttrib; +PFNGLGETACTIVEUNIFORMPROC glad_glGetActiveUniform; +void APIENTRY glad_debug_impl_glGetActiveUniform(GLuint arg0, GLuint arg1, GLsizei arg2, GLsizei * arg3, GLint * arg4, GLenum * arg5, GLchar * arg6) { + _pre_call_callback("glGetActiveUniform", (void*)glGetActiveUniform, 7, arg0, arg1, arg2, arg3, arg4, arg5, arg6); + glad_glGetActiveUniform(arg0, arg1, arg2, arg3, arg4, arg5, arg6); + _post_call_callback("glGetActiveUniform", (void*)glGetActiveUniform, 7, arg0, arg1, arg2, arg3, arg4, arg5, arg6); + +} +PFNGLGETACTIVEUNIFORMPROC glad_debug_glGetActiveUniform = glad_debug_impl_glGetActiveUniform; +PFNGLGETACTIVEUNIFORMBLOCKNAMEPROC glad_glGetActiveUniformBlockName; +void APIENTRY glad_debug_impl_glGetActiveUniformBlockName(GLuint arg0, GLuint arg1, GLsizei arg2, GLsizei * arg3, GLchar * arg4) { + _pre_call_callback("glGetActiveUniformBlockName", (void*)glGetActiveUniformBlockName, 5, arg0, arg1, arg2, arg3, arg4); + glad_glGetActiveUniformBlockName(arg0, arg1, arg2, arg3, arg4); + _post_call_callback("glGetActiveUniformBlockName", (void*)glGetActiveUniformBlockName, 5, arg0, arg1, arg2, arg3, arg4); + +} +PFNGLGETACTIVEUNIFORMBLOCKNAMEPROC glad_debug_glGetActiveUniformBlockName = glad_debug_impl_glGetActiveUniformBlockName; +PFNGLGETACTIVEUNIFORMBLOCKIVPROC glad_glGetActiveUniformBlockiv; +void APIENTRY glad_debug_impl_glGetActiveUniformBlockiv(GLuint arg0, GLuint arg1, GLenum arg2, GLint * arg3) { + _pre_call_callback("glGetActiveUniformBlockiv", (void*)glGetActiveUniformBlockiv, 4, arg0, arg1, arg2, arg3); + glad_glGetActiveUniformBlockiv(arg0, arg1, arg2, arg3); + _post_call_callback("glGetActiveUniformBlockiv", (void*)glGetActiveUniformBlockiv, 4, arg0, arg1, arg2, arg3); + +} +PFNGLGETACTIVEUNIFORMBLOCKIVPROC glad_debug_glGetActiveUniformBlockiv = glad_debug_impl_glGetActiveUniformBlockiv; +PFNGLGETACTIVEUNIFORMNAMEPROC glad_glGetActiveUniformName; +void APIENTRY glad_debug_impl_glGetActiveUniformName(GLuint arg0, GLuint arg1, GLsizei arg2, GLsizei * arg3, GLchar * arg4) { + _pre_call_callback("glGetActiveUniformName", (void*)glGetActiveUniformName, 5, arg0, arg1, arg2, arg3, arg4); + glad_glGetActiveUniformName(arg0, arg1, arg2, arg3, arg4); + _post_call_callback("glGetActiveUniformName", (void*)glGetActiveUniformName, 5, arg0, arg1, arg2, arg3, arg4); + +} +PFNGLGETACTIVEUNIFORMNAMEPROC glad_debug_glGetActiveUniformName = glad_debug_impl_glGetActiveUniformName; +PFNGLGETACTIVEUNIFORMSIVPROC glad_glGetActiveUniformsiv; +void APIENTRY glad_debug_impl_glGetActiveUniformsiv(GLuint arg0, GLsizei arg1, const GLuint * arg2, GLenum arg3, GLint * arg4) { + _pre_call_callback("glGetActiveUniformsiv", (void*)glGetActiveUniformsiv, 5, arg0, arg1, arg2, arg3, arg4); + glad_glGetActiveUniformsiv(arg0, arg1, arg2, arg3, arg4); + _post_call_callback("glGetActiveUniformsiv", (void*)glGetActiveUniformsiv, 5, arg0, arg1, arg2, arg3, arg4); + +} +PFNGLGETACTIVEUNIFORMSIVPROC glad_debug_glGetActiveUniformsiv = glad_debug_impl_glGetActiveUniformsiv; +PFNGLGETATTACHEDSHADERSPROC glad_glGetAttachedShaders; +void APIENTRY glad_debug_impl_glGetAttachedShaders(GLuint arg0, GLsizei arg1, GLsizei * arg2, GLuint * arg3) { + _pre_call_callback("glGetAttachedShaders", (void*)glGetAttachedShaders, 4, arg0, arg1, arg2, arg3); + 
glad_glGetAttachedShaders(arg0, arg1, arg2, arg3); + _post_call_callback("glGetAttachedShaders", (void*)glGetAttachedShaders, 4, arg0, arg1, arg2, arg3); + +} +PFNGLGETATTACHEDSHADERSPROC glad_debug_glGetAttachedShaders = glad_debug_impl_glGetAttachedShaders; +PFNGLGETATTRIBLOCATIONPROC glad_glGetAttribLocation; +GLint APIENTRY glad_debug_impl_glGetAttribLocation(GLuint arg0, const GLchar * arg1) { + GLint ret; + _pre_call_callback("glGetAttribLocation", (void*)glGetAttribLocation, 2, arg0, arg1); + ret = glad_glGetAttribLocation(arg0, arg1); + _post_call_callback("glGetAttribLocation", (void*)glGetAttribLocation, 2, arg0, arg1); + return ret; +} +PFNGLGETATTRIBLOCATIONPROC glad_debug_glGetAttribLocation = glad_debug_impl_glGetAttribLocation; +PFNGLGETBOOLEANI_VPROC glad_glGetBooleani_v; +void APIENTRY glad_debug_impl_glGetBooleani_v(GLenum arg0, GLuint arg1, GLboolean * arg2) { + _pre_call_callback("glGetBooleani_v", (void*)glGetBooleani_v, 3, arg0, arg1, arg2); + glad_glGetBooleani_v(arg0, arg1, arg2); + _post_call_callback("glGetBooleani_v", (void*)glGetBooleani_v, 3, arg0, arg1, arg2); + +} +PFNGLGETBOOLEANI_VPROC glad_debug_glGetBooleani_v = glad_debug_impl_glGetBooleani_v; +PFNGLGETBOOLEANVPROC glad_glGetBooleanv; +void APIENTRY glad_debug_impl_glGetBooleanv(GLenum arg0, GLboolean * arg1) { + _pre_call_callback("glGetBooleanv", (void*)glGetBooleanv, 2, arg0, arg1); + glad_glGetBooleanv(arg0, arg1); + _post_call_callback("glGetBooleanv", (void*)glGetBooleanv, 2, arg0, arg1); + +} +PFNGLGETBOOLEANVPROC glad_debug_glGetBooleanv = glad_debug_impl_glGetBooleanv; +PFNGLGETBUFFERPARAMETERI64VPROC glad_glGetBufferParameteri64v; +void APIENTRY glad_debug_impl_glGetBufferParameteri64v(GLenum arg0, GLenum arg1, GLint64 * arg2) { + _pre_call_callback("glGetBufferParameteri64v", (void*)glGetBufferParameteri64v, 3, arg0, arg1, arg2); + glad_glGetBufferParameteri64v(arg0, arg1, arg2); + _post_call_callback("glGetBufferParameteri64v", (void*)glGetBufferParameteri64v, 3, arg0, arg1, arg2); + +} +PFNGLGETBUFFERPARAMETERI64VPROC glad_debug_glGetBufferParameteri64v = glad_debug_impl_glGetBufferParameteri64v; +PFNGLGETBUFFERPARAMETERIVPROC glad_glGetBufferParameteriv; +void APIENTRY glad_debug_impl_glGetBufferParameteriv(GLenum arg0, GLenum arg1, GLint * arg2) { + _pre_call_callback("glGetBufferParameteriv", (void*)glGetBufferParameteriv, 3, arg0, arg1, arg2); + glad_glGetBufferParameteriv(arg0, arg1, arg2); + _post_call_callback("glGetBufferParameteriv", (void*)glGetBufferParameteriv, 3, arg0, arg1, arg2); + +} +PFNGLGETBUFFERPARAMETERIVPROC glad_debug_glGetBufferParameteriv = glad_debug_impl_glGetBufferParameteriv; +PFNGLGETBUFFERPOINTERVPROC glad_glGetBufferPointerv; +void APIENTRY glad_debug_impl_glGetBufferPointerv(GLenum arg0, GLenum arg1, void ** arg2) { + _pre_call_callback("glGetBufferPointerv", (void*)glGetBufferPointerv, 3, arg0, arg1, arg2); + glad_glGetBufferPointerv(arg0, arg1, arg2); + _post_call_callback("glGetBufferPointerv", (void*)glGetBufferPointerv, 3, arg0, arg1, arg2); + +} +PFNGLGETBUFFERPOINTERVPROC glad_debug_glGetBufferPointerv = glad_debug_impl_glGetBufferPointerv; +PFNGLGETBUFFERSUBDATAPROC glad_glGetBufferSubData; +void APIENTRY glad_debug_impl_glGetBufferSubData(GLenum arg0, GLintptr arg1, GLsizeiptr arg2, void * arg3) { + _pre_call_callback("glGetBufferSubData", (void*)glGetBufferSubData, 4, arg0, arg1, arg2, arg3); + glad_glGetBufferSubData(arg0, arg1, arg2, arg3); + _post_call_callback("glGetBufferSubData", (void*)glGetBufferSubData, 4, arg0, arg1, arg2, arg3); + +} 
+PFNGLGETBUFFERSUBDATAPROC glad_debug_glGetBufferSubData = glad_debug_impl_glGetBufferSubData; +PFNGLGETCOMPRESSEDTEXIMAGEPROC glad_glGetCompressedTexImage; +void APIENTRY glad_debug_impl_glGetCompressedTexImage(GLenum arg0, GLint arg1, void * arg2) { + _pre_call_callback("glGetCompressedTexImage", (void*)glGetCompressedTexImage, 3, arg0, arg1, arg2); + glad_glGetCompressedTexImage(arg0, arg1, arg2); + _post_call_callback("glGetCompressedTexImage", (void*)glGetCompressedTexImage, 3, arg0, arg1, arg2); + +} +PFNGLGETCOMPRESSEDTEXIMAGEPROC glad_debug_glGetCompressedTexImage = glad_debug_impl_glGetCompressedTexImage; +PFNGLGETDOUBLEVPROC glad_glGetDoublev; +void APIENTRY glad_debug_impl_glGetDoublev(GLenum arg0, GLdouble * arg1) { + _pre_call_callback("glGetDoublev", (void*)glGetDoublev, 2, arg0, arg1); + glad_glGetDoublev(arg0, arg1); + _post_call_callback("glGetDoublev", (void*)glGetDoublev, 2, arg0, arg1); + +} +PFNGLGETDOUBLEVPROC glad_debug_glGetDoublev = glad_debug_impl_glGetDoublev; +PFNGLGETERRORPROC glad_glGetError; +GLenum APIENTRY glad_debug_impl_glGetError(void) { + GLenum ret; + _pre_call_callback("glGetError", (void*)glGetError, 0); + ret = glad_glGetError(); + _post_call_callback("glGetError", (void*)glGetError, 0); + return ret; +} +PFNGLGETERRORPROC glad_debug_glGetError = glad_debug_impl_glGetError; +PFNGLGETFLOATVPROC glad_glGetFloatv; +void APIENTRY glad_debug_impl_glGetFloatv(GLenum arg0, GLfloat * arg1) { + _pre_call_callback("glGetFloatv", (void*)glGetFloatv, 2, arg0, arg1); + glad_glGetFloatv(arg0, arg1); + _post_call_callback("glGetFloatv", (void*)glGetFloatv, 2, arg0, arg1); + +} +PFNGLGETFLOATVPROC glad_debug_glGetFloatv = glad_debug_impl_glGetFloatv; +PFNGLGETFRAGDATAINDEXPROC glad_glGetFragDataIndex; +GLint APIENTRY glad_debug_impl_glGetFragDataIndex(GLuint arg0, const GLchar * arg1) { + GLint ret; + _pre_call_callback("glGetFragDataIndex", (void*)glGetFragDataIndex, 2, arg0, arg1); + ret = glad_glGetFragDataIndex(arg0, arg1); + _post_call_callback("glGetFragDataIndex", (void*)glGetFragDataIndex, 2, arg0, arg1); + return ret; +} +PFNGLGETFRAGDATAINDEXPROC glad_debug_glGetFragDataIndex = glad_debug_impl_glGetFragDataIndex; +PFNGLGETFRAGDATALOCATIONPROC glad_glGetFragDataLocation; +GLint APIENTRY glad_debug_impl_glGetFragDataLocation(GLuint arg0, const GLchar * arg1) { + GLint ret; + _pre_call_callback("glGetFragDataLocation", (void*)glGetFragDataLocation, 2, arg0, arg1); + ret = glad_glGetFragDataLocation(arg0, arg1); + _post_call_callback("glGetFragDataLocation", (void*)glGetFragDataLocation, 2, arg0, arg1); + return ret; +} +PFNGLGETFRAGDATALOCATIONPROC glad_debug_glGetFragDataLocation = glad_debug_impl_glGetFragDataLocation; +PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC glad_glGetFramebufferAttachmentParameteriv; +void APIENTRY glad_debug_impl_glGetFramebufferAttachmentParameteriv(GLenum arg0, GLenum arg1, GLenum arg2, GLint * arg3) { + _pre_call_callback("glGetFramebufferAttachmentParameteriv", (void*)glGetFramebufferAttachmentParameteriv, 4, arg0, arg1, arg2, arg3); + glad_glGetFramebufferAttachmentParameteriv(arg0, arg1, arg2, arg3); + _post_call_callback("glGetFramebufferAttachmentParameteriv", (void*)glGetFramebufferAttachmentParameteriv, 4, arg0, arg1, arg2, arg3); + +} +PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC glad_debug_glGetFramebufferAttachmentParameteriv = glad_debug_impl_glGetFramebufferAttachmentParameteriv; +PFNGLGETINTEGER64I_VPROC glad_glGetInteger64i_v; +void APIENTRY glad_debug_impl_glGetInteger64i_v(GLenum arg0, GLuint arg1, GLint64 * 
arg2) { + _pre_call_callback("glGetInteger64i_v", (void*)glGetInteger64i_v, 3, arg0, arg1, arg2); + glad_glGetInteger64i_v(arg0, arg1, arg2); + _post_call_callback("glGetInteger64i_v", (void*)glGetInteger64i_v, 3, arg0, arg1, arg2); + +} +PFNGLGETINTEGER64I_VPROC glad_debug_glGetInteger64i_v = glad_debug_impl_glGetInteger64i_v; +PFNGLGETINTEGER64VPROC glad_glGetInteger64v; +void APIENTRY glad_debug_impl_glGetInteger64v(GLenum arg0, GLint64 * arg1) { + _pre_call_callback("glGetInteger64v", (void*)glGetInteger64v, 2, arg0, arg1); + glad_glGetInteger64v(arg0, arg1); + _post_call_callback("glGetInteger64v", (void*)glGetInteger64v, 2, arg0, arg1); + +} +PFNGLGETINTEGER64VPROC glad_debug_glGetInteger64v = glad_debug_impl_glGetInteger64v; +PFNGLGETINTEGERI_VPROC glad_glGetIntegeri_v; +void APIENTRY glad_debug_impl_glGetIntegeri_v(GLenum arg0, GLuint arg1, GLint * arg2) { + _pre_call_callback("glGetIntegeri_v", (void*)glGetIntegeri_v, 3, arg0, arg1, arg2); + glad_glGetIntegeri_v(arg0, arg1, arg2); + _post_call_callback("glGetIntegeri_v", (void*)glGetIntegeri_v, 3, arg0, arg1, arg2); + +} +PFNGLGETINTEGERI_VPROC glad_debug_glGetIntegeri_v = glad_debug_impl_glGetIntegeri_v; +PFNGLGETINTEGERVPROC glad_glGetIntegerv; +void APIENTRY glad_debug_impl_glGetIntegerv(GLenum arg0, GLint * arg1) { + _pre_call_callback("glGetIntegerv", (void*)glGetIntegerv, 2, arg0, arg1); + glad_glGetIntegerv(arg0, arg1); + _post_call_callback("glGetIntegerv", (void*)glGetIntegerv, 2, arg0, arg1); + +} +PFNGLGETINTEGERVPROC glad_debug_glGetIntegerv = glad_debug_impl_glGetIntegerv; +PFNGLGETMULTISAMPLEFVPROC glad_glGetMultisamplefv; +void APIENTRY glad_debug_impl_glGetMultisamplefv(GLenum arg0, GLuint arg1, GLfloat * arg2) { + _pre_call_callback("glGetMultisamplefv", (void*)glGetMultisamplefv, 3, arg0, arg1, arg2); + glad_glGetMultisamplefv(arg0, arg1, arg2); + _post_call_callback("glGetMultisamplefv", (void*)glGetMultisamplefv, 3, arg0, arg1, arg2); + +} +PFNGLGETMULTISAMPLEFVPROC glad_debug_glGetMultisamplefv = glad_debug_impl_glGetMultisamplefv; +PFNGLGETPROGRAMINFOLOGPROC glad_glGetProgramInfoLog; +void APIENTRY glad_debug_impl_glGetProgramInfoLog(GLuint arg0, GLsizei arg1, GLsizei * arg2, GLchar * arg3) { + _pre_call_callback("glGetProgramInfoLog", (void*)glGetProgramInfoLog, 4, arg0, arg1, arg2, arg3); + glad_glGetProgramInfoLog(arg0, arg1, arg2, arg3); + _post_call_callback("glGetProgramInfoLog", (void*)glGetProgramInfoLog, 4, arg0, arg1, arg2, arg3); + +} +PFNGLGETPROGRAMINFOLOGPROC glad_debug_glGetProgramInfoLog = glad_debug_impl_glGetProgramInfoLog; +PFNGLGETPROGRAMIVPROC glad_glGetProgramiv; +void APIENTRY glad_debug_impl_glGetProgramiv(GLuint arg0, GLenum arg1, GLint * arg2) { + _pre_call_callback("glGetProgramiv", (void*)glGetProgramiv, 3, arg0, arg1, arg2); + glad_glGetProgramiv(arg0, arg1, arg2); + _post_call_callback("glGetProgramiv", (void*)glGetProgramiv, 3, arg0, arg1, arg2); + +} +PFNGLGETPROGRAMIVPROC glad_debug_glGetProgramiv = glad_debug_impl_glGetProgramiv; +PFNGLGETQUERYOBJECTI64VPROC glad_glGetQueryObjecti64v; +void APIENTRY glad_debug_impl_glGetQueryObjecti64v(GLuint arg0, GLenum arg1, GLint64 * arg2) { + _pre_call_callback("glGetQueryObjecti64v", (void*)glGetQueryObjecti64v, 3, arg0, arg1, arg2); + glad_glGetQueryObjecti64v(arg0, arg1, arg2); + _post_call_callback("glGetQueryObjecti64v", (void*)glGetQueryObjecti64v, 3, arg0, arg1, arg2); + +} +PFNGLGETQUERYOBJECTI64VPROC glad_debug_glGetQueryObjecti64v = glad_debug_impl_glGetQueryObjecti64v; +PFNGLGETQUERYOBJECTIVPROC glad_glGetQueryObjectiv; +void 
APIENTRY glad_debug_impl_glGetQueryObjectiv(GLuint arg0, GLenum arg1, GLint * arg2) { + _pre_call_callback("glGetQueryObjectiv", (void*)glGetQueryObjectiv, 3, arg0, arg1, arg2); + glad_glGetQueryObjectiv(arg0, arg1, arg2); + _post_call_callback("glGetQueryObjectiv", (void*)glGetQueryObjectiv, 3, arg0, arg1, arg2); + +} +PFNGLGETQUERYOBJECTIVPROC glad_debug_glGetQueryObjectiv = glad_debug_impl_glGetQueryObjectiv; +PFNGLGETQUERYOBJECTUI64VPROC glad_glGetQueryObjectui64v; +void APIENTRY glad_debug_impl_glGetQueryObjectui64v(GLuint arg0, GLenum arg1, GLuint64 * arg2) { + _pre_call_callback("glGetQueryObjectui64v", (void*)glGetQueryObjectui64v, 3, arg0, arg1, arg2); + glad_glGetQueryObjectui64v(arg0, arg1, arg2); + _post_call_callback("glGetQueryObjectui64v", (void*)glGetQueryObjectui64v, 3, arg0, arg1, arg2); + +} +PFNGLGETQUERYOBJECTUI64VPROC glad_debug_glGetQueryObjectui64v = glad_debug_impl_glGetQueryObjectui64v; +PFNGLGETQUERYOBJECTUIVPROC glad_glGetQueryObjectuiv; +void APIENTRY glad_debug_impl_glGetQueryObjectuiv(GLuint arg0, GLenum arg1, GLuint * arg2) { + _pre_call_callback("glGetQueryObjectuiv", (void*)glGetQueryObjectuiv, 3, arg0, arg1, arg2); + glad_glGetQueryObjectuiv(arg0, arg1, arg2); + _post_call_callback("glGetQueryObjectuiv", (void*)glGetQueryObjectuiv, 3, arg0, arg1, arg2); + +} +PFNGLGETQUERYOBJECTUIVPROC glad_debug_glGetQueryObjectuiv = glad_debug_impl_glGetQueryObjectuiv; +PFNGLGETQUERYIVPROC glad_glGetQueryiv; +void APIENTRY glad_debug_impl_glGetQueryiv(GLenum arg0, GLenum arg1, GLint * arg2) { + _pre_call_callback("glGetQueryiv", (void*)glGetQueryiv, 3, arg0, arg1, arg2); + glad_glGetQueryiv(arg0, arg1, arg2); + _post_call_callback("glGetQueryiv", (void*)glGetQueryiv, 3, arg0, arg1, arg2); + +} +PFNGLGETQUERYIVPROC glad_debug_glGetQueryiv = glad_debug_impl_glGetQueryiv; +PFNGLGETRENDERBUFFERPARAMETERIVPROC glad_glGetRenderbufferParameteriv; +void APIENTRY glad_debug_impl_glGetRenderbufferParameteriv(GLenum arg0, GLenum arg1, GLint * arg2) { + _pre_call_callback("glGetRenderbufferParameteriv", (void*)glGetRenderbufferParameteriv, 3, arg0, arg1, arg2); + glad_glGetRenderbufferParameteriv(arg0, arg1, arg2); + _post_call_callback("glGetRenderbufferParameteriv", (void*)glGetRenderbufferParameteriv, 3, arg0, arg1, arg2); + +} +PFNGLGETRENDERBUFFERPARAMETERIVPROC glad_debug_glGetRenderbufferParameteriv = glad_debug_impl_glGetRenderbufferParameteriv; +PFNGLGETSAMPLERPARAMETERIIVPROC glad_glGetSamplerParameterIiv; +void APIENTRY glad_debug_impl_glGetSamplerParameterIiv(GLuint arg0, GLenum arg1, GLint * arg2) { + _pre_call_callback("glGetSamplerParameterIiv", (void*)glGetSamplerParameterIiv, 3, arg0, arg1, arg2); + glad_glGetSamplerParameterIiv(arg0, arg1, arg2); + _post_call_callback("glGetSamplerParameterIiv", (void*)glGetSamplerParameterIiv, 3, arg0, arg1, arg2); + +} +PFNGLGETSAMPLERPARAMETERIIVPROC glad_debug_glGetSamplerParameterIiv = glad_debug_impl_glGetSamplerParameterIiv; +PFNGLGETSAMPLERPARAMETERIUIVPROC glad_glGetSamplerParameterIuiv; +void APIENTRY glad_debug_impl_glGetSamplerParameterIuiv(GLuint arg0, GLenum arg1, GLuint * arg2) { + _pre_call_callback("glGetSamplerParameterIuiv", (void*)glGetSamplerParameterIuiv, 3, arg0, arg1, arg2); + glad_glGetSamplerParameterIuiv(arg0, arg1, arg2); + _post_call_callback("glGetSamplerParameterIuiv", (void*)glGetSamplerParameterIuiv, 3, arg0, arg1, arg2); + +} +PFNGLGETSAMPLERPARAMETERIUIVPROC glad_debug_glGetSamplerParameterIuiv = glad_debug_impl_glGetSamplerParameterIuiv; +PFNGLGETSAMPLERPARAMETERFVPROC 
glad_glGetSamplerParameterfv; +void APIENTRY glad_debug_impl_glGetSamplerParameterfv(GLuint arg0, GLenum arg1, GLfloat * arg2) { + _pre_call_callback("glGetSamplerParameterfv", (void*)glGetSamplerParameterfv, 3, arg0, arg1, arg2); + glad_glGetSamplerParameterfv(arg0, arg1, arg2); + _post_call_callback("glGetSamplerParameterfv", (void*)glGetSamplerParameterfv, 3, arg0, arg1, arg2); + +} +PFNGLGETSAMPLERPARAMETERFVPROC glad_debug_glGetSamplerParameterfv = glad_debug_impl_glGetSamplerParameterfv; +PFNGLGETSAMPLERPARAMETERIVPROC glad_glGetSamplerParameteriv; +void APIENTRY glad_debug_impl_glGetSamplerParameteriv(GLuint arg0, GLenum arg1, GLint * arg2) { + _pre_call_callback("glGetSamplerParameteriv", (void*)glGetSamplerParameteriv, 3, arg0, arg1, arg2); + glad_glGetSamplerParameteriv(arg0, arg1, arg2); + _post_call_callback("glGetSamplerParameteriv", (void*)glGetSamplerParameteriv, 3, arg0, arg1, arg2); + +} +PFNGLGETSAMPLERPARAMETERIVPROC glad_debug_glGetSamplerParameteriv = glad_debug_impl_glGetSamplerParameteriv; +PFNGLGETSHADERINFOLOGPROC glad_glGetShaderInfoLog; +void APIENTRY glad_debug_impl_glGetShaderInfoLog(GLuint arg0, GLsizei arg1, GLsizei * arg2, GLchar * arg3) { + _pre_call_callback("glGetShaderInfoLog", (void*)glGetShaderInfoLog, 4, arg0, arg1, arg2, arg3); + glad_glGetShaderInfoLog(arg0, arg1, arg2, arg3); + _post_call_callback("glGetShaderInfoLog", (void*)glGetShaderInfoLog, 4, arg0, arg1, arg2, arg3); + +} +PFNGLGETSHADERINFOLOGPROC glad_debug_glGetShaderInfoLog = glad_debug_impl_glGetShaderInfoLog; +PFNGLGETSHADERSOURCEPROC glad_glGetShaderSource; +void APIENTRY glad_debug_impl_glGetShaderSource(GLuint arg0, GLsizei arg1, GLsizei * arg2, GLchar * arg3) { + _pre_call_callback("glGetShaderSource", (void*)glGetShaderSource, 4, arg0, arg1, arg2, arg3); + glad_glGetShaderSource(arg0, arg1, arg2, arg3); + _post_call_callback("glGetShaderSource", (void*)glGetShaderSource, 4, arg0, arg1, arg2, arg3); + +} +PFNGLGETSHADERSOURCEPROC glad_debug_glGetShaderSource = glad_debug_impl_glGetShaderSource; +PFNGLGETSHADERIVPROC glad_glGetShaderiv; +void APIENTRY glad_debug_impl_glGetShaderiv(GLuint arg0, GLenum arg1, GLint * arg2) { + _pre_call_callback("glGetShaderiv", (void*)glGetShaderiv, 3, arg0, arg1, arg2); + glad_glGetShaderiv(arg0, arg1, arg2); + _post_call_callback("glGetShaderiv", (void*)glGetShaderiv, 3, arg0, arg1, arg2); + +} +PFNGLGETSHADERIVPROC glad_debug_glGetShaderiv = glad_debug_impl_glGetShaderiv; +PFNGLGETSTRINGPROC glad_glGetString; +const GLubyte * APIENTRY glad_debug_impl_glGetString(GLenum arg0) { + const GLubyte * ret; + _pre_call_callback("glGetString", (void*)glGetString, 1, arg0); + ret = glad_glGetString(arg0); + _post_call_callback("glGetString", (void*)glGetString, 1, arg0); + return ret; +} +PFNGLGETSTRINGPROC glad_debug_glGetString = glad_debug_impl_glGetString; +PFNGLGETSTRINGIPROC glad_glGetStringi; +const GLubyte * APIENTRY glad_debug_impl_glGetStringi(GLenum arg0, GLuint arg1) { + const GLubyte * ret; + _pre_call_callback("glGetStringi", (void*)glGetStringi, 2, arg0, arg1); + ret = glad_glGetStringi(arg0, arg1); + _post_call_callback("glGetStringi", (void*)glGetStringi, 2, arg0, arg1); + return ret; +} +PFNGLGETSTRINGIPROC glad_debug_glGetStringi = glad_debug_impl_glGetStringi; +PFNGLGETSYNCIVPROC glad_glGetSynciv; +void APIENTRY glad_debug_impl_glGetSynciv(GLsync arg0, GLenum arg1, GLsizei arg2, GLsizei * arg3, GLint * arg4) { + _pre_call_callback("glGetSynciv", (void*)glGetSynciv, 5, arg0, arg1, arg2, arg3, arg4); + glad_glGetSynciv(arg0, arg1, arg2, 
arg3, arg4); + _post_call_callback("glGetSynciv", (void*)glGetSynciv, 5, arg0, arg1, arg2, arg3, arg4); + +} +PFNGLGETSYNCIVPROC glad_debug_glGetSynciv = glad_debug_impl_glGetSynciv; +PFNGLGETTEXIMAGEPROC glad_glGetTexImage; +void APIENTRY glad_debug_impl_glGetTexImage(GLenum arg0, GLint arg1, GLenum arg2, GLenum arg3, void * arg4) { + _pre_call_callback("glGetTexImage", (void*)glGetTexImage, 5, arg0, arg1, arg2, arg3, arg4); + glad_glGetTexImage(arg0, arg1, arg2, arg3, arg4); + _post_call_callback("glGetTexImage", (void*)glGetTexImage, 5, arg0, arg1, arg2, arg3, arg4); + +} +PFNGLGETTEXIMAGEPROC glad_debug_glGetTexImage = glad_debug_impl_glGetTexImage; +PFNGLGETTEXLEVELPARAMETERFVPROC glad_glGetTexLevelParameterfv; +void APIENTRY glad_debug_impl_glGetTexLevelParameterfv(GLenum arg0, GLint arg1, GLenum arg2, GLfloat * arg3) { + _pre_call_callback("glGetTexLevelParameterfv", (void*)glGetTexLevelParameterfv, 4, arg0, arg1, arg2, arg3); + glad_glGetTexLevelParameterfv(arg0, arg1, arg2, arg3); + _post_call_callback("glGetTexLevelParameterfv", (void*)glGetTexLevelParameterfv, 4, arg0, arg1, arg2, arg3); + +} +PFNGLGETTEXLEVELPARAMETERFVPROC glad_debug_glGetTexLevelParameterfv = glad_debug_impl_glGetTexLevelParameterfv; +PFNGLGETTEXLEVELPARAMETERIVPROC glad_glGetTexLevelParameteriv; +void APIENTRY glad_debug_impl_glGetTexLevelParameteriv(GLenum arg0, GLint arg1, GLenum arg2, GLint * arg3) { + _pre_call_callback("glGetTexLevelParameteriv", (void*)glGetTexLevelParameteriv, 4, arg0, arg1, arg2, arg3); + glad_glGetTexLevelParameteriv(arg0, arg1, arg2, arg3); + _post_call_callback("glGetTexLevelParameteriv", (void*)glGetTexLevelParameteriv, 4, arg0, arg1, arg2, arg3); + +} +PFNGLGETTEXLEVELPARAMETERIVPROC glad_debug_glGetTexLevelParameteriv = glad_debug_impl_glGetTexLevelParameteriv; +PFNGLGETTEXPARAMETERIIVPROC glad_glGetTexParameterIiv; +void APIENTRY glad_debug_impl_glGetTexParameterIiv(GLenum arg0, GLenum arg1, GLint * arg2) { + _pre_call_callback("glGetTexParameterIiv", (void*)glGetTexParameterIiv, 3, arg0, arg1, arg2); + glad_glGetTexParameterIiv(arg0, arg1, arg2); + _post_call_callback("glGetTexParameterIiv", (void*)glGetTexParameterIiv, 3, arg0, arg1, arg2); + +} +PFNGLGETTEXPARAMETERIIVPROC glad_debug_glGetTexParameterIiv = glad_debug_impl_glGetTexParameterIiv; +PFNGLGETTEXPARAMETERIUIVPROC glad_glGetTexParameterIuiv; +void APIENTRY glad_debug_impl_glGetTexParameterIuiv(GLenum arg0, GLenum arg1, GLuint * arg2) { + _pre_call_callback("glGetTexParameterIuiv", (void*)glGetTexParameterIuiv, 3, arg0, arg1, arg2); + glad_glGetTexParameterIuiv(arg0, arg1, arg2); + _post_call_callback("glGetTexParameterIuiv", (void*)glGetTexParameterIuiv, 3, arg0, arg1, arg2); + +} +PFNGLGETTEXPARAMETERIUIVPROC glad_debug_glGetTexParameterIuiv = glad_debug_impl_glGetTexParameterIuiv; +PFNGLGETTEXPARAMETERFVPROC glad_glGetTexParameterfv; +void APIENTRY glad_debug_impl_glGetTexParameterfv(GLenum arg0, GLenum arg1, GLfloat * arg2) { + _pre_call_callback("glGetTexParameterfv", (void*)glGetTexParameterfv, 3, arg0, arg1, arg2); + glad_glGetTexParameterfv(arg0, arg1, arg2); + _post_call_callback("glGetTexParameterfv", (void*)glGetTexParameterfv, 3, arg0, arg1, arg2); + +} +PFNGLGETTEXPARAMETERFVPROC glad_debug_glGetTexParameterfv = glad_debug_impl_glGetTexParameterfv; +PFNGLGETTEXPARAMETERIVPROC glad_glGetTexParameteriv; +void APIENTRY glad_debug_impl_glGetTexParameteriv(GLenum arg0, GLenum arg1, GLint * arg2) { + _pre_call_callback("glGetTexParameteriv", (void*)glGetTexParameteriv, 3, arg0, arg1, arg2); + 
glad_glGetTexParameteriv(arg0, arg1, arg2); + _post_call_callback("glGetTexParameteriv", (void*)glGetTexParameteriv, 3, arg0, arg1, arg2); + +} +PFNGLGETTEXPARAMETERIVPROC glad_debug_glGetTexParameteriv = glad_debug_impl_glGetTexParameteriv; +PFNGLGETTRANSFORMFEEDBACKVARYINGPROC glad_glGetTransformFeedbackVarying; +void APIENTRY glad_debug_impl_glGetTransformFeedbackVarying(GLuint arg0, GLuint arg1, GLsizei arg2, GLsizei * arg3, GLsizei * arg4, GLenum * arg5, GLchar * arg6) { + _pre_call_callback("glGetTransformFeedbackVarying", (void*)glGetTransformFeedbackVarying, 7, arg0, arg1, arg2, arg3, arg4, arg5, arg6); + glad_glGetTransformFeedbackVarying(arg0, arg1, arg2, arg3, arg4, arg5, arg6); + _post_call_callback("glGetTransformFeedbackVarying", (void*)glGetTransformFeedbackVarying, 7, arg0, arg1, arg2, arg3, arg4, arg5, arg6); + +} +PFNGLGETTRANSFORMFEEDBACKVARYINGPROC glad_debug_glGetTransformFeedbackVarying = glad_debug_impl_glGetTransformFeedbackVarying; +PFNGLGETUNIFORMBLOCKINDEXPROC glad_glGetUniformBlockIndex; +GLuint APIENTRY glad_debug_impl_glGetUniformBlockIndex(GLuint arg0, const GLchar * arg1) { + GLuint ret; + _pre_call_callback("glGetUniformBlockIndex", (void*)glGetUniformBlockIndex, 2, arg0, arg1); + ret = glad_glGetUniformBlockIndex(arg0, arg1); + _post_call_callback("glGetUniformBlockIndex", (void*)glGetUniformBlockIndex, 2, arg0, arg1); + return ret; +} +PFNGLGETUNIFORMBLOCKINDEXPROC glad_debug_glGetUniformBlockIndex = glad_debug_impl_glGetUniformBlockIndex; +PFNGLGETUNIFORMINDICESPROC glad_glGetUniformIndices; +void APIENTRY glad_debug_impl_glGetUniformIndices(GLuint arg0, GLsizei arg1, const GLchar *const* arg2, GLuint * arg3) { + _pre_call_callback("glGetUniformIndices", (void*)glGetUniformIndices, 4, arg0, arg1, arg2, arg3); + glad_glGetUniformIndices(arg0, arg1, arg2, arg3); + _post_call_callback("glGetUniformIndices", (void*)glGetUniformIndices, 4, arg0, arg1, arg2, arg3); + +} +PFNGLGETUNIFORMINDICESPROC glad_debug_glGetUniformIndices = glad_debug_impl_glGetUniformIndices; +PFNGLGETUNIFORMLOCATIONPROC glad_glGetUniformLocation; +GLint APIENTRY glad_debug_impl_glGetUniformLocation(GLuint arg0, const GLchar * arg1) { + GLint ret; + _pre_call_callback("glGetUniformLocation", (void*)glGetUniformLocation, 2, arg0, arg1); + ret = glad_glGetUniformLocation(arg0, arg1); + _post_call_callback("glGetUniformLocation", (void*)glGetUniformLocation, 2, arg0, arg1); + return ret; +} +PFNGLGETUNIFORMLOCATIONPROC glad_debug_glGetUniformLocation = glad_debug_impl_glGetUniformLocation; +PFNGLGETUNIFORMFVPROC glad_glGetUniformfv; +void APIENTRY glad_debug_impl_glGetUniformfv(GLuint arg0, GLint arg1, GLfloat * arg2) { + _pre_call_callback("glGetUniformfv", (void*)glGetUniformfv, 3, arg0, arg1, arg2); + glad_glGetUniformfv(arg0, arg1, arg2); + _post_call_callback("glGetUniformfv", (void*)glGetUniformfv, 3, arg0, arg1, arg2); + +} +PFNGLGETUNIFORMFVPROC glad_debug_glGetUniformfv = glad_debug_impl_glGetUniformfv; +PFNGLGETUNIFORMIVPROC glad_glGetUniformiv; +void APIENTRY glad_debug_impl_glGetUniformiv(GLuint arg0, GLint arg1, GLint * arg2) { + _pre_call_callback("glGetUniformiv", (void*)glGetUniformiv, 3, arg0, arg1, arg2); + glad_glGetUniformiv(arg0, arg1, arg2); + _post_call_callback("glGetUniformiv", (void*)glGetUniformiv, 3, arg0, arg1, arg2); + +} +PFNGLGETUNIFORMIVPROC glad_debug_glGetUniformiv = glad_debug_impl_glGetUniformiv; +PFNGLGETUNIFORMUIVPROC glad_glGetUniformuiv; +void APIENTRY glad_debug_impl_glGetUniformuiv(GLuint arg0, GLint arg1, GLuint * arg2) { + 
_pre_call_callback("glGetUniformuiv", (void*)glGetUniformuiv, 3, arg0, arg1, arg2); + glad_glGetUniformuiv(arg0, arg1, arg2); + _post_call_callback("glGetUniformuiv", (void*)glGetUniformuiv, 3, arg0, arg1, arg2); + +} +PFNGLGETUNIFORMUIVPROC glad_debug_glGetUniformuiv = glad_debug_impl_glGetUniformuiv; +PFNGLGETVERTEXATTRIBIIVPROC glad_glGetVertexAttribIiv; +void APIENTRY glad_debug_impl_glGetVertexAttribIiv(GLuint arg0, GLenum arg1, GLint * arg2) { + _pre_call_callback("glGetVertexAttribIiv", (void*)glGetVertexAttribIiv, 3, arg0, arg1, arg2); + glad_glGetVertexAttribIiv(arg0, arg1, arg2); + _post_call_callback("glGetVertexAttribIiv", (void*)glGetVertexAttribIiv, 3, arg0, arg1, arg2); + +} +PFNGLGETVERTEXATTRIBIIVPROC glad_debug_glGetVertexAttribIiv = glad_debug_impl_glGetVertexAttribIiv; +PFNGLGETVERTEXATTRIBIUIVPROC glad_glGetVertexAttribIuiv; +void APIENTRY glad_debug_impl_glGetVertexAttribIuiv(GLuint arg0, GLenum arg1, GLuint * arg2) { + _pre_call_callback("glGetVertexAttribIuiv", (void*)glGetVertexAttribIuiv, 3, arg0, arg1, arg2); + glad_glGetVertexAttribIuiv(arg0, arg1, arg2); + _post_call_callback("glGetVertexAttribIuiv", (void*)glGetVertexAttribIuiv, 3, arg0, arg1, arg2); + +} +PFNGLGETVERTEXATTRIBIUIVPROC glad_debug_glGetVertexAttribIuiv = glad_debug_impl_glGetVertexAttribIuiv; +PFNGLGETVERTEXATTRIBPOINTERVPROC glad_glGetVertexAttribPointerv; +void APIENTRY glad_debug_impl_glGetVertexAttribPointerv(GLuint arg0, GLenum arg1, void ** arg2) { + _pre_call_callback("glGetVertexAttribPointerv", (void*)glGetVertexAttribPointerv, 3, arg0, arg1, arg2); + glad_glGetVertexAttribPointerv(arg0, arg1, arg2); + _post_call_callback("glGetVertexAttribPointerv", (void*)glGetVertexAttribPointerv, 3, arg0, arg1, arg2); + +} +PFNGLGETVERTEXATTRIBPOINTERVPROC glad_debug_glGetVertexAttribPointerv = glad_debug_impl_glGetVertexAttribPointerv; +PFNGLGETVERTEXATTRIBDVPROC glad_glGetVertexAttribdv; +void APIENTRY glad_debug_impl_glGetVertexAttribdv(GLuint arg0, GLenum arg1, GLdouble * arg2) { + _pre_call_callback("glGetVertexAttribdv", (void*)glGetVertexAttribdv, 3, arg0, arg1, arg2); + glad_glGetVertexAttribdv(arg0, arg1, arg2); + _post_call_callback("glGetVertexAttribdv", (void*)glGetVertexAttribdv, 3, arg0, arg1, arg2); + +} +PFNGLGETVERTEXATTRIBDVPROC glad_debug_glGetVertexAttribdv = glad_debug_impl_glGetVertexAttribdv; +PFNGLGETVERTEXATTRIBFVPROC glad_glGetVertexAttribfv; +void APIENTRY glad_debug_impl_glGetVertexAttribfv(GLuint arg0, GLenum arg1, GLfloat * arg2) { + _pre_call_callback("glGetVertexAttribfv", (void*)glGetVertexAttribfv, 3, arg0, arg1, arg2); + glad_glGetVertexAttribfv(arg0, arg1, arg2); + _post_call_callback("glGetVertexAttribfv", (void*)glGetVertexAttribfv, 3, arg0, arg1, arg2); + +} +PFNGLGETVERTEXATTRIBFVPROC glad_debug_glGetVertexAttribfv = glad_debug_impl_glGetVertexAttribfv; +PFNGLGETVERTEXATTRIBIVPROC glad_glGetVertexAttribiv; +void APIENTRY glad_debug_impl_glGetVertexAttribiv(GLuint arg0, GLenum arg1, GLint * arg2) { + _pre_call_callback("glGetVertexAttribiv", (void*)glGetVertexAttribiv, 3, arg0, arg1, arg2); + glad_glGetVertexAttribiv(arg0, arg1, arg2); + _post_call_callback("glGetVertexAttribiv", (void*)glGetVertexAttribiv, 3, arg0, arg1, arg2); + +} +PFNGLGETVERTEXATTRIBIVPROC glad_debug_glGetVertexAttribiv = glad_debug_impl_glGetVertexAttribiv; +PFNGLHINTPROC glad_glHint; +void APIENTRY glad_debug_impl_glHint(GLenum arg0, GLenum arg1) { + _pre_call_callback("glHint", (void*)glHint, 2, arg0, arg1); + glad_glHint(arg0, arg1); + _post_call_callback("glHint", (void*)glHint, 2, 
arg0, arg1); + +} +PFNGLHINTPROC glad_debug_glHint = glad_debug_impl_glHint; +PFNGLISBUFFERPROC glad_glIsBuffer; +GLboolean APIENTRY glad_debug_impl_glIsBuffer(GLuint arg0) { + GLboolean ret; + _pre_call_callback("glIsBuffer", (void*)glIsBuffer, 1, arg0); + ret = glad_glIsBuffer(arg0); + _post_call_callback("glIsBuffer", (void*)glIsBuffer, 1, arg0); + return ret; +} +PFNGLISBUFFERPROC glad_debug_glIsBuffer = glad_debug_impl_glIsBuffer; +PFNGLISENABLEDPROC glad_glIsEnabled; +GLboolean APIENTRY glad_debug_impl_glIsEnabled(GLenum arg0) { + GLboolean ret; + _pre_call_callback("glIsEnabled", (void*)glIsEnabled, 1, arg0); + ret = glad_glIsEnabled(arg0); + _post_call_callback("glIsEnabled", (void*)glIsEnabled, 1, arg0); + return ret; +} +PFNGLISENABLEDPROC glad_debug_glIsEnabled = glad_debug_impl_glIsEnabled; +PFNGLISENABLEDIPROC glad_glIsEnabledi; +GLboolean APIENTRY glad_debug_impl_glIsEnabledi(GLenum arg0, GLuint arg1) { + GLboolean ret; + _pre_call_callback("glIsEnabledi", (void*)glIsEnabledi, 2, arg0, arg1); + ret = glad_glIsEnabledi(arg0, arg1); + _post_call_callback("glIsEnabledi", (void*)glIsEnabledi, 2, arg0, arg1); + return ret; +} +PFNGLISENABLEDIPROC glad_debug_glIsEnabledi = glad_debug_impl_glIsEnabledi; +PFNGLISFRAMEBUFFERPROC glad_glIsFramebuffer; +GLboolean APIENTRY glad_debug_impl_glIsFramebuffer(GLuint arg0) { + GLboolean ret; + _pre_call_callback("glIsFramebuffer", (void*)glIsFramebuffer, 1, arg0); + ret = glad_glIsFramebuffer(arg0); + _post_call_callback("glIsFramebuffer", (void*)glIsFramebuffer, 1, arg0); + return ret; +} +PFNGLISFRAMEBUFFERPROC glad_debug_glIsFramebuffer = glad_debug_impl_glIsFramebuffer; +PFNGLISPROGRAMPROC glad_glIsProgram; +GLboolean APIENTRY glad_debug_impl_glIsProgram(GLuint arg0) { + GLboolean ret; + _pre_call_callback("glIsProgram", (void*)glIsProgram, 1, arg0); + ret = glad_glIsProgram(arg0); + _post_call_callback("glIsProgram", (void*)glIsProgram, 1, arg0); + return ret; +} +PFNGLISPROGRAMPROC glad_debug_glIsProgram = glad_debug_impl_glIsProgram; +PFNGLISQUERYPROC glad_glIsQuery; +GLboolean APIENTRY glad_debug_impl_glIsQuery(GLuint arg0) { + GLboolean ret; + _pre_call_callback("glIsQuery", (void*)glIsQuery, 1, arg0); + ret = glad_glIsQuery(arg0); + _post_call_callback("glIsQuery", (void*)glIsQuery, 1, arg0); + return ret; +} +PFNGLISQUERYPROC glad_debug_glIsQuery = glad_debug_impl_glIsQuery; +PFNGLISRENDERBUFFERPROC glad_glIsRenderbuffer; +GLboolean APIENTRY glad_debug_impl_glIsRenderbuffer(GLuint arg0) { + GLboolean ret; + _pre_call_callback("glIsRenderbuffer", (void*)glIsRenderbuffer, 1, arg0); + ret = glad_glIsRenderbuffer(arg0); + _post_call_callback("glIsRenderbuffer", (void*)glIsRenderbuffer, 1, arg0); + return ret; +} +PFNGLISRENDERBUFFERPROC glad_debug_glIsRenderbuffer = glad_debug_impl_glIsRenderbuffer; +PFNGLISSAMPLERPROC glad_glIsSampler; +GLboolean APIENTRY glad_debug_impl_glIsSampler(GLuint arg0) { + GLboolean ret; + _pre_call_callback("glIsSampler", (void*)glIsSampler, 1, arg0); + ret = glad_glIsSampler(arg0); + _post_call_callback("glIsSampler", (void*)glIsSampler, 1, arg0); + return ret; +} +PFNGLISSAMPLERPROC glad_debug_glIsSampler = glad_debug_impl_glIsSampler; +PFNGLISSHADERPROC glad_glIsShader; +GLboolean APIENTRY glad_debug_impl_glIsShader(GLuint arg0) { + GLboolean ret; + _pre_call_callback("glIsShader", (void*)glIsShader, 1, arg0); + ret = glad_glIsShader(arg0); + _post_call_callback("glIsShader", (void*)glIsShader, 1, arg0); + return ret; +} +PFNGLISSHADERPROC glad_debug_glIsShader = glad_debug_impl_glIsShader; 
+PFNGLISSYNCPROC glad_glIsSync; +GLboolean APIENTRY glad_debug_impl_glIsSync(GLsync arg0) { + GLboolean ret; + _pre_call_callback("glIsSync", (void*)glIsSync, 1, arg0); + ret = glad_glIsSync(arg0); + _post_call_callback("glIsSync", (void*)glIsSync, 1, arg0); + return ret; +} +PFNGLISSYNCPROC glad_debug_glIsSync = glad_debug_impl_glIsSync; +PFNGLISTEXTUREPROC glad_glIsTexture; +GLboolean APIENTRY glad_debug_impl_glIsTexture(GLuint arg0) { + GLboolean ret; + _pre_call_callback("glIsTexture", (void*)glIsTexture, 1, arg0); + ret = glad_glIsTexture(arg0); + _post_call_callback("glIsTexture", (void*)glIsTexture, 1, arg0); + return ret; +} +PFNGLISTEXTUREPROC glad_debug_glIsTexture = glad_debug_impl_glIsTexture; +PFNGLISVERTEXARRAYPROC glad_glIsVertexArray; +GLboolean APIENTRY glad_debug_impl_glIsVertexArray(GLuint arg0) { + GLboolean ret; + _pre_call_callback("glIsVertexArray", (void*)glIsVertexArray, 1, arg0); + ret = glad_glIsVertexArray(arg0); + _post_call_callback("glIsVertexArray", (void*)glIsVertexArray, 1, arg0); + return ret; +} +PFNGLISVERTEXARRAYPROC glad_debug_glIsVertexArray = glad_debug_impl_glIsVertexArray; +PFNGLLINEWIDTHPROC glad_glLineWidth; +void APIENTRY glad_debug_impl_glLineWidth(GLfloat arg0) { + _pre_call_callback("glLineWidth", (void*)glLineWidth, 1, arg0); + glad_glLineWidth(arg0); + _post_call_callback("glLineWidth", (void*)glLineWidth, 1, arg0); + +} +PFNGLLINEWIDTHPROC glad_debug_glLineWidth = glad_debug_impl_glLineWidth; +PFNGLLINKPROGRAMPROC glad_glLinkProgram; +void APIENTRY glad_debug_impl_glLinkProgram(GLuint arg0) { + _pre_call_callback("glLinkProgram", (void*)glLinkProgram, 1, arg0); + glad_glLinkProgram(arg0); + _post_call_callback("glLinkProgram", (void*)glLinkProgram, 1, arg0); + +} +PFNGLLINKPROGRAMPROC glad_debug_glLinkProgram = glad_debug_impl_glLinkProgram; +PFNGLLOGICOPPROC glad_glLogicOp; +void APIENTRY glad_debug_impl_glLogicOp(GLenum arg0) { + _pre_call_callback("glLogicOp", (void*)glLogicOp, 1, arg0); + glad_glLogicOp(arg0); + _post_call_callback("glLogicOp", (void*)glLogicOp, 1, arg0); + +} +PFNGLLOGICOPPROC glad_debug_glLogicOp = glad_debug_impl_glLogicOp; +PFNGLMAPBUFFERPROC glad_glMapBuffer; +void * APIENTRY glad_debug_impl_glMapBuffer(GLenum arg0, GLenum arg1) { + void * ret; + _pre_call_callback("glMapBuffer", (void*)glMapBuffer, 2, arg0, arg1); + ret = glad_glMapBuffer(arg0, arg1); + _post_call_callback("glMapBuffer", (void*)glMapBuffer, 2, arg0, arg1); + return ret; +} +PFNGLMAPBUFFERPROC glad_debug_glMapBuffer = glad_debug_impl_glMapBuffer; +PFNGLMAPBUFFERRANGEPROC glad_glMapBufferRange; +void * APIENTRY glad_debug_impl_glMapBufferRange(GLenum arg0, GLintptr arg1, GLsizeiptr arg2, GLbitfield arg3) { + void * ret; + _pre_call_callback("glMapBufferRange", (void*)glMapBufferRange, 4, arg0, arg1, arg2, arg3); + ret = glad_glMapBufferRange(arg0, arg1, arg2, arg3); + _post_call_callback("glMapBufferRange", (void*)glMapBufferRange, 4, arg0, arg1, arg2, arg3); + return ret; +} +PFNGLMAPBUFFERRANGEPROC glad_debug_glMapBufferRange = glad_debug_impl_glMapBufferRange; +PFNGLMULTIDRAWARRAYSPROC glad_glMultiDrawArrays; +void APIENTRY glad_debug_impl_glMultiDrawArrays(GLenum arg0, const GLint * arg1, const GLsizei * arg2, GLsizei arg3) { + _pre_call_callback("glMultiDrawArrays", (void*)glMultiDrawArrays, 4, arg0, arg1, arg2, arg3); + glad_glMultiDrawArrays(arg0, arg1, arg2, arg3); + _post_call_callback("glMultiDrawArrays", (void*)glMultiDrawArrays, 4, arg0, arg1, arg2, arg3); + +} +PFNGLMULTIDRAWARRAYSPROC glad_debug_glMultiDrawArrays = 
glad_debug_impl_glMultiDrawArrays; +PFNGLMULTIDRAWELEMENTSPROC glad_glMultiDrawElements; +void APIENTRY glad_debug_impl_glMultiDrawElements(GLenum arg0, const GLsizei * arg1, GLenum arg2, const void *const* arg3, GLsizei arg4) { + _pre_call_callback("glMultiDrawElements", (void*)glMultiDrawElements, 5, arg0, arg1, arg2, arg3, arg4); + glad_glMultiDrawElements(arg0, arg1, arg2, arg3, arg4); + _post_call_callback("glMultiDrawElements", (void*)glMultiDrawElements, 5, arg0, arg1, arg2, arg3, arg4); + +} +PFNGLMULTIDRAWELEMENTSPROC glad_debug_glMultiDrawElements = glad_debug_impl_glMultiDrawElements; +PFNGLMULTIDRAWELEMENTSBASEVERTEXPROC glad_glMultiDrawElementsBaseVertex; +void APIENTRY glad_debug_impl_glMultiDrawElementsBaseVertex(GLenum arg0, const GLsizei * arg1, GLenum arg2, const void *const* arg3, GLsizei arg4, const GLint * arg5) { + _pre_call_callback("glMultiDrawElementsBaseVertex", (void*)glMultiDrawElementsBaseVertex, 6, arg0, arg1, arg2, arg3, arg4, arg5); + glad_glMultiDrawElementsBaseVertex(arg0, arg1, arg2, arg3, arg4, arg5); + _post_call_callback("glMultiDrawElementsBaseVertex", (void*)glMultiDrawElementsBaseVertex, 6, arg0, arg1, arg2, arg3, arg4, arg5); + +} +PFNGLMULTIDRAWELEMENTSBASEVERTEXPROC glad_debug_glMultiDrawElementsBaseVertex = glad_debug_impl_glMultiDrawElementsBaseVertex; +PFNGLMULTITEXCOORDP1UIPROC glad_glMultiTexCoordP1ui; +void APIENTRY glad_debug_impl_glMultiTexCoordP1ui(GLenum arg0, GLenum arg1, GLuint arg2) { + _pre_call_callback("glMultiTexCoordP1ui", (void*)glMultiTexCoordP1ui, 3, arg0, arg1, arg2); + glad_glMultiTexCoordP1ui(arg0, arg1, arg2); + _post_call_callback("glMultiTexCoordP1ui", (void*)glMultiTexCoordP1ui, 3, arg0, arg1, arg2); + +} +PFNGLMULTITEXCOORDP1UIPROC glad_debug_glMultiTexCoordP1ui = glad_debug_impl_glMultiTexCoordP1ui; +PFNGLMULTITEXCOORDP1UIVPROC glad_glMultiTexCoordP1uiv; +void APIENTRY glad_debug_impl_glMultiTexCoordP1uiv(GLenum arg0, GLenum arg1, const GLuint * arg2) { + _pre_call_callback("glMultiTexCoordP1uiv", (void*)glMultiTexCoordP1uiv, 3, arg0, arg1, arg2); + glad_glMultiTexCoordP1uiv(arg0, arg1, arg2); + _post_call_callback("glMultiTexCoordP1uiv", (void*)glMultiTexCoordP1uiv, 3, arg0, arg1, arg2); + +} +PFNGLMULTITEXCOORDP1UIVPROC glad_debug_glMultiTexCoordP1uiv = glad_debug_impl_glMultiTexCoordP1uiv; +PFNGLMULTITEXCOORDP2UIPROC glad_glMultiTexCoordP2ui; +void APIENTRY glad_debug_impl_glMultiTexCoordP2ui(GLenum arg0, GLenum arg1, GLuint arg2) { + _pre_call_callback("glMultiTexCoordP2ui", (void*)glMultiTexCoordP2ui, 3, arg0, arg1, arg2); + glad_glMultiTexCoordP2ui(arg0, arg1, arg2); + _post_call_callback("glMultiTexCoordP2ui", (void*)glMultiTexCoordP2ui, 3, arg0, arg1, arg2); + +} +PFNGLMULTITEXCOORDP2UIPROC glad_debug_glMultiTexCoordP2ui = glad_debug_impl_glMultiTexCoordP2ui; +PFNGLMULTITEXCOORDP2UIVPROC glad_glMultiTexCoordP2uiv; +void APIENTRY glad_debug_impl_glMultiTexCoordP2uiv(GLenum arg0, GLenum arg1, const GLuint * arg2) { + _pre_call_callback("glMultiTexCoordP2uiv", (void*)glMultiTexCoordP2uiv, 3, arg0, arg1, arg2); + glad_glMultiTexCoordP2uiv(arg0, arg1, arg2); + _post_call_callback("glMultiTexCoordP2uiv", (void*)glMultiTexCoordP2uiv, 3, arg0, arg1, arg2); + +} +PFNGLMULTITEXCOORDP2UIVPROC glad_debug_glMultiTexCoordP2uiv = glad_debug_impl_glMultiTexCoordP2uiv; +PFNGLMULTITEXCOORDP3UIPROC glad_glMultiTexCoordP3ui; +void APIENTRY glad_debug_impl_glMultiTexCoordP3ui(GLenum arg0, GLenum arg1, GLuint arg2) { + _pre_call_callback("glMultiTexCoordP3ui", (void*)glMultiTexCoordP3ui, 3, arg0, arg1, arg2); + 
glad_glMultiTexCoordP3ui(arg0, arg1, arg2); + _post_call_callback("glMultiTexCoordP3ui", (void*)glMultiTexCoordP3ui, 3, arg0, arg1, arg2); + +} +PFNGLMULTITEXCOORDP3UIPROC glad_debug_glMultiTexCoordP3ui = glad_debug_impl_glMultiTexCoordP3ui; +PFNGLMULTITEXCOORDP3UIVPROC glad_glMultiTexCoordP3uiv; +void APIENTRY glad_debug_impl_glMultiTexCoordP3uiv(GLenum arg0, GLenum arg1, const GLuint * arg2) { + _pre_call_callback("glMultiTexCoordP3uiv", (void*)glMultiTexCoordP3uiv, 3, arg0, arg1, arg2); + glad_glMultiTexCoordP3uiv(arg0, arg1, arg2); + _post_call_callback("glMultiTexCoordP3uiv", (void*)glMultiTexCoordP3uiv, 3, arg0, arg1, arg2); + +} +PFNGLMULTITEXCOORDP3UIVPROC glad_debug_glMultiTexCoordP3uiv = glad_debug_impl_glMultiTexCoordP3uiv; +PFNGLMULTITEXCOORDP4UIPROC glad_glMultiTexCoordP4ui; +void APIENTRY glad_debug_impl_glMultiTexCoordP4ui(GLenum arg0, GLenum arg1, GLuint arg2) { + _pre_call_callback("glMultiTexCoordP4ui", (void*)glMultiTexCoordP4ui, 3, arg0, arg1, arg2); + glad_glMultiTexCoordP4ui(arg0, arg1, arg2); + _post_call_callback("glMultiTexCoordP4ui", (void*)glMultiTexCoordP4ui, 3, arg0, arg1, arg2); + +} +PFNGLMULTITEXCOORDP4UIPROC glad_debug_glMultiTexCoordP4ui = glad_debug_impl_glMultiTexCoordP4ui; +PFNGLMULTITEXCOORDP4UIVPROC glad_glMultiTexCoordP4uiv; +void APIENTRY glad_debug_impl_glMultiTexCoordP4uiv(GLenum arg0, GLenum arg1, const GLuint * arg2) { + _pre_call_callback("glMultiTexCoordP4uiv", (void*)glMultiTexCoordP4uiv, 3, arg0, arg1, arg2); + glad_glMultiTexCoordP4uiv(arg0, arg1, arg2); + _post_call_callback("glMultiTexCoordP4uiv", (void*)glMultiTexCoordP4uiv, 3, arg0, arg1, arg2); + +} +PFNGLMULTITEXCOORDP4UIVPROC glad_debug_glMultiTexCoordP4uiv = glad_debug_impl_glMultiTexCoordP4uiv; +PFNGLNORMALP3UIPROC glad_glNormalP3ui; +void APIENTRY glad_debug_impl_glNormalP3ui(GLenum arg0, GLuint arg1) { + _pre_call_callback("glNormalP3ui", (void*)glNormalP3ui, 2, arg0, arg1); + glad_glNormalP3ui(arg0, arg1); + _post_call_callback("glNormalP3ui", (void*)glNormalP3ui, 2, arg0, arg1); + +} +PFNGLNORMALP3UIPROC glad_debug_glNormalP3ui = glad_debug_impl_glNormalP3ui; +PFNGLNORMALP3UIVPROC glad_glNormalP3uiv; +void APIENTRY glad_debug_impl_glNormalP3uiv(GLenum arg0, const GLuint * arg1) { + _pre_call_callback("glNormalP3uiv", (void*)glNormalP3uiv, 2, arg0, arg1); + glad_glNormalP3uiv(arg0, arg1); + _post_call_callback("glNormalP3uiv", (void*)glNormalP3uiv, 2, arg0, arg1); + +} +PFNGLNORMALP3UIVPROC glad_debug_glNormalP3uiv = glad_debug_impl_glNormalP3uiv; +PFNGLPIXELSTOREFPROC glad_glPixelStoref; +void APIENTRY glad_debug_impl_glPixelStoref(GLenum arg0, GLfloat arg1) { + _pre_call_callback("glPixelStoref", (void*)glPixelStoref, 2, arg0, arg1); + glad_glPixelStoref(arg0, arg1); + _post_call_callback("glPixelStoref", (void*)glPixelStoref, 2, arg0, arg1); + +} +PFNGLPIXELSTOREFPROC glad_debug_glPixelStoref = glad_debug_impl_glPixelStoref; +PFNGLPIXELSTOREIPROC glad_glPixelStorei; +void APIENTRY glad_debug_impl_glPixelStorei(GLenum arg0, GLint arg1) { + _pre_call_callback("glPixelStorei", (void*)glPixelStorei, 2, arg0, arg1); + glad_glPixelStorei(arg0, arg1); + _post_call_callback("glPixelStorei", (void*)glPixelStorei, 2, arg0, arg1); + +} +PFNGLPIXELSTOREIPROC glad_debug_glPixelStorei = glad_debug_impl_glPixelStorei; +PFNGLPOINTPARAMETERFPROC glad_glPointParameterf; +void APIENTRY glad_debug_impl_glPointParameterf(GLenum arg0, GLfloat arg1) { + _pre_call_callback("glPointParameterf", (void*)glPointParameterf, 2, arg0, arg1); + glad_glPointParameterf(arg0, arg1); + 
_post_call_callback("glPointParameterf", (void*)glPointParameterf, 2, arg0, arg1); + +} +PFNGLPOINTPARAMETERFPROC glad_debug_glPointParameterf = glad_debug_impl_glPointParameterf; +PFNGLPOINTPARAMETERFVPROC glad_glPointParameterfv; +void APIENTRY glad_debug_impl_glPointParameterfv(GLenum arg0, const GLfloat * arg1) { + _pre_call_callback("glPointParameterfv", (void*)glPointParameterfv, 2, arg0, arg1); + glad_glPointParameterfv(arg0, arg1); + _post_call_callback("glPointParameterfv", (void*)glPointParameterfv, 2, arg0, arg1); + +} +PFNGLPOINTPARAMETERFVPROC glad_debug_glPointParameterfv = glad_debug_impl_glPointParameterfv; +PFNGLPOINTPARAMETERIPROC glad_glPointParameteri; +void APIENTRY glad_debug_impl_glPointParameteri(GLenum arg0, GLint arg1) { + _pre_call_callback("glPointParameteri", (void*)glPointParameteri, 2, arg0, arg1); + glad_glPointParameteri(arg0, arg1); + _post_call_callback("glPointParameteri", (void*)glPointParameteri, 2, arg0, arg1); + +} +PFNGLPOINTPARAMETERIPROC glad_debug_glPointParameteri = glad_debug_impl_glPointParameteri; +PFNGLPOINTPARAMETERIVPROC glad_glPointParameteriv; +void APIENTRY glad_debug_impl_glPointParameteriv(GLenum arg0, const GLint * arg1) { + _pre_call_callback("glPointParameteriv", (void*)glPointParameteriv, 2, arg0, arg1); + glad_glPointParameteriv(arg0, arg1); + _post_call_callback("glPointParameteriv", (void*)glPointParameteriv, 2, arg0, arg1); + +} +PFNGLPOINTPARAMETERIVPROC glad_debug_glPointParameteriv = glad_debug_impl_glPointParameteriv; +PFNGLPOINTSIZEPROC glad_glPointSize; +void APIENTRY glad_debug_impl_glPointSize(GLfloat arg0) { + _pre_call_callback("glPointSize", (void*)glPointSize, 1, arg0); + glad_glPointSize(arg0); + _post_call_callback("glPointSize", (void*)glPointSize, 1, arg0); + +} +PFNGLPOINTSIZEPROC glad_debug_glPointSize = glad_debug_impl_glPointSize; +PFNGLPOLYGONMODEPROC glad_glPolygonMode; +void APIENTRY glad_debug_impl_glPolygonMode(GLenum arg0, GLenum arg1) { + _pre_call_callback("glPolygonMode", (void*)glPolygonMode, 2, arg0, arg1); + glad_glPolygonMode(arg0, arg1); + _post_call_callback("glPolygonMode", (void*)glPolygonMode, 2, arg0, arg1); + +} +PFNGLPOLYGONMODEPROC glad_debug_glPolygonMode = glad_debug_impl_glPolygonMode; +PFNGLPOLYGONOFFSETPROC glad_glPolygonOffset; +void APIENTRY glad_debug_impl_glPolygonOffset(GLfloat arg0, GLfloat arg1) { + _pre_call_callback("glPolygonOffset", (void*)glPolygonOffset, 2, arg0, arg1); + glad_glPolygonOffset(arg0, arg1); + _post_call_callback("glPolygonOffset", (void*)glPolygonOffset, 2, arg0, arg1); + +} +PFNGLPOLYGONOFFSETPROC glad_debug_glPolygonOffset = glad_debug_impl_glPolygonOffset; +PFNGLPRIMITIVERESTARTINDEXPROC glad_glPrimitiveRestartIndex; +void APIENTRY glad_debug_impl_glPrimitiveRestartIndex(GLuint arg0) { + _pre_call_callback("glPrimitiveRestartIndex", (void*)glPrimitiveRestartIndex, 1, arg0); + glad_glPrimitiveRestartIndex(arg0); + _post_call_callback("glPrimitiveRestartIndex", (void*)glPrimitiveRestartIndex, 1, arg0); + +} +PFNGLPRIMITIVERESTARTINDEXPROC glad_debug_glPrimitiveRestartIndex = glad_debug_impl_glPrimitiveRestartIndex; +PFNGLPROVOKINGVERTEXPROC glad_glProvokingVertex; +void APIENTRY glad_debug_impl_glProvokingVertex(GLenum arg0) { + _pre_call_callback("glProvokingVertex", (void*)glProvokingVertex, 1, arg0); + glad_glProvokingVertex(arg0); + _post_call_callback("glProvokingVertex", (void*)glProvokingVertex, 1, arg0); + +} +PFNGLPROVOKINGVERTEXPROC glad_debug_glProvokingVertex = glad_debug_impl_glProvokingVertex; +PFNGLQUERYCOUNTERPROC glad_glQueryCounter; 
+void APIENTRY glad_debug_impl_glQueryCounter(GLuint arg0, GLenum arg1) { + _pre_call_callback("glQueryCounter", (void*)glQueryCounter, 2, arg0, arg1); + glad_glQueryCounter(arg0, arg1); + _post_call_callback("glQueryCounter", (void*)glQueryCounter, 2, arg0, arg1); + +} +PFNGLQUERYCOUNTERPROC glad_debug_glQueryCounter = glad_debug_impl_glQueryCounter; +PFNGLREADBUFFERPROC glad_glReadBuffer; +void APIENTRY glad_debug_impl_glReadBuffer(GLenum arg0) { + _pre_call_callback("glReadBuffer", (void*)glReadBuffer, 1, arg0); + glad_glReadBuffer(arg0); + _post_call_callback("glReadBuffer", (void*)glReadBuffer, 1, arg0); + +} +PFNGLREADBUFFERPROC glad_debug_glReadBuffer = glad_debug_impl_glReadBuffer; +PFNGLREADPIXELSPROC glad_glReadPixels; +void APIENTRY glad_debug_impl_glReadPixels(GLint arg0, GLint arg1, GLsizei arg2, GLsizei arg3, GLenum arg4, GLenum arg5, void * arg6) { + _pre_call_callback("glReadPixels", (void*)glReadPixels, 7, arg0, arg1, arg2, arg3, arg4, arg5, arg6); + glad_glReadPixels(arg0, arg1, arg2, arg3, arg4, arg5, arg6); + _post_call_callback("glReadPixels", (void*)glReadPixels, 7, arg0, arg1, arg2, arg3, arg4, arg5, arg6); + +} +PFNGLREADPIXELSPROC glad_debug_glReadPixels = glad_debug_impl_glReadPixels; +PFNGLRENDERBUFFERSTORAGEPROC glad_glRenderbufferStorage; +void APIENTRY glad_debug_impl_glRenderbufferStorage(GLenum arg0, GLenum arg1, GLsizei arg2, GLsizei arg3) { + _pre_call_callback("glRenderbufferStorage", (void*)glRenderbufferStorage, 4, arg0, arg1, arg2, arg3); + glad_glRenderbufferStorage(arg0, arg1, arg2, arg3); + _post_call_callback("glRenderbufferStorage", (void*)glRenderbufferStorage, 4, arg0, arg1, arg2, arg3); + +} +PFNGLRENDERBUFFERSTORAGEPROC glad_debug_glRenderbufferStorage = glad_debug_impl_glRenderbufferStorage; +PFNGLRENDERBUFFERSTORAGEMULTISAMPLEPROC glad_glRenderbufferStorageMultisample; +void APIENTRY glad_debug_impl_glRenderbufferStorageMultisample(GLenum arg0, GLsizei arg1, GLenum arg2, GLsizei arg3, GLsizei arg4) { + _pre_call_callback("glRenderbufferStorageMultisample", (void*)glRenderbufferStorageMultisample, 5, arg0, arg1, arg2, arg3, arg4); + glad_glRenderbufferStorageMultisample(arg0, arg1, arg2, arg3, arg4); + _post_call_callback("glRenderbufferStorageMultisample", (void*)glRenderbufferStorageMultisample, 5, arg0, arg1, arg2, arg3, arg4); + +} +PFNGLRENDERBUFFERSTORAGEMULTISAMPLEPROC glad_debug_glRenderbufferStorageMultisample = glad_debug_impl_glRenderbufferStorageMultisample; +PFNGLSAMPLECOVERAGEPROC glad_glSampleCoverage; +void APIENTRY glad_debug_impl_glSampleCoverage(GLfloat arg0, GLboolean arg1) { + _pre_call_callback("glSampleCoverage", (void*)glSampleCoverage, 2, arg0, arg1); + glad_glSampleCoverage(arg0, arg1); + _post_call_callback("glSampleCoverage", (void*)glSampleCoverage, 2, arg0, arg1); + +} +PFNGLSAMPLECOVERAGEPROC glad_debug_glSampleCoverage = glad_debug_impl_glSampleCoverage; +PFNGLSAMPLEMASKIPROC glad_glSampleMaski; +void APIENTRY glad_debug_impl_glSampleMaski(GLuint arg0, GLbitfield arg1) { + _pre_call_callback("glSampleMaski", (void*)glSampleMaski, 2, arg0, arg1); + glad_glSampleMaski(arg0, arg1); + _post_call_callback("glSampleMaski", (void*)glSampleMaski, 2, arg0, arg1); + +} +PFNGLSAMPLEMASKIPROC glad_debug_glSampleMaski = glad_debug_impl_glSampleMaski; +PFNGLSAMPLERPARAMETERIIVPROC glad_glSamplerParameterIiv; +void APIENTRY glad_debug_impl_glSamplerParameterIiv(GLuint arg0, GLenum arg1, const GLint * arg2) { + _pre_call_callback("glSamplerParameterIiv", (void*)glSamplerParameterIiv, 3, arg0, arg1, arg2); + 
glad_glSamplerParameterIiv(arg0, arg1, arg2); + _post_call_callback("glSamplerParameterIiv", (void*)glSamplerParameterIiv, 3, arg0, arg1, arg2); + +} +PFNGLSAMPLERPARAMETERIIVPROC glad_debug_glSamplerParameterIiv = glad_debug_impl_glSamplerParameterIiv; +PFNGLSAMPLERPARAMETERIUIVPROC glad_glSamplerParameterIuiv; +void APIENTRY glad_debug_impl_glSamplerParameterIuiv(GLuint arg0, GLenum arg1, const GLuint * arg2) { + _pre_call_callback("glSamplerParameterIuiv", (void*)glSamplerParameterIuiv, 3, arg0, arg1, arg2); + glad_glSamplerParameterIuiv(arg0, arg1, arg2); + _post_call_callback("glSamplerParameterIuiv", (void*)glSamplerParameterIuiv, 3, arg0, arg1, arg2); + +} +PFNGLSAMPLERPARAMETERIUIVPROC glad_debug_glSamplerParameterIuiv = glad_debug_impl_glSamplerParameterIuiv; +PFNGLSAMPLERPARAMETERFPROC glad_glSamplerParameterf; +void APIENTRY glad_debug_impl_glSamplerParameterf(GLuint arg0, GLenum arg1, GLfloat arg2) { + _pre_call_callback("glSamplerParameterf", (void*)glSamplerParameterf, 3, arg0, arg1, arg2); + glad_glSamplerParameterf(arg0, arg1, arg2); + _post_call_callback("glSamplerParameterf", (void*)glSamplerParameterf, 3, arg0, arg1, arg2); + +} +PFNGLSAMPLERPARAMETERFPROC glad_debug_glSamplerParameterf = glad_debug_impl_glSamplerParameterf; +PFNGLSAMPLERPARAMETERFVPROC glad_glSamplerParameterfv; +void APIENTRY glad_debug_impl_glSamplerParameterfv(GLuint arg0, GLenum arg1, const GLfloat * arg2) { + _pre_call_callback("glSamplerParameterfv", (void*)glSamplerParameterfv, 3, arg0, arg1, arg2); + glad_glSamplerParameterfv(arg0, arg1, arg2); + _post_call_callback("glSamplerParameterfv", (void*)glSamplerParameterfv, 3, arg0, arg1, arg2); + +} +PFNGLSAMPLERPARAMETERFVPROC glad_debug_glSamplerParameterfv = glad_debug_impl_glSamplerParameterfv; +PFNGLSAMPLERPARAMETERIPROC glad_glSamplerParameteri; +void APIENTRY glad_debug_impl_glSamplerParameteri(GLuint arg0, GLenum arg1, GLint arg2) { + _pre_call_callback("glSamplerParameteri", (void*)glSamplerParameteri, 3, arg0, arg1, arg2); + glad_glSamplerParameteri(arg0, arg1, arg2); + _post_call_callback("glSamplerParameteri", (void*)glSamplerParameteri, 3, arg0, arg1, arg2); + +} +PFNGLSAMPLERPARAMETERIPROC glad_debug_glSamplerParameteri = glad_debug_impl_glSamplerParameteri; +PFNGLSAMPLERPARAMETERIVPROC glad_glSamplerParameteriv; +void APIENTRY glad_debug_impl_glSamplerParameteriv(GLuint arg0, GLenum arg1, const GLint * arg2) { + _pre_call_callback("glSamplerParameteriv", (void*)glSamplerParameteriv, 3, arg0, arg1, arg2); + glad_glSamplerParameteriv(arg0, arg1, arg2); + _post_call_callback("glSamplerParameteriv", (void*)glSamplerParameteriv, 3, arg0, arg1, arg2); + +} +PFNGLSAMPLERPARAMETERIVPROC glad_debug_glSamplerParameteriv = glad_debug_impl_glSamplerParameteriv; +PFNGLSCISSORPROC glad_glScissor; +void APIENTRY glad_debug_impl_glScissor(GLint arg0, GLint arg1, GLsizei arg2, GLsizei arg3) { + _pre_call_callback("glScissor", (void*)glScissor, 4, arg0, arg1, arg2, arg3); + glad_glScissor(arg0, arg1, arg2, arg3); + _post_call_callback("glScissor", (void*)glScissor, 4, arg0, arg1, arg2, arg3); + +} +PFNGLSCISSORPROC glad_debug_glScissor = glad_debug_impl_glScissor; +PFNGLSECONDARYCOLORP3UIPROC glad_glSecondaryColorP3ui; +void APIENTRY glad_debug_impl_glSecondaryColorP3ui(GLenum arg0, GLuint arg1) { + _pre_call_callback("glSecondaryColorP3ui", (void*)glSecondaryColorP3ui, 2, arg0, arg1); + glad_glSecondaryColorP3ui(arg0, arg1); + _post_call_callback("glSecondaryColorP3ui", (void*)glSecondaryColorP3ui, 2, arg0, arg1); + +} +PFNGLSECONDARYCOLORP3UIPROC 
glad_debug_glSecondaryColorP3ui = glad_debug_impl_glSecondaryColorP3ui; +PFNGLSECONDARYCOLORP3UIVPROC glad_glSecondaryColorP3uiv; +void APIENTRY glad_debug_impl_glSecondaryColorP3uiv(GLenum arg0, const GLuint * arg1) { + _pre_call_callback("glSecondaryColorP3uiv", (void*)glSecondaryColorP3uiv, 2, arg0, arg1); + glad_glSecondaryColorP3uiv(arg0, arg1); + _post_call_callback("glSecondaryColorP3uiv", (void*)glSecondaryColorP3uiv, 2, arg0, arg1); + +} +PFNGLSECONDARYCOLORP3UIVPROC glad_debug_glSecondaryColorP3uiv = glad_debug_impl_glSecondaryColorP3uiv; +PFNGLSHADERSOURCEPROC glad_glShaderSource; +void APIENTRY glad_debug_impl_glShaderSource(GLuint arg0, GLsizei arg1, const GLchar *const* arg2, const GLint * arg3) { + _pre_call_callback("glShaderSource", (void*)glShaderSource, 4, arg0, arg1, arg2, arg3); + glad_glShaderSource(arg0, arg1, arg2, arg3); + _post_call_callback("glShaderSource", (void*)glShaderSource, 4, arg0, arg1, arg2, arg3); + +} +PFNGLSHADERSOURCEPROC glad_debug_glShaderSource = glad_debug_impl_glShaderSource; +PFNGLSTENCILFUNCPROC glad_glStencilFunc; +void APIENTRY glad_debug_impl_glStencilFunc(GLenum arg0, GLint arg1, GLuint arg2) { + _pre_call_callback("glStencilFunc", (void*)glStencilFunc, 3, arg0, arg1, arg2); + glad_glStencilFunc(arg0, arg1, arg2); + _post_call_callback("glStencilFunc", (void*)glStencilFunc, 3, arg0, arg1, arg2); + +} +PFNGLSTENCILFUNCPROC glad_debug_glStencilFunc = glad_debug_impl_glStencilFunc; +PFNGLSTENCILFUNCSEPARATEPROC glad_glStencilFuncSeparate; +void APIENTRY glad_debug_impl_glStencilFuncSeparate(GLenum arg0, GLenum arg1, GLint arg2, GLuint arg3) { + _pre_call_callback("glStencilFuncSeparate", (void*)glStencilFuncSeparate, 4, arg0, arg1, arg2, arg3); + glad_glStencilFuncSeparate(arg0, arg1, arg2, arg3); + _post_call_callback("glStencilFuncSeparate", (void*)glStencilFuncSeparate, 4, arg0, arg1, arg2, arg3); + +} +PFNGLSTENCILFUNCSEPARATEPROC glad_debug_glStencilFuncSeparate = glad_debug_impl_glStencilFuncSeparate; +PFNGLSTENCILMASKPROC glad_glStencilMask; +void APIENTRY glad_debug_impl_glStencilMask(GLuint arg0) { + _pre_call_callback("glStencilMask", (void*)glStencilMask, 1, arg0); + glad_glStencilMask(arg0); + _post_call_callback("glStencilMask", (void*)glStencilMask, 1, arg0); + +} +PFNGLSTENCILMASKPROC glad_debug_glStencilMask = glad_debug_impl_glStencilMask; +PFNGLSTENCILMASKSEPARATEPROC glad_glStencilMaskSeparate; +void APIENTRY glad_debug_impl_glStencilMaskSeparate(GLenum arg0, GLuint arg1) { + _pre_call_callback("glStencilMaskSeparate", (void*)glStencilMaskSeparate, 2, arg0, arg1); + glad_glStencilMaskSeparate(arg0, arg1); + _post_call_callback("glStencilMaskSeparate", (void*)glStencilMaskSeparate, 2, arg0, arg1); + +} +PFNGLSTENCILMASKSEPARATEPROC glad_debug_glStencilMaskSeparate = glad_debug_impl_glStencilMaskSeparate; +PFNGLSTENCILOPPROC glad_glStencilOp; +void APIENTRY glad_debug_impl_glStencilOp(GLenum arg0, GLenum arg1, GLenum arg2) { + _pre_call_callback("glStencilOp", (void*)glStencilOp, 3, arg0, arg1, arg2); + glad_glStencilOp(arg0, arg1, arg2); + _post_call_callback("glStencilOp", (void*)glStencilOp, 3, arg0, arg1, arg2); + +} +PFNGLSTENCILOPPROC glad_debug_glStencilOp = glad_debug_impl_glStencilOp; +PFNGLSTENCILOPSEPARATEPROC glad_glStencilOpSeparate; +void APIENTRY glad_debug_impl_glStencilOpSeparate(GLenum arg0, GLenum arg1, GLenum arg2, GLenum arg3) { + _pre_call_callback("glStencilOpSeparate", (void*)glStencilOpSeparate, 4, arg0, arg1, arg2, arg3); + glad_glStencilOpSeparate(arg0, arg1, arg2, arg3); + 
_post_call_callback("glStencilOpSeparate", (void*)glStencilOpSeparate, 4, arg0, arg1, arg2, arg3); + +} +PFNGLSTENCILOPSEPARATEPROC glad_debug_glStencilOpSeparate = glad_debug_impl_glStencilOpSeparate; +PFNGLTEXBUFFERPROC glad_glTexBuffer; +void APIENTRY glad_debug_impl_glTexBuffer(GLenum arg0, GLenum arg1, GLuint arg2) { + _pre_call_callback("glTexBuffer", (void*)glTexBuffer, 3, arg0, arg1, arg2); + glad_glTexBuffer(arg0, arg1, arg2); + _post_call_callback("glTexBuffer", (void*)glTexBuffer, 3, arg0, arg1, arg2); + +} +PFNGLTEXBUFFERPROC glad_debug_glTexBuffer = glad_debug_impl_glTexBuffer; +PFNGLTEXCOORDP1UIPROC glad_glTexCoordP1ui; +void APIENTRY glad_debug_impl_glTexCoordP1ui(GLenum arg0, GLuint arg1) { + _pre_call_callback("glTexCoordP1ui", (void*)glTexCoordP1ui, 2, arg0, arg1); + glad_glTexCoordP1ui(arg0, arg1); + _post_call_callback("glTexCoordP1ui", (void*)glTexCoordP1ui, 2, arg0, arg1); + +} +PFNGLTEXCOORDP1UIPROC glad_debug_glTexCoordP1ui = glad_debug_impl_glTexCoordP1ui; +PFNGLTEXCOORDP1UIVPROC glad_glTexCoordP1uiv; +void APIENTRY glad_debug_impl_glTexCoordP1uiv(GLenum arg0, const GLuint * arg1) { + _pre_call_callback("glTexCoordP1uiv", (void*)glTexCoordP1uiv, 2, arg0, arg1); + glad_glTexCoordP1uiv(arg0, arg1); + _post_call_callback("glTexCoordP1uiv", (void*)glTexCoordP1uiv, 2, arg0, arg1); + +} +PFNGLTEXCOORDP1UIVPROC glad_debug_glTexCoordP1uiv = glad_debug_impl_glTexCoordP1uiv; +PFNGLTEXCOORDP2UIPROC glad_glTexCoordP2ui; +void APIENTRY glad_debug_impl_glTexCoordP2ui(GLenum arg0, GLuint arg1) { + _pre_call_callback("glTexCoordP2ui", (void*)glTexCoordP2ui, 2, arg0, arg1); + glad_glTexCoordP2ui(arg0, arg1); + _post_call_callback("glTexCoordP2ui", (void*)glTexCoordP2ui, 2, arg0, arg1); + +} +PFNGLTEXCOORDP2UIPROC glad_debug_glTexCoordP2ui = glad_debug_impl_glTexCoordP2ui; +PFNGLTEXCOORDP2UIVPROC glad_glTexCoordP2uiv; +void APIENTRY glad_debug_impl_glTexCoordP2uiv(GLenum arg0, const GLuint * arg1) { + _pre_call_callback("glTexCoordP2uiv", (void*)glTexCoordP2uiv, 2, arg0, arg1); + glad_glTexCoordP2uiv(arg0, arg1); + _post_call_callback("glTexCoordP2uiv", (void*)glTexCoordP2uiv, 2, arg0, arg1); + +} +PFNGLTEXCOORDP2UIVPROC glad_debug_glTexCoordP2uiv = glad_debug_impl_glTexCoordP2uiv; +PFNGLTEXCOORDP3UIPROC glad_glTexCoordP3ui; +void APIENTRY glad_debug_impl_glTexCoordP3ui(GLenum arg0, GLuint arg1) { + _pre_call_callback("glTexCoordP3ui", (void*)glTexCoordP3ui, 2, arg0, arg1); + glad_glTexCoordP3ui(arg0, arg1); + _post_call_callback("glTexCoordP3ui", (void*)glTexCoordP3ui, 2, arg0, arg1); + +} +PFNGLTEXCOORDP3UIPROC glad_debug_glTexCoordP3ui = glad_debug_impl_glTexCoordP3ui; +PFNGLTEXCOORDP3UIVPROC glad_glTexCoordP3uiv; +void APIENTRY glad_debug_impl_glTexCoordP3uiv(GLenum arg0, const GLuint * arg1) { + _pre_call_callback("glTexCoordP3uiv", (void*)glTexCoordP3uiv, 2, arg0, arg1); + glad_glTexCoordP3uiv(arg0, arg1); + _post_call_callback("glTexCoordP3uiv", (void*)glTexCoordP3uiv, 2, arg0, arg1); + +} +PFNGLTEXCOORDP3UIVPROC glad_debug_glTexCoordP3uiv = glad_debug_impl_glTexCoordP3uiv; +PFNGLTEXCOORDP4UIPROC glad_glTexCoordP4ui; +void APIENTRY glad_debug_impl_glTexCoordP4ui(GLenum arg0, GLuint arg1) { + _pre_call_callback("glTexCoordP4ui", (void*)glTexCoordP4ui, 2, arg0, arg1); + glad_glTexCoordP4ui(arg0, arg1); + _post_call_callback("glTexCoordP4ui", (void*)glTexCoordP4ui, 2, arg0, arg1); + +} +PFNGLTEXCOORDP4UIPROC glad_debug_glTexCoordP4ui = glad_debug_impl_glTexCoordP4ui; +PFNGLTEXCOORDP4UIVPROC glad_glTexCoordP4uiv; +void APIENTRY glad_debug_impl_glTexCoordP4uiv(GLenum arg0, const 
GLuint * arg1) { + _pre_call_callback("glTexCoordP4uiv", (void*)glTexCoordP4uiv, 2, arg0, arg1); + glad_glTexCoordP4uiv(arg0, arg1); + _post_call_callback("glTexCoordP4uiv", (void*)glTexCoordP4uiv, 2, arg0, arg1); + +} +PFNGLTEXCOORDP4UIVPROC glad_debug_glTexCoordP4uiv = glad_debug_impl_glTexCoordP4uiv; +PFNGLTEXIMAGE1DPROC glad_glTexImage1D; +void APIENTRY glad_debug_impl_glTexImage1D(GLenum arg0, GLint arg1, GLint arg2, GLsizei arg3, GLint arg4, GLenum arg5, GLenum arg6, const void * arg7) { + _pre_call_callback("glTexImage1D", (void*)glTexImage1D, 8, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7); + glad_glTexImage1D(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7); + _post_call_callback("glTexImage1D", (void*)glTexImage1D, 8, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7); + +} +PFNGLTEXIMAGE1DPROC glad_debug_glTexImage1D = glad_debug_impl_glTexImage1D; +PFNGLTEXIMAGE2DPROC glad_glTexImage2D; +void APIENTRY glad_debug_impl_glTexImage2D(GLenum arg0, GLint arg1, GLint arg2, GLsizei arg3, GLsizei arg4, GLint arg5, GLenum arg6, GLenum arg7, const void * arg8) { + _pre_call_callback("glTexImage2D", (void*)glTexImage2D, 9, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8); + glad_glTexImage2D(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8); + _post_call_callback("glTexImage2D", (void*)glTexImage2D, 9, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8); + +} +PFNGLTEXIMAGE2DPROC glad_debug_glTexImage2D = glad_debug_impl_glTexImage2D; +PFNGLTEXIMAGE2DMULTISAMPLEPROC glad_glTexImage2DMultisample; +void APIENTRY glad_debug_impl_glTexImage2DMultisample(GLenum arg0, GLsizei arg1, GLenum arg2, GLsizei arg3, GLsizei arg4, GLboolean arg5) { + _pre_call_callback("glTexImage2DMultisample", (void*)glTexImage2DMultisample, 6, arg0, arg1, arg2, arg3, arg4, arg5); + glad_glTexImage2DMultisample(arg0, arg1, arg2, arg3, arg4, arg5); + _post_call_callback("glTexImage2DMultisample", (void*)glTexImage2DMultisample, 6, arg0, arg1, arg2, arg3, arg4, arg5); + +} +PFNGLTEXIMAGE2DMULTISAMPLEPROC glad_debug_glTexImage2DMultisample = glad_debug_impl_glTexImage2DMultisample; +PFNGLTEXIMAGE3DPROC glad_glTexImage3D; +void APIENTRY glad_debug_impl_glTexImage3D(GLenum arg0, GLint arg1, GLint arg2, GLsizei arg3, GLsizei arg4, GLsizei arg5, GLint arg6, GLenum arg7, GLenum arg8, const void * arg9) { + _pre_call_callback("glTexImage3D", (void*)glTexImage3D, 10, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9); + glad_glTexImage3D(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9); + _post_call_callback("glTexImage3D", (void*)glTexImage3D, 10, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9); + +} +PFNGLTEXIMAGE3DPROC glad_debug_glTexImage3D = glad_debug_impl_glTexImage3D; +PFNGLTEXIMAGE3DMULTISAMPLEPROC glad_glTexImage3DMultisample; +void APIENTRY glad_debug_impl_glTexImage3DMultisample(GLenum arg0, GLsizei arg1, GLenum arg2, GLsizei arg3, GLsizei arg4, GLsizei arg5, GLboolean arg6) { + _pre_call_callback("glTexImage3DMultisample", (void*)glTexImage3DMultisample, 7, arg0, arg1, arg2, arg3, arg4, arg5, arg6); + glad_glTexImage3DMultisample(arg0, arg1, arg2, arg3, arg4, arg5, arg6); + _post_call_callback("glTexImage3DMultisample", (void*)glTexImage3DMultisample, 7, arg0, arg1, arg2, arg3, arg4, arg5, arg6); + +} +PFNGLTEXIMAGE3DMULTISAMPLEPROC glad_debug_glTexImage3DMultisample = glad_debug_impl_glTexImage3DMultisample; +PFNGLTEXPARAMETERIIVPROC glad_glTexParameterIiv; +void APIENTRY glad_debug_impl_glTexParameterIiv(GLenum arg0, GLenum arg1, const GLint * arg2) { + 
_pre_call_callback("glTexParameterIiv", (void*)glTexParameterIiv, 3, arg0, arg1, arg2); + glad_glTexParameterIiv(arg0, arg1, arg2); + _post_call_callback("glTexParameterIiv", (void*)glTexParameterIiv, 3, arg0, arg1, arg2); + +} +PFNGLTEXPARAMETERIIVPROC glad_debug_glTexParameterIiv = glad_debug_impl_glTexParameterIiv; +PFNGLTEXPARAMETERIUIVPROC glad_glTexParameterIuiv; +void APIENTRY glad_debug_impl_glTexParameterIuiv(GLenum arg0, GLenum arg1, const GLuint * arg2) { + _pre_call_callback("glTexParameterIuiv", (void*)glTexParameterIuiv, 3, arg0, arg1, arg2); + glad_glTexParameterIuiv(arg0, arg1, arg2); + _post_call_callback("glTexParameterIuiv", (void*)glTexParameterIuiv, 3, arg0, arg1, arg2); + +} +PFNGLTEXPARAMETERIUIVPROC glad_debug_glTexParameterIuiv = glad_debug_impl_glTexParameterIuiv; +PFNGLTEXPARAMETERFPROC glad_glTexParameterf; +void APIENTRY glad_debug_impl_glTexParameterf(GLenum arg0, GLenum arg1, GLfloat arg2) { + _pre_call_callback("glTexParameterf", (void*)glTexParameterf, 3, arg0, arg1, arg2); + glad_glTexParameterf(arg0, arg1, arg2); + _post_call_callback("glTexParameterf", (void*)glTexParameterf, 3, arg0, arg1, arg2); + +} +PFNGLTEXPARAMETERFPROC glad_debug_glTexParameterf = glad_debug_impl_glTexParameterf; +PFNGLTEXPARAMETERFVPROC glad_glTexParameterfv; +void APIENTRY glad_debug_impl_glTexParameterfv(GLenum arg0, GLenum arg1, const GLfloat * arg2) { + _pre_call_callback("glTexParameterfv", (void*)glTexParameterfv, 3, arg0, arg1, arg2); + glad_glTexParameterfv(arg0, arg1, arg2); + _post_call_callback("glTexParameterfv", (void*)glTexParameterfv, 3, arg0, arg1, arg2); + +} +PFNGLTEXPARAMETERFVPROC glad_debug_glTexParameterfv = glad_debug_impl_glTexParameterfv; +PFNGLTEXPARAMETERIPROC glad_glTexParameteri; +void APIENTRY glad_debug_impl_glTexParameteri(GLenum arg0, GLenum arg1, GLint arg2) { + _pre_call_callback("glTexParameteri", (void*)glTexParameteri, 3, arg0, arg1, arg2); + glad_glTexParameteri(arg0, arg1, arg2); + _post_call_callback("glTexParameteri", (void*)glTexParameteri, 3, arg0, arg1, arg2); + +} +PFNGLTEXPARAMETERIPROC glad_debug_glTexParameteri = glad_debug_impl_glTexParameteri; +PFNGLTEXPARAMETERIVPROC glad_glTexParameteriv; +void APIENTRY glad_debug_impl_glTexParameteriv(GLenum arg0, GLenum arg1, const GLint * arg2) { + _pre_call_callback("glTexParameteriv", (void*)glTexParameteriv, 3, arg0, arg1, arg2); + glad_glTexParameteriv(arg0, arg1, arg2); + _post_call_callback("glTexParameteriv", (void*)glTexParameteriv, 3, arg0, arg1, arg2); + +} +PFNGLTEXPARAMETERIVPROC glad_debug_glTexParameteriv = glad_debug_impl_glTexParameteriv; +PFNGLTEXSUBIMAGE1DPROC glad_glTexSubImage1D; +void APIENTRY glad_debug_impl_glTexSubImage1D(GLenum arg0, GLint arg1, GLint arg2, GLsizei arg3, GLenum arg4, GLenum arg5, const void * arg6) { + _pre_call_callback("glTexSubImage1D", (void*)glTexSubImage1D, 7, arg0, arg1, arg2, arg3, arg4, arg5, arg6); + glad_glTexSubImage1D(arg0, arg1, arg2, arg3, arg4, arg5, arg6); + _post_call_callback("glTexSubImage1D", (void*)glTexSubImage1D, 7, arg0, arg1, arg2, arg3, arg4, arg5, arg6); + +} +PFNGLTEXSUBIMAGE1DPROC glad_debug_glTexSubImage1D = glad_debug_impl_glTexSubImage1D; +PFNGLTEXSUBIMAGE2DPROC glad_glTexSubImage2D; +void APIENTRY glad_debug_impl_glTexSubImage2D(GLenum arg0, GLint arg1, GLint arg2, GLint arg3, GLsizei arg4, GLsizei arg5, GLenum arg6, GLenum arg7, const void * arg8) { + _pre_call_callback("glTexSubImage2D", (void*)glTexSubImage2D, 9, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8); + glad_glTexSubImage2D(arg0, arg1, arg2, arg3, 
arg4, arg5, arg6, arg7, arg8); + _post_call_callback("glTexSubImage2D", (void*)glTexSubImage2D, 9, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8); + +} +PFNGLTEXSUBIMAGE2DPROC glad_debug_glTexSubImage2D = glad_debug_impl_glTexSubImage2D; +PFNGLTEXSUBIMAGE3DPROC glad_glTexSubImage3D; +void APIENTRY glad_debug_impl_glTexSubImage3D(GLenum arg0, GLint arg1, GLint arg2, GLint arg3, GLint arg4, GLsizei arg5, GLsizei arg6, GLsizei arg7, GLenum arg8, GLenum arg9, const void * arg10) { + _pre_call_callback("glTexSubImage3D", (void*)glTexSubImage3D, 11, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10); + glad_glTexSubImage3D(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10); + _post_call_callback("glTexSubImage3D", (void*)glTexSubImage3D, 11, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10); + +} +PFNGLTEXSUBIMAGE3DPROC glad_debug_glTexSubImage3D = glad_debug_impl_glTexSubImage3D; +PFNGLTRANSFORMFEEDBACKVARYINGSPROC glad_glTransformFeedbackVaryings; +void APIENTRY glad_debug_impl_glTransformFeedbackVaryings(GLuint arg0, GLsizei arg1, const GLchar *const* arg2, GLenum arg3) { + _pre_call_callback("glTransformFeedbackVaryings", (void*)glTransformFeedbackVaryings, 4, arg0, arg1, arg2, arg3); + glad_glTransformFeedbackVaryings(arg0, arg1, arg2, arg3); + _post_call_callback("glTransformFeedbackVaryings", (void*)glTransformFeedbackVaryings, 4, arg0, arg1, arg2, arg3); + +} +PFNGLTRANSFORMFEEDBACKVARYINGSPROC glad_debug_glTransformFeedbackVaryings = glad_debug_impl_glTransformFeedbackVaryings; +PFNGLUNIFORM1FPROC glad_glUniform1f; +void APIENTRY glad_debug_impl_glUniform1f(GLint arg0, GLfloat arg1) { + _pre_call_callback("glUniform1f", (void*)glUniform1f, 2, arg0, arg1); + glad_glUniform1f(arg0, arg1); + _post_call_callback("glUniform1f", (void*)glUniform1f, 2, arg0, arg1); + +} +PFNGLUNIFORM1FPROC glad_debug_glUniform1f = glad_debug_impl_glUniform1f; +PFNGLUNIFORM1FVPROC glad_glUniform1fv; +void APIENTRY glad_debug_impl_glUniform1fv(GLint arg0, GLsizei arg1, const GLfloat * arg2) { + _pre_call_callback("glUniform1fv", (void*)glUniform1fv, 3, arg0, arg1, arg2); + glad_glUniform1fv(arg0, arg1, arg2); + _post_call_callback("glUniform1fv", (void*)glUniform1fv, 3, arg0, arg1, arg2); + +} +PFNGLUNIFORM1FVPROC glad_debug_glUniform1fv = glad_debug_impl_glUniform1fv; +PFNGLUNIFORM1IPROC glad_glUniform1i; +void APIENTRY glad_debug_impl_glUniform1i(GLint arg0, GLint arg1) { + _pre_call_callback("glUniform1i", (void*)glUniform1i, 2, arg0, arg1); + glad_glUniform1i(arg0, arg1); + _post_call_callback("glUniform1i", (void*)glUniform1i, 2, arg0, arg1); + +} +PFNGLUNIFORM1IPROC glad_debug_glUniform1i = glad_debug_impl_glUniform1i; +PFNGLUNIFORM1IVPROC glad_glUniform1iv; +void APIENTRY glad_debug_impl_glUniform1iv(GLint arg0, GLsizei arg1, const GLint * arg2) { + _pre_call_callback("glUniform1iv", (void*)glUniform1iv, 3, arg0, arg1, arg2); + glad_glUniform1iv(arg0, arg1, arg2); + _post_call_callback("glUniform1iv", (void*)glUniform1iv, 3, arg0, arg1, arg2); + +} +PFNGLUNIFORM1IVPROC glad_debug_glUniform1iv = glad_debug_impl_glUniform1iv; +PFNGLUNIFORM1UIPROC glad_glUniform1ui; +void APIENTRY glad_debug_impl_glUniform1ui(GLint arg0, GLuint arg1) { + _pre_call_callback("glUniform1ui", (void*)glUniform1ui, 2, arg0, arg1); + glad_glUniform1ui(arg0, arg1); + _post_call_callback("glUniform1ui", (void*)glUniform1ui, 2, arg0, arg1); + +} +PFNGLUNIFORM1UIPROC glad_debug_glUniform1ui = glad_debug_impl_glUniform1ui; +PFNGLUNIFORM1UIVPROC glad_glUniform1uiv; +void APIENTRY 
glad_debug_impl_glUniform1uiv(GLint arg0, GLsizei arg1, const GLuint * arg2) { + _pre_call_callback("glUniform1uiv", (void*)glUniform1uiv, 3, arg0, arg1, arg2); + glad_glUniform1uiv(arg0, arg1, arg2); + _post_call_callback("glUniform1uiv", (void*)glUniform1uiv, 3, arg0, arg1, arg2); + +} +PFNGLUNIFORM1UIVPROC glad_debug_glUniform1uiv = glad_debug_impl_glUniform1uiv; +PFNGLUNIFORM2FPROC glad_glUniform2f; +void APIENTRY glad_debug_impl_glUniform2f(GLint arg0, GLfloat arg1, GLfloat arg2) { + _pre_call_callback("glUniform2f", (void*)glUniform2f, 3, arg0, arg1, arg2); + glad_glUniform2f(arg0, arg1, arg2); + _post_call_callback("glUniform2f", (void*)glUniform2f, 3, arg0, arg1, arg2); + +} +PFNGLUNIFORM2FPROC glad_debug_glUniform2f = glad_debug_impl_glUniform2f; +PFNGLUNIFORM2FVPROC glad_glUniform2fv; +void APIENTRY glad_debug_impl_glUniform2fv(GLint arg0, GLsizei arg1, const GLfloat * arg2) { + _pre_call_callback("glUniform2fv", (void*)glUniform2fv, 3, arg0, arg1, arg2); + glad_glUniform2fv(arg0, arg1, arg2); + _post_call_callback("glUniform2fv", (void*)glUniform2fv, 3, arg0, arg1, arg2); + +} +PFNGLUNIFORM2FVPROC glad_debug_glUniform2fv = glad_debug_impl_glUniform2fv; +PFNGLUNIFORM2IPROC glad_glUniform2i; +void APIENTRY glad_debug_impl_glUniform2i(GLint arg0, GLint arg1, GLint arg2) { + _pre_call_callback("glUniform2i", (void*)glUniform2i, 3, arg0, arg1, arg2); + glad_glUniform2i(arg0, arg1, arg2); + _post_call_callback("glUniform2i", (void*)glUniform2i, 3, arg0, arg1, arg2); + +} +PFNGLUNIFORM2IPROC glad_debug_glUniform2i = glad_debug_impl_glUniform2i; +PFNGLUNIFORM2IVPROC glad_glUniform2iv; +void APIENTRY glad_debug_impl_glUniform2iv(GLint arg0, GLsizei arg1, const GLint * arg2) { + _pre_call_callback("glUniform2iv", (void*)glUniform2iv, 3, arg0, arg1, arg2); + glad_glUniform2iv(arg0, arg1, arg2); + _post_call_callback("glUniform2iv", (void*)glUniform2iv, 3, arg0, arg1, arg2); + +} +PFNGLUNIFORM2IVPROC glad_debug_glUniform2iv = glad_debug_impl_glUniform2iv; +PFNGLUNIFORM2UIPROC glad_glUniform2ui; +void APIENTRY glad_debug_impl_glUniform2ui(GLint arg0, GLuint arg1, GLuint arg2) { + _pre_call_callback("glUniform2ui", (void*)glUniform2ui, 3, arg0, arg1, arg2); + glad_glUniform2ui(arg0, arg1, arg2); + _post_call_callback("glUniform2ui", (void*)glUniform2ui, 3, arg0, arg1, arg2); + +} +PFNGLUNIFORM2UIPROC glad_debug_glUniform2ui = glad_debug_impl_glUniform2ui; +PFNGLUNIFORM2UIVPROC glad_glUniform2uiv; +void APIENTRY glad_debug_impl_glUniform2uiv(GLint arg0, GLsizei arg1, const GLuint * arg2) { + _pre_call_callback("glUniform2uiv", (void*)glUniform2uiv, 3, arg0, arg1, arg2); + glad_glUniform2uiv(arg0, arg1, arg2); + _post_call_callback("glUniform2uiv", (void*)glUniform2uiv, 3, arg0, arg1, arg2); + +} +PFNGLUNIFORM2UIVPROC glad_debug_glUniform2uiv = glad_debug_impl_glUniform2uiv; +PFNGLUNIFORM3FPROC glad_glUniform3f; +void APIENTRY glad_debug_impl_glUniform3f(GLint arg0, GLfloat arg1, GLfloat arg2, GLfloat arg3) { + _pre_call_callback("glUniform3f", (void*)glUniform3f, 4, arg0, arg1, arg2, arg3); + glad_glUniform3f(arg0, arg1, arg2, arg3); + _post_call_callback("glUniform3f", (void*)glUniform3f, 4, arg0, arg1, arg2, arg3); + +} +PFNGLUNIFORM3FPROC glad_debug_glUniform3f = glad_debug_impl_glUniform3f; +PFNGLUNIFORM3FVPROC glad_glUniform3fv; +void APIENTRY glad_debug_impl_glUniform3fv(GLint arg0, GLsizei arg1, const GLfloat * arg2) { + _pre_call_callback("glUniform3fv", (void*)glUniform3fv, 3, arg0, arg1, arg2); + glad_glUniform3fv(arg0, arg1, arg2); + _post_call_callback("glUniform3fv", 
(void*)glUniform3fv, 3, arg0, arg1, arg2); + +} +PFNGLUNIFORM3FVPROC glad_debug_glUniform3fv = glad_debug_impl_glUniform3fv; +PFNGLUNIFORM3IPROC glad_glUniform3i; +void APIENTRY glad_debug_impl_glUniform3i(GLint arg0, GLint arg1, GLint arg2, GLint arg3) { + _pre_call_callback("glUniform3i", (void*)glUniform3i, 4, arg0, arg1, arg2, arg3); + glad_glUniform3i(arg0, arg1, arg2, arg3); + _post_call_callback("glUniform3i", (void*)glUniform3i, 4, arg0, arg1, arg2, arg3); + +} +PFNGLUNIFORM3IPROC glad_debug_glUniform3i = glad_debug_impl_glUniform3i; +PFNGLUNIFORM3IVPROC glad_glUniform3iv; +void APIENTRY glad_debug_impl_glUniform3iv(GLint arg0, GLsizei arg1, const GLint * arg2) { + _pre_call_callback("glUniform3iv", (void*)glUniform3iv, 3, arg0, arg1, arg2); + glad_glUniform3iv(arg0, arg1, arg2); + _post_call_callback("glUniform3iv", (void*)glUniform3iv, 3, arg0, arg1, arg2); + +} +PFNGLUNIFORM3IVPROC glad_debug_glUniform3iv = glad_debug_impl_glUniform3iv; +PFNGLUNIFORM3UIPROC glad_glUniform3ui; +void APIENTRY glad_debug_impl_glUniform3ui(GLint arg0, GLuint arg1, GLuint arg2, GLuint arg3) { + _pre_call_callback("glUniform3ui", (void*)glUniform3ui, 4, arg0, arg1, arg2, arg3); + glad_glUniform3ui(arg0, arg1, arg2, arg3); + _post_call_callback("glUniform3ui", (void*)glUniform3ui, 4, arg0, arg1, arg2, arg3); + +} +PFNGLUNIFORM3UIPROC glad_debug_glUniform3ui = glad_debug_impl_glUniform3ui; +PFNGLUNIFORM3UIVPROC glad_glUniform3uiv; +void APIENTRY glad_debug_impl_glUniform3uiv(GLint arg0, GLsizei arg1, const GLuint * arg2) { + _pre_call_callback("glUniform3uiv", (void*)glUniform3uiv, 3, arg0, arg1, arg2); + glad_glUniform3uiv(arg0, arg1, arg2); + _post_call_callback("glUniform3uiv", (void*)glUniform3uiv, 3, arg0, arg1, arg2); + +} +PFNGLUNIFORM3UIVPROC glad_debug_glUniform3uiv = glad_debug_impl_glUniform3uiv; +PFNGLUNIFORM4FPROC glad_glUniform4f; +void APIENTRY glad_debug_impl_glUniform4f(GLint arg0, GLfloat arg1, GLfloat arg2, GLfloat arg3, GLfloat arg4) { + _pre_call_callback("glUniform4f", (void*)glUniform4f, 5, arg0, arg1, arg2, arg3, arg4); + glad_glUniform4f(arg0, arg1, arg2, arg3, arg4); + _post_call_callback("glUniform4f", (void*)glUniform4f, 5, arg0, arg1, arg2, arg3, arg4); + +} +PFNGLUNIFORM4FPROC glad_debug_glUniform4f = glad_debug_impl_glUniform4f; +PFNGLUNIFORM4FVPROC glad_glUniform4fv; +void APIENTRY glad_debug_impl_glUniform4fv(GLint arg0, GLsizei arg1, const GLfloat * arg2) { + _pre_call_callback("glUniform4fv", (void*)glUniform4fv, 3, arg0, arg1, arg2); + glad_glUniform4fv(arg0, arg1, arg2); + _post_call_callback("glUniform4fv", (void*)glUniform4fv, 3, arg0, arg1, arg2); + +} +PFNGLUNIFORM4FVPROC glad_debug_glUniform4fv = glad_debug_impl_glUniform4fv; +PFNGLUNIFORM4IPROC glad_glUniform4i; +void APIENTRY glad_debug_impl_glUniform4i(GLint arg0, GLint arg1, GLint arg2, GLint arg3, GLint arg4) { + _pre_call_callback("glUniform4i", (void*)glUniform4i, 5, arg0, arg1, arg2, arg3, arg4); + glad_glUniform4i(arg0, arg1, arg2, arg3, arg4); + _post_call_callback("glUniform4i", (void*)glUniform4i, 5, arg0, arg1, arg2, arg3, arg4); + +} +PFNGLUNIFORM4IPROC glad_debug_glUniform4i = glad_debug_impl_glUniform4i; +PFNGLUNIFORM4IVPROC glad_glUniform4iv; +void APIENTRY glad_debug_impl_glUniform4iv(GLint arg0, GLsizei arg1, const GLint * arg2) { + _pre_call_callback("glUniform4iv", (void*)glUniform4iv, 3, arg0, arg1, arg2); + glad_glUniform4iv(arg0, arg1, arg2); + _post_call_callback("glUniform4iv", (void*)glUniform4iv, 3, arg0, arg1, arg2); + +} +PFNGLUNIFORM4IVPROC glad_debug_glUniform4iv = 
glad_debug_impl_glUniform4iv; +PFNGLUNIFORM4UIPROC glad_glUniform4ui; +void APIENTRY glad_debug_impl_glUniform4ui(GLint arg0, GLuint arg1, GLuint arg2, GLuint arg3, GLuint arg4) { + _pre_call_callback("glUniform4ui", (void*)glUniform4ui, 5, arg0, arg1, arg2, arg3, arg4); + glad_glUniform4ui(arg0, arg1, arg2, arg3, arg4); + _post_call_callback("glUniform4ui", (void*)glUniform4ui, 5, arg0, arg1, arg2, arg3, arg4); + +} +PFNGLUNIFORM4UIPROC glad_debug_glUniform4ui = glad_debug_impl_glUniform4ui; +PFNGLUNIFORM4UIVPROC glad_glUniform4uiv; +void APIENTRY glad_debug_impl_glUniform4uiv(GLint arg0, GLsizei arg1, const GLuint * arg2) { + _pre_call_callback("glUniform4uiv", (void*)glUniform4uiv, 3, arg0, arg1, arg2); + glad_glUniform4uiv(arg0, arg1, arg2); + _post_call_callback("glUniform4uiv", (void*)glUniform4uiv, 3, arg0, arg1, arg2); + +} +PFNGLUNIFORM4UIVPROC glad_debug_glUniform4uiv = glad_debug_impl_glUniform4uiv; +PFNGLUNIFORMBLOCKBINDINGPROC glad_glUniformBlockBinding; +void APIENTRY glad_debug_impl_glUniformBlockBinding(GLuint arg0, GLuint arg1, GLuint arg2) { + _pre_call_callback("glUniformBlockBinding", (void*)glUniformBlockBinding, 3, arg0, arg1, arg2); + glad_glUniformBlockBinding(arg0, arg1, arg2); + _post_call_callback("glUniformBlockBinding", (void*)glUniformBlockBinding, 3, arg0, arg1, arg2); + +} +PFNGLUNIFORMBLOCKBINDINGPROC glad_debug_glUniformBlockBinding = glad_debug_impl_glUniformBlockBinding; +PFNGLUNIFORMMATRIX2FVPROC glad_glUniformMatrix2fv; +void APIENTRY glad_debug_impl_glUniformMatrix2fv(GLint arg0, GLsizei arg1, GLboolean arg2, const GLfloat * arg3) { + _pre_call_callback("glUniformMatrix2fv", (void*)glUniformMatrix2fv, 4, arg0, arg1, arg2, arg3); + glad_glUniformMatrix2fv(arg0, arg1, arg2, arg3); + _post_call_callback("glUniformMatrix2fv", (void*)glUniformMatrix2fv, 4, arg0, arg1, arg2, arg3); + +} +PFNGLUNIFORMMATRIX2FVPROC glad_debug_glUniformMatrix2fv = glad_debug_impl_glUniformMatrix2fv; +PFNGLUNIFORMMATRIX2X3FVPROC glad_glUniformMatrix2x3fv; +void APIENTRY glad_debug_impl_glUniformMatrix2x3fv(GLint arg0, GLsizei arg1, GLboolean arg2, const GLfloat * arg3) { + _pre_call_callback("glUniformMatrix2x3fv", (void*)glUniformMatrix2x3fv, 4, arg0, arg1, arg2, arg3); + glad_glUniformMatrix2x3fv(arg0, arg1, arg2, arg3); + _post_call_callback("glUniformMatrix2x3fv", (void*)glUniformMatrix2x3fv, 4, arg0, arg1, arg2, arg3); + +} +PFNGLUNIFORMMATRIX2X3FVPROC glad_debug_glUniformMatrix2x3fv = glad_debug_impl_glUniformMatrix2x3fv; +PFNGLUNIFORMMATRIX2X4FVPROC glad_glUniformMatrix2x4fv; +void APIENTRY glad_debug_impl_glUniformMatrix2x4fv(GLint arg0, GLsizei arg1, GLboolean arg2, const GLfloat * arg3) { + _pre_call_callback("glUniformMatrix2x4fv", (void*)glUniformMatrix2x4fv, 4, arg0, arg1, arg2, arg3); + glad_glUniformMatrix2x4fv(arg0, arg1, arg2, arg3); + _post_call_callback("glUniformMatrix2x4fv", (void*)glUniformMatrix2x4fv, 4, arg0, arg1, arg2, arg3); + +} +PFNGLUNIFORMMATRIX2X4FVPROC glad_debug_glUniformMatrix2x4fv = glad_debug_impl_glUniformMatrix2x4fv; +PFNGLUNIFORMMATRIX3FVPROC glad_glUniformMatrix3fv; +void APIENTRY glad_debug_impl_glUniformMatrix3fv(GLint arg0, GLsizei arg1, GLboolean arg2, const GLfloat * arg3) { + _pre_call_callback("glUniformMatrix3fv", (void*)glUniformMatrix3fv, 4, arg0, arg1, arg2, arg3); + glad_glUniformMatrix3fv(arg0, arg1, arg2, arg3); + _post_call_callback("glUniformMatrix3fv", (void*)glUniformMatrix3fv, 4, arg0, arg1, arg2, arg3); + +} +PFNGLUNIFORMMATRIX3FVPROC glad_debug_glUniformMatrix3fv = glad_debug_impl_glUniformMatrix3fv; 
+PFNGLUNIFORMMATRIX3X2FVPROC glad_glUniformMatrix3x2fv; +void APIENTRY glad_debug_impl_glUniformMatrix3x2fv(GLint arg0, GLsizei arg1, GLboolean arg2, const GLfloat * arg3) { + _pre_call_callback("glUniformMatrix3x2fv", (void*)glUniformMatrix3x2fv, 4, arg0, arg1, arg2, arg3); + glad_glUniformMatrix3x2fv(arg0, arg1, arg2, arg3); + _post_call_callback("glUniformMatrix3x2fv", (void*)glUniformMatrix3x2fv, 4, arg0, arg1, arg2, arg3); + +} +PFNGLUNIFORMMATRIX3X2FVPROC glad_debug_glUniformMatrix3x2fv = glad_debug_impl_glUniformMatrix3x2fv; +PFNGLUNIFORMMATRIX3X4FVPROC glad_glUniformMatrix3x4fv; +void APIENTRY glad_debug_impl_glUniformMatrix3x4fv(GLint arg0, GLsizei arg1, GLboolean arg2, const GLfloat * arg3) { + _pre_call_callback("glUniformMatrix3x4fv", (void*)glUniformMatrix3x4fv, 4, arg0, arg1, arg2, arg3); + glad_glUniformMatrix3x4fv(arg0, arg1, arg2, arg3); + _post_call_callback("glUniformMatrix3x4fv", (void*)glUniformMatrix3x4fv, 4, arg0, arg1, arg2, arg3); + +} +PFNGLUNIFORMMATRIX3X4FVPROC glad_debug_glUniformMatrix3x4fv = glad_debug_impl_glUniformMatrix3x4fv; +PFNGLUNIFORMMATRIX4FVPROC glad_glUniformMatrix4fv; +void APIENTRY glad_debug_impl_glUniformMatrix4fv(GLint arg0, GLsizei arg1, GLboolean arg2, const GLfloat * arg3) { + _pre_call_callback("glUniformMatrix4fv", (void*)glUniformMatrix4fv, 4, arg0, arg1, arg2, arg3); + glad_glUniformMatrix4fv(arg0, arg1, arg2, arg3); + _post_call_callback("glUniformMatrix4fv", (void*)glUniformMatrix4fv, 4, arg0, arg1, arg2, arg3); + +} +PFNGLUNIFORMMATRIX4FVPROC glad_debug_glUniformMatrix4fv = glad_debug_impl_glUniformMatrix4fv; +PFNGLUNIFORMMATRIX4X2FVPROC glad_glUniformMatrix4x2fv; +void APIENTRY glad_debug_impl_glUniformMatrix4x2fv(GLint arg0, GLsizei arg1, GLboolean arg2, const GLfloat * arg3) { + _pre_call_callback("glUniformMatrix4x2fv", (void*)glUniformMatrix4x2fv, 4, arg0, arg1, arg2, arg3); + glad_glUniformMatrix4x2fv(arg0, arg1, arg2, arg3); + _post_call_callback("glUniformMatrix4x2fv", (void*)glUniformMatrix4x2fv, 4, arg0, arg1, arg2, arg3); + +} +PFNGLUNIFORMMATRIX4X2FVPROC glad_debug_glUniformMatrix4x2fv = glad_debug_impl_glUniformMatrix4x2fv; +PFNGLUNIFORMMATRIX4X3FVPROC glad_glUniformMatrix4x3fv; +void APIENTRY glad_debug_impl_glUniformMatrix4x3fv(GLint arg0, GLsizei arg1, GLboolean arg2, const GLfloat * arg3) { + _pre_call_callback("glUniformMatrix4x3fv", (void*)glUniformMatrix4x3fv, 4, arg0, arg1, arg2, arg3); + glad_glUniformMatrix4x3fv(arg0, arg1, arg2, arg3); + _post_call_callback("glUniformMatrix4x3fv", (void*)glUniformMatrix4x3fv, 4, arg0, arg1, arg2, arg3); + +} +PFNGLUNIFORMMATRIX4X3FVPROC glad_debug_glUniformMatrix4x3fv = glad_debug_impl_glUniformMatrix4x3fv; +PFNGLUNMAPBUFFERPROC glad_glUnmapBuffer; +GLboolean APIENTRY glad_debug_impl_glUnmapBuffer(GLenum arg0) { + GLboolean ret; + _pre_call_callback("glUnmapBuffer", (void*)glUnmapBuffer, 1, arg0); + ret = glad_glUnmapBuffer(arg0); + _post_call_callback("glUnmapBuffer", (void*)glUnmapBuffer, 1, arg0); + return ret; +} +PFNGLUNMAPBUFFERPROC glad_debug_glUnmapBuffer = glad_debug_impl_glUnmapBuffer; +PFNGLUSEPROGRAMPROC glad_glUseProgram; +void APIENTRY glad_debug_impl_glUseProgram(GLuint arg0) { + _pre_call_callback("glUseProgram", (void*)glUseProgram, 1, arg0); + glad_glUseProgram(arg0); + _post_call_callback("glUseProgram", (void*)glUseProgram, 1, arg0); + +} +PFNGLUSEPROGRAMPROC glad_debug_glUseProgram = glad_debug_impl_glUseProgram; +PFNGLVALIDATEPROGRAMPROC glad_glValidateProgram; +void APIENTRY glad_debug_impl_glValidateProgram(GLuint arg0) { + 
_pre_call_callback("glValidateProgram", (void*)glValidateProgram, 1, arg0); + glad_glValidateProgram(arg0); + _post_call_callback("glValidateProgram", (void*)glValidateProgram, 1, arg0); + +} +PFNGLVALIDATEPROGRAMPROC glad_debug_glValidateProgram = glad_debug_impl_glValidateProgram; +PFNGLVERTEXATTRIB1DPROC glad_glVertexAttrib1d; +void APIENTRY glad_debug_impl_glVertexAttrib1d(GLuint arg0, GLdouble arg1) { + _pre_call_callback("glVertexAttrib1d", (void*)glVertexAttrib1d, 2, arg0, arg1); + glad_glVertexAttrib1d(arg0, arg1); + _post_call_callback("glVertexAttrib1d", (void*)glVertexAttrib1d, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIB1DPROC glad_debug_glVertexAttrib1d = glad_debug_impl_glVertexAttrib1d; +PFNGLVERTEXATTRIB1DVPROC glad_glVertexAttrib1dv; +void APIENTRY glad_debug_impl_glVertexAttrib1dv(GLuint arg0, const GLdouble * arg1) { + _pre_call_callback("glVertexAttrib1dv", (void*)glVertexAttrib1dv, 2, arg0, arg1); + glad_glVertexAttrib1dv(arg0, arg1); + _post_call_callback("glVertexAttrib1dv", (void*)glVertexAttrib1dv, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIB1DVPROC glad_debug_glVertexAttrib1dv = glad_debug_impl_glVertexAttrib1dv; +PFNGLVERTEXATTRIB1FPROC glad_glVertexAttrib1f; +void APIENTRY glad_debug_impl_glVertexAttrib1f(GLuint arg0, GLfloat arg1) { + _pre_call_callback("glVertexAttrib1f", (void*)glVertexAttrib1f, 2, arg0, arg1); + glad_glVertexAttrib1f(arg0, arg1); + _post_call_callback("glVertexAttrib1f", (void*)glVertexAttrib1f, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIB1FPROC glad_debug_glVertexAttrib1f = glad_debug_impl_glVertexAttrib1f; +PFNGLVERTEXATTRIB1FVPROC glad_glVertexAttrib1fv; +void APIENTRY glad_debug_impl_glVertexAttrib1fv(GLuint arg0, const GLfloat * arg1) { + _pre_call_callback("glVertexAttrib1fv", (void*)glVertexAttrib1fv, 2, arg0, arg1); + glad_glVertexAttrib1fv(arg0, arg1); + _post_call_callback("glVertexAttrib1fv", (void*)glVertexAttrib1fv, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIB1FVPROC glad_debug_glVertexAttrib1fv = glad_debug_impl_glVertexAttrib1fv; +PFNGLVERTEXATTRIB1SPROC glad_glVertexAttrib1s; +void APIENTRY glad_debug_impl_glVertexAttrib1s(GLuint arg0, GLshort arg1) { + _pre_call_callback("glVertexAttrib1s", (void*)glVertexAttrib1s, 2, arg0, arg1); + glad_glVertexAttrib1s(arg0, arg1); + _post_call_callback("glVertexAttrib1s", (void*)glVertexAttrib1s, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIB1SPROC glad_debug_glVertexAttrib1s = glad_debug_impl_glVertexAttrib1s; +PFNGLVERTEXATTRIB1SVPROC glad_glVertexAttrib1sv; +void APIENTRY glad_debug_impl_glVertexAttrib1sv(GLuint arg0, const GLshort * arg1) { + _pre_call_callback("glVertexAttrib1sv", (void*)glVertexAttrib1sv, 2, arg0, arg1); + glad_glVertexAttrib1sv(arg0, arg1); + _post_call_callback("glVertexAttrib1sv", (void*)glVertexAttrib1sv, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIB1SVPROC glad_debug_glVertexAttrib1sv = glad_debug_impl_glVertexAttrib1sv; +PFNGLVERTEXATTRIB2DPROC glad_glVertexAttrib2d; +void APIENTRY glad_debug_impl_glVertexAttrib2d(GLuint arg0, GLdouble arg1, GLdouble arg2) { + _pre_call_callback("glVertexAttrib2d", (void*)glVertexAttrib2d, 3, arg0, arg1, arg2); + glad_glVertexAttrib2d(arg0, arg1, arg2); + _post_call_callback("glVertexAttrib2d", (void*)glVertexAttrib2d, 3, arg0, arg1, arg2); + +} +PFNGLVERTEXATTRIB2DPROC glad_debug_glVertexAttrib2d = glad_debug_impl_glVertexAttrib2d; +PFNGLVERTEXATTRIB2DVPROC glad_glVertexAttrib2dv; +void APIENTRY glad_debug_impl_glVertexAttrib2dv(GLuint arg0, const GLdouble * arg1) { + _pre_call_callback("glVertexAttrib2dv", (void*)glVertexAttrib2dv, 2, arg0, arg1); + 
glad_glVertexAttrib2dv(arg0, arg1); + _post_call_callback("glVertexAttrib2dv", (void*)glVertexAttrib2dv, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIB2DVPROC glad_debug_glVertexAttrib2dv = glad_debug_impl_glVertexAttrib2dv; +PFNGLVERTEXATTRIB2FPROC glad_glVertexAttrib2f; +void APIENTRY glad_debug_impl_glVertexAttrib2f(GLuint arg0, GLfloat arg1, GLfloat arg2) { + _pre_call_callback("glVertexAttrib2f", (void*)glVertexAttrib2f, 3, arg0, arg1, arg2); + glad_glVertexAttrib2f(arg0, arg1, arg2); + _post_call_callback("glVertexAttrib2f", (void*)glVertexAttrib2f, 3, arg0, arg1, arg2); + +} +PFNGLVERTEXATTRIB2FPROC glad_debug_glVertexAttrib2f = glad_debug_impl_glVertexAttrib2f; +PFNGLVERTEXATTRIB2FVPROC glad_glVertexAttrib2fv; +void APIENTRY glad_debug_impl_glVertexAttrib2fv(GLuint arg0, const GLfloat * arg1) { + _pre_call_callback("glVertexAttrib2fv", (void*)glVertexAttrib2fv, 2, arg0, arg1); + glad_glVertexAttrib2fv(arg0, arg1); + _post_call_callback("glVertexAttrib2fv", (void*)glVertexAttrib2fv, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIB2FVPROC glad_debug_glVertexAttrib2fv = glad_debug_impl_glVertexAttrib2fv; +PFNGLVERTEXATTRIB2SPROC glad_glVertexAttrib2s; +void APIENTRY glad_debug_impl_glVertexAttrib2s(GLuint arg0, GLshort arg1, GLshort arg2) { + _pre_call_callback("glVertexAttrib2s", (void*)glVertexAttrib2s, 3, arg0, arg1, arg2); + glad_glVertexAttrib2s(arg0, arg1, arg2); + _post_call_callback("glVertexAttrib2s", (void*)glVertexAttrib2s, 3, arg0, arg1, arg2); + +} +PFNGLVERTEXATTRIB2SPROC glad_debug_glVertexAttrib2s = glad_debug_impl_glVertexAttrib2s; +PFNGLVERTEXATTRIB2SVPROC glad_glVertexAttrib2sv; +void APIENTRY glad_debug_impl_glVertexAttrib2sv(GLuint arg0, const GLshort * arg1) { + _pre_call_callback("glVertexAttrib2sv", (void*)glVertexAttrib2sv, 2, arg0, arg1); + glad_glVertexAttrib2sv(arg0, arg1); + _post_call_callback("glVertexAttrib2sv", (void*)glVertexAttrib2sv, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIB2SVPROC glad_debug_glVertexAttrib2sv = glad_debug_impl_glVertexAttrib2sv; +PFNGLVERTEXATTRIB3DPROC glad_glVertexAttrib3d; +void APIENTRY glad_debug_impl_glVertexAttrib3d(GLuint arg0, GLdouble arg1, GLdouble arg2, GLdouble arg3) { + _pre_call_callback("glVertexAttrib3d", (void*)glVertexAttrib3d, 4, arg0, arg1, arg2, arg3); + glad_glVertexAttrib3d(arg0, arg1, arg2, arg3); + _post_call_callback("glVertexAttrib3d", (void*)glVertexAttrib3d, 4, arg0, arg1, arg2, arg3); + +} +PFNGLVERTEXATTRIB3DPROC glad_debug_glVertexAttrib3d = glad_debug_impl_glVertexAttrib3d; +PFNGLVERTEXATTRIB3DVPROC glad_glVertexAttrib3dv; +void APIENTRY glad_debug_impl_glVertexAttrib3dv(GLuint arg0, const GLdouble * arg1) { + _pre_call_callback("glVertexAttrib3dv", (void*)glVertexAttrib3dv, 2, arg0, arg1); + glad_glVertexAttrib3dv(arg0, arg1); + _post_call_callback("glVertexAttrib3dv", (void*)glVertexAttrib3dv, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIB3DVPROC glad_debug_glVertexAttrib3dv = glad_debug_impl_glVertexAttrib3dv; +PFNGLVERTEXATTRIB3FPROC glad_glVertexAttrib3f; +void APIENTRY glad_debug_impl_glVertexAttrib3f(GLuint arg0, GLfloat arg1, GLfloat arg2, GLfloat arg3) { + _pre_call_callback("glVertexAttrib3f", (void*)glVertexAttrib3f, 4, arg0, arg1, arg2, arg3); + glad_glVertexAttrib3f(arg0, arg1, arg2, arg3); + _post_call_callback("glVertexAttrib3f", (void*)glVertexAttrib3f, 4, arg0, arg1, arg2, arg3); + +} +PFNGLVERTEXATTRIB3FPROC glad_debug_glVertexAttrib3f = glad_debug_impl_glVertexAttrib3f; +PFNGLVERTEXATTRIB3FVPROC glad_glVertexAttrib3fv; +void APIENTRY glad_debug_impl_glVertexAttrib3fv(GLuint arg0, const GLfloat * arg1) { 
+ _pre_call_callback("glVertexAttrib3fv", (void*)glVertexAttrib3fv, 2, arg0, arg1); + glad_glVertexAttrib3fv(arg0, arg1); + _post_call_callback("glVertexAttrib3fv", (void*)glVertexAttrib3fv, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIB3FVPROC glad_debug_glVertexAttrib3fv = glad_debug_impl_glVertexAttrib3fv; +PFNGLVERTEXATTRIB3SPROC glad_glVertexAttrib3s; +void APIENTRY glad_debug_impl_glVertexAttrib3s(GLuint arg0, GLshort arg1, GLshort arg2, GLshort arg3) { + _pre_call_callback("glVertexAttrib3s", (void*)glVertexAttrib3s, 4, arg0, arg1, arg2, arg3); + glad_glVertexAttrib3s(arg0, arg1, arg2, arg3); + _post_call_callback("glVertexAttrib3s", (void*)glVertexAttrib3s, 4, arg0, arg1, arg2, arg3); + +} +PFNGLVERTEXATTRIB3SPROC glad_debug_glVertexAttrib3s = glad_debug_impl_glVertexAttrib3s; +PFNGLVERTEXATTRIB3SVPROC glad_glVertexAttrib3sv; +void APIENTRY glad_debug_impl_glVertexAttrib3sv(GLuint arg0, const GLshort * arg1) { + _pre_call_callback("glVertexAttrib3sv", (void*)glVertexAttrib3sv, 2, arg0, arg1); + glad_glVertexAttrib3sv(arg0, arg1); + _post_call_callback("glVertexAttrib3sv", (void*)glVertexAttrib3sv, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIB3SVPROC glad_debug_glVertexAttrib3sv = glad_debug_impl_glVertexAttrib3sv; +PFNGLVERTEXATTRIB4NBVPROC glad_glVertexAttrib4Nbv; +void APIENTRY glad_debug_impl_glVertexAttrib4Nbv(GLuint arg0, const GLbyte * arg1) { + _pre_call_callback("glVertexAttrib4Nbv", (void*)glVertexAttrib4Nbv, 2, arg0, arg1); + glad_glVertexAttrib4Nbv(arg0, arg1); + _post_call_callback("glVertexAttrib4Nbv", (void*)glVertexAttrib4Nbv, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIB4NBVPROC glad_debug_glVertexAttrib4Nbv = glad_debug_impl_glVertexAttrib4Nbv; +PFNGLVERTEXATTRIB4NIVPROC glad_glVertexAttrib4Niv; +void APIENTRY glad_debug_impl_glVertexAttrib4Niv(GLuint arg0, const GLint * arg1) { + _pre_call_callback("glVertexAttrib4Niv", (void*)glVertexAttrib4Niv, 2, arg0, arg1); + glad_glVertexAttrib4Niv(arg0, arg1); + _post_call_callback("glVertexAttrib4Niv", (void*)glVertexAttrib4Niv, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIB4NIVPROC glad_debug_glVertexAttrib4Niv = glad_debug_impl_glVertexAttrib4Niv; +PFNGLVERTEXATTRIB4NSVPROC glad_glVertexAttrib4Nsv; +void APIENTRY glad_debug_impl_glVertexAttrib4Nsv(GLuint arg0, const GLshort * arg1) { + _pre_call_callback("glVertexAttrib4Nsv", (void*)glVertexAttrib4Nsv, 2, arg0, arg1); + glad_glVertexAttrib4Nsv(arg0, arg1); + _post_call_callback("glVertexAttrib4Nsv", (void*)glVertexAttrib4Nsv, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIB4NSVPROC glad_debug_glVertexAttrib4Nsv = glad_debug_impl_glVertexAttrib4Nsv; +PFNGLVERTEXATTRIB4NUBPROC glad_glVertexAttrib4Nub; +void APIENTRY glad_debug_impl_glVertexAttrib4Nub(GLuint arg0, GLubyte arg1, GLubyte arg2, GLubyte arg3, GLubyte arg4) { + _pre_call_callback("glVertexAttrib4Nub", (void*)glVertexAttrib4Nub, 5, arg0, arg1, arg2, arg3, arg4); + glad_glVertexAttrib4Nub(arg0, arg1, arg2, arg3, arg4); + _post_call_callback("glVertexAttrib4Nub", (void*)glVertexAttrib4Nub, 5, arg0, arg1, arg2, arg3, arg4); + +} +PFNGLVERTEXATTRIB4NUBPROC glad_debug_glVertexAttrib4Nub = glad_debug_impl_glVertexAttrib4Nub; +PFNGLVERTEXATTRIB4NUBVPROC glad_glVertexAttrib4Nubv; +void APIENTRY glad_debug_impl_glVertexAttrib4Nubv(GLuint arg0, const GLubyte * arg1) { + _pre_call_callback("glVertexAttrib4Nubv", (void*)glVertexAttrib4Nubv, 2, arg0, arg1); + glad_glVertexAttrib4Nubv(arg0, arg1); + _post_call_callback("glVertexAttrib4Nubv", (void*)glVertexAttrib4Nubv, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIB4NUBVPROC glad_debug_glVertexAttrib4Nubv = 
glad_debug_impl_glVertexAttrib4Nubv; +PFNGLVERTEXATTRIB4NUIVPROC glad_glVertexAttrib4Nuiv; +void APIENTRY glad_debug_impl_glVertexAttrib4Nuiv(GLuint arg0, const GLuint * arg1) { + _pre_call_callback("glVertexAttrib4Nuiv", (void*)glVertexAttrib4Nuiv, 2, arg0, arg1); + glad_glVertexAttrib4Nuiv(arg0, arg1); + _post_call_callback("glVertexAttrib4Nuiv", (void*)glVertexAttrib4Nuiv, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIB4NUIVPROC glad_debug_glVertexAttrib4Nuiv = glad_debug_impl_glVertexAttrib4Nuiv; +PFNGLVERTEXATTRIB4NUSVPROC glad_glVertexAttrib4Nusv; +void APIENTRY glad_debug_impl_glVertexAttrib4Nusv(GLuint arg0, const GLushort * arg1) { + _pre_call_callback("glVertexAttrib4Nusv", (void*)glVertexAttrib4Nusv, 2, arg0, arg1); + glad_glVertexAttrib4Nusv(arg0, arg1); + _post_call_callback("glVertexAttrib4Nusv", (void*)glVertexAttrib4Nusv, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIB4NUSVPROC glad_debug_glVertexAttrib4Nusv = glad_debug_impl_glVertexAttrib4Nusv; +PFNGLVERTEXATTRIB4BVPROC glad_glVertexAttrib4bv; +void APIENTRY glad_debug_impl_glVertexAttrib4bv(GLuint arg0, const GLbyte * arg1) { + _pre_call_callback("glVertexAttrib4bv", (void*)glVertexAttrib4bv, 2, arg0, arg1); + glad_glVertexAttrib4bv(arg0, arg1); + _post_call_callback("glVertexAttrib4bv", (void*)glVertexAttrib4bv, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIB4BVPROC glad_debug_glVertexAttrib4bv = glad_debug_impl_glVertexAttrib4bv; +PFNGLVERTEXATTRIB4DPROC glad_glVertexAttrib4d; +void APIENTRY glad_debug_impl_glVertexAttrib4d(GLuint arg0, GLdouble arg1, GLdouble arg2, GLdouble arg3, GLdouble arg4) { + _pre_call_callback("glVertexAttrib4d", (void*)glVertexAttrib4d, 5, arg0, arg1, arg2, arg3, arg4); + glad_glVertexAttrib4d(arg0, arg1, arg2, arg3, arg4); + _post_call_callback("glVertexAttrib4d", (void*)glVertexAttrib4d, 5, arg0, arg1, arg2, arg3, arg4); + +} +PFNGLVERTEXATTRIB4DPROC glad_debug_glVertexAttrib4d = glad_debug_impl_glVertexAttrib4d; +PFNGLVERTEXATTRIB4DVPROC glad_glVertexAttrib4dv; +void APIENTRY glad_debug_impl_glVertexAttrib4dv(GLuint arg0, const GLdouble * arg1) { + _pre_call_callback("glVertexAttrib4dv", (void*)glVertexAttrib4dv, 2, arg0, arg1); + glad_glVertexAttrib4dv(arg0, arg1); + _post_call_callback("glVertexAttrib4dv", (void*)glVertexAttrib4dv, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIB4DVPROC glad_debug_glVertexAttrib4dv = glad_debug_impl_glVertexAttrib4dv; +PFNGLVERTEXATTRIB4FPROC glad_glVertexAttrib4f; +void APIENTRY glad_debug_impl_glVertexAttrib4f(GLuint arg0, GLfloat arg1, GLfloat arg2, GLfloat arg3, GLfloat arg4) { + _pre_call_callback("glVertexAttrib4f", (void*)glVertexAttrib4f, 5, arg0, arg1, arg2, arg3, arg4); + glad_glVertexAttrib4f(arg0, arg1, arg2, arg3, arg4); + _post_call_callback("glVertexAttrib4f", (void*)glVertexAttrib4f, 5, arg0, arg1, arg2, arg3, arg4); + +} +PFNGLVERTEXATTRIB4FPROC glad_debug_glVertexAttrib4f = glad_debug_impl_glVertexAttrib4f; +PFNGLVERTEXATTRIB4FVPROC glad_glVertexAttrib4fv; +void APIENTRY glad_debug_impl_glVertexAttrib4fv(GLuint arg0, const GLfloat * arg1) { + _pre_call_callback("glVertexAttrib4fv", (void*)glVertexAttrib4fv, 2, arg0, arg1); + glad_glVertexAttrib4fv(arg0, arg1); + _post_call_callback("glVertexAttrib4fv", (void*)glVertexAttrib4fv, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIB4FVPROC glad_debug_glVertexAttrib4fv = glad_debug_impl_glVertexAttrib4fv; +PFNGLVERTEXATTRIB4IVPROC glad_glVertexAttrib4iv; +void APIENTRY glad_debug_impl_glVertexAttrib4iv(GLuint arg0, const GLint * arg1) { + _pre_call_callback("glVertexAttrib4iv", (void*)glVertexAttrib4iv, 2, arg0, arg1); + 
glad_glVertexAttrib4iv(arg0, arg1); + _post_call_callback("glVertexAttrib4iv", (void*)glVertexAttrib4iv, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIB4IVPROC glad_debug_glVertexAttrib4iv = glad_debug_impl_glVertexAttrib4iv; +PFNGLVERTEXATTRIB4SPROC glad_glVertexAttrib4s; +void APIENTRY glad_debug_impl_glVertexAttrib4s(GLuint arg0, GLshort arg1, GLshort arg2, GLshort arg3, GLshort arg4) { + _pre_call_callback("glVertexAttrib4s", (void*)glVertexAttrib4s, 5, arg0, arg1, arg2, arg3, arg4); + glad_glVertexAttrib4s(arg0, arg1, arg2, arg3, arg4); + _post_call_callback("glVertexAttrib4s", (void*)glVertexAttrib4s, 5, arg0, arg1, arg2, arg3, arg4); + +} +PFNGLVERTEXATTRIB4SPROC glad_debug_glVertexAttrib4s = glad_debug_impl_glVertexAttrib4s; +PFNGLVERTEXATTRIB4SVPROC glad_glVertexAttrib4sv; +void APIENTRY glad_debug_impl_glVertexAttrib4sv(GLuint arg0, const GLshort * arg1) { + _pre_call_callback("glVertexAttrib4sv", (void*)glVertexAttrib4sv, 2, arg0, arg1); + glad_glVertexAttrib4sv(arg0, arg1); + _post_call_callback("glVertexAttrib4sv", (void*)glVertexAttrib4sv, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIB4SVPROC glad_debug_glVertexAttrib4sv = glad_debug_impl_glVertexAttrib4sv; +PFNGLVERTEXATTRIB4UBVPROC glad_glVertexAttrib4ubv; +void APIENTRY glad_debug_impl_glVertexAttrib4ubv(GLuint arg0, const GLubyte * arg1) { + _pre_call_callback("glVertexAttrib4ubv", (void*)glVertexAttrib4ubv, 2, arg0, arg1); + glad_glVertexAttrib4ubv(arg0, arg1); + _post_call_callback("glVertexAttrib4ubv", (void*)glVertexAttrib4ubv, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIB4UBVPROC glad_debug_glVertexAttrib4ubv = glad_debug_impl_glVertexAttrib4ubv; +PFNGLVERTEXATTRIB4UIVPROC glad_glVertexAttrib4uiv; +void APIENTRY glad_debug_impl_glVertexAttrib4uiv(GLuint arg0, const GLuint * arg1) { + _pre_call_callback("glVertexAttrib4uiv", (void*)glVertexAttrib4uiv, 2, arg0, arg1); + glad_glVertexAttrib4uiv(arg0, arg1); + _post_call_callback("glVertexAttrib4uiv", (void*)glVertexAttrib4uiv, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIB4UIVPROC glad_debug_glVertexAttrib4uiv = glad_debug_impl_glVertexAttrib4uiv; +PFNGLVERTEXATTRIB4USVPROC glad_glVertexAttrib4usv; +void APIENTRY glad_debug_impl_glVertexAttrib4usv(GLuint arg0, const GLushort * arg1) { + _pre_call_callback("glVertexAttrib4usv", (void*)glVertexAttrib4usv, 2, arg0, arg1); + glad_glVertexAttrib4usv(arg0, arg1); + _post_call_callback("glVertexAttrib4usv", (void*)glVertexAttrib4usv, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIB4USVPROC glad_debug_glVertexAttrib4usv = glad_debug_impl_glVertexAttrib4usv; +PFNGLVERTEXATTRIBDIVISORPROC glad_glVertexAttribDivisor; +void APIENTRY glad_debug_impl_glVertexAttribDivisor(GLuint arg0, GLuint arg1) { + _pre_call_callback("glVertexAttribDivisor", (void*)glVertexAttribDivisor, 2, arg0, arg1); + glad_glVertexAttribDivisor(arg0, arg1); + _post_call_callback("glVertexAttribDivisor", (void*)glVertexAttribDivisor, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIBDIVISORPROC glad_debug_glVertexAttribDivisor = glad_debug_impl_glVertexAttribDivisor; +PFNGLVERTEXATTRIBI1IPROC glad_glVertexAttribI1i; +void APIENTRY glad_debug_impl_glVertexAttribI1i(GLuint arg0, GLint arg1) { + _pre_call_callback("glVertexAttribI1i", (void*)glVertexAttribI1i, 2, arg0, arg1); + glad_glVertexAttribI1i(arg0, arg1); + _post_call_callback("glVertexAttribI1i", (void*)glVertexAttribI1i, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIBI1IPROC glad_debug_glVertexAttribI1i = glad_debug_impl_glVertexAttribI1i; +PFNGLVERTEXATTRIBI1IVPROC glad_glVertexAttribI1iv; +void APIENTRY glad_debug_impl_glVertexAttribI1iv(GLuint arg0, const 
GLint * arg1) { + _pre_call_callback("glVertexAttribI1iv", (void*)glVertexAttribI1iv, 2, arg0, arg1); + glad_glVertexAttribI1iv(arg0, arg1); + _post_call_callback("glVertexAttribI1iv", (void*)glVertexAttribI1iv, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIBI1IVPROC glad_debug_glVertexAttribI1iv = glad_debug_impl_glVertexAttribI1iv; +PFNGLVERTEXATTRIBI1UIPROC glad_glVertexAttribI1ui; +void APIENTRY glad_debug_impl_glVertexAttribI1ui(GLuint arg0, GLuint arg1) { + _pre_call_callback("glVertexAttribI1ui", (void*)glVertexAttribI1ui, 2, arg0, arg1); + glad_glVertexAttribI1ui(arg0, arg1); + _post_call_callback("glVertexAttribI1ui", (void*)glVertexAttribI1ui, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIBI1UIPROC glad_debug_glVertexAttribI1ui = glad_debug_impl_glVertexAttribI1ui; +PFNGLVERTEXATTRIBI1UIVPROC glad_glVertexAttribI1uiv; +void APIENTRY glad_debug_impl_glVertexAttribI1uiv(GLuint arg0, const GLuint * arg1) { + _pre_call_callback("glVertexAttribI1uiv", (void*)glVertexAttribI1uiv, 2, arg0, arg1); + glad_glVertexAttribI1uiv(arg0, arg1); + _post_call_callback("glVertexAttribI1uiv", (void*)glVertexAttribI1uiv, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIBI1UIVPROC glad_debug_glVertexAttribI1uiv = glad_debug_impl_glVertexAttribI1uiv; +PFNGLVERTEXATTRIBI2IPROC glad_glVertexAttribI2i; +void APIENTRY glad_debug_impl_glVertexAttribI2i(GLuint arg0, GLint arg1, GLint arg2) { + _pre_call_callback("glVertexAttribI2i", (void*)glVertexAttribI2i, 3, arg0, arg1, arg2); + glad_glVertexAttribI2i(arg0, arg1, arg2); + _post_call_callback("glVertexAttribI2i", (void*)glVertexAttribI2i, 3, arg0, arg1, arg2); + +} +PFNGLVERTEXATTRIBI2IPROC glad_debug_glVertexAttribI2i = glad_debug_impl_glVertexAttribI2i; +PFNGLVERTEXATTRIBI2IVPROC glad_glVertexAttribI2iv; +void APIENTRY glad_debug_impl_glVertexAttribI2iv(GLuint arg0, const GLint * arg1) { + _pre_call_callback("glVertexAttribI2iv", (void*)glVertexAttribI2iv, 2, arg0, arg1); + glad_glVertexAttribI2iv(arg0, arg1); + _post_call_callback("glVertexAttribI2iv", (void*)glVertexAttribI2iv, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIBI2IVPROC glad_debug_glVertexAttribI2iv = glad_debug_impl_glVertexAttribI2iv; +PFNGLVERTEXATTRIBI2UIPROC glad_glVertexAttribI2ui; +void APIENTRY glad_debug_impl_glVertexAttribI2ui(GLuint arg0, GLuint arg1, GLuint arg2) { + _pre_call_callback("glVertexAttribI2ui", (void*)glVertexAttribI2ui, 3, arg0, arg1, arg2); + glad_glVertexAttribI2ui(arg0, arg1, arg2); + _post_call_callback("glVertexAttribI2ui", (void*)glVertexAttribI2ui, 3, arg0, arg1, arg2); + +} +PFNGLVERTEXATTRIBI2UIPROC glad_debug_glVertexAttribI2ui = glad_debug_impl_glVertexAttribI2ui; +PFNGLVERTEXATTRIBI2UIVPROC glad_glVertexAttribI2uiv; +void APIENTRY glad_debug_impl_glVertexAttribI2uiv(GLuint arg0, const GLuint * arg1) { + _pre_call_callback("glVertexAttribI2uiv", (void*)glVertexAttribI2uiv, 2, arg0, arg1); + glad_glVertexAttribI2uiv(arg0, arg1); + _post_call_callback("glVertexAttribI2uiv", (void*)glVertexAttribI2uiv, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIBI2UIVPROC glad_debug_glVertexAttribI2uiv = glad_debug_impl_glVertexAttribI2uiv; +PFNGLVERTEXATTRIBI3IPROC glad_glVertexAttribI3i; +void APIENTRY glad_debug_impl_glVertexAttribI3i(GLuint arg0, GLint arg1, GLint arg2, GLint arg3) { + _pre_call_callback("glVertexAttribI3i", (void*)glVertexAttribI3i, 4, arg0, arg1, arg2, arg3); + glad_glVertexAttribI3i(arg0, arg1, arg2, arg3); + _post_call_callback("glVertexAttribI3i", (void*)glVertexAttribI3i, 4, arg0, arg1, arg2, arg3); + +} +PFNGLVERTEXATTRIBI3IPROC glad_debug_glVertexAttribI3i = 
glad_debug_impl_glVertexAttribI3i; +PFNGLVERTEXATTRIBI3IVPROC glad_glVertexAttribI3iv; +void APIENTRY glad_debug_impl_glVertexAttribI3iv(GLuint arg0, const GLint * arg1) { + _pre_call_callback("glVertexAttribI3iv", (void*)glVertexAttribI3iv, 2, arg0, arg1); + glad_glVertexAttribI3iv(arg0, arg1); + _post_call_callback("glVertexAttribI3iv", (void*)glVertexAttribI3iv, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIBI3IVPROC glad_debug_glVertexAttribI3iv = glad_debug_impl_glVertexAttribI3iv; +PFNGLVERTEXATTRIBI3UIPROC glad_glVertexAttribI3ui; +void APIENTRY glad_debug_impl_glVertexAttribI3ui(GLuint arg0, GLuint arg1, GLuint arg2, GLuint arg3) { + _pre_call_callback("glVertexAttribI3ui", (void*)glVertexAttribI3ui, 4, arg0, arg1, arg2, arg3); + glad_glVertexAttribI3ui(arg0, arg1, arg2, arg3); + _post_call_callback("glVertexAttribI3ui", (void*)glVertexAttribI3ui, 4, arg0, arg1, arg2, arg3); + +} +PFNGLVERTEXATTRIBI3UIPROC glad_debug_glVertexAttribI3ui = glad_debug_impl_glVertexAttribI3ui; +PFNGLVERTEXATTRIBI3UIVPROC glad_glVertexAttribI3uiv; +void APIENTRY glad_debug_impl_glVertexAttribI3uiv(GLuint arg0, const GLuint * arg1) { + _pre_call_callback("glVertexAttribI3uiv", (void*)glVertexAttribI3uiv, 2, arg0, arg1); + glad_glVertexAttribI3uiv(arg0, arg1); + _post_call_callback("glVertexAttribI3uiv", (void*)glVertexAttribI3uiv, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIBI3UIVPROC glad_debug_glVertexAttribI3uiv = glad_debug_impl_glVertexAttribI3uiv; +PFNGLVERTEXATTRIBI4BVPROC glad_glVertexAttribI4bv; +void APIENTRY glad_debug_impl_glVertexAttribI4bv(GLuint arg0, const GLbyte * arg1) { + _pre_call_callback("glVertexAttribI4bv", (void*)glVertexAttribI4bv, 2, arg0, arg1); + glad_glVertexAttribI4bv(arg0, arg1); + _post_call_callback("glVertexAttribI4bv", (void*)glVertexAttribI4bv, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIBI4BVPROC glad_debug_glVertexAttribI4bv = glad_debug_impl_glVertexAttribI4bv; +PFNGLVERTEXATTRIBI4IPROC glad_glVertexAttribI4i; +void APIENTRY glad_debug_impl_glVertexAttribI4i(GLuint arg0, GLint arg1, GLint arg2, GLint arg3, GLint arg4) { + _pre_call_callback("glVertexAttribI4i", (void*)glVertexAttribI4i, 5, arg0, arg1, arg2, arg3, arg4); + glad_glVertexAttribI4i(arg0, arg1, arg2, arg3, arg4); + _post_call_callback("glVertexAttribI4i", (void*)glVertexAttribI4i, 5, arg0, arg1, arg2, arg3, arg4); + +} +PFNGLVERTEXATTRIBI4IPROC glad_debug_glVertexAttribI4i = glad_debug_impl_glVertexAttribI4i; +PFNGLVERTEXATTRIBI4IVPROC glad_glVertexAttribI4iv; +void APIENTRY glad_debug_impl_glVertexAttribI4iv(GLuint arg0, const GLint * arg1) { + _pre_call_callback("glVertexAttribI4iv", (void*)glVertexAttribI4iv, 2, arg0, arg1); + glad_glVertexAttribI4iv(arg0, arg1); + _post_call_callback("glVertexAttribI4iv", (void*)glVertexAttribI4iv, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIBI4IVPROC glad_debug_glVertexAttribI4iv = glad_debug_impl_glVertexAttribI4iv; +PFNGLVERTEXATTRIBI4SVPROC glad_glVertexAttribI4sv; +void APIENTRY glad_debug_impl_glVertexAttribI4sv(GLuint arg0, const GLshort * arg1) { + _pre_call_callback("glVertexAttribI4sv", (void*)glVertexAttribI4sv, 2, arg0, arg1); + glad_glVertexAttribI4sv(arg0, arg1); + _post_call_callback("glVertexAttribI4sv", (void*)glVertexAttribI4sv, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIBI4SVPROC glad_debug_glVertexAttribI4sv = glad_debug_impl_glVertexAttribI4sv; +PFNGLVERTEXATTRIBI4UBVPROC glad_glVertexAttribI4ubv; +void APIENTRY glad_debug_impl_glVertexAttribI4ubv(GLuint arg0, const GLubyte * arg1) { + _pre_call_callback("glVertexAttribI4ubv", (void*)glVertexAttribI4ubv, 2, arg0, arg1); + 
glad_glVertexAttribI4ubv(arg0, arg1); + _post_call_callback("glVertexAttribI4ubv", (void*)glVertexAttribI4ubv, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIBI4UBVPROC glad_debug_glVertexAttribI4ubv = glad_debug_impl_glVertexAttribI4ubv; +PFNGLVERTEXATTRIBI4UIPROC glad_glVertexAttribI4ui; +void APIENTRY glad_debug_impl_glVertexAttribI4ui(GLuint arg0, GLuint arg1, GLuint arg2, GLuint arg3, GLuint arg4) { + _pre_call_callback("glVertexAttribI4ui", (void*)glVertexAttribI4ui, 5, arg0, arg1, arg2, arg3, arg4); + glad_glVertexAttribI4ui(arg0, arg1, arg2, arg3, arg4); + _post_call_callback("glVertexAttribI4ui", (void*)glVertexAttribI4ui, 5, arg0, arg1, arg2, arg3, arg4); + +} +PFNGLVERTEXATTRIBI4UIPROC glad_debug_glVertexAttribI4ui = glad_debug_impl_glVertexAttribI4ui; +PFNGLVERTEXATTRIBI4UIVPROC glad_glVertexAttribI4uiv; +void APIENTRY glad_debug_impl_glVertexAttribI4uiv(GLuint arg0, const GLuint * arg1) { + _pre_call_callback("glVertexAttribI4uiv", (void*)glVertexAttribI4uiv, 2, arg0, arg1); + glad_glVertexAttribI4uiv(arg0, arg1); + _post_call_callback("glVertexAttribI4uiv", (void*)glVertexAttribI4uiv, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIBI4UIVPROC glad_debug_glVertexAttribI4uiv = glad_debug_impl_glVertexAttribI4uiv; +PFNGLVERTEXATTRIBI4USVPROC glad_glVertexAttribI4usv; +void APIENTRY glad_debug_impl_glVertexAttribI4usv(GLuint arg0, const GLushort * arg1) { + _pre_call_callback("glVertexAttribI4usv", (void*)glVertexAttribI4usv, 2, arg0, arg1); + glad_glVertexAttribI4usv(arg0, arg1); + _post_call_callback("glVertexAttribI4usv", (void*)glVertexAttribI4usv, 2, arg0, arg1); + +} +PFNGLVERTEXATTRIBI4USVPROC glad_debug_glVertexAttribI4usv = glad_debug_impl_glVertexAttribI4usv; +PFNGLVERTEXATTRIBIPOINTERPROC glad_glVertexAttribIPointer; +void APIENTRY glad_debug_impl_glVertexAttribIPointer(GLuint arg0, GLint arg1, GLenum arg2, GLsizei arg3, const void * arg4) { + _pre_call_callback("glVertexAttribIPointer", (void*)glVertexAttribIPointer, 5, arg0, arg1, arg2, arg3, arg4); + glad_glVertexAttribIPointer(arg0, arg1, arg2, arg3, arg4); + _post_call_callback("glVertexAttribIPointer", (void*)glVertexAttribIPointer, 5, arg0, arg1, arg2, arg3, arg4); + +} +PFNGLVERTEXATTRIBIPOINTERPROC glad_debug_glVertexAttribIPointer = glad_debug_impl_glVertexAttribIPointer; +PFNGLVERTEXATTRIBP1UIPROC glad_glVertexAttribP1ui; +void APIENTRY glad_debug_impl_glVertexAttribP1ui(GLuint arg0, GLenum arg1, GLboolean arg2, GLuint arg3) { + _pre_call_callback("glVertexAttribP1ui", (void*)glVertexAttribP1ui, 4, arg0, arg1, arg2, arg3); + glad_glVertexAttribP1ui(arg0, arg1, arg2, arg3); + _post_call_callback("glVertexAttribP1ui", (void*)glVertexAttribP1ui, 4, arg0, arg1, arg2, arg3); + +} +PFNGLVERTEXATTRIBP1UIPROC glad_debug_glVertexAttribP1ui = glad_debug_impl_glVertexAttribP1ui; +PFNGLVERTEXATTRIBP1UIVPROC glad_glVertexAttribP1uiv; +void APIENTRY glad_debug_impl_glVertexAttribP1uiv(GLuint arg0, GLenum arg1, GLboolean arg2, const GLuint * arg3) { + _pre_call_callback("glVertexAttribP1uiv", (void*)glVertexAttribP1uiv, 4, arg0, arg1, arg2, arg3); + glad_glVertexAttribP1uiv(arg0, arg1, arg2, arg3); + _post_call_callback("glVertexAttribP1uiv", (void*)glVertexAttribP1uiv, 4, arg0, arg1, arg2, arg3); + +} +PFNGLVERTEXATTRIBP1UIVPROC glad_debug_glVertexAttribP1uiv = glad_debug_impl_glVertexAttribP1uiv; +PFNGLVERTEXATTRIBP2UIPROC glad_glVertexAttribP2ui; +void APIENTRY glad_debug_impl_glVertexAttribP2ui(GLuint arg0, GLenum arg1, GLboolean arg2, GLuint arg3) { + _pre_call_callback("glVertexAttribP2ui", (void*)glVertexAttribP2ui, 4, arg0, arg1, 
arg2, arg3); + glad_glVertexAttribP2ui(arg0, arg1, arg2, arg3); + _post_call_callback("glVertexAttribP2ui", (void*)glVertexAttribP2ui, 4, arg0, arg1, arg2, arg3); + +} +PFNGLVERTEXATTRIBP2UIPROC glad_debug_glVertexAttribP2ui = glad_debug_impl_glVertexAttribP2ui; +PFNGLVERTEXATTRIBP2UIVPROC glad_glVertexAttribP2uiv; +void APIENTRY glad_debug_impl_glVertexAttribP2uiv(GLuint arg0, GLenum arg1, GLboolean arg2, const GLuint * arg3) { + _pre_call_callback("glVertexAttribP2uiv", (void*)glVertexAttribP2uiv, 4, arg0, arg1, arg2, arg3); + glad_glVertexAttribP2uiv(arg0, arg1, arg2, arg3); + _post_call_callback("glVertexAttribP2uiv", (void*)glVertexAttribP2uiv, 4, arg0, arg1, arg2, arg3); + +} +PFNGLVERTEXATTRIBP2UIVPROC glad_debug_glVertexAttribP2uiv = glad_debug_impl_glVertexAttribP2uiv; +PFNGLVERTEXATTRIBP3UIPROC glad_glVertexAttribP3ui; +void APIENTRY glad_debug_impl_glVertexAttribP3ui(GLuint arg0, GLenum arg1, GLboolean arg2, GLuint arg3) { + _pre_call_callback("glVertexAttribP3ui", (void*)glVertexAttribP3ui, 4, arg0, arg1, arg2, arg3); + glad_glVertexAttribP3ui(arg0, arg1, arg2, arg3); + _post_call_callback("glVertexAttribP3ui", (void*)glVertexAttribP3ui, 4, arg0, arg1, arg2, arg3); + +} +PFNGLVERTEXATTRIBP3UIPROC glad_debug_glVertexAttribP3ui = glad_debug_impl_glVertexAttribP3ui; +PFNGLVERTEXATTRIBP3UIVPROC glad_glVertexAttribP3uiv; +void APIENTRY glad_debug_impl_glVertexAttribP3uiv(GLuint arg0, GLenum arg1, GLboolean arg2, const GLuint * arg3) { + _pre_call_callback("glVertexAttribP3uiv", (void*)glVertexAttribP3uiv, 4, arg0, arg1, arg2, arg3); + glad_glVertexAttribP3uiv(arg0, arg1, arg2, arg3); + _post_call_callback("glVertexAttribP3uiv", (void*)glVertexAttribP3uiv, 4, arg0, arg1, arg2, arg3); + +} +PFNGLVERTEXATTRIBP3UIVPROC glad_debug_glVertexAttribP3uiv = glad_debug_impl_glVertexAttribP3uiv; +PFNGLVERTEXATTRIBP4UIPROC glad_glVertexAttribP4ui; +void APIENTRY glad_debug_impl_glVertexAttribP4ui(GLuint arg0, GLenum arg1, GLboolean arg2, GLuint arg3) { + _pre_call_callback("glVertexAttribP4ui", (void*)glVertexAttribP4ui, 4, arg0, arg1, arg2, arg3); + glad_glVertexAttribP4ui(arg0, arg1, arg2, arg3); + _post_call_callback("glVertexAttribP4ui", (void*)glVertexAttribP4ui, 4, arg0, arg1, arg2, arg3); + +} +PFNGLVERTEXATTRIBP4UIPROC glad_debug_glVertexAttribP4ui = glad_debug_impl_glVertexAttribP4ui; +PFNGLVERTEXATTRIBP4UIVPROC glad_glVertexAttribP4uiv; +void APIENTRY glad_debug_impl_glVertexAttribP4uiv(GLuint arg0, GLenum arg1, GLboolean arg2, const GLuint * arg3) { + _pre_call_callback("glVertexAttribP4uiv", (void*)glVertexAttribP4uiv, 4, arg0, arg1, arg2, arg3); + glad_glVertexAttribP4uiv(arg0, arg1, arg2, arg3); + _post_call_callback("glVertexAttribP4uiv", (void*)glVertexAttribP4uiv, 4, arg0, arg1, arg2, arg3); + +} +PFNGLVERTEXATTRIBP4UIVPROC glad_debug_glVertexAttribP4uiv = glad_debug_impl_glVertexAttribP4uiv; +PFNGLVERTEXATTRIBPOINTERPROC glad_glVertexAttribPointer; +void APIENTRY glad_debug_impl_glVertexAttribPointer(GLuint arg0, GLint arg1, GLenum arg2, GLboolean arg3, GLsizei arg4, const void * arg5) { + _pre_call_callback("glVertexAttribPointer", (void*)glVertexAttribPointer, 6, arg0, arg1, arg2, arg3, arg4, arg5); + glad_glVertexAttribPointer(arg0, arg1, arg2, arg3, arg4, arg5); + _post_call_callback("glVertexAttribPointer", (void*)glVertexAttribPointer, 6, arg0, arg1, arg2, arg3, arg4, arg5); + +} +PFNGLVERTEXATTRIBPOINTERPROC glad_debug_glVertexAttribPointer = glad_debug_impl_glVertexAttribPointer; +PFNGLVERTEXP2UIPROC glad_glVertexP2ui; +void APIENTRY glad_debug_impl_glVertexP2ui(GLenum 
arg0, GLuint arg1) { + _pre_call_callback("glVertexP2ui", (void*)glVertexP2ui, 2, arg0, arg1); + glad_glVertexP2ui(arg0, arg1); + _post_call_callback("glVertexP2ui", (void*)glVertexP2ui, 2, arg0, arg1); + +} +PFNGLVERTEXP2UIPROC glad_debug_glVertexP2ui = glad_debug_impl_glVertexP2ui; +PFNGLVERTEXP2UIVPROC glad_glVertexP2uiv; +void APIENTRY glad_debug_impl_glVertexP2uiv(GLenum arg0, const GLuint * arg1) { + _pre_call_callback("glVertexP2uiv", (void*)glVertexP2uiv, 2, arg0, arg1); + glad_glVertexP2uiv(arg0, arg1); + _post_call_callback("glVertexP2uiv", (void*)glVertexP2uiv, 2, arg0, arg1); + +} +PFNGLVERTEXP2UIVPROC glad_debug_glVertexP2uiv = glad_debug_impl_glVertexP2uiv; +PFNGLVERTEXP3UIPROC glad_glVertexP3ui; +void APIENTRY glad_debug_impl_glVertexP3ui(GLenum arg0, GLuint arg1) { + _pre_call_callback("glVertexP3ui", (void*)glVertexP3ui, 2, arg0, arg1); + glad_glVertexP3ui(arg0, arg1); + _post_call_callback("glVertexP3ui", (void*)glVertexP3ui, 2, arg0, arg1); + +} +PFNGLVERTEXP3UIPROC glad_debug_glVertexP3ui = glad_debug_impl_glVertexP3ui; +PFNGLVERTEXP3UIVPROC glad_glVertexP3uiv; +void APIENTRY glad_debug_impl_glVertexP3uiv(GLenum arg0, const GLuint * arg1) { + _pre_call_callback("glVertexP3uiv", (void*)glVertexP3uiv, 2, arg0, arg1); + glad_glVertexP3uiv(arg0, arg1); + _post_call_callback("glVertexP3uiv", (void*)glVertexP3uiv, 2, arg0, arg1); + +} +PFNGLVERTEXP3UIVPROC glad_debug_glVertexP3uiv = glad_debug_impl_glVertexP3uiv; +PFNGLVERTEXP4UIPROC glad_glVertexP4ui; +void APIENTRY glad_debug_impl_glVertexP4ui(GLenum arg0, GLuint arg1) { + _pre_call_callback("glVertexP4ui", (void*)glVertexP4ui, 2, arg0, arg1); + glad_glVertexP4ui(arg0, arg1); + _post_call_callback("glVertexP4ui", (void*)glVertexP4ui, 2, arg0, arg1); + +} +PFNGLVERTEXP4UIPROC glad_debug_glVertexP4ui = glad_debug_impl_glVertexP4ui; +PFNGLVERTEXP4UIVPROC glad_glVertexP4uiv; +void APIENTRY glad_debug_impl_glVertexP4uiv(GLenum arg0, const GLuint * arg1) { + _pre_call_callback("glVertexP4uiv", (void*)glVertexP4uiv, 2, arg0, arg1); + glad_glVertexP4uiv(arg0, arg1); + _post_call_callback("glVertexP4uiv", (void*)glVertexP4uiv, 2, arg0, arg1); + +} +PFNGLVERTEXP4UIVPROC glad_debug_glVertexP4uiv = glad_debug_impl_glVertexP4uiv; +PFNGLVIEWPORTPROC glad_glViewport; +void APIENTRY glad_debug_impl_glViewport(GLint arg0, GLint arg1, GLsizei arg2, GLsizei arg3) { + _pre_call_callback("glViewport", (void*)glViewport, 4, arg0, arg1, arg2, arg3); + glad_glViewport(arg0, arg1, arg2, arg3); + _post_call_callback("glViewport", (void*)glViewport, 4, arg0, arg1, arg2, arg3); + +} +PFNGLVIEWPORTPROC glad_debug_glViewport = glad_debug_impl_glViewport; +PFNGLWAITSYNCPROC glad_glWaitSync; +void APIENTRY glad_debug_impl_glWaitSync(GLsync arg0, GLbitfield arg1, GLuint64 arg2) { + _pre_call_callback("glWaitSync", (void*)glWaitSync, 3, arg0, arg1, arg2); + glad_glWaitSync(arg0, arg1, arg2); + _post_call_callback("glWaitSync", (void*)glWaitSync, 3, arg0, arg1, arg2); + +} +PFNGLWAITSYNCPROC glad_debug_glWaitSync = glad_debug_impl_glWaitSync; +static void load_GL_VERSION_1_0(GLADloadproc load) { + if(!GLAD_GL_VERSION_1_0) return; + glad_glCullFace = (PFNGLCULLFACEPROC)load("glCullFace"); + glad_glFrontFace = (PFNGLFRONTFACEPROC)load("glFrontFace"); + glad_glHint = (PFNGLHINTPROC)load("glHint"); + glad_glLineWidth = (PFNGLLINEWIDTHPROC)load("glLineWidth"); + glad_glPointSize = (PFNGLPOINTSIZEPROC)load("glPointSize"); + glad_glPolygonMode = (PFNGLPOLYGONMODEPROC)load("glPolygonMode"); + glad_glScissor = (PFNGLSCISSORPROC)load("glScissor"); + 
glad_glTexParameterf = (PFNGLTEXPARAMETERFPROC)load("glTexParameterf"); + glad_glTexParameterfv = (PFNGLTEXPARAMETERFVPROC)load("glTexParameterfv"); + glad_glTexParameteri = (PFNGLTEXPARAMETERIPROC)load("glTexParameteri"); + glad_glTexParameteriv = (PFNGLTEXPARAMETERIVPROC)load("glTexParameteriv"); + glad_glTexImage1D = (PFNGLTEXIMAGE1DPROC)load("glTexImage1D"); + glad_glTexImage2D = (PFNGLTEXIMAGE2DPROC)load("glTexImage2D"); + glad_glDrawBuffer = (PFNGLDRAWBUFFERPROC)load("glDrawBuffer"); + glad_glClear = (PFNGLCLEARPROC)load("glClear"); + glad_glClearColor = (PFNGLCLEARCOLORPROC)load("glClearColor"); + glad_glClearStencil = (PFNGLCLEARSTENCILPROC)load("glClearStencil"); + glad_glClearDepth = (PFNGLCLEARDEPTHPROC)load("glClearDepth"); + glad_glStencilMask = (PFNGLSTENCILMASKPROC)load("glStencilMask"); + glad_glColorMask = (PFNGLCOLORMASKPROC)load("glColorMask"); + glad_glDepthMask = (PFNGLDEPTHMASKPROC)load("glDepthMask"); + glad_glDisable = (PFNGLDISABLEPROC)load("glDisable"); + glad_glEnable = (PFNGLENABLEPROC)load("glEnable"); + glad_glFinish = (PFNGLFINISHPROC)load("glFinish"); + glad_glFlush = (PFNGLFLUSHPROC)load("glFlush"); + glad_glBlendFunc = (PFNGLBLENDFUNCPROC)load("glBlendFunc"); + glad_glLogicOp = (PFNGLLOGICOPPROC)load("glLogicOp"); + glad_glStencilFunc = (PFNGLSTENCILFUNCPROC)load("glStencilFunc"); + glad_glStencilOp = (PFNGLSTENCILOPPROC)load("glStencilOp"); + glad_glDepthFunc = (PFNGLDEPTHFUNCPROC)load("glDepthFunc"); + glad_glPixelStoref = (PFNGLPIXELSTOREFPROC)load("glPixelStoref"); + glad_glPixelStorei = (PFNGLPIXELSTOREIPROC)load("glPixelStorei"); + glad_glReadBuffer = (PFNGLREADBUFFERPROC)load("glReadBuffer"); + glad_glReadPixels = (PFNGLREADPIXELSPROC)load("glReadPixels"); + glad_glGetBooleanv = (PFNGLGETBOOLEANVPROC)load("glGetBooleanv"); + glad_glGetDoublev = (PFNGLGETDOUBLEVPROC)load("glGetDoublev"); + glad_glGetError = (PFNGLGETERRORPROC)load("glGetError"); + glad_glGetFloatv = (PFNGLGETFLOATVPROC)load("glGetFloatv"); + glad_glGetIntegerv = (PFNGLGETINTEGERVPROC)load("glGetIntegerv"); + glad_glGetString = (PFNGLGETSTRINGPROC)load("glGetString"); + glad_glGetTexImage = (PFNGLGETTEXIMAGEPROC)load("glGetTexImage"); + glad_glGetTexParameterfv = (PFNGLGETTEXPARAMETERFVPROC)load("glGetTexParameterfv"); + glad_glGetTexParameteriv = (PFNGLGETTEXPARAMETERIVPROC)load("glGetTexParameteriv"); + glad_glGetTexLevelParameterfv = (PFNGLGETTEXLEVELPARAMETERFVPROC)load("glGetTexLevelParameterfv"); + glad_glGetTexLevelParameteriv = (PFNGLGETTEXLEVELPARAMETERIVPROC)load("glGetTexLevelParameteriv"); + glad_glIsEnabled = (PFNGLISENABLEDPROC)load("glIsEnabled"); + glad_glDepthRange = (PFNGLDEPTHRANGEPROC)load("glDepthRange"); + glad_glViewport = (PFNGLVIEWPORTPROC)load("glViewport"); +} +static void load_GL_VERSION_1_1(GLADloadproc load) { + if(!GLAD_GL_VERSION_1_1) return; + glad_glDrawArrays = (PFNGLDRAWARRAYSPROC)load("glDrawArrays"); + glad_glDrawElements = (PFNGLDRAWELEMENTSPROC)load("glDrawElements"); + glad_glPolygonOffset = (PFNGLPOLYGONOFFSETPROC)load("glPolygonOffset"); + glad_glCopyTexImage1D = (PFNGLCOPYTEXIMAGE1DPROC)load("glCopyTexImage1D"); + glad_glCopyTexImage2D = (PFNGLCOPYTEXIMAGE2DPROC)load("glCopyTexImage2D"); + glad_glCopyTexSubImage1D = (PFNGLCOPYTEXSUBIMAGE1DPROC)load("glCopyTexSubImage1D"); + glad_glCopyTexSubImage2D = (PFNGLCOPYTEXSUBIMAGE2DPROC)load("glCopyTexSubImage2D"); + glad_glTexSubImage1D = (PFNGLTEXSUBIMAGE1DPROC)load("glTexSubImage1D"); + glad_glTexSubImage2D = (PFNGLTEXSUBIMAGE2DPROC)load("glTexSubImage2D"); + glad_glBindTexture = 
(PFNGLBINDTEXTUREPROC)load("glBindTexture"); + glad_glDeleteTextures = (PFNGLDELETETEXTURESPROC)load("glDeleteTextures"); + glad_glGenTextures = (PFNGLGENTEXTURESPROC)load("glGenTextures"); + glad_glIsTexture = (PFNGLISTEXTUREPROC)load("glIsTexture"); +} +static void load_GL_VERSION_1_2(GLADloadproc load) { + if(!GLAD_GL_VERSION_1_2) return; + glad_glDrawRangeElements = (PFNGLDRAWRANGEELEMENTSPROC)load("glDrawRangeElements"); + glad_glTexImage3D = (PFNGLTEXIMAGE3DPROC)load("glTexImage3D"); + glad_glTexSubImage3D = (PFNGLTEXSUBIMAGE3DPROC)load("glTexSubImage3D"); + glad_glCopyTexSubImage3D = (PFNGLCOPYTEXSUBIMAGE3DPROC)load("glCopyTexSubImage3D"); +} +static void load_GL_VERSION_1_3(GLADloadproc load) { + if(!GLAD_GL_VERSION_1_3) return; + glad_glActiveTexture = (PFNGLACTIVETEXTUREPROC)load("glActiveTexture"); + glad_glSampleCoverage = (PFNGLSAMPLECOVERAGEPROC)load("glSampleCoverage"); + glad_glCompressedTexImage3D = (PFNGLCOMPRESSEDTEXIMAGE3DPROC)load("glCompressedTexImage3D"); + glad_glCompressedTexImage2D = (PFNGLCOMPRESSEDTEXIMAGE2DPROC)load("glCompressedTexImage2D"); + glad_glCompressedTexImage1D = (PFNGLCOMPRESSEDTEXIMAGE1DPROC)load("glCompressedTexImage1D"); + glad_glCompressedTexSubImage3D = (PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC)load("glCompressedTexSubImage3D"); + glad_glCompressedTexSubImage2D = (PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC)load("glCompressedTexSubImage2D"); + glad_glCompressedTexSubImage1D = (PFNGLCOMPRESSEDTEXSUBIMAGE1DPROC)load("glCompressedTexSubImage1D"); + glad_glGetCompressedTexImage = (PFNGLGETCOMPRESSEDTEXIMAGEPROC)load("glGetCompressedTexImage"); +} +static void load_GL_VERSION_1_4(GLADloadproc load) { + if(!GLAD_GL_VERSION_1_4) return; + glad_glBlendFuncSeparate = (PFNGLBLENDFUNCSEPARATEPROC)load("glBlendFuncSeparate"); + glad_glMultiDrawArrays = (PFNGLMULTIDRAWARRAYSPROC)load("glMultiDrawArrays"); + glad_glMultiDrawElements = (PFNGLMULTIDRAWELEMENTSPROC)load("glMultiDrawElements"); + glad_glPointParameterf = (PFNGLPOINTPARAMETERFPROC)load("glPointParameterf"); + glad_glPointParameterfv = (PFNGLPOINTPARAMETERFVPROC)load("glPointParameterfv"); + glad_glPointParameteri = (PFNGLPOINTPARAMETERIPROC)load("glPointParameteri"); + glad_glPointParameteriv = (PFNGLPOINTPARAMETERIVPROC)load("glPointParameteriv"); + glad_glBlendColor = (PFNGLBLENDCOLORPROC)load("glBlendColor"); + glad_glBlendEquation = (PFNGLBLENDEQUATIONPROC)load("glBlendEquation"); +} +static void load_GL_VERSION_1_5(GLADloadproc load) { + if(!GLAD_GL_VERSION_1_5) return; + glad_glGenQueries = (PFNGLGENQUERIESPROC)load("glGenQueries"); + glad_glDeleteQueries = (PFNGLDELETEQUERIESPROC)load("glDeleteQueries"); + glad_glIsQuery = (PFNGLISQUERYPROC)load("glIsQuery"); + glad_glBeginQuery = (PFNGLBEGINQUERYPROC)load("glBeginQuery"); + glad_glEndQuery = (PFNGLENDQUERYPROC)load("glEndQuery"); + glad_glGetQueryiv = (PFNGLGETQUERYIVPROC)load("glGetQueryiv"); + glad_glGetQueryObjectiv = (PFNGLGETQUERYOBJECTIVPROC)load("glGetQueryObjectiv"); + glad_glGetQueryObjectuiv = (PFNGLGETQUERYOBJECTUIVPROC)load("glGetQueryObjectuiv"); + glad_glBindBuffer = (PFNGLBINDBUFFERPROC)load("glBindBuffer"); + glad_glDeleteBuffers = (PFNGLDELETEBUFFERSPROC)load("glDeleteBuffers"); + glad_glGenBuffers = (PFNGLGENBUFFERSPROC)load("glGenBuffers"); + glad_glIsBuffer = (PFNGLISBUFFERPROC)load("glIsBuffer"); + glad_glBufferData = (PFNGLBUFFERDATAPROC)load("glBufferData"); + glad_glBufferSubData = (PFNGLBUFFERSUBDATAPROC)load("glBufferSubData"); + glad_glGetBufferSubData = (PFNGLGETBUFFERSUBDATAPROC)load("glGetBufferSubData"); + glad_glMapBuffer = 
(PFNGLMAPBUFFERPROC)load("glMapBuffer"); + glad_glUnmapBuffer = (PFNGLUNMAPBUFFERPROC)load("glUnmapBuffer"); + glad_glGetBufferParameteriv = (PFNGLGETBUFFERPARAMETERIVPROC)load("glGetBufferParameteriv"); + glad_glGetBufferPointerv = (PFNGLGETBUFFERPOINTERVPROC)load("glGetBufferPointerv"); +} +static void load_GL_VERSION_2_0(GLADloadproc load) { + if(!GLAD_GL_VERSION_2_0) return; + glad_glBlendEquationSeparate = (PFNGLBLENDEQUATIONSEPARATEPROC)load("glBlendEquationSeparate"); + glad_glDrawBuffers = (PFNGLDRAWBUFFERSPROC)load("glDrawBuffers"); + glad_glStencilOpSeparate = (PFNGLSTENCILOPSEPARATEPROC)load("glStencilOpSeparate"); + glad_glStencilFuncSeparate = (PFNGLSTENCILFUNCSEPARATEPROC)load("glStencilFuncSeparate"); + glad_glStencilMaskSeparate = (PFNGLSTENCILMASKSEPARATEPROC)load("glStencilMaskSeparate"); + glad_glAttachShader = (PFNGLATTACHSHADERPROC)load("glAttachShader"); + glad_glBindAttribLocation = (PFNGLBINDATTRIBLOCATIONPROC)load("glBindAttribLocation"); + glad_glCompileShader = (PFNGLCOMPILESHADERPROC)load("glCompileShader"); + glad_glCreateProgram = (PFNGLCREATEPROGRAMPROC)load("glCreateProgram"); + glad_glCreateShader = (PFNGLCREATESHADERPROC)load("glCreateShader"); + glad_glDeleteProgram = (PFNGLDELETEPROGRAMPROC)load("glDeleteProgram"); + glad_glDeleteShader = (PFNGLDELETESHADERPROC)load("glDeleteShader"); + glad_glDetachShader = (PFNGLDETACHSHADERPROC)load("glDetachShader"); + glad_glDisableVertexAttribArray = (PFNGLDISABLEVERTEXATTRIBARRAYPROC)load("glDisableVertexAttribArray"); + glad_glEnableVertexAttribArray = (PFNGLENABLEVERTEXATTRIBARRAYPROC)load("glEnableVertexAttribArray"); + glad_glGetActiveAttrib = (PFNGLGETACTIVEATTRIBPROC)load("glGetActiveAttrib"); + glad_glGetActiveUniform = (PFNGLGETACTIVEUNIFORMPROC)load("glGetActiveUniform"); + glad_glGetAttachedShaders = (PFNGLGETATTACHEDSHADERSPROC)load("glGetAttachedShaders"); + glad_glGetAttribLocation = (PFNGLGETATTRIBLOCATIONPROC)load("glGetAttribLocation"); + glad_glGetProgramiv = (PFNGLGETPROGRAMIVPROC)load("glGetProgramiv"); + glad_glGetProgramInfoLog = (PFNGLGETPROGRAMINFOLOGPROC)load("glGetProgramInfoLog"); + glad_glGetShaderiv = (PFNGLGETSHADERIVPROC)load("glGetShaderiv"); + glad_glGetShaderInfoLog = (PFNGLGETSHADERINFOLOGPROC)load("glGetShaderInfoLog"); + glad_glGetShaderSource = (PFNGLGETSHADERSOURCEPROC)load("glGetShaderSource"); + glad_glGetUniformLocation = (PFNGLGETUNIFORMLOCATIONPROC)load("glGetUniformLocation"); + glad_glGetUniformfv = (PFNGLGETUNIFORMFVPROC)load("glGetUniformfv"); + glad_glGetUniformiv = (PFNGLGETUNIFORMIVPROC)load("glGetUniformiv"); + glad_glGetVertexAttribdv = (PFNGLGETVERTEXATTRIBDVPROC)load("glGetVertexAttribdv"); + glad_glGetVertexAttribfv = (PFNGLGETVERTEXATTRIBFVPROC)load("glGetVertexAttribfv"); + glad_glGetVertexAttribiv = (PFNGLGETVERTEXATTRIBIVPROC)load("glGetVertexAttribiv"); + glad_glGetVertexAttribPointerv = (PFNGLGETVERTEXATTRIBPOINTERVPROC)load("glGetVertexAttribPointerv"); + glad_glIsProgram = (PFNGLISPROGRAMPROC)load("glIsProgram"); + glad_glIsShader = (PFNGLISSHADERPROC)load("glIsShader"); + glad_glLinkProgram = (PFNGLLINKPROGRAMPROC)load("glLinkProgram"); + glad_glShaderSource = (PFNGLSHADERSOURCEPROC)load("glShaderSource"); + glad_glUseProgram = (PFNGLUSEPROGRAMPROC)load("glUseProgram"); + glad_glUniform1f = (PFNGLUNIFORM1FPROC)load("glUniform1f"); + glad_glUniform2f = (PFNGLUNIFORM2FPROC)load("glUniform2f"); + glad_glUniform3f = (PFNGLUNIFORM3FPROC)load("glUniform3f"); + glad_glUniform4f = (PFNGLUNIFORM4FPROC)load("glUniform4f"); + glad_glUniform1i = 
(PFNGLUNIFORM1IPROC)load("glUniform1i"); + glad_glUniform2i = (PFNGLUNIFORM2IPROC)load("glUniform2i"); + glad_glUniform3i = (PFNGLUNIFORM3IPROC)load("glUniform3i"); + glad_glUniform4i = (PFNGLUNIFORM4IPROC)load("glUniform4i"); + glad_glUniform1fv = (PFNGLUNIFORM1FVPROC)load("glUniform1fv"); + glad_glUniform2fv = (PFNGLUNIFORM2FVPROC)load("glUniform2fv"); + glad_glUniform3fv = (PFNGLUNIFORM3FVPROC)load("glUniform3fv"); + glad_glUniform4fv = (PFNGLUNIFORM4FVPROC)load("glUniform4fv"); + glad_glUniform1iv = (PFNGLUNIFORM1IVPROC)load("glUniform1iv"); + glad_glUniform2iv = (PFNGLUNIFORM2IVPROC)load("glUniform2iv"); + glad_glUniform3iv = (PFNGLUNIFORM3IVPROC)load("glUniform3iv"); + glad_glUniform4iv = (PFNGLUNIFORM4IVPROC)load("glUniform4iv"); + glad_glUniformMatrix2fv = (PFNGLUNIFORMMATRIX2FVPROC)load("glUniformMatrix2fv"); + glad_glUniformMatrix3fv = (PFNGLUNIFORMMATRIX3FVPROC)load("glUniformMatrix3fv"); + glad_glUniformMatrix4fv = (PFNGLUNIFORMMATRIX4FVPROC)load("glUniformMatrix4fv"); + glad_glValidateProgram = (PFNGLVALIDATEPROGRAMPROC)load("glValidateProgram"); + glad_glVertexAttrib1d = (PFNGLVERTEXATTRIB1DPROC)load("glVertexAttrib1d"); + glad_glVertexAttrib1dv = (PFNGLVERTEXATTRIB1DVPROC)load("glVertexAttrib1dv"); + glad_glVertexAttrib1f = (PFNGLVERTEXATTRIB1FPROC)load("glVertexAttrib1f"); + glad_glVertexAttrib1fv = (PFNGLVERTEXATTRIB1FVPROC)load("glVertexAttrib1fv"); + glad_glVertexAttrib1s = (PFNGLVERTEXATTRIB1SPROC)load("glVertexAttrib1s"); + glad_glVertexAttrib1sv = (PFNGLVERTEXATTRIB1SVPROC)load("glVertexAttrib1sv"); + glad_glVertexAttrib2d = (PFNGLVERTEXATTRIB2DPROC)load("glVertexAttrib2d"); + glad_glVertexAttrib2dv = (PFNGLVERTEXATTRIB2DVPROC)load("glVertexAttrib2dv"); + glad_glVertexAttrib2f = (PFNGLVERTEXATTRIB2FPROC)load("glVertexAttrib2f"); + glad_glVertexAttrib2fv = (PFNGLVERTEXATTRIB2FVPROC)load("glVertexAttrib2fv"); + glad_glVertexAttrib2s = (PFNGLVERTEXATTRIB2SPROC)load("glVertexAttrib2s"); + glad_glVertexAttrib2sv = (PFNGLVERTEXATTRIB2SVPROC)load("glVertexAttrib2sv"); + glad_glVertexAttrib3d = (PFNGLVERTEXATTRIB3DPROC)load("glVertexAttrib3d"); + glad_glVertexAttrib3dv = (PFNGLVERTEXATTRIB3DVPROC)load("glVertexAttrib3dv"); + glad_glVertexAttrib3f = (PFNGLVERTEXATTRIB3FPROC)load("glVertexAttrib3f"); + glad_glVertexAttrib3fv = (PFNGLVERTEXATTRIB3FVPROC)load("glVertexAttrib3fv"); + glad_glVertexAttrib3s = (PFNGLVERTEXATTRIB3SPROC)load("glVertexAttrib3s"); + glad_glVertexAttrib3sv = (PFNGLVERTEXATTRIB3SVPROC)load("glVertexAttrib3sv"); + glad_glVertexAttrib4Nbv = (PFNGLVERTEXATTRIB4NBVPROC)load("glVertexAttrib4Nbv"); + glad_glVertexAttrib4Niv = (PFNGLVERTEXATTRIB4NIVPROC)load("glVertexAttrib4Niv"); + glad_glVertexAttrib4Nsv = (PFNGLVERTEXATTRIB4NSVPROC)load("glVertexAttrib4Nsv"); + glad_glVertexAttrib4Nub = (PFNGLVERTEXATTRIB4NUBPROC)load("glVertexAttrib4Nub"); + glad_glVertexAttrib4Nubv = (PFNGLVERTEXATTRIB4NUBVPROC)load("glVertexAttrib4Nubv"); + glad_glVertexAttrib4Nuiv = (PFNGLVERTEXATTRIB4NUIVPROC)load("glVertexAttrib4Nuiv"); + glad_glVertexAttrib4Nusv = (PFNGLVERTEXATTRIB4NUSVPROC)load("glVertexAttrib4Nusv"); + glad_glVertexAttrib4bv = (PFNGLVERTEXATTRIB4BVPROC)load("glVertexAttrib4bv"); + glad_glVertexAttrib4d = (PFNGLVERTEXATTRIB4DPROC)load("glVertexAttrib4d"); + glad_glVertexAttrib4dv = (PFNGLVERTEXATTRIB4DVPROC)load("glVertexAttrib4dv"); + glad_glVertexAttrib4f = (PFNGLVERTEXATTRIB4FPROC)load("glVertexAttrib4f"); + glad_glVertexAttrib4fv = (PFNGLVERTEXATTRIB4FVPROC)load("glVertexAttrib4fv"); + glad_glVertexAttrib4iv = (PFNGLVERTEXATTRIB4IVPROC)load("glVertexAttrib4iv"); + 
glad_glVertexAttrib4s = (PFNGLVERTEXATTRIB4SPROC)load("glVertexAttrib4s"); + glad_glVertexAttrib4sv = (PFNGLVERTEXATTRIB4SVPROC)load("glVertexAttrib4sv"); + glad_glVertexAttrib4ubv = (PFNGLVERTEXATTRIB4UBVPROC)load("glVertexAttrib4ubv"); + glad_glVertexAttrib4uiv = (PFNGLVERTEXATTRIB4UIVPROC)load("glVertexAttrib4uiv"); + glad_glVertexAttrib4usv = (PFNGLVERTEXATTRIB4USVPROC)load("glVertexAttrib4usv"); + glad_glVertexAttribPointer = (PFNGLVERTEXATTRIBPOINTERPROC)load("glVertexAttribPointer"); +} +static void load_GL_VERSION_2_1(GLADloadproc load) { + if(!GLAD_GL_VERSION_2_1) return; + glad_glUniformMatrix2x3fv = (PFNGLUNIFORMMATRIX2X3FVPROC)load("glUniformMatrix2x3fv"); + glad_glUniformMatrix3x2fv = (PFNGLUNIFORMMATRIX3X2FVPROC)load("glUniformMatrix3x2fv"); + glad_glUniformMatrix2x4fv = (PFNGLUNIFORMMATRIX2X4FVPROC)load("glUniformMatrix2x4fv"); + glad_glUniformMatrix4x2fv = (PFNGLUNIFORMMATRIX4X2FVPROC)load("glUniformMatrix4x2fv"); + glad_glUniformMatrix3x4fv = (PFNGLUNIFORMMATRIX3X4FVPROC)load("glUniformMatrix3x4fv"); + glad_glUniformMatrix4x3fv = (PFNGLUNIFORMMATRIX4X3FVPROC)load("glUniformMatrix4x3fv"); +} +static void load_GL_VERSION_3_0(GLADloadproc load) { + if(!GLAD_GL_VERSION_3_0) return; + glad_glColorMaski = (PFNGLCOLORMASKIPROC)load("glColorMaski"); + glad_glGetBooleani_v = (PFNGLGETBOOLEANI_VPROC)load("glGetBooleani_v"); + glad_glGetIntegeri_v = (PFNGLGETINTEGERI_VPROC)load("glGetIntegeri_v"); + glad_glEnablei = (PFNGLENABLEIPROC)load("glEnablei"); + glad_glDisablei = (PFNGLDISABLEIPROC)load("glDisablei"); + glad_glIsEnabledi = (PFNGLISENABLEDIPROC)load("glIsEnabledi"); + glad_glBeginTransformFeedback = (PFNGLBEGINTRANSFORMFEEDBACKPROC)load("glBeginTransformFeedback"); + glad_glEndTransformFeedback = (PFNGLENDTRANSFORMFEEDBACKPROC)load("glEndTransformFeedback"); + glad_glBindBufferRange = (PFNGLBINDBUFFERRANGEPROC)load("glBindBufferRange"); + glad_glBindBufferBase = (PFNGLBINDBUFFERBASEPROC)load("glBindBufferBase"); + glad_glTransformFeedbackVaryings = (PFNGLTRANSFORMFEEDBACKVARYINGSPROC)load("glTransformFeedbackVaryings"); + glad_glGetTransformFeedbackVarying = (PFNGLGETTRANSFORMFEEDBACKVARYINGPROC)load("glGetTransformFeedbackVarying"); + glad_glClampColor = (PFNGLCLAMPCOLORPROC)load("glClampColor"); + glad_glBeginConditionalRender = (PFNGLBEGINCONDITIONALRENDERPROC)load("glBeginConditionalRender"); + glad_glEndConditionalRender = (PFNGLENDCONDITIONALRENDERPROC)load("glEndConditionalRender"); + glad_glVertexAttribIPointer = (PFNGLVERTEXATTRIBIPOINTERPROC)load("glVertexAttribIPointer"); + glad_glGetVertexAttribIiv = (PFNGLGETVERTEXATTRIBIIVPROC)load("glGetVertexAttribIiv"); + glad_glGetVertexAttribIuiv = (PFNGLGETVERTEXATTRIBIUIVPROC)load("glGetVertexAttribIuiv"); + glad_glVertexAttribI1i = (PFNGLVERTEXATTRIBI1IPROC)load("glVertexAttribI1i"); + glad_glVertexAttribI2i = (PFNGLVERTEXATTRIBI2IPROC)load("glVertexAttribI2i"); + glad_glVertexAttribI3i = (PFNGLVERTEXATTRIBI3IPROC)load("glVertexAttribI3i"); + glad_glVertexAttribI4i = (PFNGLVERTEXATTRIBI4IPROC)load("glVertexAttribI4i"); + glad_glVertexAttribI1ui = (PFNGLVERTEXATTRIBI1UIPROC)load("glVertexAttribI1ui"); + glad_glVertexAttribI2ui = (PFNGLVERTEXATTRIBI2UIPROC)load("glVertexAttribI2ui"); + glad_glVertexAttribI3ui = (PFNGLVERTEXATTRIBI3UIPROC)load("glVertexAttribI3ui"); + glad_glVertexAttribI4ui = (PFNGLVERTEXATTRIBI4UIPROC)load("glVertexAttribI4ui"); + glad_glVertexAttribI1iv = (PFNGLVERTEXATTRIBI1IVPROC)load("glVertexAttribI1iv"); + glad_glVertexAttribI2iv = (PFNGLVERTEXATTRIBI2IVPROC)load("glVertexAttribI2iv"); + 
glad_glVertexAttribI3iv = (PFNGLVERTEXATTRIBI3IVPROC)load("glVertexAttribI3iv"); + glad_glVertexAttribI4iv = (PFNGLVERTEXATTRIBI4IVPROC)load("glVertexAttribI4iv"); + glad_glVertexAttribI1uiv = (PFNGLVERTEXATTRIBI1UIVPROC)load("glVertexAttribI1uiv"); + glad_glVertexAttribI2uiv = (PFNGLVERTEXATTRIBI2UIVPROC)load("glVertexAttribI2uiv"); + glad_glVertexAttribI3uiv = (PFNGLVERTEXATTRIBI3UIVPROC)load("glVertexAttribI3uiv"); + glad_glVertexAttribI4uiv = (PFNGLVERTEXATTRIBI4UIVPROC)load("glVertexAttribI4uiv"); + glad_glVertexAttribI4bv = (PFNGLVERTEXATTRIBI4BVPROC)load("glVertexAttribI4bv"); + glad_glVertexAttribI4sv = (PFNGLVERTEXATTRIBI4SVPROC)load("glVertexAttribI4sv"); + glad_glVertexAttribI4ubv = (PFNGLVERTEXATTRIBI4UBVPROC)load("glVertexAttribI4ubv"); + glad_glVertexAttribI4usv = (PFNGLVERTEXATTRIBI4USVPROC)load("glVertexAttribI4usv"); + glad_glGetUniformuiv = (PFNGLGETUNIFORMUIVPROC)load("glGetUniformuiv"); + glad_glBindFragDataLocation = (PFNGLBINDFRAGDATALOCATIONPROC)load("glBindFragDataLocation"); + glad_glGetFragDataLocation = (PFNGLGETFRAGDATALOCATIONPROC)load("glGetFragDataLocation"); + glad_glUniform1ui = (PFNGLUNIFORM1UIPROC)load("glUniform1ui"); + glad_glUniform2ui = (PFNGLUNIFORM2UIPROC)load("glUniform2ui"); + glad_glUniform3ui = (PFNGLUNIFORM3UIPROC)load("glUniform3ui"); + glad_glUniform4ui = (PFNGLUNIFORM4UIPROC)load("glUniform4ui"); + glad_glUniform1uiv = (PFNGLUNIFORM1UIVPROC)load("glUniform1uiv"); + glad_glUniform2uiv = (PFNGLUNIFORM2UIVPROC)load("glUniform2uiv"); + glad_glUniform3uiv = (PFNGLUNIFORM3UIVPROC)load("glUniform3uiv"); + glad_glUniform4uiv = (PFNGLUNIFORM4UIVPROC)load("glUniform4uiv"); + glad_glTexParameterIiv = (PFNGLTEXPARAMETERIIVPROC)load("glTexParameterIiv"); + glad_glTexParameterIuiv = (PFNGLTEXPARAMETERIUIVPROC)load("glTexParameterIuiv"); + glad_glGetTexParameterIiv = (PFNGLGETTEXPARAMETERIIVPROC)load("glGetTexParameterIiv"); + glad_glGetTexParameterIuiv = (PFNGLGETTEXPARAMETERIUIVPROC)load("glGetTexParameterIuiv"); + glad_glClearBufferiv = (PFNGLCLEARBUFFERIVPROC)load("glClearBufferiv"); + glad_glClearBufferuiv = (PFNGLCLEARBUFFERUIVPROC)load("glClearBufferuiv"); + glad_glClearBufferfv = (PFNGLCLEARBUFFERFVPROC)load("glClearBufferfv"); + glad_glClearBufferfi = (PFNGLCLEARBUFFERFIPROC)load("glClearBufferfi"); + glad_glGetStringi = (PFNGLGETSTRINGIPROC)load("glGetStringi"); + glad_glIsRenderbuffer = (PFNGLISRENDERBUFFERPROC)load("glIsRenderbuffer"); + glad_glBindRenderbuffer = (PFNGLBINDRENDERBUFFERPROC)load("glBindRenderbuffer"); + glad_glDeleteRenderbuffers = (PFNGLDELETERENDERBUFFERSPROC)load("glDeleteRenderbuffers"); + glad_glGenRenderbuffers = (PFNGLGENRENDERBUFFERSPROC)load("glGenRenderbuffers"); + glad_glRenderbufferStorage = (PFNGLRENDERBUFFERSTORAGEPROC)load("glRenderbufferStorage"); + glad_glGetRenderbufferParameteriv = (PFNGLGETRENDERBUFFERPARAMETERIVPROC)load("glGetRenderbufferParameteriv"); + glad_glIsFramebuffer = (PFNGLISFRAMEBUFFERPROC)load("glIsFramebuffer"); + glad_glBindFramebuffer = (PFNGLBINDFRAMEBUFFERPROC)load("glBindFramebuffer"); + glad_glDeleteFramebuffers = (PFNGLDELETEFRAMEBUFFERSPROC)load("glDeleteFramebuffers"); + glad_glGenFramebuffers = (PFNGLGENFRAMEBUFFERSPROC)load("glGenFramebuffers"); + glad_glCheckFramebufferStatus = (PFNGLCHECKFRAMEBUFFERSTATUSPROC)load("glCheckFramebufferStatus"); + glad_glFramebufferTexture1D = (PFNGLFRAMEBUFFERTEXTURE1DPROC)load("glFramebufferTexture1D"); + glad_glFramebufferTexture2D = (PFNGLFRAMEBUFFERTEXTURE2DPROC)load("glFramebufferTexture2D"); + glad_glFramebufferTexture3D = 
(PFNGLFRAMEBUFFERTEXTURE3DPROC)load("glFramebufferTexture3D"); + glad_glFramebufferRenderbuffer = (PFNGLFRAMEBUFFERRENDERBUFFERPROC)load("glFramebufferRenderbuffer"); + glad_glGetFramebufferAttachmentParameteriv = (PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC)load("glGetFramebufferAttachmentParameteriv"); + glad_glGenerateMipmap = (PFNGLGENERATEMIPMAPPROC)load("glGenerateMipmap"); + glad_glBlitFramebuffer = (PFNGLBLITFRAMEBUFFERPROC)load("glBlitFramebuffer"); + glad_glRenderbufferStorageMultisample = (PFNGLRENDERBUFFERSTORAGEMULTISAMPLEPROC)load("glRenderbufferStorageMultisample"); + glad_glFramebufferTextureLayer = (PFNGLFRAMEBUFFERTEXTURELAYERPROC)load("glFramebufferTextureLayer"); + glad_glMapBufferRange = (PFNGLMAPBUFFERRANGEPROC)load("glMapBufferRange"); + glad_glFlushMappedBufferRange = (PFNGLFLUSHMAPPEDBUFFERRANGEPROC)load("glFlushMappedBufferRange"); + glad_glBindVertexArray = (PFNGLBINDVERTEXARRAYPROC)load("glBindVertexArray"); + glad_glDeleteVertexArrays = (PFNGLDELETEVERTEXARRAYSPROC)load("glDeleteVertexArrays"); + glad_glGenVertexArrays = (PFNGLGENVERTEXARRAYSPROC)load("glGenVertexArrays"); + glad_glIsVertexArray = (PFNGLISVERTEXARRAYPROC)load("glIsVertexArray"); +} +static void load_GL_VERSION_3_1(GLADloadproc load) { + if(!GLAD_GL_VERSION_3_1) return; + glad_glDrawArraysInstanced = (PFNGLDRAWARRAYSINSTANCEDPROC)load("glDrawArraysInstanced"); + glad_glDrawElementsInstanced = (PFNGLDRAWELEMENTSINSTANCEDPROC)load("glDrawElementsInstanced"); + glad_glTexBuffer = (PFNGLTEXBUFFERPROC)load("glTexBuffer"); + glad_glPrimitiveRestartIndex = (PFNGLPRIMITIVERESTARTINDEXPROC)load("glPrimitiveRestartIndex"); + glad_glCopyBufferSubData = (PFNGLCOPYBUFFERSUBDATAPROC)load("glCopyBufferSubData"); + glad_glGetUniformIndices = (PFNGLGETUNIFORMINDICESPROC)load("glGetUniformIndices"); + glad_glGetActiveUniformsiv = (PFNGLGETACTIVEUNIFORMSIVPROC)load("glGetActiveUniformsiv"); + glad_glGetActiveUniformName = (PFNGLGETACTIVEUNIFORMNAMEPROC)load("glGetActiveUniformName"); + glad_glGetUniformBlockIndex = (PFNGLGETUNIFORMBLOCKINDEXPROC)load("glGetUniformBlockIndex"); + glad_glGetActiveUniformBlockiv = (PFNGLGETACTIVEUNIFORMBLOCKIVPROC)load("glGetActiveUniformBlockiv"); + glad_glGetActiveUniformBlockName = (PFNGLGETACTIVEUNIFORMBLOCKNAMEPROC)load("glGetActiveUniformBlockName"); + glad_glUniformBlockBinding = (PFNGLUNIFORMBLOCKBINDINGPROC)load("glUniformBlockBinding"); + glad_glBindBufferRange = (PFNGLBINDBUFFERRANGEPROC)load("glBindBufferRange"); + glad_glBindBufferBase = (PFNGLBINDBUFFERBASEPROC)load("glBindBufferBase"); + glad_glGetIntegeri_v = (PFNGLGETINTEGERI_VPROC)load("glGetIntegeri_v"); +} +static void load_GL_VERSION_3_2(GLADloadproc load) { + if(!GLAD_GL_VERSION_3_2) return; + glad_glDrawElementsBaseVertex = (PFNGLDRAWELEMENTSBASEVERTEXPROC)load("glDrawElementsBaseVertex"); + glad_glDrawRangeElementsBaseVertex = (PFNGLDRAWRANGEELEMENTSBASEVERTEXPROC)load("glDrawRangeElementsBaseVertex"); + glad_glDrawElementsInstancedBaseVertex = (PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXPROC)load("glDrawElementsInstancedBaseVertex"); + glad_glMultiDrawElementsBaseVertex = (PFNGLMULTIDRAWELEMENTSBASEVERTEXPROC)load("glMultiDrawElementsBaseVertex"); + glad_glProvokingVertex = (PFNGLPROVOKINGVERTEXPROC)load("glProvokingVertex"); + glad_glFenceSync = (PFNGLFENCESYNCPROC)load("glFenceSync"); + glad_glIsSync = (PFNGLISSYNCPROC)load("glIsSync"); + glad_glDeleteSync = (PFNGLDELETESYNCPROC)load("glDeleteSync"); + glad_glClientWaitSync = (PFNGLCLIENTWAITSYNCPROC)load("glClientWaitSync"); + glad_glWaitSync = 
(PFNGLWAITSYNCPROC)load("glWaitSync"); + glad_glGetInteger64v = (PFNGLGETINTEGER64VPROC)load("glGetInteger64v"); + glad_glGetSynciv = (PFNGLGETSYNCIVPROC)load("glGetSynciv"); + glad_glGetInteger64i_v = (PFNGLGETINTEGER64I_VPROC)load("glGetInteger64i_v"); + glad_glGetBufferParameteri64v = (PFNGLGETBUFFERPARAMETERI64VPROC)load("glGetBufferParameteri64v"); + glad_glFramebufferTexture = (PFNGLFRAMEBUFFERTEXTUREPROC)load("glFramebufferTexture"); + glad_glTexImage2DMultisample = (PFNGLTEXIMAGE2DMULTISAMPLEPROC)load("glTexImage2DMultisample"); + glad_glTexImage3DMultisample = (PFNGLTEXIMAGE3DMULTISAMPLEPROC)load("glTexImage3DMultisample"); + glad_glGetMultisamplefv = (PFNGLGETMULTISAMPLEFVPROC)load("glGetMultisamplefv"); + glad_glSampleMaski = (PFNGLSAMPLEMASKIPROC)load("glSampleMaski"); +} +static void load_GL_VERSION_3_3(GLADloadproc load) { + if(!GLAD_GL_VERSION_3_3) return; + glad_glBindFragDataLocationIndexed = (PFNGLBINDFRAGDATALOCATIONINDEXEDPROC)load("glBindFragDataLocationIndexed"); + glad_glGetFragDataIndex = (PFNGLGETFRAGDATAINDEXPROC)load("glGetFragDataIndex"); + glad_glGenSamplers = (PFNGLGENSAMPLERSPROC)load("glGenSamplers"); + glad_glDeleteSamplers = (PFNGLDELETESAMPLERSPROC)load("glDeleteSamplers"); + glad_glIsSampler = (PFNGLISSAMPLERPROC)load("glIsSampler"); + glad_glBindSampler = (PFNGLBINDSAMPLERPROC)load("glBindSampler"); + glad_glSamplerParameteri = (PFNGLSAMPLERPARAMETERIPROC)load("glSamplerParameteri"); + glad_glSamplerParameteriv = (PFNGLSAMPLERPARAMETERIVPROC)load("glSamplerParameteriv"); + glad_glSamplerParameterf = (PFNGLSAMPLERPARAMETERFPROC)load("glSamplerParameterf"); + glad_glSamplerParameterfv = (PFNGLSAMPLERPARAMETERFVPROC)load("glSamplerParameterfv"); + glad_glSamplerParameterIiv = (PFNGLSAMPLERPARAMETERIIVPROC)load("glSamplerParameterIiv"); + glad_glSamplerParameterIuiv = (PFNGLSAMPLERPARAMETERIUIVPROC)load("glSamplerParameterIuiv"); + glad_glGetSamplerParameteriv = (PFNGLGETSAMPLERPARAMETERIVPROC)load("glGetSamplerParameteriv"); + glad_glGetSamplerParameterIiv = (PFNGLGETSAMPLERPARAMETERIIVPROC)load("glGetSamplerParameterIiv"); + glad_glGetSamplerParameterfv = (PFNGLGETSAMPLERPARAMETERFVPROC)load("glGetSamplerParameterfv"); + glad_glGetSamplerParameterIuiv = (PFNGLGETSAMPLERPARAMETERIUIVPROC)load("glGetSamplerParameterIuiv"); + glad_glQueryCounter = (PFNGLQUERYCOUNTERPROC)load("glQueryCounter"); + glad_glGetQueryObjecti64v = (PFNGLGETQUERYOBJECTI64VPROC)load("glGetQueryObjecti64v"); + glad_glGetQueryObjectui64v = (PFNGLGETQUERYOBJECTUI64VPROC)load("glGetQueryObjectui64v"); + glad_glVertexAttribDivisor = (PFNGLVERTEXATTRIBDIVISORPROC)load("glVertexAttribDivisor"); + glad_glVertexAttribP1ui = (PFNGLVERTEXATTRIBP1UIPROC)load("glVertexAttribP1ui"); + glad_glVertexAttribP1uiv = (PFNGLVERTEXATTRIBP1UIVPROC)load("glVertexAttribP1uiv"); + glad_glVertexAttribP2ui = (PFNGLVERTEXATTRIBP2UIPROC)load("glVertexAttribP2ui"); + glad_glVertexAttribP2uiv = (PFNGLVERTEXATTRIBP2UIVPROC)load("glVertexAttribP2uiv"); + glad_glVertexAttribP3ui = (PFNGLVERTEXATTRIBP3UIPROC)load("glVertexAttribP3ui"); + glad_glVertexAttribP3uiv = (PFNGLVERTEXATTRIBP3UIVPROC)load("glVertexAttribP3uiv"); + glad_glVertexAttribP4ui = (PFNGLVERTEXATTRIBP4UIPROC)load("glVertexAttribP4ui"); + glad_glVertexAttribP4uiv = (PFNGLVERTEXATTRIBP4UIVPROC)load("glVertexAttribP4uiv"); + glad_glVertexP2ui = (PFNGLVERTEXP2UIPROC)load("glVertexP2ui"); + glad_glVertexP2uiv = (PFNGLVERTEXP2UIVPROC)load("glVertexP2uiv"); + glad_glVertexP3ui = (PFNGLVERTEXP3UIPROC)load("glVertexP3ui"); + glad_glVertexP3uiv = 
(PFNGLVERTEXP3UIVPROC)load("glVertexP3uiv"); + glad_glVertexP4ui = (PFNGLVERTEXP4UIPROC)load("glVertexP4ui"); + glad_glVertexP4uiv = (PFNGLVERTEXP4UIVPROC)load("glVertexP4uiv"); + glad_glTexCoordP1ui = (PFNGLTEXCOORDP1UIPROC)load("glTexCoordP1ui"); + glad_glTexCoordP1uiv = (PFNGLTEXCOORDP1UIVPROC)load("glTexCoordP1uiv"); + glad_glTexCoordP2ui = (PFNGLTEXCOORDP2UIPROC)load("glTexCoordP2ui"); + glad_glTexCoordP2uiv = (PFNGLTEXCOORDP2UIVPROC)load("glTexCoordP2uiv"); + glad_glTexCoordP3ui = (PFNGLTEXCOORDP3UIPROC)load("glTexCoordP3ui"); + glad_glTexCoordP3uiv = (PFNGLTEXCOORDP3UIVPROC)load("glTexCoordP3uiv"); + glad_glTexCoordP4ui = (PFNGLTEXCOORDP4UIPROC)load("glTexCoordP4ui"); + glad_glTexCoordP4uiv = (PFNGLTEXCOORDP4UIVPROC)load("glTexCoordP4uiv"); + glad_glMultiTexCoordP1ui = (PFNGLMULTITEXCOORDP1UIPROC)load("glMultiTexCoordP1ui"); + glad_glMultiTexCoordP1uiv = (PFNGLMULTITEXCOORDP1UIVPROC)load("glMultiTexCoordP1uiv"); + glad_glMultiTexCoordP2ui = (PFNGLMULTITEXCOORDP2UIPROC)load("glMultiTexCoordP2ui"); + glad_glMultiTexCoordP2uiv = (PFNGLMULTITEXCOORDP2UIVPROC)load("glMultiTexCoordP2uiv"); + glad_glMultiTexCoordP3ui = (PFNGLMULTITEXCOORDP3UIPROC)load("glMultiTexCoordP3ui"); + glad_glMultiTexCoordP3uiv = (PFNGLMULTITEXCOORDP3UIVPROC)load("glMultiTexCoordP3uiv"); + glad_glMultiTexCoordP4ui = (PFNGLMULTITEXCOORDP4UIPROC)load("glMultiTexCoordP4ui"); + glad_glMultiTexCoordP4uiv = (PFNGLMULTITEXCOORDP4UIVPROC)load("glMultiTexCoordP4uiv"); + glad_glNormalP3ui = (PFNGLNORMALP3UIPROC)load("glNormalP3ui"); + glad_glNormalP3uiv = (PFNGLNORMALP3UIVPROC)load("glNormalP3uiv"); + glad_glColorP3ui = (PFNGLCOLORP3UIPROC)load("glColorP3ui"); + glad_glColorP3uiv = (PFNGLCOLORP3UIVPROC)load("glColorP3uiv"); + glad_glColorP4ui = (PFNGLCOLORP4UIPROC)load("glColorP4ui"); + glad_glColorP4uiv = (PFNGLCOLORP4UIVPROC)load("glColorP4uiv"); + glad_glSecondaryColorP3ui = (PFNGLSECONDARYCOLORP3UIPROC)load("glSecondaryColorP3ui"); + glad_glSecondaryColorP3uiv = (PFNGLSECONDARYCOLORP3UIVPROC)load("glSecondaryColorP3uiv"); +} +static int find_extensionsGL(void) { + if (!get_exts()) return 0; + (void)&has_ext; + free_exts(); + return 1; +} + +static void find_coreGL(void) { + + /* Thank you @elmindreda + * https://github.com/elmindreda/greg/blob/master/templates/greg.c.in#L176 + * https://github.com/glfw/glfw/blob/master/src/context.c#L36 + */ + int i, major, minor; + + const char* version; + const char* prefixes[] = { + "OpenGL ES-CM ", + "OpenGL ES-CL ", + "OpenGL ES ", + NULL + }; + + version = (const char*) glGetString(GL_VERSION); + if (!version) return; + + for (i = 0; prefixes[i]; i++) { + const size_t length = strlen(prefixes[i]); + if (strncmp(version, prefixes[i], length) == 0) { + version += length; + break; + } + } + +/* PR #18 */ +#ifdef _MSC_VER + sscanf_s(version, "%d.%d", &major, &minor); +#else + sscanf(version, "%d.%d", &major, &minor); +#endif + + GLVersion.major = major; GLVersion.minor = minor; + max_loaded_major = major; max_loaded_minor = minor; + GLAD_GL_VERSION_1_0 = (major == 1 && minor >= 0) || major > 1; + GLAD_GL_VERSION_1_1 = (major == 1 && minor >= 1) || major > 1; + GLAD_GL_VERSION_1_2 = (major == 1 && minor >= 2) || major > 1; + GLAD_GL_VERSION_1_3 = (major == 1 && minor >= 3) || major > 1; + GLAD_GL_VERSION_1_4 = (major == 1 && minor >= 4) || major > 1; + GLAD_GL_VERSION_1_5 = (major == 1 && minor >= 5) || major > 1; + GLAD_GL_VERSION_2_0 = (major == 2 && minor >= 0) || major > 2; + GLAD_GL_VERSION_2_1 = (major == 2 && minor >= 1) || major > 2; + GLAD_GL_VERSION_3_0 = (major == 3 && minor 
>= 0) || major > 3; + GLAD_GL_VERSION_3_1 = (major == 3 && minor >= 1) || major > 3; + GLAD_GL_VERSION_3_2 = (major == 3 && minor >= 2) || major > 3; + GLAD_GL_VERSION_3_3 = (major == 3 && minor >= 3) || major > 3; + if (GLVersion.major > 3 || (GLVersion.major >= 3 && GLVersion.minor >= 3)) { + max_loaded_major = 3; + max_loaded_minor = 3; + } +} + +int gladLoadGLLoader(GLADloadproc load) { + GLVersion.major = 0; GLVersion.minor = 0; + glGetString = (PFNGLGETSTRINGPROC)load("glGetString"); + if(glGetString == NULL) return 0; + if(glGetString(GL_VERSION) == NULL) return 0; + find_coreGL(); + load_GL_VERSION_1_0(load); + load_GL_VERSION_1_1(load); + load_GL_VERSION_1_2(load); + load_GL_VERSION_1_3(load); + load_GL_VERSION_1_4(load); + load_GL_VERSION_1_5(load); + load_GL_VERSION_2_0(load); + load_GL_VERSION_2_1(load); + load_GL_VERSION_3_0(load); + load_GL_VERSION_3_1(load); + load_GL_VERSION_3_2(load); + load_GL_VERSION_3_3(load); + + if (!find_extensionsGL()) return 0; + return GLVersion.major != 0 || GLVersion.minor != 0; +} + diff --git a/gl/glad/glad.h b/gl/glad/glad.h new file mode 100644 index 0000000..0718199 --- /dev/null +++ b/gl/glad/glad.h @@ -0,0 +1,2509 @@ +/* + + OpenGL loader generated by glad 0.1.34 on Sun Jul 18 08:24:23 2021. + + Language/Generator: C/C++ Debug + Specification: gl + APIs: gl=3.3 + Profile: core + Extensions: + + Loader: True + Local files: False + Omit khrplatform: False + Reproducible: False + + Commandline: + --profile="core" --api="gl=3.3" --generator="c-debug" --spec="gl" --extensions="" + Online: + https://glad.dav1d.de/#profile=core&language=c-debug&specification=gl&loader=on&api=gl%3D3.3 +*/ + + +#ifndef __glad_h_ +#define __glad_h_ + +#ifdef __gl_h_ +#error OpenGL header already included, remove this include, glad already provides it +#endif +#define __gl_h_ + +#if defined(_WIN32) && !defined(APIENTRY) && !defined(__CYGWIN__) && !defined(__SCITECH_SNAP__) +#define APIENTRY __stdcall +#endif + +#ifndef APIENTRY +#define APIENTRY +#endif +#ifndef APIENTRYP +#define APIENTRYP APIENTRY * +#endif + +#ifndef GLAPIENTRY +#define GLAPIENTRY APIENTRY +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +struct gladGLversionStruct { + int major; + int minor; +}; + +typedef void* (* GLADloadproc)(const char *name); + +#ifndef GLAPI +# if defined(GLAD_GLAPI_EXPORT) +# if defined(_WIN32) || defined(__CYGWIN__) +# if defined(GLAD_GLAPI_EXPORT_BUILD) +# if defined(__GNUC__) +# define GLAPI __attribute__ ((dllexport)) extern +# else +# define GLAPI __declspec(dllexport) extern +# endif +# else +# if defined(__GNUC__) +# define GLAPI __attribute__ ((dllimport)) extern +# else +# define GLAPI __declspec(dllimport) extern +# endif +# endif +# elif defined(__GNUC__) && defined(GLAD_GLAPI_EXPORT_BUILD) +# define GLAPI __attribute__ ((visibility ("default"))) extern +# else +# define GLAPI extern +# endif +# else +# define GLAPI extern +# endif +#endif + +GLAPI struct gladGLversionStruct GLVersion; + +GLAPI int gladLoadGL(void); + +GLAPI int gladLoadGLLoader(GLADloadproc); + + +#define GLAD_DEBUG +typedef void (* GLADcallback)(const char *name, void *funcptr, int len_args, ...); + +GLAPI void glad_set_pre_callback(GLADcallback cb); +GLAPI void glad_set_post_callback(GLADcallback cb); +#include <KHR/khrplatform.h> +typedef unsigned int GLenum; +typedef unsigned char GLboolean; +typedef unsigned int GLbitfield; +typedef void GLvoid; +typedef khronos_int8_t GLbyte; +typedef khronos_uint8_t GLubyte; +typedef khronos_int16_t GLshort; +typedef khronos_uint16_t GLushort; +typedef int GLint;
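/*
 * Illustrative aside (not part of the generated header or of this patch):
 * the loader defined above is driven by gladLoadGLLoader(), which accepts any
 * resolver matching GLADloadproc, and this c-debug build lets a callback run
 * after every GL call routed through the glad_debug_* wrappers declared below.
 * A minimal sketch of both, assuming the GLFW header vendored in this repo
 * (gl/glfw3.h) is used to resolve entry points; the include paths and the
 * names gl_post_call/gl_init are hypothetical, not taken from the patch.
 */
#include <stdio.h>
#include "gl/glad/glad.h"   /* assumed include paths for this repository layout */
#include "gl/glfw3.h"

/* Runs after each wrapped GL call; glad_glGetError is called directly so the
   error check does not re-enter the debug wrapper around glGetError. */
static void gl_post_call(const char *name, void *funcptr, int len_args, ...)
{
    GLenum err = glad_glGetError();
    if (err != GL_NO_ERROR)
        fprintf(stderr, "GL error 0x%04x after %s\n", err, name);
    (void)funcptr; (void)len_args;
}

static int gl_init(void)
{
    /* An OpenGL context must already be current on this thread. */
    if (!gladLoadGLLoader((GLADloadproc)glfwGetProcAddress))
        return 0;

    /* find_coreGL() has populated GLVersion and the GLAD_GL_VERSION_* flags. */
    glad_set_post_callback(gl_post_call);
    printf("Loaded OpenGL %d.%d\n", GLVersion.major, GLVersion.minor);
    return 1;
}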
+typedef unsigned int GLuint; +typedef khronos_int32_t GLclampx; +typedef int GLsizei; +typedef khronos_float_t GLfloat; +typedef khronos_float_t GLclampf; +typedef double GLdouble; +typedef double GLclampd; +typedef void *GLeglClientBufferEXT; +typedef void *GLeglImageOES; +typedef char GLchar; +typedef char GLcharARB; +#ifdef __APPLE__ +typedef void *GLhandleARB; +#else +typedef unsigned int GLhandleARB; +#endif +typedef khronos_uint16_t GLhalf; +typedef khronos_uint16_t GLhalfARB; +typedef khronos_int32_t GLfixed; +typedef khronos_intptr_t GLintptr; +typedef khronos_intptr_t GLintptrARB; +typedef khronos_ssize_t GLsizeiptr; +typedef khronos_ssize_t GLsizeiptrARB; +typedef khronos_int64_t GLint64; +typedef khronos_int64_t GLint64EXT; +typedef khronos_uint64_t GLuint64; +typedef khronos_uint64_t GLuint64EXT; +typedef struct __GLsync *GLsync; +struct _cl_context; +struct _cl_event; +typedef void (APIENTRY *GLDEBUGPROC)(GLenum source,GLenum type,GLuint id,GLenum severity,GLsizei length,const GLchar *message,const void *userParam); +typedef void (APIENTRY *GLDEBUGPROCARB)(GLenum source,GLenum type,GLuint id,GLenum severity,GLsizei length,const GLchar *message,const void *userParam); +typedef void (APIENTRY *GLDEBUGPROCKHR)(GLenum source,GLenum type,GLuint id,GLenum severity,GLsizei length,const GLchar *message,const void *userParam); +typedef void (APIENTRY *GLDEBUGPROCAMD)(GLuint id,GLenum category,GLenum severity,GLsizei length,const GLchar *message,void *userParam); +typedef unsigned short GLhalfNV; +typedef GLintptr GLvdpauSurfaceNV; +typedef void (APIENTRY *GLVULKANPROCNV)(void); +#define GL_DEPTH_BUFFER_BIT 0x00000100 +#define GL_STENCIL_BUFFER_BIT 0x00000400 +#define GL_COLOR_BUFFER_BIT 0x00004000 +#define GL_FALSE 0 +#define GL_TRUE 1 +#define GL_POINTS 0x0000 +#define GL_LINES 0x0001 +#define GL_LINE_LOOP 0x0002 +#define GL_LINE_STRIP 0x0003 +#define GL_TRIANGLES 0x0004 +#define GL_TRIANGLE_STRIP 0x0005 +#define GL_TRIANGLE_FAN 0x0006 +#define GL_NEVER 0x0200 +#define GL_LESS 0x0201 +#define GL_EQUAL 0x0202 +#define GL_LEQUAL 0x0203 +#define GL_GREATER 0x0204 +#define GL_NOTEQUAL 0x0205 +#define GL_GEQUAL 0x0206 +#define GL_ALWAYS 0x0207 +#define GL_ZERO 0 +#define GL_ONE 1 +#define GL_SRC_COLOR 0x0300 +#define GL_ONE_MINUS_SRC_COLOR 0x0301 +#define GL_SRC_ALPHA 0x0302 +#define GL_ONE_MINUS_SRC_ALPHA 0x0303 +#define GL_DST_ALPHA 0x0304 +#define GL_ONE_MINUS_DST_ALPHA 0x0305 +#define GL_DST_COLOR 0x0306 +#define GL_ONE_MINUS_DST_COLOR 0x0307 +#define GL_SRC_ALPHA_SATURATE 0x0308 +#define GL_NONE 0 +#define GL_FRONT_LEFT 0x0400 +#define GL_FRONT_RIGHT 0x0401 +#define GL_BACK_LEFT 0x0402 +#define GL_BACK_RIGHT 0x0403 +#define GL_FRONT 0x0404 +#define GL_BACK 0x0405 +#define GL_LEFT 0x0406 +#define GL_RIGHT 0x0407 +#define GL_FRONT_AND_BACK 0x0408 +#define GL_NO_ERROR 0 +#define GL_INVALID_ENUM 0x0500 +#define GL_INVALID_VALUE 0x0501 +#define GL_INVALID_OPERATION 0x0502 +#define GL_OUT_OF_MEMORY 0x0505 +#define GL_CW 0x0900 +#define GL_CCW 0x0901 +#define GL_POINT_SIZE 0x0B11 +#define GL_POINT_SIZE_RANGE 0x0B12 +#define GL_POINT_SIZE_GRANULARITY 0x0B13 +#define GL_LINE_SMOOTH 0x0B20 +#define GL_LINE_WIDTH 0x0B21 +#define GL_LINE_WIDTH_RANGE 0x0B22 +#define GL_LINE_WIDTH_GRANULARITY 0x0B23 +#define GL_POLYGON_MODE 0x0B40 +#define GL_POLYGON_SMOOTH 0x0B41 +#define GL_CULL_FACE 0x0B44 +#define GL_CULL_FACE_MODE 0x0B45 +#define GL_FRONT_FACE 0x0B46 +#define GL_DEPTH_RANGE 0x0B70 +#define GL_DEPTH_TEST 0x0B71 +#define GL_DEPTH_WRITEMASK 0x0B72 +#define GL_DEPTH_CLEAR_VALUE 0x0B73 +#define 
GL_DEPTH_FUNC 0x0B74 +#define GL_STENCIL_TEST 0x0B90 +#define GL_STENCIL_CLEAR_VALUE 0x0B91 +#define GL_STENCIL_FUNC 0x0B92 +#define GL_STENCIL_VALUE_MASK 0x0B93 +#define GL_STENCIL_FAIL 0x0B94 +#define GL_STENCIL_PASS_DEPTH_FAIL 0x0B95 +#define GL_STENCIL_PASS_DEPTH_PASS 0x0B96 +#define GL_STENCIL_REF 0x0B97 +#define GL_STENCIL_WRITEMASK 0x0B98 +#define GL_VIEWPORT 0x0BA2 +#define GL_DITHER 0x0BD0 +#define GL_BLEND_DST 0x0BE0 +#define GL_BLEND_SRC 0x0BE1 +#define GL_BLEND 0x0BE2 +#define GL_LOGIC_OP_MODE 0x0BF0 +#define GL_DRAW_BUFFER 0x0C01 +#define GL_READ_BUFFER 0x0C02 +#define GL_SCISSOR_BOX 0x0C10 +#define GL_SCISSOR_TEST 0x0C11 +#define GL_COLOR_CLEAR_VALUE 0x0C22 +#define GL_COLOR_WRITEMASK 0x0C23 +#define GL_DOUBLEBUFFER 0x0C32 +#define GL_STEREO 0x0C33 +#define GL_LINE_SMOOTH_HINT 0x0C52 +#define GL_POLYGON_SMOOTH_HINT 0x0C53 +#define GL_UNPACK_SWAP_BYTES 0x0CF0 +#define GL_UNPACK_LSB_FIRST 0x0CF1 +#define GL_UNPACK_ROW_LENGTH 0x0CF2 +#define GL_UNPACK_SKIP_ROWS 0x0CF3 +#define GL_UNPACK_SKIP_PIXELS 0x0CF4 +#define GL_UNPACK_ALIGNMENT 0x0CF5 +#define GL_PACK_SWAP_BYTES 0x0D00 +#define GL_PACK_LSB_FIRST 0x0D01 +#define GL_PACK_ROW_LENGTH 0x0D02 +#define GL_PACK_SKIP_ROWS 0x0D03 +#define GL_PACK_SKIP_PIXELS 0x0D04 +#define GL_PACK_ALIGNMENT 0x0D05 +#define GL_MAX_TEXTURE_SIZE 0x0D33 +#define GL_MAX_VIEWPORT_DIMS 0x0D3A +#define GL_SUBPIXEL_BITS 0x0D50 +#define GL_TEXTURE_1D 0x0DE0 +#define GL_TEXTURE_2D 0x0DE1 +#define GL_TEXTURE_WIDTH 0x1000 +#define GL_TEXTURE_HEIGHT 0x1001 +#define GL_TEXTURE_BORDER_COLOR 0x1004 +#define GL_DONT_CARE 0x1100 +#define GL_FASTEST 0x1101 +#define GL_NICEST 0x1102 +#define GL_BYTE 0x1400 +#define GL_UNSIGNED_BYTE 0x1401 +#define GL_SHORT 0x1402 +#define GL_UNSIGNED_SHORT 0x1403 +#define GL_INT 0x1404 +#define GL_UNSIGNED_INT 0x1405 +#define GL_FLOAT 0x1406 +#define GL_CLEAR 0x1500 +#define GL_AND 0x1501 +#define GL_AND_REVERSE 0x1502 +#define GL_COPY 0x1503 +#define GL_AND_INVERTED 0x1504 +#define GL_NOOP 0x1505 +#define GL_XOR 0x1506 +#define GL_OR 0x1507 +#define GL_NOR 0x1508 +#define GL_EQUIV 0x1509 +#define GL_INVERT 0x150A +#define GL_OR_REVERSE 0x150B +#define GL_COPY_INVERTED 0x150C +#define GL_OR_INVERTED 0x150D +#define GL_NAND 0x150E +#define GL_SET 0x150F +#define GL_TEXTURE 0x1702 +#define GL_COLOR 0x1800 +#define GL_DEPTH 0x1801 +#define GL_STENCIL 0x1802 +#define GL_STENCIL_INDEX 0x1901 +#define GL_DEPTH_COMPONENT 0x1902 +#define GL_RED 0x1903 +#define GL_GREEN 0x1904 +#define GL_BLUE 0x1905 +#define GL_ALPHA 0x1906 +#define GL_RGB 0x1907 +#define GL_RGBA 0x1908 +#define GL_POINT 0x1B00 +#define GL_LINE 0x1B01 +#define GL_FILL 0x1B02 +#define GL_KEEP 0x1E00 +#define GL_REPLACE 0x1E01 +#define GL_INCR 0x1E02 +#define GL_DECR 0x1E03 +#define GL_VENDOR 0x1F00 +#define GL_RENDERER 0x1F01 +#define GL_VERSION 0x1F02 +#define GL_EXTENSIONS 0x1F03 +#define GL_NEAREST 0x2600 +#define GL_LINEAR 0x2601 +#define GL_NEAREST_MIPMAP_NEAREST 0x2700 +#define GL_LINEAR_MIPMAP_NEAREST 0x2701 +#define GL_NEAREST_MIPMAP_LINEAR 0x2702 +#define GL_LINEAR_MIPMAP_LINEAR 0x2703 +#define GL_TEXTURE_MAG_FILTER 0x2800 +#define GL_TEXTURE_MIN_FILTER 0x2801 +#define GL_TEXTURE_WRAP_S 0x2802 +#define GL_TEXTURE_WRAP_T 0x2803 +#define GL_REPEAT 0x2901 +#define GL_COLOR_LOGIC_OP 0x0BF2 +#define GL_POLYGON_OFFSET_UNITS 0x2A00 +#define GL_POLYGON_OFFSET_POINT 0x2A01 +#define GL_POLYGON_OFFSET_LINE 0x2A02 +#define GL_POLYGON_OFFSET_FILL 0x8037 +#define GL_POLYGON_OFFSET_FACTOR 0x8038 +#define GL_TEXTURE_BINDING_1D 0x8068 +#define GL_TEXTURE_BINDING_2D 0x8069 +#define 
GL_TEXTURE_INTERNAL_FORMAT 0x1003 +#define GL_TEXTURE_RED_SIZE 0x805C +#define GL_TEXTURE_GREEN_SIZE 0x805D +#define GL_TEXTURE_BLUE_SIZE 0x805E +#define GL_TEXTURE_ALPHA_SIZE 0x805F +#define GL_DOUBLE 0x140A +#define GL_PROXY_TEXTURE_1D 0x8063 +#define GL_PROXY_TEXTURE_2D 0x8064 +#define GL_R3_G3_B2 0x2A10 +#define GL_RGB4 0x804F +#define GL_RGB5 0x8050 +#define GL_RGB8 0x8051 +#define GL_RGB10 0x8052 +#define GL_RGB12 0x8053 +#define GL_RGB16 0x8054 +#define GL_RGBA2 0x8055 +#define GL_RGBA4 0x8056 +#define GL_RGB5_A1 0x8057 +#define GL_RGBA8 0x8058 +#define GL_RGB10_A2 0x8059 +#define GL_RGBA12 0x805A +#define GL_RGBA16 0x805B +#define GL_UNSIGNED_BYTE_3_3_2 0x8032 +#define GL_UNSIGNED_SHORT_4_4_4_4 0x8033 +#define GL_UNSIGNED_SHORT_5_5_5_1 0x8034 +#define GL_UNSIGNED_INT_8_8_8_8 0x8035 +#define GL_UNSIGNED_INT_10_10_10_2 0x8036 +#define GL_TEXTURE_BINDING_3D 0x806A +#define GL_PACK_SKIP_IMAGES 0x806B +#define GL_PACK_IMAGE_HEIGHT 0x806C +#define GL_UNPACK_SKIP_IMAGES 0x806D +#define GL_UNPACK_IMAGE_HEIGHT 0x806E +#define GL_TEXTURE_3D 0x806F +#define GL_PROXY_TEXTURE_3D 0x8070 +#define GL_TEXTURE_DEPTH 0x8071 +#define GL_TEXTURE_WRAP_R 0x8072 +#define GL_MAX_3D_TEXTURE_SIZE 0x8073 +#define GL_UNSIGNED_BYTE_2_3_3_REV 0x8362 +#define GL_UNSIGNED_SHORT_5_6_5 0x8363 +#define GL_UNSIGNED_SHORT_5_6_5_REV 0x8364 +#define GL_UNSIGNED_SHORT_4_4_4_4_REV 0x8365 +#define GL_UNSIGNED_SHORT_1_5_5_5_REV 0x8366 +#define GL_UNSIGNED_INT_8_8_8_8_REV 0x8367 +#define GL_UNSIGNED_INT_2_10_10_10_REV 0x8368 +#define GL_BGR 0x80E0 +#define GL_BGRA 0x80E1 +#define GL_MAX_ELEMENTS_VERTICES 0x80E8 +#define GL_MAX_ELEMENTS_INDICES 0x80E9 +#define GL_CLAMP_TO_EDGE 0x812F +#define GL_TEXTURE_MIN_LOD 0x813A +#define GL_TEXTURE_MAX_LOD 0x813B +#define GL_TEXTURE_BASE_LEVEL 0x813C +#define GL_TEXTURE_MAX_LEVEL 0x813D +#define GL_SMOOTH_POINT_SIZE_RANGE 0x0B12 +#define GL_SMOOTH_POINT_SIZE_GRANULARITY 0x0B13 +#define GL_SMOOTH_LINE_WIDTH_RANGE 0x0B22 +#define GL_SMOOTH_LINE_WIDTH_GRANULARITY 0x0B23 +#define GL_ALIASED_LINE_WIDTH_RANGE 0x846E +#define GL_TEXTURE0 0x84C0 +#define GL_TEXTURE1 0x84C1 +#define GL_TEXTURE2 0x84C2 +#define GL_TEXTURE3 0x84C3 +#define GL_TEXTURE4 0x84C4 +#define GL_TEXTURE5 0x84C5 +#define GL_TEXTURE6 0x84C6 +#define GL_TEXTURE7 0x84C7 +#define GL_TEXTURE8 0x84C8 +#define GL_TEXTURE9 0x84C9 +#define GL_TEXTURE10 0x84CA +#define GL_TEXTURE11 0x84CB +#define GL_TEXTURE12 0x84CC +#define GL_TEXTURE13 0x84CD +#define GL_TEXTURE14 0x84CE +#define GL_TEXTURE15 0x84CF +#define GL_TEXTURE16 0x84D0 +#define GL_TEXTURE17 0x84D1 +#define GL_TEXTURE18 0x84D2 +#define GL_TEXTURE19 0x84D3 +#define GL_TEXTURE20 0x84D4 +#define GL_TEXTURE21 0x84D5 +#define GL_TEXTURE22 0x84D6 +#define GL_TEXTURE23 0x84D7 +#define GL_TEXTURE24 0x84D8 +#define GL_TEXTURE25 0x84D9 +#define GL_TEXTURE26 0x84DA +#define GL_TEXTURE27 0x84DB +#define GL_TEXTURE28 0x84DC +#define GL_TEXTURE29 0x84DD +#define GL_TEXTURE30 0x84DE +#define GL_TEXTURE31 0x84DF +#define GL_ACTIVE_TEXTURE 0x84E0 +#define GL_MULTISAMPLE 0x809D +#define GL_SAMPLE_ALPHA_TO_COVERAGE 0x809E +#define GL_SAMPLE_ALPHA_TO_ONE 0x809F +#define GL_SAMPLE_COVERAGE 0x80A0 +#define GL_SAMPLE_BUFFERS 0x80A8 +#define GL_SAMPLES 0x80A9 +#define GL_SAMPLE_COVERAGE_VALUE 0x80AA +#define GL_SAMPLE_COVERAGE_INVERT 0x80AB +#define GL_TEXTURE_CUBE_MAP 0x8513 +#define GL_TEXTURE_BINDING_CUBE_MAP 0x8514 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_X 0x8515 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_X 0x8516 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_Y 0x8517 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Y 
0x8518 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_Z 0x8519 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Z 0x851A +#define GL_PROXY_TEXTURE_CUBE_MAP 0x851B +#define GL_MAX_CUBE_MAP_TEXTURE_SIZE 0x851C +#define GL_COMPRESSED_RGB 0x84ED +#define GL_COMPRESSED_RGBA 0x84EE +#define GL_TEXTURE_COMPRESSION_HINT 0x84EF +#define GL_TEXTURE_COMPRESSED_IMAGE_SIZE 0x86A0 +#define GL_TEXTURE_COMPRESSED 0x86A1 +#define GL_NUM_COMPRESSED_TEXTURE_FORMATS 0x86A2 +#define GL_COMPRESSED_TEXTURE_FORMATS 0x86A3 +#define GL_CLAMP_TO_BORDER 0x812D +#define GL_BLEND_DST_RGB 0x80C8 +#define GL_BLEND_SRC_RGB 0x80C9 +#define GL_BLEND_DST_ALPHA 0x80CA +#define GL_BLEND_SRC_ALPHA 0x80CB +#define GL_POINT_FADE_THRESHOLD_SIZE 0x8128 +#define GL_DEPTH_COMPONENT16 0x81A5 +#define GL_DEPTH_COMPONENT24 0x81A6 +#define GL_DEPTH_COMPONENT32 0x81A7 +#define GL_MIRRORED_REPEAT 0x8370 +#define GL_MAX_TEXTURE_LOD_BIAS 0x84FD +#define GL_TEXTURE_LOD_BIAS 0x8501 +#define GL_INCR_WRAP 0x8507 +#define GL_DECR_WRAP 0x8508 +#define GL_TEXTURE_DEPTH_SIZE 0x884A +#define GL_TEXTURE_COMPARE_MODE 0x884C +#define GL_TEXTURE_COMPARE_FUNC 0x884D +#define GL_BLEND_COLOR 0x8005 +#define GL_BLEND_EQUATION 0x8009 +#define GL_CONSTANT_COLOR 0x8001 +#define GL_ONE_MINUS_CONSTANT_COLOR 0x8002 +#define GL_CONSTANT_ALPHA 0x8003 +#define GL_ONE_MINUS_CONSTANT_ALPHA 0x8004 +#define GL_FUNC_ADD 0x8006 +#define GL_FUNC_REVERSE_SUBTRACT 0x800B +#define GL_FUNC_SUBTRACT 0x800A +#define GL_MIN 0x8007 +#define GL_MAX 0x8008 +#define GL_BUFFER_SIZE 0x8764 +#define GL_BUFFER_USAGE 0x8765 +#define GL_QUERY_COUNTER_BITS 0x8864 +#define GL_CURRENT_QUERY 0x8865 +#define GL_QUERY_RESULT 0x8866 +#define GL_QUERY_RESULT_AVAILABLE 0x8867 +#define GL_ARRAY_BUFFER 0x8892 +#define GL_ELEMENT_ARRAY_BUFFER 0x8893 +#define GL_ARRAY_BUFFER_BINDING 0x8894 +#define GL_ELEMENT_ARRAY_BUFFER_BINDING 0x8895 +#define GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING 0x889F +#define GL_READ_ONLY 0x88B8 +#define GL_WRITE_ONLY 0x88B9 +#define GL_READ_WRITE 0x88BA +#define GL_BUFFER_ACCESS 0x88BB +#define GL_BUFFER_MAPPED 0x88BC +#define GL_BUFFER_MAP_POINTER 0x88BD +#define GL_STREAM_DRAW 0x88E0 +#define GL_STREAM_READ 0x88E1 +#define GL_STREAM_COPY 0x88E2 +#define GL_STATIC_DRAW 0x88E4 +#define GL_STATIC_READ 0x88E5 +#define GL_STATIC_COPY 0x88E6 +#define GL_DYNAMIC_DRAW 0x88E8 +#define GL_DYNAMIC_READ 0x88E9 +#define GL_DYNAMIC_COPY 0x88EA +#define GL_SAMPLES_PASSED 0x8914 +#define GL_SRC1_ALPHA 0x8589 +#define GL_BLEND_EQUATION_RGB 0x8009 +#define GL_VERTEX_ATTRIB_ARRAY_ENABLED 0x8622 +#define GL_VERTEX_ATTRIB_ARRAY_SIZE 0x8623 +#define GL_VERTEX_ATTRIB_ARRAY_STRIDE 0x8624 +#define GL_VERTEX_ATTRIB_ARRAY_TYPE 0x8625 +#define GL_CURRENT_VERTEX_ATTRIB 0x8626 +#define GL_VERTEX_PROGRAM_POINT_SIZE 0x8642 +#define GL_VERTEX_ATTRIB_ARRAY_POINTER 0x8645 +#define GL_STENCIL_BACK_FUNC 0x8800 +#define GL_STENCIL_BACK_FAIL 0x8801 +#define GL_STENCIL_BACK_PASS_DEPTH_FAIL 0x8802 +#define GL_STENCIL_BACK_PASS_DEPTH_PASS 0x8803 +#define GL_MAX_DRAW_BUFFERS 0x8824 +#define GL_DRAW_BUFFER0 0x8825 +#define GL_DRAW_BUFFER1 0x8826 +#define GL_DRAW_BUFFER2 0x8827 +#define GL_DRAW_BUFFER3 0x8828 +#define GL_DRAW_BUFFER4 0x8829 +#define GL_DRAW_BUFFER5 0x882A +#define GL_DRAW_BUFFER6 0x882B +#define GL_DRAW_BUFFER7 0x882C +#define GL_DRAW_BUFFER8 0x882D +#define GL_DRAW_BUFFER9 0x882E +#define GL_DRAW_BUFFER10 0x882F +#define GL_DRAW_BUFFER11 0x8830 +#define GL_DRAW_BUFFER12 0x8831 +#define GL_DRAW_BUFFER13 0x8832 +#define GL_DRAW_BUFFER14 0x8833 +#define GL_DRAW_BUFFER15 0x8834 +#define GL_BLEND_EQUATION_ALPHA 0x883D +#define 
GL_MAX_VERTEX_ATTRIBS 0x8869 +#define GL_VERTEX_ATTRIB_ARRAY_NORMALIZED 0x886A +#define GL_MAX_TEXTURE_IMAGE_UNITS 0x8872 +#define GL_FRAGMENT_SHADER 0x8B30 +#define GL_VERTEX_SHADER 0x8B31 +#define GL_MAX_FRAGMENT_UNIFORM_COMPONENTS 0x8B49 +#define GL_MAX_VERTEX_UNIFORM_COMPONENTS 0x8B4A +#define GL_MAX_VARYING_FLOATS 0x8B4B +#define GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS 0x8B4C +#define GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS 0x8B4D +#define GL_SHADER_TYPE 0x8B4F +#define GL_FLOAT_VEC2 0x8B50 +#define GL_FLOAT_VEC3 0x8B51 +#define GL_FLOAT_VEC4 0x8B52 +#define GL_INT_VEC2 0x8B53 +#define GL_INT_VEC3 0x8B54 +#define GL_INT_VEC4 0x8B55 +#define GL_BOOL 0x8B56 +#define GL_BOOL_VEC2 0x8B57 +#define GL_BOOL_VEC3 0x8B58 +#define GL_BOOL_VEC4 0x8B59 +#define GL_FLOAT_MAT2 0x8B5A +#define GL_FLOAT_MAT3 0x8B5B +#define GL_FLOAT_MAT4 0x8B5C +#define GL_SAMPLER_1D 0x8B5D +#define GL_SAMPLER_2D 0x8B5E +#define GL_SAMPLER_3D 0x8B5F +#define GL_SAMPLER_CUBE 0x8B60 +#define GL_SAMPLER_1D_SHADOW 0x8B61 +#define GL_SAMPLER_2D_SHADOW 0x8B62 +#define GL_DELETE_STATUS 0x8B80 +#define GL_COMPILE_STATUS 0x8B81 +#define GL_LINK_STATUS 0x8B82 +#define GL_VALIDATE_STATUS 0x8B83 +#define GL_INFO_LOG_LENGTH 0x8B84 +#define GL_ATTACHED_SHADERS 0x8B85 +#define GL_ACTIVE_UNIFORMS 0x8B86 +#define GL_ACTIVE_UNIFORM_MAX_LENGTH 0x8B87 +#define GL_SHADER_SOURCE_LENGTH 0x8B88 +#define GL_ACTIVE_ATTRIBUTES 0x8B89 +#define GL_ACTIVE_ATTRIBUTE_MAX_LENGTH 0x8B8A +#define GL_FRAGMENT_SHADER_DERIVATIVE_HINT 0x8B8B +#define GL_SHADING_LANGUAGE_VERSION 0x8B8C +#define GL_CURRENT_PROGRAM 0x8B8D +#define GL_POINT_SPRITE_COORD_ORIGIN 0x8CA0 +#define GL_LOWER_LEFT 0x8CA1 +#define GL_UPPER_LEFT 0x8CA2 +#define GL_STENCIL_BACK_REF 0x8CA3 +#define GL_STENCIL_BACK_VALUE_MASK 0x8CA4 +#define GL_STENCIL_BACK_WRITEMASK 0x8CA5 +#define GL_PIXEL_PACK_BUFFER 0x88EB +#define GL_PIXEL_UNPACK_BUFFER 0x88EC +#define GL_PIXEL_PACK_BUFFER_BINDING 0x88ED +#define GL_PIXEL_UNPACK_BUFFER_BINDING 0x88EF +#define GL_FLOAT_MAT2x3 0x8B65 +#define GL_FLOAT_MAT2x4 0x8B66 +#define GL_FLOAT_MAT3x2 0x8B67 +#define GL_FLOAT_MAT3x4 0x8B68 +#define GL_FLOAT_MAT4x2 0x8B69 +#define GL_FLOAT_MAT4x3 0x8B6A +#define GL_SRGB 0x8C40 +#define GL_SRGB8 0x8C41 +#define GL_SRGB_ALPHA 0x8C42 +#define GL_SRGB8_ALPHA8 0x8C43 +#define GL_COMPRESSED_SRGB 0x8C48 +#define GL_COMPRESSED_SRGB_ALPHA 0x8C49 +#define GL_COMPARE_REF_TO_TEXTURE 0x884E +#define GL_CLIP_DISTANCE0 0x3000 +#define GL_CLIP_DISTANCE1 0x3001 +#define GL_CLIP_DISTANCE2 0x3002 +#define GL_CLIP_DISTANCE3 0x3003 +#define GL_CLIP_DISTANCE4 0x3004 +#define GL_CLIP_DISTANCE5 0x3005 +#define GL_CLIP_DISTANCE6 0x3006 +#define GL_CLIP_DISTANCE7 0x3007 +#define GL_MAX_CLIP_DISTANCES 0x0D32 +#define GL_MAJOR_VERSION 0x821B +#define GL_MINOR_VERSION 0x821C +#define GL_NUM_EXTENSIONS 0x821D +#define GL_CONTEXT_FLAGS 0x821E +#define GL_COMPRESSED_RED 0x8225 +#define GL_COMPRESSED_RG 0x8226 +#define GL_CONTEXT_FLAG_FORWARD_COMPATIBLE_BIT 0x00000001 +#define GL_RGBA32F 0x8814 +#define GL_RGB32F 0x8815 +#define GL_RGBA16F 0x881A +#define GL_RGB16F 0x881B +#define GL_VERTEX_ATTRIB_ARRAY_INTEGER 0x88FD +#define GL_MAX_ARRAY_TEXTURE_LAYERS 0x88FF +#define GL_MIN_PROGRAM_TEXEL_OFFSET 0x8904 +#define GL_MAX_PROGRAM_TEXEL_OFFSET 0x8905 +#define GL_CLAMP_READ_COLOR 0x891C +#define GL_FIXED_ONLY 0x891D +#define GL_MAX_VARYING_COMPONENTS 0x8B4B +#define GL_TEXTURE_1D_ARRAY 0x8C18 +#define GL_PROXY_TEXTURE_1D_ARRAY 0x8C19 +#define GL_TEXTURE_2D_ARRAY 0x8C1A +#define GL_PROXY_TEXTURE_2D_ARRAY 0x8C1B +#define GL_TEXTURE_BINDING_1D_ARRAY 0x8C1C 
+#define GL_TEXTURE_BINDING_2D_ARRAY 0x8C1D +#define GL_R11F_G11F_B10F 0x8C3A +#define GL_UNSIGNED_INT_10F_11F_11F_REV 0x8C3B +#define GL_RGB9_E5 0x8C3D +#define GL_UNSIGNED_INT_5_9_9_9_REV 0x8C3E +#define GL_TEXTURE_SHARED_SIZE 0x8C3F +#define GL_TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH 0x8C76 +#define GL_TRANSFORM_FEEDBACK_BUFFER_MODE 0x8C7F +#define GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS 0x8C80 +#define GL_TRANSFORM_FEEDBACK_VARYINGS 0x8C83 +#define GL_TRANSFORM_FEEDBACK_BUFFER_START 0x8C84 +#define GL_TRANSFORM_FEEDBACK_BUFFER_SIZE 0x8C85 +#define GL_PRIMITIVES_GENERATED 0x8C87 +#define GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN 0x8C88 +#define GL_RASTERIZER_DISCARD 0x8C89 +#define GL_MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS 0x8C8A +#define GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS 0x8C8B +#define GL_INTERLEAVED_ATTRIBS 0x8C8C +#define GL_SEPARATE_ATTRIBS 0x8C8D +#define GL_TRANSFORM_FEEDBACK_BUFFER 0x8C8E +#define GL_TRANSFORM_FEEDBACK_BUFFER_BINDING 0x8C8F +#define GL_RGBA32UI 0x8D70 +#define GL_RGB32UI 0x8D71 +#define GL_RGBA16UI 0x8D76 +#define GL_RGB16UI 0x8D77 +#define GL_RGBA8UI 0x8D7C +#define GL_RGB8UI 0x8D7D +#define GL_RGBA32I 0x8D82 +#define GL_RGB32I 0x8D83 +#define GL_RGBA16I 0x8D88 +#define GL_RGB16I 0x8D89 +#define GL_RGBA8I 0x8D8E +#define GL_RGB8I 0x8D8F +#define GL_RED_INTEGER 0x8D94 +#define GL_GREEN_INTEGER 0x8D95 +#define GL_BLUE_INTEGER 0x8D96 +#define GL_RGB_INTEGER 0x8D98 +#define GL_RGBA_INTEGER 0x8D99 +#define GL_BGR_INTEGER 0x8D9A +#define GL_BGRA_INTEGER 0x8D9B +#define GL_SAMPLER_1D_ARRAY 0x8DC0 +#define GL_SAMPLER_2D_ARRAY 0x8DC1 +#define GL_SAMPLER_1D_ARRAY_SHADOW 0x8DC3 +#define GL_SAMPLER_2D_ARRAY_SHADOW 0x8DC4 +#define GL_SAMPLER_CUBE_SHADOW 0x8DC5 +#define GL_UNSIGNED_INT_VEC2 0x8DC6 +#define GL_UNSIGNED_INT_VEC3 0x8DC7 +#define GL_UNSIGNED_INT_VEC4 0x8DC8 +#define GL_INT_SAMPLER_1D 0x8DC9 +#define GL_INT_SAMPLER_2D 0x8DCA +#define GL_INT_SAMPLER_3D 0x8DCB +#define GL_INT_SAMPLER_CUBE 0x8DCC +#define GL_INT_SAMPLER_1D_ARRAY 0x8DCE +#define GL_INT_SAMPLER_2D_ARRAY 0x8DCF +#define GL_UNSIGNED_INT_SAMPLER_1D 0x8DD1 +#define GL_UNSIGNED_INT_SAMPLER_2D 0x8DD2 +#define GL_UNSIGNED_INT_SAMPLER_3D 0x8DD3 +#define GL_UNSIGNED_INT_SAMPLER_CUBE 0x8DD4 +#define GL_UNSIGNED_INT_SAMPLER_1D_ARRAY 0x8DD6 +#define GL_UNSIGNED_INT_SAMPLER_2D_ARRAY 0x8DD7 +#define GL_QUERY_WAIT 0x8E13 +#define GL_QUERY_NO_WAIT 0x8E14 +#define GL_QUERY_BY_REGION_WAIT 0x8E15 +#define GL_QUERY_BY_REGION_NO_WAIT 0x8E16 +#define GL_BUFFER_ACCESS_FLAGS 0x911F +#define GL_BUFFER_MAP_LENGTH 0x9120 +#define GL_BUFFER_MAP_OFFSET 0x9121 +#define GL_DEPTH_COMPONENT32F 0x8CAC +#define GL_DEPTH32F_STENCIL8 0x8CAD +#define GL_FLOAT_32_UNSIGNED_INT_24_8_REV 0x8DAD +#define GL_INVALID_FRAMEBUFFER_OPERATION 0x0506 +#define GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING 0x8210 +#define GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE 0x8211 +#define GL_FRAMEBUFFER_ATTACHMENT_RED_SIZE 0x8212 +#define GL_FRAMEBUFFER_ATTACHMENT_GREEN_SIZE 0x8213 +#define GL_FRAMEBUFFER_ATTACHMENT_BLUE_SIZE 0x8214 +#define GL_FRAMEBUFFER_ATTACHMENT_ALPHA_SIZE 0x8215 +#define GL_FRAMEBUFFER_ATTACHMENT_DEPTH_SIZE 0x8216 +#define GL_FRAMEBUFFER_ATTACHMENT_STENCIL_SIZE 0x8217 +#define GL_FRAMEBUFFER_DEFAULT 0x8218 +#define GL_FRAMEBUFFER_UNDEFINED 0x8219 +#define GL_DEPTH_STENCIL_ATTACHMENT 0x821A +#define GL_MAX_RENDERBUFFER_SIZE 0x84E8 +#define GL_DEPTH_STENCIL 0x84F9 +#define GL_UNSIGNED_INT_24_8 0x84FA +#define GL_DEPTH24_STENCIL8 0x88F0 +#define GL_TEXTURE_STENCIL_SIZE 0x88F1 +#define GL_TEXTURE_RED_TYPE 0x8C10 +#define 
GL_TEXTURE_GREEN_TYPE 0x8C11 +#define GL_TEXTURE_BLUE_TYPE 0x8C12 +#define GL_TEXTURE_ALPHA_TYPE 0x8C13 +#define GL_TEXTURE_DEPTH_TYPE 0x8C16 +#define GL_UNSIGNED_NORMALIZED 0x8C17 +#define GL_FRAMEBUFFER_BINDING 0x8CA6 +#define GL_DRAW_FRAMEBUFFER_BINDING 0x8CA6 +#define GL_RENDERBUFFER_BINDING 0x8CA7 +#define GL_READ_FRAMEBUFFER 0x8CA8 +#define GL_DRAW_FRAMEBUFFER 0x8CA9 +#define GL_READ_FRAMEBUFFER_BINDING 0x8CAA +#define GL_RENDERBUFFER_SAMPLES 0x8CAB +#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE 0x8CD0 +#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME 0x8CD1 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL 0x8CD2 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE 0x8CD3 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER 0x8CD4 +#define GL_FRAMEBUFFER_COMPLETE 0x8CD5 +#define GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT 0x8CD6 +#define GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT 0x8CD7 +#define GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER 0x8CDB +#define GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER 0x8CDC +#define GL_FRAMEBUFFER_UNSUPPORTED 0x8CDD +#define GL_MAX_COLOR_ATTACHMENTS 0x8CDF +#define GL_COLOR_ATTACHMENT0 0x8CE0 +#define GL_COLOR_ATTACHMENT1 0x8CE1 +#define GL_COLOR_ATTACHMENT2 0x8CE2 +#define GL_COLOR_ATTACHMENT3 0x8CE3 +#define GL_COLOR_ATTACHMENT4 0x8CE4 +#define GL_COLOR_ATTACHMENT5 0x8CE5 +#define GL_COLOR_ATTACHMENT6 0x8CE6 +#define GL_COLOR_ATTACHMENT7 0x8CE7 +#define GL_COLOR_ATTACHMENT8 0x8CE8 +#define GL_COLOR_ATTACHMENT9 0x8CE9 +#define GL_COLOR_ATTACHMENT10 0x8CEA +#define GL_COLOR_ATTACHMENT11 0x8CEB +#define GL_COLOR_ATTACHMENT12 0x8CEC +#define GL_COLOR_ATTACHMENT13 0x8CED +#define GL_COLOR_ATTACHMENT14 0x8CEE +#define GL_COLOR_ATTACHMENT15 0x8CEF +#define GL_COLOR_ATTACHMENT16 0x8CF0 +#define GL_COLOR_ATTACHMENT17 0x8CF1 +#define GL_COLOR_ATTACHMENT18 0x8CF2 +#define GL_COLOR_ATTACHMENT19 0x8CF3 +#define GL_COLOR_ATTACHMENT20 0x8CF4 +#define GL_COLOR_ATTACHMENT21 0x8CF5 +#define GL_COLOR_ATTACHMENT22 0x8CF6 +#define GL_COLOR_ATTACHMENT23 0x8CF7 +#define GL_COLOR_ATTACHMENT24 0x8CF8 +#define GL_COLOR_ATTACHMENT25 0x8CF9 +#define GL_COLOR_ATTACHMENT26 0x8CFA +#define GL_COLOR_ATTACHMENT27 0x8CFB +#define GL_COLOR_ATTACHMENT28 0x8CFC +#define GL_COLOR_ATTACHMENT29 0x8CFD +#define GL_COLOR_ATTACHMENT30 0x8CFE +#define GL_COLOR_ATTACHMENT31 0x8CFF +#define GL_DEPTH_ATTACHMENT 0x8D00 +#define GL_STENCIL_ATTACHMENT 0x8D20 +#define GL_FRAMEBUFFER 0x8D40 +#define GL_RENDERBUFFER 0x8D41 +#define GL_RENDERBUFFER_WIDTH 0x8D42 +#define GL_RENDERBUFFER_HEIGHT 0x8D43 +#define GL_RENDERBUFFER_INTERNAL_FORMAT 0x8D44 +#define GL_STENCIL_INDEX1 0x8D46 +#define GL_STENCIL_INDEX4 0x8D47 +#define GL_STENCIL_INDEX8 0x8D48 +#define GL_STENCIL_INDEX16 0x8D49 +#define GL_RENDERBUFFER_RED_SIZE 0x8D50 +#define GL_RENDERBUFFER_GREEN_SIZE 0x8D51 +#define GL_RENDERBUFFER_BLUE_SIZE 0x8D52 +#define GL_RENDERBUFFER_ALPHA_SIZE 0x8D53 +#define GL_RENDERBUFFER_DEPTH_SIZE 0x8D54 +#define GL_RENDERBUFFER_STENCIL_SIZE 0x8D55 +#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE 0x8D56 +#define GL_MAX_SAMPLES 0x8D57 +#define GL_FRAMEBUFFER_SRGB 0x8DB9 +#define GL_HALF_FLOAT 0x140B +#define GL_MAP_READ_BIT 0x0001 +#define GL_MAP_WRITE_BIT 0x0002 +#define GL_MAP_INVALIDATE_RANGE_BIT 0x0004 +#define GL_MAP_INVALIDATE_BUFFER_BIT 0x0008 +#define GL_MAP_FLUSH_EXPLICIT_BIT 0x0010 +#define GL_MAP_UNSYNCHRONIZED_BIT 0x0020 +#define GL_COMPRESSED_RED_RGTC1 0x8DBB +#define GL_COMPRESSED_SIGNED_RED_RGTC1 0x8DBC +#define GL_COMPRESSED_RG_RGTC2 0x8DBD +#define GL_COMPRESSED_SIGNED_RG_RGTC2 0x8DBE +#define GL_RG 0x8227 +#define 
GL_RG_INTEGER 0x8228 +#define GL_R8 0x8229 +#define GL_R16 0x822A +#define GL_RG8 0x822B +#define GL_RG16 0x822C +#define GL_R16F 0x822D +#define GL_R32F 0x822E +#define GL_RG16F 0x822F +#define GL_RG32F 0x8230 +#define GL_R8I 0x8231 +#define GL_R8UI 0x8232 +#define GL_R16I 0x8233 +#define GL_R16UI 0x8234 +#define GL_R32I 0x8235 +#define GL_R32UI 0x8236 +#define GL_RG8I 0x8237 +#define GL_RG8UI 0x8238 +#define GL_RG16I 0x8239 +#define GL_RG16UI 0x823A +#define GL_RG32I 0x823B +#define GL_RG32UI 0x823C +#define GL_VERTEX_ARRAY_BINDING 0x85B5 +#define GL_SAMPLER_2D_RECT 0x8B63 +#define GL_SAMPLER_2D_RECT_SHADOW 0x8B64 +#define GL_SAMPLER_BUFFER 0x8DC2 +#define GL_INT_SAMPLER_2D_RECT 0x8DCD +#define GL_INT_SAMPLER_BUFFER 0x8DD0 +#define GL_UNSIGNED_INT_SAMPLER_2D_RECT 0x8DD5 +#define GL_UNSIGNED_INT_SAMPLER_BUFFER 0x8DD8 +#define GL_TEXTURE_BUFFER 0x8C2A +#define GL_MAX_TEXTURE_BUFFER_SIZE 0x8C2B +#define GL_TEXTURE_BINDING_BUFFER 0x8C2C +#define GL_TEXTURE_BUFFER_DATA_STORE_BINDING 0x8C2D +#define GL_TEXTURE_RECTANGLE 0x84F5 +#define GL_TEXTURE_BINDING_RECTANGLE 0x84F6 +#define GL_PROXY_TEXTURE_RECTANGLE 0x84F7 +#define GL_MAX_RECTANGLE_TEXTURE_SIZE 0x84F8 +#define GL_R8_SNORM 0x8F94 +#define GL_RG8_SNORM 0x8F95 +#define GL_RGB8_SNORM 0x8F96 +#define GL_RGBA8_SNORM 0x8F97 +#define GL_R16_SNORM 0x8F98 +#define GL_RG16_SNORM 0x8F99 +#define GL_RGB16_SNORM 0x8F9A +#define GL_RGBA16_SNORM 0x8F9B +#define GL_SIGNED_NORMALIZED 0x8F9C +#define GL_PRIMITIVE_RESTART 0x8F9D +#define GL_PRIMITIVE_RESTART_INDEX 0x8F9E +#define GL_COPY_READ_BUFFER 0x8F36 +#define GL_COPY_WRITE_BUFFER 0x8F37 +#define GL_UNIFORM_BUFFER 0x8A11 +#define GL_UNIFORM_BUFFER_BINDING 0x8A28 +#define GL_UNIFORM_BUFFER_START 0x8A29 +#define GL_UNIFORM_BUFFER_SIZE 0x8A2A +#define GL_MAX_VERTEX_UNIFORM_BLOCKS 0x8A2B +#define GL_MAX_GEOMETRY_UNIFORM_BLOCKS 0x8A2C +#define GL_MAX_FRAGMENT_UNIFORM_BLOCKS 0x8A2D +#define GL_MAX_COMBINED_UNIFORM_BLOCKS 0x8A2E +#define GL_MAX_UNIFORM_BUFFER_BINDINGS 0x8A2F +#define GL_MAX_UNIFORM_BLOCK_SIZE 0x8A30 +#define GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS 0x8A31 +#define GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS 0x8A32 +#define GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS 0x8A33 +#define GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT 0x8A34 +#define GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH 0x8A35 +#define GL_ACTIVE_UNIFORM_BLOCKS 0x8A36 +#define GL_UNIFORM_TYPE 0x8A37 +#define GL_UNIFORM_SIZE 0x8A38 +#define GL_UNIFORM_NAME_LENGTH 0x8A39 +#define GL_UNIFORM_BLOCK_INDEX 0x8A3A +#define GL_UNIFORM_OFFSET 0x8A3B +#define GL_UNIFORM_ARRAY_STRIDE 0x8A3C +#define GL_UNIFORM_MATRIX_STRIDE 0x8A3D +#define GL_UNIFORM_IS_ROW_MAJOR 0x8A3E +#define GL_UNIFORM_BLOCK_BINDING 0x8A3F +#define GL_UNIFORM_BLOCK_DATA_SIZE 0x8A40 +#define GL_UNIFORM_BLOCK_NAME_LENGTH 0x8A41 +#define GL_UNIFORM_BLOCK_ACTIVE_UNIFORMS 0x8A42 +#define GL_UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES 0x8A43 +#define GL_UNIFORM_BLOCK_REFERENCED_BY_VERTEX_SHADER 0x8A44 +#define GL_UNIFORM_BLOCK_REFERENCED_BY_GEOMETRY_SHADER 0x8A45 +#define GL_UNIFORM_BLOCK_REFERENCED_BY_FRAGMENT_SHADER 0x8A46 +#define GL_INVALID_INDEX 0xFFFFFFFF +#define GL_CONTEXT_CORE_PROFILE_BIT 0x00000001 +#define GL_CONTEXT_COMPATIBILITY_PROFILE_BIT 0x00000002 +#define GL_LINES_ADJACENCY 0x000A +#define GL_LINE_STRIP_ADJACENCY 0x000B +#define GL_TRIANGLES_ADJACENCY 0x000C +#define GL_TRIANGLE_STRIP_ADJACENCY 0x000D +#define GL_PROGRAM_POINT_SIZE 0x8642 +#define GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS 0x8C29 +#define GL_FRAMEBUFFER_ATTACHMENT_LAYERED 0x8DA7 +#define 
GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS 0x8DA8 +#define GL_GEOMETRY_SHADER 0x8DD9 +#define GL_GEOMETRY_VERTICES_OUT 0x8916 +#define GL_GEOMETRY_INPUT_TYPE 0x8917 +#define GL_GEOMETRY_OUTPUT_TYPE 0x8918 +#define GL_MAX_GEOMETRY_UNIFORM_COMPONENTS 0x8DDF +#define GL_MAX_GEOMETRY_OUTPUT_VERTICES 0x8DE0 +#define GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS 0x8DE1 +#define GL_MAX_VERTEX_OUTPUT_COMPONENTS 0x9122 +#define GL_MAX_GEOMETRY_INPUT_COMPONENTS 0x9123 +#define GL_MAX_GEOMETRY_OUTPUT_COMPONENTS 0x9124 +#define GL_MAX_FRAGMENT_INPUT_COMPONENTS 0x9125 +#define GL_CONTEXT_PROFILE_MASK 0x9126 +#define GL_DEPTH_CLAMP 0x864F +#define GL_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION 0x8E4C +#define GL_FIRST_VERTEX_CONVENTION 0x8E4D +#define GL_LAST_VERTEX_CONVENTION 0x8E4E +#define GL_PROVOKING_VERTEX 0x8E4F +#define GL_TEXTURE_CUBE_MAP_SEAMLESS 0x884F +#define GL_MAX_SERVER_WAIT_TIMEOUT 0x9111 +#define GL_OBJECT_TYPE 0x9112 +#define GL_SYNC_CONDITION 0x9113 +#define GL_SYNC_STATUS 0x9114 +#define GL_SYNC_FLAGS 0x9115 +#define GL_SYNC_FENCE 0x9116 +#define GL_SYNC_GPU_COMMANDS_COMPLETE 0x9117 +#define GL_UNSIGNALED 0x9118 +#define GL_SIGNALED 0x9119 +#define GL_ALREADY_SIGNALED 0x911A +#define GL_TIMEOUT_EXPIRED 0x911B +#define GL_CONDITION_SATISFIED 0x911C +#define GL_WAIT_FAILED 0x911D +#define GL_TIMEOUT_IGNORED 0xFFFFFFFFFFFFFFFF +#define GL_SYNC_FLUSH_COMMANDS_BIT 0x00000001 +#define GL_SAMPLE_POSITION 0x8E50 +#define GL_SAMPLE_MASK 0x8E51 +#define GL_SAMPLE_MASK_VALUE 0x8E52 +#define GL_MAX_SAMPLE_MASK_WORDS 0x8E59 +#define GL_TEXTURE_2D_MULTISAMPLE 0x9100 +#define GL_PROXY_TEXTURE_2D_MULTISAMPLE 0x9101 +#define GL_TEXTURE_2D_MULTISAMPLE_ARRAY 0x9102 +#define GL_PROXY_TEXTURE_2D_MULTISAMPLE_ARRAY 0x9103 +#define GL_TEXTURE_BINDING_2D_MULTISAMPLE 0x9104 +#define GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY 0x9105 +#define GL_TEXTURE_SAMPLES 0x9106 +#define GL_TEXTURE_FIXED_SAMPLE_LOCATIONS 0x9107 +#define GL_SAMPLER_2D_MULTISAMPLE 0x9108 +#define GL_INT_SAMPLER_2D_MULTISAMPLE 0x9109 +#define GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE 0x910A +#define GL_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910B +#define GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910C +#define GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910D +#define GL_MAX_COLOR_TEXTURE_SAMPLES 0x910E +#define GL_MAX_DEPTH_TEXTURE_SAMPLES 0x910F +#define GL_MAX_INTEGER_SAMPLES 0x9110 +#define GL_VERTEX_ATTRIB_ARRAY_DIVISOR 0x88FE +#define GL_SRC1_COLOR 0x88F9 +#define GL_ONE_MINUS_SRC1_COLOR 0x88FA +#define GL_ONE_MINUS_SRC1_ALPHA 0x88FB +#define GL_MAX_DUAL_SOURCE_DRAW_BUFFERS 0x88FC +#define GL_ANY_SAMPLES_PASSED 0x8C2F +#define GL_SAMPLER_BINDING 0x8919 +#define GL_RGB10_A2UI 0x906F +#define GL_TEXTURE_SWIZZLE_R 0x8E42 +#define GL_TEXTURE_SWIZZLE_G 0x8E43 +#define GL_TEXTURE_SWIZZLE_B 0x8E44 +#define GL_TEXTURE_SWIZZLE_A 0x8E45 +#define GL_TEXTURE_SWIZZLE_RGBA 0x8E46 +#define GL_TIME_ELAPSED 0x88BF +#define GL_TIMESTAMP 0x8E28 +#define GL_INT_2_10_10_10_REV 0x8D9F +#ifndef GL_VERSION_1_0 +#define GL_VERSION_1_0 1 +GLAPI int GLAD_GL_VERSION_1_0; +typedef void (APIENTRYP PFNGLCULLFACEPROC)(GLenum mode); +GLAPI PFNGLCULLFACEPROC glad_glCullFace; +GLAPI PFNGLCULLFACEPROC glad_debug_glCullFace; +#define glCullFace glad_debug_glCullFace +typedef void (APIENTRYP PFNGLFRONTFACEPROC)(GLenum mode); +GLAPI PFNGLFRONTFACEPROC glad_glFrontFace; +GLAPI PFNGLFRONTFACEPROC glad_debug_glFrontFace; +#define glFrontFace glad_debug_glFrontFace +typedef void (APIENTRYP PFNGLHINTPROC)(GLenum target, GLenum mode); +GLAPI PFNGLHINTPROC glad_glHint; +GLAPI PFNGLHINTPROC glad_debug_glHint; 
+#define glHint glad_debug_glHint +typedef void (APIENTRYP PFNGLLINEWIDTHPROC)(GLfloat width); +GLAPI PFNGLLINEWIDTHPROC glad_glLineWidth; +GLAPI PFNGLLINEWIDTHPROC glad_debug_glLineWidth; +#define glLineWidth glad_debug_glLineWidth +typedef void (APIENTRYP PFNGLPOINTSIZEPROC)(GLfloat size); +GLAPI PFNGLPOINTSIZEPROC glad_glPointSize; +GLAPI PFNGLPOINTSIZEPROC glad_debug_glPointSize; +#define glPointSize glad_debug_glPointSize +typedef void (APIENTRYP PFNGLPOLYGONMODEPROC)(GLenum face, GLenum mode); +GLAPI PFNGLPOLYGONMODEPROC glad_glPolygonMode; +GLAPI PFNGLPOLYGONMODEPROC glad_debug_glPolygonMode; +#define glPolygonMode glad_debug_glPolygonMode +typedef void (APIENTRYP PFNGLSCISSORPROC)(GLint x, GLint y, GLsizei width, GLsizei height); +GLAPI PFNGLSCISSORPROC glad_glScissor; +GLAPI PFNGLSCISSORPROC glad_debug_glScissor; +#define glScissor glad_debug_glScissor +typedef void (APIENTRYP PFNGLTEXPARAMETERFPROC)(GLenum target, GLenum pname, GLfloat param); +GLAPI PFNGLTEXPARAMETERFPROC glad_glTexParameterf; +GLAPI PFNGLTEXPARAMETERFPROC glad_debug_glTexParameterf; +#define glTexParameterf glad_debug_glTexParameterf +typedef void (APIENTRYP PFNGLTEXPARAMETERFVPROC)(GLenum target, GLenum pname, const GLfloat *params); +GLAPI PFNGLTEXPARAMETERFVPROC glad_glTexParameterfv; +GLAPI PFNGLTEXPARAMETERFVPROC glad_debug_glTexParameterfv; +#define glTexParameterfv glad_debug_glTexParameterfv +typedef void (APIENTRYP PFNGLTEXPARAMETERIPROC)(GLenum target, GLenum pname, GLint param); +GLAPI PFNGLTEXPARAMETERIPROC glad_glTexParameteri; +GLAPI PFNGLTEXPARAMETERIPROC glad_debug_glTexParameteri; +#define glTexParameteri glad_debug_glTexParameteri +typedef void (APIENTRYP PFNGLTEXPARAMETERIVPROC)(GLenum target, GLenum pname, const GLint *params); +GLAPI PFNGLTEXPARAMETERIVPROC glad_glTexParameteriv; +GLAPI PFNGLTEXPARAMETERIVPROC glad_debug_glTexParameteriv; +#define glTexParameteriv glad_debug_glTexParameteriv +typedef void (APIENTRYP PFNGLTEXIMAGE1DPROC)(GLenum target, GLint level, GLint internalformat, GLsizei width, GLint border, GLenum format, GLenum type, const void *pixels); +GLAPI PFNGLTEXIMAGE1DPROC glad_glTexImage1D; +GLAPI PFNGLTEXIMAGE1DPROC glad_debug_glTexImage1D; +#define glTexImage1D glad_debug_glTexImage1D +typedef void (APIENTRYP PFNGLTEXIMAGE2DPROC)(GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void *pixels); +GLAPI PFNGLTEXIMAGE2DPROC glad_glTexImage2D; +GLAPI PFNGLTEXIMAGE2DPROC glad_debug_glTexImage2D; +#define glTexImage2D glad_debug_glTexImage2D +typedef void (APIENTRYP PFNGLDRAWBUFFERPROC)(GLenum buf); +GLAPI PFNGLDRAWBUFFERPROC glad_glDrawBuffer; +GLAPI PFNGLDRAWBUFFERPROC glad_debug_glDrawBuffer; +#define glDrawBuffer glad_debug_glDrawBuffer +typedef void (APIENTRYP PFNGLCLEARPROC)(GLbitfield mask); +GLAPI PFNGLCLEARPROC glad_glClear; +GLAPI PFNGLCLEARPROC glad_debug_glClear; +#define glClear glad_debug_glClear +typedef void (APIENTRYP PFNGLCLEARCOLORPROC)(GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +GLAPI PFNGLCLEARCOLORPROC glad_glClearColor; +GLAPI PFNGLCLEARCOLORPROC glad_debug_glClearColor; +#define glClearColor glad_debug_glClearColor +typedef void (APIENTRYP PFNGLCLEARSTENCILPROC)(GLint s); +GLAPI PFNGLCLEARSTENCILPROC glad_glClearStencil; +GLAPI PFNGLCLEARSTENCILPROC glad_debug_glClearStencil; +#define glClearStencil glad_debug_glClearStencil +typedef void (APIENTRYP PFNGLCLEARDEPTHPROC)(GLdouble depth); +GLAPI PFNGLCLEARDEPTHPROC glad_glClearDepth; +GLAPI PFNGLCLEARDEPTHPROC 
glad_debug_glClearDepth; +#define glClearDepth glad_debug_glClearDepth +typedef void (APIENTRYP PFNGLSTENCILMASKPROC)(GLuint mask); +GLAPI PFNGLSTENCILMASKPROC glad_glStencilMask; +GLAPI PFNGLSTENCILMASKPROC glad_debug_glStencilMask; +#define glStencilMask glad_debug_glStencilMask +typedef void (APIENTRYP PFNGLCOLORMASKPROC)(GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha); +GLAPI PFNGLCOLORMASKPROC glad_glColorMask; +GLAPI PFNGLCOLORMASKPROC glad_debug_glColorMask; +#define glColorMask glad_debug_glColorMask +typedef void (APIENTRYP PFNGLDEPTHMASKPROC)(GLboolean flag); +GLAPI PFNGLDEPTHMASKPROC glad_glDepthMask; +GLAPI PFNGLDEPTHMASKPROC glad_debug_glDepthMask; +#define glDepthMask glad_debug_glDepthMask +typedef void (APIENTRYP PFNGLDISABLEPROC)(GLenum cap); +GLAPI PFNGLDISABLEPROC glad_glDisable; +GLAPI PFNGLDISABLEPROC glad_debug_glDisable; +#define glDisable glad_debug_glDisable +typedef void (APIENTRYP PFNGLENABLEPROC)(GLenum cap); +GLAPI PFNGLENABLEPROC glad_glEnable; +GLAPI PFNGLENABLEPROC glad_debug_glEnable; +#define glEnable glad_debug_glEnable +typedef void (APIENTRYP PFNGLFINISHPROC)(void); +GLAPI PFNGLFINISHPROC glad_glFinish; +GLAPI PFNGLFINISHPROC glad_debug_glFinish; +#define glFinish glad_debug_glFinish +typedef void (APIENTRYP PFNGLFLUSHPROC)(void); +GLAPI PFNGLFLUSHPROC glad_glFlush; +GLAPI PFNGLFLUSHPROC glad_debug_glFlush; +#define glFlush glad_debug_glFlush +typedef void (APIENTRYP PFNGLBLENDFUNCPROC)(GLenum sfactor, GLenum dfactor); +GLAPI PFNGLBLENDFUNCPROC glad_glBlendFunc; +GLAPI PFNGLBLENDFUNCPROC glad_debug_glBlendFunc; +#define glBlendFunc glad_debug_glBlendFunc +typedef void (APIENTRYP PFNGLLOGICOPPROC)(GLenum opcode); +GLAPI PFNGLLOGICOPPROC glad_glLogicOp; +GLAPI PFNGLLOGICOPPROC glad_debug_glLogicOp; +#define glLogicOp glad_debug_glLogicOp +typedef void (APIENTRYP PFNGLSTENCILFUNCPROC)(GLenum func, GLint ref, GLuint mask); +GLAPI PFNGLSTENCILFUNCPROC glad_glStencilFunc; +GLAPI PFNGLSTENCILFUNCPROC glad_debug_glStencilFunc; +#define glStencilFunc glad_debug_glStencilFunc +typedef void (APIENTRYP PFNGLSTENCILOPPROC)(GLenum fail, GLenum zfail, GLenum zpass); +GLAPI PFNGLSTENCILOPPROC glad_glStencilOp; +GLAPI PFNGLSTENCILOPPROC glad_debug_glStencilOp; +#define glStencilOp glad_debug_glStencilOp +typedef void (APIENTRYP PFNGLDEPTHFUNCPROC)(GLenum func); +GLAPI PFNGLDEPTHFUNCPROC glad_glDepthFunc; +GLAPI PFNGLDEPTHFUNCPROC glad_debug_glDepthFunc; +#define glDepthFunc glad_debug_glDepthFunc +typedef void (APIENTRYP PFNGLPIXELSTOREFPROC)(GLenum pname, GLfloat param); +GLAPI PFNGLPIXELSTOREFPROC glad_glPixelStoref; +GLAPI PFNGLPIXELSTOREFPROC glad_debug_glPixelStoref; +#define glPixelStoref glad_debug_glPixelStoref +typedef void (APIENTRYP PFNGLPIXELSTOREIPROC)(GLenum pname, GLint param); +GLAPI PFNGLPIXELSTOREIPROC glad_glPixelStorei; +GLAPI PFNGLPIXELSTOREIPROC glad_debug_glPixelStorei; +#define glPixelStorei glad_debug_glPixelStorei +typedef void (APIENTRYP PFNGLREADBUFFERPROC)(GLenum src); +GLAPI PFNGLREADBUFFERPROC glad_glReadBuffer; +GLAPI PFNGLREADBUFFERPROC glad_debug_glReadBuffer; +#define glReadBuffer glad_debug_glReadBuffer +typedef void (APIENTRYP PFNGLREADPIXELSPROC)(GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, void *pixels); +GLAPI PFNGLREADPIXELSPROC glad_glReadPixels; +GLAPI PFNGLREADPIXELSPROC glad_debug_glReadPixels; +#define glReadPixels glad_debug_glReadPixels +typedef void (APIENTRYP PFNGLGETBOOLEANVPROC)(GLenum pname, GLboolean *data); +GLAPI PFNGLGETBOOLEANVPROC glad_glGetBooleanv; +GLAPI 
PFNGLGETBOOLEANVPROC glad_debug_glGetBooleanv; +#define glGetBooleanv glad_debug_glGetBooleanv +typedef void (APIENTRYP PFNGLGETDOUBLEVPROC)(GLenum pname, GLdouble *data); +GLAPI PFNGLGETDOUBLEVPROC glad_glGetDoublev; +GLAPI PFNGLGETDOUBLEVPROC glad_debug_glGetDoublev; +#define glGetDoublev glad_debug_glGetDoublev +typedef GLenum (APIENTRYP PFNGLGETERRORPROC)(void); +GLAPI PFNGLGETERRORPROC glad_glGetError; +GLAPI PFNGLGETERRORPROC glad_debug_glGetError; +#define glGetError glad_debug_glGetError +typedef void (APIENTRYP PFNGLGETFLOATVPROC)(GLenum pname, GLfloat *data); +GLAPI PFNGLGETFLOATVPROC glad_glGetFloatv; +GLAPI PFNGLGETFLOATVPROC glad_debug_glGetFloatv; +#define glGetFloatv glad_debug_glGetFloatv +typedef void (APIENTRYP PFNGLGETINTEGERVPROC)(GLenum pname, GLint *data); +GLAPI PFNGLGETINTEGERVPROC glad_glGetIntegerv; +GLAPI PFNGLGETINTEGERVPROC glad_debug_glGetIntegerv; +#define glGetIntegerv glad_debug_glGetIntegerv +typedef const GLubyte * (APIENTRYP PFNGLGETSTRINGPROC)(GLenum name); +GLAPI PFNGLGETSTRINGPROC glad_glGetString; +GLAPI PFNGLGETSTRINGPROC glad_debug_glGetString; +#define glGetString glad_debug_glGetString +typedef void (APIENTRYP PFNGLGETTEXIMAGEPROC)(GLenum target, GLint level, GLenum format, GLenum type, void *pixels); +GLAPI PFNGLGETTEXIMAGEPROC glad_glGetTexImage; +GLAPI PFNGLGETTEXIMAGEPROC glad_debug_glGetTexImage; +#define glGetTexImage glad_debug_glGetTexImage +typedef void (APIENTRYP PFNGLGETTEXPARAMETERFVPROC)(GLenum target, GLenum pname, GLfloat *params); +GLAPI PFNGLGETTEXPARAMETERFVPROC glad_glGetTexParameterfv; +GLAPI PFNGLGETTEXPARAMETERFVPROC glad_debug_glGetTexParameterfv; +#define glGetTexParameterfv glad_debug_glGetTexParameterfv +typedef void (APIENTRYP PFNGLGETTEXPARAMETERIVPROC)(GLenum target, GLenum pname, GLint *params); +GLAPI PFNGLGETTEXPARAMETERIVPROC glad_glGetTexParameteriv; +GLAPI PFNGLGETTEXPARAMETERIVPROC glad_debug_glGetTexParameteriv; +#define glGetTexParameteriv glad_debug_glGetTexParameteriv +typedef void (APIENTRYP PFNGLGETTEXLEVELPARAMETERFVPROC)(GLenum target, GLint level, GLenum pname, GLfloat *params); +GLAPI PFNGLGETTEXLEVELPARAMETERFVPROC glad_glGetTexLevelParameterfv; +GLAPI PFNGLGETTEXLEVELPARAMETERFVPROC glad_debug_glGetTexLevelParameterfv; +#define glGetTexLevelParameterfv glad_debug_glGetTexLevelParameterfv +typedef void (APIENTRYP PFNGLGETTEXLEVELPARAMETERIVPROC)(GLenum target, GLint level, GLenum pname, GLint *params); +GLAPI PFNGLGETTEXLEVELPARAMETERIVPROC glad_glGetTexLevelParameteriv; +GLAPI PFNGLGETTEXLEVELPARAMETERIVPROC glad_debug_glGetTexLevelParameteriv; +#define glGetTexLevelParameteriv glad_debug_glGetTexLevelParameteriv +typedef GLboolean (APIENTRYP PFNGLISENABLEDPROC)(GLenum cap); +GLAPI PFNGLISENABLEDPROC glad_glIsEnabled; +GLAPI PFNGLISENABLEDPROC glad_debug_glIsEnabled; +#define glIsEnabled glad_debug_glIsEnabled +typedef void (APIENTRYP PFNGLDEPTHRANGEPROC)(GLdouble n, GLdouble f); +GLAPI PFNGLDEPTHRANGEPROC glad_glDepthRange; +GLAPI PFNGLDEPTHRANGEPROC glad_debug_glDepthRange; +#define glDepthRange glad_debug_glDepthRange +typedef void (APIENTRYP PFNGLVIEWPORTPROC)(GLint x, GLint y, GLsizei width, GLsizei height); +GLAPI PFNGLVIEWPORTPROC glad_glViewport; +GLAPI PFNGLVIEWPORTPROC glad_debug_glViewport; +#define glViewport glad_debug_glViewport +#endif +#ifndef GL_VERSION_1_1 +#define GL_VERSION_1_1 1 +GLAPI int GLAD_GL_VERSION_1_1; +typedef void (APIENTRYP PFNGLDRAWARRAYSPROC)(GLenum mode, GLint first, GLsizei count); +GLAPI PFNGLDRAWARRAYSPROC glad_glDrawArrays; +GLAPI PFNGLDRAWARRAYSPROC 
glad_debug_glDrawArrays; +#define glDrawArrays glad_debug_glDrawArrays +typedef void (APIENTRYP PFNGLDRAWELEMENTSPROC)(GLenum mode, GLsizei count, GLenum type, const void *indices); +GLAPI PFNGLDRAWELEMENTSPROC glad_glDrawElements; +GLAPI PFNGLDRAWELEMENTSPROC glad_debug_glDrawElements; +#define glDrawElements glad_debug_glDrawElements +typedef void (APIENTRYP PFNGLPOLYGONOFFSETPROC)(GLfloat factor, GLfloat units); +GLAPI PFNGLPOLYGONOFFSETPROC glad_glPolygonOffset; +GLAPI PFNGLPOLYGONOFFSETPROC glad_debug_glPolygonOffset; +#define glPolygonOffset glad_debug_glPolygonOffset +typedef void (APIENTRYP PFNGLCOPYTEXIMAGE1DPROC)(GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLint border); +GLAPI PFNGLCOPYTEXIMAGE1DPROC glad_glCopyTexImage1D; +GLAPI PFNGLCOPYTEXIMAGE1DPROC glad_debug_glCopyTexImage1D; +#define glCopyTexImage1D glad_debug_glCopyTexImage1D +typedef void (APIENTRYP PFNGLCOPYTEXIMAGE2DPROC)(GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border); +GLAPI PFNGLCOPYTEXIMAGE2DPROC glad_glCopyTexImage2D; +GLAPI PFNGLCOPYTEXIMAGE2DPROC glad_debug_glCopyTexImage2D; +#define glCopyTexImage2D glad_debug_glCopyTexImage2D +typedef void (APIENTRYP PFNGLCOPYTEXSUBIMAGE1DPROC)(GLenum target, GLint level, GLint xoffset, GLint x, GLint y, GLsizei width); +GLAPI PFNGLCOPYTEXSUBIMAGE1DPROC glad_glCopyTexSubImage1D; +GLAPI PFNGLCOPYTEXSUBIMAGE1DPROC glad_debug_glCopyTexSubImage1D; +#define glCopyTexSubImage1D glad_debug_glCopyTexSubImage1D +typedef void (APIENTRYP PFNGLCOPYTEXSUBIMAGE2DPROC)(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height); +GLAPI PFNGLCOPYTEXSUBIMAGE2DPROC glad_glCopyTexSubImage2D; +GLAPI PFNGLCOPYTEXSUBIMAGE2DPROC glad_debug_glCopyTexSubImage2D; +#define glCopyTexSubImage2D glad_debug_glCopyTexSubImage2D +typedef void (APIENTRYP PFNGLTEXSUBIMAGE1DPROC)(GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLenum type, const void *pixels); +GLAPI PFNGLTEXSUBIMAGE1DPROC glad_glTexSubImage1D; +GLAPI PFNGLTEXSUBIMAGE1DPROC glad_debug_glTexSubImage1D; +#define glTexSubImage1D glad_debug_glTexSubImage1D +typedef void (APIENTRYP PFNGLTEXSUBIMAGE2DPROC)(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *pixels); +GLAPI PFNGLTEXSUBIMAGE2DPROC glad_glTexSubImage2D; +GLAPI PFNGLTEXSUBIMAGE2DPROC glad_debug_glTexSubImage2D; +#define glTexSubImage2D glad_debug_glTexSubImage2D +typedef void (APIENTRYP PFNGLBINDTEXTUREPROC)(GLenum target, GLuint texture); +GLAPI PFNGLBINDTEXTUREPROC glad_glBindTexture; +GLAPI PFNGLBINDTEXTUREPROC glad_debug_glBindTexture; +#define glBindTexture glad_debug_glBindTexture +typedef void (APIENTRYP PFNGLDELETETEXTURESPROC)(GLsizei n, const GLuint *textures); +GLAPI PFNGLDELETETEXTURESPROC glad_glDeleteTextures; +GLAPI PFNGLDELETETEXTURESPROC glad_debug_glDeleteTextures; +#define glDeleteTextures glad_debug_glDeleteTextures +typedef void (APIENTRYP PFNGLGENTEXTURESPROC)(GLsizei n, GLuint *textures); +GLAPI PFNGLGENTEXTURESPROC glad_glGenTextures; +GLAPI PFNGLGENTEXTURESPROC glad_debug_glGenTextures; +#define glGenTextures glad_debug_glGenTextures +typedef GLboolean (APIENTRYP PFNGLISTEXTUREPROC)(GLuint texture); +GLAPI PFNGLISTEXTUREPROC glad_glIsTexture; +GLAPI PFNGLISTEXTUREPROC glad_debug_glIsTexture; +#define glIsTexture glad_debug_glIsTexture +#endif +#ifndef GL_VERSION_1_2 +#define GL_VERSION_1_2 1 +GLAPI int 
GLAD_GL_VERSION_1_2; +typedef void (APIENTRYP PFNGLDRAWRANGEELEMENTSPROC)(GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices); +GLAPI PFNGLDRAWRANGEELEMENTSPROC glad_glDrawRangeElements; +GLAPI PFNGLDRAWRANGEELEMENTSPROC glad_debug_glDrawRangeElements; +#define glDrawRangeElements glad_debug_glDrawRangeElements +typedef void (APIENTRYP PFNGLTEXIMAGE3DPROC)(GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void *pixels); +GLAPI PFNGLTEXIMAGE3DPROC glad_glTexImage3D; +GLAPI PFNGLTEXIMAGE3DPROC glad_debug_glTexImage3D; +#define glTexImage3D glad_debug_glTexImage3D +typedef void (APIENTRYP PFNGLTEXSUBIMAGE3DPROC)(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *pixels); +GLAPI PFNGLTEXSUBIMAGE3DPROC glad_glTexSubImage3D; +GLAPI PFNGLTEXSUBIMAGE3DPROC glad_debug_glTexSubImage3D; +#define glTexSubImage3D glad_debug_glTexSubImage3D +typedef void (APIENTRYP PFNGLCOPYTEXSUBIMAGE3DPROC)(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height); +GLAPI PFNGLCOPYTEXSUBIMAGE3DPROC glad_glCopyTexSubImage3D; +GLAPI PFNGLCOPYTEXSUBIMAGE3DPROC glad_debug_glCopyTexSubImage3D; +#define glCopyTexSubImage3D glad_debug_glCopyTexSubImage3D +#endif +#ifndef GL_VERSION_1_3 +#define GL_VERSION_1_3 1 +GLAPI int GLAD_GL_VERSION_1_3; +typedef void (APIENTRYP PFNGLACTIVETEXTUREPROC)(GLenum texture); +GLAPI PFNGLACTIVETEXTUREPROC glad_glActiveTexture; +GLAPI PFNGLACTIVETEXTUREPROC glad_debug_glActiveTexture; +#define glActiveTexture glad_debug_glActiveTexture +typedef void (APIENTRYP PFNGLSAMPLECOVERAGEPROC)(GLfloat value, GLboolean invert); +GLAPI PFNGLSAMPLECOVERAGEPROC glad_glSampleCoverage; +GLAPI PFNGLSAMPLECOVERAGEPROC glad_debug_glSampleCoverage; +#define glSampleCoverage glad_debug_glSampleCoverage +typedef void (APIENTRYP PFNGLCOMPRESSEDTEXIMAGE3DPROC)(GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const void *data); +GLAPI PFNGLCOMPRESSEDTEXIMAGE3DPROC glad_glCompressedTexImage3D; +GLAPI PFNGLCOMPRESSEDTEXIMAGE3DPROC glad_debug_glCompressedTexImage3D; +#define glCompressedTexImage3D glad_debug_glCompressedTexImage3D +typedef void (APIENTRYP PFNGLCOMPRESSEDTEXIMAGE2DPROC)(GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void *data); +GLAPI PFNGLCOMPRESSEDTEXIMAGE2DPROC glad_glCompressedTexImage2D; +GLAPI PFNGLCOMPRESSEDTEXIMAGE2DPROC glad_debug_glCompressedTexImage2D; +#define glCompressedTexImage2D glad_debug_glCompressedTexImage2D +typedef void (APIENTRYP PFNGLCOMPRESSEDTEXIMAGE1DPROC)(GLenum target, GLint level, GLenum internalformat, GLsizei width, GLint border, GLsizei imageSize, const void *data); +GLAPI PFNGLCOMPRESSEDTEXIMAGE1DPROC glad_glCompressedTexImage1D; +GLAPI PFNGLCOMPRESSEDTEXIMAGE1DPROC glad_debug_glCompressedTexImage1D; +#define glCompressedTexImage1D glad_debug_glCompressedTexImage1D +typedef void (APIENTRYP PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC)(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const void *data); +GLAPI PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC glad_glCompressedTexSubImage3D; +GLAPI PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC 
glad_debug_glCompressedTexSubImage3D; +#define glCompressedTexSubImage3D glad_debug_glCompressedTexSubImage3D +typedef void (APIENTRYP PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC)(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void *data); +GLAPI PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC glad_glCompressedTexSubImage2D; +GLAPI PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC glad_debug_glCompressedTexSubImage2D; +#define glCompressedTexSubImage2D glad_debug_glCompressedTexSubImage2D +typedef void (APIENTRYP PFNGLCOMPRESSEDTEXSUBIMAGE1DPROC)(GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLsizei imageSize, const void *data); +GLAPI PFNGLCOMPRESSEDTEXSUBIMAGE1DPROC glad_glCompressedTexSubImage1D; +GLAPI PFNGLCOMPRESSEDTEXSUBIMAGE1DPROC glad_debug_glCompressedTexSubImage1D; +#define glCompressedTexSubImage1D glad_debug_glCompressedTexSubImage1D +typedef void (APIENTRYP PFNGLGETCOMPRESSEDTEXIMAGEPROC)(GLenum target, GLint level, void *img); +GLAPI PFNGLGETCOMPRESSEDTEXIMAGEPROC glad_glGetCompressedTexImage; +GLAPI PFNGLGETCOMPRESSEDTEXIMAGEPROC glad_debug_glGetCompressedTexImage; +#define glGetCompressedTexImage glad_debug_glGetCompressedTexImage +#endif +#ifndef GL_VERSION_1_4 +#define GL_VERSION_1_4 1 +GLAPI int GLAD_GL_VERSION_1_4; +typedef void (APIENTRYP PFNGLBLENDFUNCSEPARATEPROC)(GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha); +GLAPI PFNGLBLENDFUNCSEPARATEPROC glad_glBlendFuncSeparate; +GLAPI PFNGLBLENDFUNCSEPARATEPROC glad_debug_glBlendFuncSeparate; +#define glBlendFuncSeparate glad_debug_glBlendFuncSeparate +typedef void (APIENTRYP PFNGLMULTIDRAWARRAYSPROC)(GLenum mode, const GLint *first, const GLsizei *count, GLsizei drawcount); +GLAPI PFNGLMULTIDRAWARRAYSPROC glad_glMultiDrawArrays; +GLAPI PFNGLMULTIDRAWARRAYSPROC glad_debug_glMultiDrawArrays; +#define glMultiDrawArrays glad_debug_glMultiDrawArrays +typedef void (APIENTRYP PFNGLMULTIDRAWELEMENTSPROC)(GLenum mode, const GLsizei *count, GLenum type, const void *const*indices, GLsizei drawcount); +GLAPI PFNGLMULTIDRAWELEMENTSPROC glad_glMultiDrawElements; +GLAPI PFNGLMULTIDRAWELEMENTSPROC glad_debug_glMultiDrawElements; +#define glMultiDrawElements glad_debug_glMultiDrawElements +typedef void (APIENTRYP PFNGLPOINTPARAMETERFPROC)(GLenum pname, GLfloat param); +GLAPI PFNGLPOINTPARAMETERFPROC glad_glPointParameterf; +GLAPI PFNGLPOINTPARAMETERFPROC glad_debug_glPointParameterf; +#define glPointParameterf glad_debug_glPointParameterf +typedef void (APIENTRYP PFNGLPOINTPARAMETERFVPROC)(GLenum pname, const GLfloat *params); +GLAPI PFNGLPOINTPARAMETERFVPROC glad_glPointParameterfv; +GLAPI PFNGLPOINTPARAMETERFVPROC glad_debug_glPointParameterfv; +#define glPointParameterfv glad_debug_glPointParameterfv +typedef void (APIENTRYP PFNGLPOINTPARAMETERIPROC)(GLenum pname, GLint param); +GLAPI PFNGLPOINTPARAMETERIPROC glad_glPointParameteri; +GLAPI PFNGLPOINTPARAMETERIPROC glad_debug_glPointParameteri; +#define glPointParameteri glad_debug_glPointParameteri +typedef void (APIENTRYP PFNGLPOINTPARAMETERIVPROC)(GLenum pname, const GLint *params); +GLAPI PFNGLPOINTPARAMETERIVPROC glad_glPointParameteriv; +GLAPI PFNGLPOINTPARAMETERIVPROC glad_debug_glPointParameteriv; +#define glPointParameteriv glad_debug_glPointParameteriv +typedef void (APIENTRYP PFNGLBLENDCOLORPROC)(GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +GLAPI PFNGLBLENDCOLORPROC glad_glBlendColor; +GLAPI PFNGLBLENDCOLORPROC glad_debug_glBlendColor; +#define glBlendColor 
glad_debug_glBlendColor +typedef void (APIENTRYP PFNGLBLENDEQUATIONPROC)(GLenum mode); +GLAPI PFNGLBLENDEQUATIONPROC glad_glBlendEquation; +GLAPI PFNGLBLENDEQUATIONPROC glad_debug_glBlendEquation; +#define glBlendEquation glad_debug_glBlendEquation +#endif +#ifndef GL_VERSION_1_5 +#define GL_VERSION_1_5 1 +GLAPI int GLAD_GL_VERSION_1_5; +typedef void (APIENTRYP PFNGLGENQUERIESPROC)(GLsizei n, GLuint *ids); +GLAPI PFNGLGENQUERIESPROC glad_glGenQueries; +GLAPI PFNGLGENQUERIESPROC glad_debug_glGenQueries; +#define glGenQueries glad_debug_glGenQueries +typedef void (APIENTRYP PFNGLDELETEQUERIESPROC)(GLsizei n, const GLuint *ids); +GLAPI PFNGLDELETEQUERIESPROC glad_glDeleteQueries; +GLAPI PFNGLDELETEQUERIESPROC glad_debug_glDeleteQueries; +#define glDeleteQueries glad_debug_glDeleteQueries +typedef GLboolean (APIENTRYP PFNGLISQUERYPROC)(GLuint id); +GLAPI PFNGLISQUERYPROC glad_glIsQuery; +GLAPI PFNGLISQUERYPROC glad_debug_glIsQuery; +#define glIsQuery glad_debug_glIsQuery +typedef void (APIENTRYP PFNGLBEGINQUERYPROC)(GLenum target, GLuint id); +GLAPI PFNGLBEGINQUERYPROC glad_glBeginQuery; +GLAPI PFNGLBEGINQUERYPROC glad_debug_glBeginQuery; +#define glBeginQuery glad_debug_glBeginQuery +typedef void (APIENTRYP PFNGLENDQUERYPROC)(GLenum target); +GLAPI PFNGLENDQUERYPROC glad_glEndQuery; +GLAPI PFNGLENDQUERYPROC glad_debug_glEndQuery; +#define glEndQuery glad_debug_glEndQuery +typedef void (APIENTRYP PFNGLGETQUERYIVPROC)(GLenum target, GLenum pname, GLint *params); +GLAPI PFNGLGETQUERYIVPROC glad_glGetQueryiv; +GLAPI PFNGLGETQUERYIVPROC glad_debug_glGetQueryiv; +#define glGetQueryiv glad_debug_glGetQueryiv +typedef void (APIENTRYP PFNGLGETQUERYOBJECTIVPROC)(GLuint id, GLenum pname, GLint *params); +GLAPI PFNGLGETQUERYOBJECTIVPROC glad_glGetQueryObjectiv; +GLAPI PFNGLGETQUERYOBJECTIVPROC glad_debug_glGetQueryObjectiv; +#define glGetQueryObjectiv glad_debug_glGetQueryObjectiv +typedef void (APIENTRYP PFNGLGETQUERYOBJECTUIVPROC)(GLuint id, GLenum pname, GLuint *params); +GLAPI PFNGLGETQUERYOBJECTUIVPROC glad_glGetQueryObjectuiv; +GLAPI PFNGLGETQUERYOBJECTUIVPROC glad_debug_glGetQueryObjectuiv; +#define glGetQueryObjectuiv glad_debug_glGetQueryObjectuiv +typedef void (APIENTRYP PFNGLBINDBUFFERPROC)(GLenum target, GLuint buffer); +GLAPI PFNGLBINDBUFFERPROC glad_glBindBuffer; +GLAPI PFNGLBINDBUFFERPROC glad_debug_glBindBuffer; +#define glBindBuffer glad_debug_glBindBuffer +typedef void (APIENTRYP PFNGLDELETEBUFFERSPROC)(GLsizei n, const GLuint *buffers); +GLAPI PFNGLDELETEBUFFERSPROC glad_glDeleteBuffers; +GLAPI PFNGLDELETEBUFFERSPROC glad_debug_glDeleteBuffers; +#define glDeleteBuffers glad_debug_glDeleteBuffers +typedef void (APIENTRYP PFNGLGENBUFFERSPROC)(GLsizei n, GLuint *buffers); +GLAPI PFNGLGENBUFFERSPROC glad_glGenBuffers; +GLAPI PFNGLGENBUFFERSPROC glad_debug_glGenBuffers; +#define glGenBuffers glad_debug_glGenBuffers +typedef GLboolean (APIENTRYP PFNGLISBUFFERPROC)(GLuint buffer); +GLAPI PFNGLISBUFFERPROC glad_glIsBuffer; +GLAPI PFNGLISBUFFERPROC glad_debug_glIsBuffer; +#define glIsBuffer glad_debug_glIsBuffer +typedef void (APIENTRYP PFNGLBUFFERDATAPROC)(GLenum target, GLsizeiptr size, const void *data, GLenum usage); +GLAPI PFNGLBUFFERDATAPROC glad_glBufferData; +GLAPI PFNGLBUFFERDATAPROC glad_debug_glBufferData; +#define glBufferData glad_debug_glBufferData +typedef void (APIENTRYP PFNGLBUFFERSUBDATAPROC)(GLenum target, GLintptr offset, GLsizeiptr size, const void *data); +GLAPI PFNGLBUFFERSUBDATAPROC glad_glBufferSubData; +GLAPI PFNGLBUFFERSUBDATAPROC glad_debug_glBufferSubData; +#define 
glBufferSubData glad_debug_glBufferSubData +typedef void (APIENTRYP PFNGLGETBUFFERSUBDATAPROC)(GLenum target, GLintptr offset, GLsizeiptr size, void *data); +GLAPI PFNGLGETBUFFERSUBDATAPROC glad_glGetBufferSubData; +GLAPI PFNGLGETBUFFERSUBDATAPROC glad_debug_glGetBufferSubData; +#define glGetBufferSubData glad_debug_glGetBufferSubData +typedef void * (APIENTRYP PFNGLMAPBUFFERPROC)(GLenum target, GLenum access); +GLAPI PFNGLMAPBUFFERPROC glad_glMapBuffer; +GLAPI PFNGLMAPBUFFERPROC glad_debug_glMapBuffer; +#define glMapBuffer glad_debug_glMapBuffer +typedef GLboolean (APIENTRYP PFNGLUNMAPBUFFERPROC)(GLenum target); +GLAPI PFNGLUNMAPBUFFERPROC glad_glUnmapBuffer; +GLAPI PFNGLUNMAPBUFFERPROC glad_debug_glUnmapBuffer; +#define glUnmapBuffer glad_debug_glUnmapBuffer +typedef void (APIENTRYP PFNGLGETBUFFERPARAMETERIVPROC)(GLenum target, GLenum pname, GLint *params); +GLAPI PFNGLGETBUFFERPARAMETERIVPROC glad_glGetBufferParameteriv; +GLAPI PFNGLGETBUFFERPARAMETERIVPROC glad_debug_glGetBufferParameteriv; +#define glGetBufferParameteriv glad_debug_glGetBufferParameteriv +typedef void (APIENTRYP PFNGLGETBUFFERPOINTERVPROC)(GLenum target, GLenum pname, void **params); +GLAPI PFNGLGETBUFFERPOINTERVPROC glad_glGetBufferPointerv; +GLAPI PFNGLGETBUFFERPOINTERVPROC glad_debug_glGetBufferPointerv; +#define glGetBufferPointerv glad_debug_glGetBufferPointerv +#endif +#ifndef GL_VERSION_2_0 +#define GL_VERSION_2_0 1 +GLAPI int GLAD_GL_VERSION_2_0; +typedef void (APIENTRYP PFNGLBLENDEQUATIONSEPARATEPROC)(GLenum modeRGB, GLenum modeAlpha); +GLAPI PFNGLBLENDEQUATIONSEPARATEPROC glad_glBlendEquationSeparate; +GLAPI PFNGLBLENDEQUATIONSEPARATEPROC glad_debug_glBlendEquationSeparate; +#define glBlendEquationSeparate glad_debug_glBlendEquationSeparate +typedef void (APIENTRYP PFNGLDRAWBUFFERSPROC)(GLsizei n, const GLenum *bufs); +GLAPI PFNGLDRAWBUFFERSPROC glad_glDrawBuffers; +GLAPI PFNGLDRAWBUFFERSPROC glad_debug_glDrawBuffers; +#define glDrawBuffers glad_debug_glDrawBuffers +typedef void (APIENTRYP PFNGLSTENCILOPSEPARATEPROC)(GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass); +GLAPI PFNGLSTENCILOPSEPARATEPROC glad_glStencilOpSeparate; +GLAPI PFNGLSTENCILOPSEPARATEPROC glad_debug_glStencilOpSeparate; +#define glStencilOpSeparate glad_debug_glStencilOpSeparate +typedef void (APIENTRYP PFNGLSTENCILFUNCSEPARATEPROC)(GLenum face, GLenum func, GLint ref, GLuint mask); +GLAPI PFNGLSTENCILFUNCSEPARATEPROC glad_glStencilFuncSeparate; +GLAPI PFNGLSTENCILFUNCSEPARATEPROC glad_debug_glStencilFuncSeparate; +#define glStencilFuncSeparate glad_debug_glStencilFuncSeparate +typedef void (APIENTRYP PFNGLSTENCILMASKSEPARATEPROC)(GLenum face, GLuint mask); +GLAPI PFNGLSTENCILMASKSEPARATEPROC glad_glStencilMaskSeparate; +GLAPI PFNGLSTENCILMASKSEPARATEPROC glad_debug_glStencilMaskSeparate; +#define glStencilMaskSeparate glad_debug_glStencilMaskSeparate +typedef void (APIENTRYP PFNGLATTACHSHADERPROC)(GLuint program, GLuint shader); +GLAPI PFNGLATTACHSHADERPROC glad_glAttachShader; +GLAPI PFNGLATTACHSHADERPROC glad_debug_glAttachShader; +#define glAttachShader glad_debug_glAttachShader +typedef void (APIENTRYP PFNGLBINDATTRIBLOCATIONPROC)(GLuint program, GLuint index, const GLchar *name); +GLAPI PFNGLBINDATTRIBLOCATIONPROC glad_glBindAttribLocation; +GLAPI PFNGLBINDATTRIBLOCATIONPROC glad_debug_glBindAttribLocation; +#define glBindAttribLocation glad_debug_glBindAttribLocation +typedef void (APIENTRYP PFNGLCOMPILESHADERPROC)(GLuint shader); +GLAPI PFNGLCOMPILESHADERPROC glad_glCompileShader; +GLAPI PFNGLCOMPILESHADERPROC 
glad_debug_glCompileShader; +#define glCompileShader glad_debug_glCompileShader +typedef GLuint (APIENTRYP PFNGLCREATEPROGRAMPROC)(void); +GLAPI PFNGLCREATEPROGRAMPROC glad_glCreateProgram; +GLAPI PFNGLCREATEPROGRAMPROC glad_debug_glCreateProgram; +#define glCreateProgram glad_debug_glCreateProgram +typedef GLuint (APIENTRYP PFNGLCREATESHADERPROC)(GLenum type); +GLAPI PFNGLCREATESHADERPROC glad_glCreateShader; +GLAPI PFNGLCREATESHADERPROC glad_debug_glCreateShader; +#define glCreateShader glad_debug_glCreateShader +typedef void (APIENTRYP PFNGLDELETEPROGRAMPROC)(GLuint program); +GLAPI PFNGLDELETEPROGRAMPROC glad_glDeleteProgram; +GLAPI PFNGLDELETEPROGRAMPROC glad_debug_glDeleteProgram; +#define glDeleteProgram glad_debug_glDeleteProgram +typedef void (APIENTRYP PFNGLDELETESHADERPROC)(GLuint shader); +GLAPI PFNGLDELETESHADERPROC glad_glDeleteShader; +GLAPI PFNGLDELETESHADERPROC glad_debug_glDeleteShader; +#define glDeleteShader glad_debug_glDeleteShader +typedef void (APIENTRYP PFNGLDETACHSHADERPROC)(GLuint program, GLuint shader); +GLAPI PFNGLDETACHSHADERPROC glad_glDetachShader; +GLAPI PFNGLDETACHSHADERPROC glad_debug_glDetachShader; +#define glDetachShader glad_debug_glDetachShader +typedef void (APIENTRYP PFNGLDISABLEVERTEXATTRIBARRAYPROC)(GLuint index); +GLAPI PFNGLDISABLEVERTEXATTRIBARRAYPROC glad_glDisableVertexAttribArray; +GLAPI PFNGLDISABLEVERTEXATTRIBARRAYPROC glad_debug_glDisableVertexAttribArray; +#define glDisableVertexAttribArray glad_debug_glDisableVertexAttribArray +typedef void (APIENTRYP PFNGLENABLEVERTEXATTRIBARRAYPROC)(GLuint index); +GLAPI PFNGLENABLEVERTEXATTRIBARRAYPROC glad_glEnableVertexAttribArray; +GLAPI PFNGLENABLEVERTEXATTRIBARRAYPROC glad_debug_glEnableVertexAttribArray; +#define glEnableVertexAttribArray glad_debug_glEnableVertexAttribArray +typedef void (APIENTRYP PFNGLGETACTIVEATTRIBPROC)(GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); +GLAPI PFNGLGETACTIVEATTRIBPROC glad_glGetActiveAttrib; +GLAPI PFNGLGETACTIVEATTRIBPROC glad_debug_glGetActiveAttrib; +#define glGetActiveAttrib glad_debug_glGetActiveAttrib +typedef void (APIENTRYP PFNGLGETACTIVEUNIFORMPROC)(GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); +GLAPI PFNGLGETACTIVEUNIFORMPROC glad_glGetActiveUniform; +GLAPI PFNGLGETACTIVEUNIFORMPROC glad_debug_glGetActiveUniform; +#define glGetActiveUniform glad_debug_glGetActiveUniform +typedef void (APIENTRYP PFNGLGETATTACHEDSHADERSPROC)(GLuint program, GLsizei maxCount, GLsizei *count, GLuint *shaders); +GLAPI PFNGLGETATTACHEDSHADERSPROC glad_glGetAttachedShaders; +GLAPI PFNGLGETATTACHEDSHADERSPROC glad_debug_glGetAttachedShaders; +#define glGetAttachedShaders glad_debug_glGetAttachedShaders +typedef GLint (APIENTRYP PFNGLGETATTRIBLOCATIONPROC)(GLuint program, const GLchar *name); +GLAPI PFNGLGETATTRIBLOCATIONPROC glad_glGetAttribLocation; +GLAPI PFNGLGETATTRIBLOCATIONPROC glad_debug_glGetAttribLocation; +#define glGetAttribLocation glad_debug_glGetAttribLocation +typedef void (APIENTRYP PFNGLGETPROGRAMIVPROC)(GLuint program, GLenum pname, GLint *params); +GLAPI PFNGLGETPROGRAMIVPROC glad_glGetProgramiv; +GLAPI PFNGLGETPROGRAMIVPROC glad_debug_glGetProgramiv; +#define glGetProgramiv glad_debug_glGetProgramiv +typedef void (APIENTRYP PFNGLGETPROGRAMINFOLOGPROC)(GLuint program, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +GLAPI PFNGLGETPROGRAMINFOLOGPROC glad_glGetProgramInfoLog; +GLAPI PFNGLGETPROGRAMINFOLOGPROC 
glad_debug_glGetProgramInfoLog; +#define glGetProgramInfoLog glad_debug_glGetProgramInfoLog +typedef void (APIENTRYP PFNGLGETSHADERIVPROC)(GLuint shader, GLenum pname, GLint *params); +GLAPI PFNGLGETSHADERIVPROC glad_glGetShaderiv; +GLAPI PFNGLGETSHADERIVPROC glad_debug_glGetShaderiv; +#define glGetShaderiv glad_debug_glGetShaderiv +typedef void (APIENTRYP PFNGLGETSHADERINFOLOGPROC)(GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +GLAPI PFNGLGETSHADERINFOLOGPROC glad_glGetShaderInfoLog; +GLAPI PFNGLGETSHADERINFOLOGPROC glad_debug_glGetShaderInfoLog; +#define glGetShaderInfoLog glad_debug_glGetShaderInfoLog +typedef void (APIENTRYP PFNGLGETSHADERSOURCEPROC)(GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *source); +GLAPI PFNGLGETSHADERSOURCEPROC glad_glGetShaderSource; +GLAPI PFNGLGETSHADERSOURCEPROC glad_debug_glGetShaderSource; +#define glGetShaderSource glad_debug_glGetShaderSource +typedef GLint (APIENTRYP PFNGLGETUNIFORMLOCATIONPROC)(GLuint program, const GLchar *name); +GLAPI PFNGLGETUNIFORMLOCATIONPROC glad_glGetUniformLocation; +GLAPI PFNGLGETUNIFORMLOCATIONPROC glad_debug_glGetUniformLocation; +#define glGetUniformLocation glad_debug_glGetUniformLocation +typedef void (APIENTRYP PFNGLGETUNIFORMFVPROC)(GLuint program, GLint location, GLfloat *params); +GLAPI PFNGLGETUNIFORMFVPROC glad_glGetUniformfv; +GLAPI PFNGLGETUNIFORMFVPROC glad_debug_glGetUniformfv; +#define glGetUniformfv glad_debug_glGetUniformfv +typedef void (APIENTRYP PFNGLGETUNIFORMIVPROC)(GLuint program, GLint location, GLint *params); +GLAPI PFNGLGETUNIFORMIVPROC glad_glGetUniformiv; +GLAPI PFNGLGETUNIFORMIVPROC glad_debug_glGetUniformiv; +#define glGetUniformiv glad_debug_glGetUniformiv +typedef void (APIENTRYP PFNGLGETVERTEXATTRIBDVPROC)(GLuint index, GLenum pname, GLdouble *params); +GLAPI PFNGLGETVERTEXATTRIBDVPROC glad_glGetVertexAttribdv; +GLAPI PFNGLGETVERTEXATTRIBDVPROC glad_debug_glGetVertexAttribdv; +#define glGetVertexAttribdv glad_debug_glGetVertexAttribdv +typedef void (APIENTRYP PFNGLGETVERTEXATTRIBFVPROC)(GLuint index, GLenum pname, GLfloat *params); +GLAPI PFNGLGETVERTEXATTRIBFVPROC glad_glGetVertexAttribfv; +GLAPI PFNGLGETVERTEXATTRIBFVPROC glad_debug_glGetVertexAttribfv; +#define glGetVertexAttribfv glad_debug_glGetVertexAttribfv +typedef void (APIENTRYP PFNGLGETVERTEXATTRIBIVPROC)(GLuint index, GLenum pname, GLint *params); +GLAPI PFNGLGETVERTEXATTRIBIVPROC glad_glGetVertexAttribiv; +GLAPI PFNGLGETVERTEXATTRIBIVPROC glad_debug_glGetVertexAttribiv; +#define glGetVertexAttribiv glad_debug_glGetVertexAttribiv +typedef void (APIENTRYP PFNGLGETVERTEXATTRIBPOINTERVPROC)(GLuint index, GLenum pname, void **pointer); +GLAPI PFNGLGETVERTEXATTRIBPOINTERVPROC glad_glGetVertexAttribPointerv; +GLAPI PFNGLGETVERTEXATTRIBPOINTERVPROC glad_debug_glGetVertexAttribPointerv; +#define glGetVertexAttribPointerv glad_debug_glGetVertexAttribPointerv +typedef GLboolean (APIENTRYP PFNGLISPROGRAMPROC)(GLuint program); +GLAPI PFNGLISPROGRAMPROC glad_glIsProgram; +GLAPI PFNGLISPROGRAMPROC glad_debug_glIsProgram; +#define glIsProgram glad_debug_glIsProgram +typedef GLboolean (APIENTRYP PFNGLISSHADERPROC)(GLuint shader); +GLAPI PFNGLISSHADERPROC glad_glIsShader; +GLAPI PFNGLISSHADERPROC glad_debug_glIsShader; +#define glIsShader glad_debug_glIsShader +typedef void (APIENTRYP PFNGLLINKPROGRAMPROC)(GLuint program); +GLAPI PFNGLLINKPROGRAMPROC glad_glLinkProgram; +GLAPI PFNGLLINKPROGRAMPROC glad_debug_glLinkProgram; +#define glLinkProgram glad_debug_glLinkProgram +typedef void (APIENTRYP 
PFNGLSHADERSOURCEPROC)(GLuint shader, GLsizei count, const GLchar *const*string, const GLint *length); +GLAPI PFNGLSHADERSOURCEPROC glad_glShaderSource; +GLAPI PFNGLSHADERSOURCEPROC glad_debug_glShaderSource; +#define glShaderSource glad_debug_glShaderSource +typedef void (APIENTRYP PFNGLUSEPROGRAMPROC)(GLuint program); +GLAPI PFNGLUSEPROGRAMPROC glad_glUseProgram; +GLAPI PFNGLUSEPROGRAMPROC glad_debug_glUseProgram; +#define glUseProgram glad_debug_glUseProgram +typedef void (APIENTRYP PFNGLUNIFORM1FPROC)(GLint location, GLfloat v0); +GLAPI PFNGLUNIFORM1FPROC glad_glUniform1f; +GLAPI PFNGLUNIFORM1FPROC glad_debug_glUniform1f; +#define glUniform1f glad_debug_glUniform1f +typedef void (APIENTRYP PFNGLUNIFORM2FPROC)(GLint location, GLfloat v0, GLfloat v1); +GLAPI PFNGLUNIFORM2FPROC glad_glUniform2f; +GLAPI PFNGLUNIFORM2FPROC glad_debug_glUniform2f; +#define glUniform2f glad_debug_glUniform2f +typedef void (APIENTRYP PFNGLUNIFORM3FPROC)(GLint location, GLfloat v0, GLfloat v1, GLfloat v2); +GLAPI PFNGLUNIFORM3FPROC glad_glUniform3f; +GLAPI PFNGLUNIFORM3FPROC glad_debug_glUniform3f; +#define glUniform3f glad_debug_glUniform3f +typedef void (APIENTRYP PFNGLUNIFORM4FPROC)(GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); +GLAPI PFNGLUNIFORM4FPROC glad_glUniform4f; +GLAPI PFNGLUNIFORM4FPROC glad_debug_glUniform4f; +#define glUniform4f glad_debug_glUniform4f +typedef void (APIENTRYP PFNGLUNIFORM1IPROC)(GLint location, GLint v0); +GLAPI PFNGLUNIFORM1IPROC glad_glUniform1i; +GLAPI PFNGLUNIFORM1IPROC glad_debug_glUniform1i; +#define glUniform1i glad_debug_glUniform1i +typedef void (APIENTRYP PFNGLUNIFORM2IPROC)(GLint location, GLint v0, GLint v1); +GLAPI PFNGLUNIFORM2IPROC glad_glUniform2i; +GLAPI PFNGLUNIFORM2IPROC glad_debug_glUniform2i; +#define glUniform2i glad_debug_glUniform2i +typedef void (APIENTRYP PFNGLUNIFORM3IPROC)(GLint location, GLint v0, GLint v1, GLint v2); +GLAPI PFNGLUNIFORM3IPROC glad_glUniform3i; +GLAPI PFNGLUNIFORM3IPROC glad_debug_glUniform3i; +#define glUniform3i glad_debug_glUniform3i +typedef void (APIENTRYP PFNGLUNIFORM4IPROC)(GLint location, GLint v0, GLint v1, GLint v2, GLint v3); +GLAPI PFNGLUNIFORM4IPROC glad_glUniform4i; +GLAPI PFNGLUNIFORM4IPROC glad_debug_glUniform4i; +#define glUniform4i glad_debug_glUniform4i +typedef void (APIENTRYP PFNGLUNIFORM1FVPROC)(GLint location, GLsizei count, const GLfloat *value); +GLAPI PFNGLUNIFORM1FVPROC glad_glUniform1fv; +GLAPI PFNGLUNIFORM1FVPROC glad_debug_glUniform1fv; +#define glUniform1fv glad_debug_glUniform1fv +typedef void (APIENTRYP PFNGLUNIFORM2FVPROC)(GLint location, GLsizei count, const GLfloat *value); +GLAPI PFNGLUNIFORM2FVPROC glad_glUniform2fv; +GLAPI PFNGLUNIFORM2FVPROC glad_debug_glUniform2fv; +#define glUniform2fv glad_debug_glUniform2fv +typedef void (APIENTRYP PFNGLUNIFORM3FVPROC)(GLint location, GLsizei count, const GLfloat *value); +GLAPI PFNGLUNIFORM3FVPROC glad_glUniform3fv; +GLAPI PFNGLUNIFORM3FVPROC glad_debug_glUniform3fv; +#define glUniform3fv glad_debug_glUniform3fv +typedef void (APIENTRYP PFNGLUNIFORM4FVPROC)(GLint location, GLsizei count, const GLfloat *value); +GLAPI PFNGLUNIFORM4FVPROC glad_glUniform4fv; +GLAPI PFNGLUNIFORM4FVPROC glad_debug_glUniform4fv; +#define glUniform4fv glad_debug_glUniform4fv +typedef void (APIENTRYP PFNGLUNIFORM1IVPROC)(GLint location, GLsizei count, const GLint *value); +GLAPI PFNGLUNIFORM1IVPROC glad_glUniform1iv; +GLAPI PFNGLUNIFORM1IVPROC glad_debug_glUniform1iv; +#define glUniform1iv glad_debug_glUniform1iv +typedef void (APIENTRYP 
PFNGLUNIFORM2IVPROC)(GLint location, GLsizei count, const GLint *value); +GLAPI PFNGLUNIFORM2IVPROC glad_glUniform2iv; +GLAPI PFNGLUNIFORM2IVPROC glad_debug_glUniform2iv; +#define glUniform2iv glad_debug_glUniform2iv +typedef void (APIENTRYP PFNGLUNIFORM3IVPROC)(GLint location, GLsizei count, const GLint *value); +GLAPI PFNGLUNIFORM3IVPROC glad_glUniform3iv; +GLAPI PFNGLUNIFORM3IVPROC glad_debug_glUniform3iv; +#define glUniform3iv glad_debug_glUniform3iv +typedef void (APIENTRYP PFNGLUNIFORM4IVPROC)(GLint location, GLsizei count, const GLint *value); +GLAPI PFNGLUNIFORM4IVPROC glad_glUniform4iv; +GLAPI PFNGLUNIFORM4IVPROC glad_debug_glUniform4iv; +#define glUniform4iv glad_debug_glUniform4iv +typedef void (APIENTRYP PFNGLUNIFORMMATRIX2FVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI PFNGLUNIFORMMATRIX2FVPROC glad_glUniformMatrix2fv; +GLAPI PFNGLUNIFORMMATRIX2FVPROC glad_debug_glUniformMatrix2fv; +#define glUniformMatrix2fv glad_debug_glUniformMatrix2fv +typedef void (APIENTRYP PFNGLUNIFORMMATRIX3FVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI PFNGLUNIFORMMATRIX3FVPROC glad_glUniformMatrix3fv; +GLAPI PFNGLUNIFORMMATRIX3FVPROC glad_debug_glUniformMatrix3fv; +#define glUniformMatrix3fv glad_debug_glUniformMatrix3fv +typedef void (APIENTRYP PFNGLUNIFORMMATRIX4FVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI PFNGLUNIFORMMATRIX4FVPROC glad_glUniformMatrix4fv; +GLAPI PFNGLUNIFORMMATRIX4FVPROC glad_debug_glUniformMatrix4fv; +#define glUniformMatrix4fv glad_debug_glUniformMatrix4fv +typedef void (APIENTRYP PFNGLVALIDATEPROGRAMPROC)(GLuint program); +GLAPI PFNGLVALIDATEPROGRAMPROC glad_glValidateProgram; +GLAPI PFNGLVALIDATEPROGRAMPROC glad_debug_glValidateProgram; +#define glValidateProgram glad_debug_glValidateProgram +typedef void (APIENTRYP PFNGLVERTEXATTRIB1DPROC)(GLuint index, GLdouble x); +GLAPI PFNGLVERTEXATTRIB1DPROC glad_glVertexAttrib1d; +GLAPI PFNGLVERTEXATTRIB1DPROC glad_debug_glVertexAttrib1d; +#define glVertexAttrib1d glad_debug_glVertexAttrib1d +typedef void (APIENTRYP PFNGLVERTEXATTRIB1DVPROC)(GLuint index, const GLdouble *v); +GLAPI PFNGLVERTEXATTRIB1DVPROC glad_glVertexAttrib1dv; +GLAPI PFNGLVERTEXATTRIB1DVPROC glad_debug_glVertexAttrib1dv; +#define glVertexAttrib1dv glad_debug_glVertexAttrib1dv +typedef void (APIENTRYP PFNGLVERTEXATTRIB1FPROC)(GLuint index, GLfloat x); +GLAPI PFNGLVERTEXATTRIB1FPROC glad_glVertexAttrib1f; +GLAPI PFNGLVERTEXATTRIB1FPROC glad_debug_glVertexAttrib1f; +#define glVertexAttrib1f glad_debug_glVertexAttrib1f +typedef void (APIENTRYP PFNGLVERTEXATTRIB1FVPROC)(GLuint index, const GLfloat *v); +GLAPI PFNGLVERTEXATTRIB1FVPROC glad_glVertexAttrib1fv; +GLAPI PFNGLVERTEXATTRIB1FVPROC glad_debug_glVertexAttrib1fv; +#define glVertexAttrib1fv glad_debug_glVertexAttrib1fv +typedef void (APIENTRYP PFNGLVERTEXATTRIB1SPROC)(GLuint index, GLshort x); +GLAPI PFNGLVERTEXATTRIB1SPROC glad_glVertexAttrib1s; +GLAPI PFNGLVERTEXATTRIB1SPROC glad_debug_glVertexAttrib1s; +#define glVertexAttrib1s glad_debug_glVertexAttrib1s +typedef void (APIENTRYP PFNGLVERTEXATTRIB1SVPROC)(GLuint index, const GLshort *v); +GLAPI PFNGLVERTEXATTRIB1SVPROC glad_glVertexAttrib1sv; +GLAPI PFNGLVERTEXATTRIB1SVPROC glad_debug_glVertexAttrib1sv; +#define glVertexAttrib1sv glad_debug_glVertexAttrib1sv +typedef void (APIENTRYP PFNGLVERTEXATTRIB2DPROC)(GLuint index, GLdouble x, GLdouble y); +GLAPI PFNGLVERTEXATTRIB2DPROC glad_glVertexAttrib2d; +GLAPI PFNGLVERTEXATTRIB2DPROC 
glad_debug_glVertexAttrib2d; +#define glVertexAttrib2d glad_debug_glVertexAttrib2d +typedef void (APIENTRYP PFNGLVERTEXATTRIB2DVPROC)(GLuint index, const GLdouble *v); +GLAPI PFNGLVERTEXATTRIB2DVPROC glad_glVertexAttrib2dv; +GLAPI PFNGLVERTEXATTRIB2DVPROC glad_debug_glVertexAttrib2dv; +#define glVertexAttrib2dv glad_debug_glVertexAttrib2dv +typedef void (APIENTRYP PFNGLVERTEXATTRIB2FPROC)(GLuint index, GLfloat x, GLfloat y); +GLAPI PFNGLVERTEXATTRIB2FPROC glad_glVertexAttrib2f; +GLAPI PFNGLVERTEXATTRIB2FPROC glad_debug_glVertexAttrib2f; +#define glVertexAttrib2f glad_debug_glVertexAttrib2f +typedef void (APIENTRYP PFNGLVERTEXATTRIB2FVPROC)(GLuint index, const GLfloat *v); +GLAPI PFNGLVERTEXATTRIB2FVPROC glad_glVertexAttrib2fv; +GLAPI PFNGLVERTEXATTRIB2FVPROC glad_debug_glVertexAttrib2fv; +#define glVertexAttrib2fv glad_debug_glVertexAttrib2fv +typedef void (APIENTRYP PFNGLVERTEXATTRIB2SPROC)(GLuint index, GLshort x, GLshort y); +GLAPI PFNGLVERTEXATTRIB2SPROC glad_glVertexAttrib2s; +GLAPI PFNGLVERTEXATTRIB2SPROC glad_debug_glVertexAttrib2s; +#define glVertexAttrib2s glad_debug_glVertexAttrib2s +typedef void (APIENTRYP PFNGLVERTEXATTRIB2SVPROC)(GLuint index, const GLshort *v); +GLAPI PFNGLVERTEXATTRIB2SVPROC glad_glVertexAttrib2sv; +GLAPI PFNGLVERTEXATTRIB2SVPROC glad_debug_glVertexAttrib2sv; +#define glVertexAttrib2sv glad_debug_glVertexAttrib2sv +typedef void (APIENTRYP PFNGLVERTEXATTRIB3DPROC)(GLuint index, GLdouble x, GLdouble y, GLdouble z); +GLAPI PFNGLVERTEXATTRIB3DPROC glad_glVertexAttrib3d; +GLAPI PFNGLVERTEXATTRIB3DPROC glad_debug_glVertexAttrib3d; +#define glVertexAttrib3d glad_debug_glVertexAttrib3d +typedef void (APIENTRYP PFNGLVERTEXATTRIB3DVPROC)(GLuint index, const GLdouble *v); +GLAPI PFNGLVERTEXATTRIB3DVPROC glad_glVertexAttrib3dv; +GLAPI PFNGLVERTEXATTRIB3DVPROC glad_debug_glVertexAttrib3dv; +#define glVertexAttrib3dv glad_debug_glVertexAttrib3dv +typedef void (APIENTRYP PFNGLVERTEXATTRIB3FPROC)(GLuint index, GLfloat x, GLfloat y, GLfloat z); +GLAPI PFNGLVERTEXATTRIB3FPROC glad_glVertexAttrib3f; +GLAPI PFNGLVERTEXATTRIB3FPROC glad_debug_glVertexAttrib3f; +#define glVertexAttrib3f glad_debug_glVertexAttrib3f +typedef void (APIENTRYP PFNGLVERTEXATTRIB3FVPROC)(GLuint index, const GLfloat *v); +GLAPI PFNGLVERTEXATTRIB3FVPROC glad_glVertexAttrib3fv; +GLAPI PFNGLVERTEXATTRIB3FVPROC glad_debug_glVertexAttrib3fv; +#define glVertexAttrib3fv glad_debug_glVertexAttrib3fv +typedef void (APIENTRYP PFNGLVERTEXATTRIB3SPROC)(GLuint index, GLshort x, GLshort y, GLshort z); +GLAPI PFNGLVERTEXATTRIB3SPROC glad_glVertexAttrib3s; +GLAPI PFNGLVERTEXATTRIB3SPROC glad_debug_glVertexAttrib3s; +#define glVertexAttrib3s glad_debug_glVertexAttrib3s +typedef void (APIENTRYP PFNGLVERTEXATTRIB3SVPROC)(GLuint index, const GLshort *v); +GLAPI PFNGLVERTEXATTRIB3SVPROC glad_glVertexAttrib3sv; +GLAPI PFNGLVERTEXATTRIB3SVPROC glad_debug_glVertexAttrib3sv; +#define glVertexAttrib3sv glad_debug_glVertexAttrib3sv +typedef void (APIENTRYP PFNGLVERTEXATTRIB4NBVPROC)(GLuint index, const GLbyte *v); +GLAPI PFNGLVERTEXATTRIB4NBVPROC glad_glVertexAttrib4Nbv; +GLAPI PFNGLVERTEXATTRIB4NBVPROC glad_debug_glVertexAttrib4Nbv; +#define glVertexAttrib4Nbv glad_debug_glVertexAttrib4Nbv +typedef void (APIENTRYP PFNGLVERTEXATTRIB4NIVPROC)(GLuint index, const GLint *v); +GLAPI PFNGLVERTEXATTRIB4NIVPROC glad_glVertexAttrib4Niv; +GLAPI PFNGLVERTEXATTRIB4NIVPROC glad_debug_glVertexAttrib4Niv; +#define glVertexAttrib4Niv glad_debug_glVertexAttrib4Niv +typedef void (APIENTRYP PFNGLVERTEXATTRIB4NSVPROC)(GLuint index, const GLshort 
*v); +GLAPI PFNGLVERTEXATTRIB4NSVPROC glad_glVertexAttrib4Nsv; +GLAPI PFNGLVERTEXATTRIB4NSVPROC glad_debug_glVertexAttrib4Nsv; +#define glVertexAttrib4Nsv glad_debug_glVertexAttrib4Nsv +typedef void (APIENTRYP PFNGLVERTEXATTRIB4NUBPROC)(GLuint index, GLubyte x, GLubyte y, GLubyte z, GLubyte w); +GLAPI PFNGLVERTEXATTRIB4NUBPROC glad_glVertexAttrib4Nub; +GLAPI PFNGLVERTEXATTRIB4NUBPROC glad_debug_glVertexAttrib4Nub; +#define glVertexAttrib4Nub glad_debug_glVertexAttrib4Nub +typedef void (APIENTRYP PFNGLVERTEXATTRIB4NUBVPROC)(GLuint index, const GLubyte *v); +GLAPI PFNGLVERTEXATTRIB4NUBVPROC glad_glVertexAttrib4Nubv; +GLAPI PFNGLVERTEXATTRIB4NUBVPROC glad_debug_glVertexAttrib4Nubv; +#define glVertexAttrib4Nubv glad_debug_glVertexAttrib4Nubv +typedef void (APIENTRYP PFNGLVERTEXATTRIB4NUIVPROC)(GLuint index, const GLuint *v); +GLAPI PFNGLVERTEXATTRIB4NUIVPROC glad_glVertexAttrib4Nuiv; +GLAPI PFNGLVERTEXATTRIB4NUIVPROC glad_debug_glVertexAttrib4Nuiv; +#define glVertexAttrib4Nuiv glad_debug_glVertexAttrib4Nuiv +typedef void (APIENTRYP PFNGLVERTEXATTRIB4NUSVPROC)(GLuint index, const GLushort *v); +GLAPI PFNGLVERTEXATTRIB4NUSVPROC glad_glVertexAttrib4Nusv; +GLAPI PFNGLVERTEXATTRIB4NUSVPROC glad_debug_glVertexAttrib4Nusv; +#define glVertexAttrib4Nusv glad_debug_glVertexAttrib4Nusv +typedef void (APIENTRYP PFNGLVERTEXATTRIB4BVPROC)(GLuint index, const GLbyte *v); +GLAPI PFNGLVERTEXATTRIB4BVPROC glad_glVertexAttrib4bv; +GLAPI PFNGLVERTEXATTRIB4BVPROC glad_debug_glVertexAttrib4bv; +#define glVertexAttrib4bv glad_debug_glVertexAttrib4bv +typedef void (APIENTRYP PFNGLVERTEXATTRIB4DPROC)(GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w); +GLAPI PFNGLVERTEXATTRIB4DPROC glad_glVertexAttrib4d; +GLAPI PFNGLVERTEXATTRIB4DPROC glad_debug_glVertexAttrib4d; +#define glVertexAttrib4d glad_debug_glVertexAttrib4d +typedef void (APIENTRYP PFNGLVERTEXATTRIB4DVPROC)(GLuint index, const GLdouble *v); +GLAPI PFNGLVERTEXATTRIB4DVPROC glad_glVertexAttrib4dv; +GLAPI PFNGLVERTEXATTRIB4DVPROC glad_debug_glVertexAttrib4dv; +#define glVertexAttrib4dv glad_debug_glVertexAttrib4dv +typedef void (APIENTRYP PFNGLVERTEXATTRIB4FPROC)(GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +GLAPI PFNGLVERTEXATTRIB4FPROC glad_glVertexAttrib4f; +GLAPI PFNGLVERTEXATTRIB4FPROC glad_debug_glVertexAttrib4f; +#define glVertexAttrib4f glad_debug_glVertexAttrib4f +typedef void (APIENTRYP PFNGLVERTEXATTRIB4FVPROC)(GLuint index, const GLfloat *v); +GLAPI PFNGLVERTEXATTRIB4FVPROC glad_glVertexAttrib4fv; +GLAPI PFNGLVERTEXATTRIB4FVPROC glad_debug_glVertexAttrib4fv; +#define glVertexAttrib4fv glad_debug_glVertexAttrib4fv +typedef void (APIENTRYP PFNGLVERTEXATTRIB4IVPROC)(GLuint index, const GLint *v); +GLAPI PFNGLVERTEXATTRIB4IVPROC glad_glVertexAttrib4iv; +GLAPI PFNGLVERTEXATTRIB4IVPROC glad_debug_glVertexAttrib4iv; +#define glVertexAttrib4iv glad_debug_glVertexAttrib4iv +typedef void (APIENTRYP PFNGLVERTEXATTRIB4SPROC)(GLuint index, GLshort x, GLshort y, GLshort z, GLshort w); +GLAPI PFNGLVERTEXATTRIB4SPROC glad_glVertexAttrib4s; +GLAPI PFNGLVERTEXATTRIB4SPROC glad_debug_glVertexAttrib4s; +#define glVertexAttrib4s glad_debug_glVertexAttrib4s +typedef void (APIENTRYP PFNGLVERTEXATTRIB4SVPROC)(GLuint index, const GLshort *v); +GLAPI PFNGLVERTEXATTRIB4SVPROC glad_glVertexAttrib4sv; +GLAPI PFNGLVERTEXATTRIB4SVPROC glad_debug_glVertexAttrib4sv; +#define glVertexAttrib4sv glad_debug_glVertexAttrib4sv +typedef void (APIENTRYP PFNGLVERTEXATTRIB4UBVPROC)(GLuint index, const GLubyte *v); +GLAPI PFNGLVERTEXATTRIB4UBVPROC 
glad_glVertexAttrib4ubv; +GLAPI PFNGLVERTEXATTRIB4UBVPROC glad_debug_glVertexAttrib4ubv; +#define glVertexAttrib4ubv glad_debug_glVertexAttrib4ubv +typedef void (APIENTRYP PFNGLVERTEXATTRIB4UIVPROC)(GLuint index, const GLuint *v); +GLAPI PFNGLVERTEXATTRIB4UIVPROC glad_glVertexAttrib4uiv; +GLAPI PFNGLVERTEXATTRIB4UIVPROC glad_debug_glVertexAttrib4uiv; +#define glVertexAttrib4uiv glad_debug_glVertexAttrib4uiv +typedef void (APIENTRYP PFNGLVERTEXATTRIB4USVPROC)(GLuint index, const GLushort *v); +GLAPI PFNGLVERTEXATTRIB4USVPROC glad_glVertexAttrib4usv; +GLAPI PFNGLVERTEXATTRIB4USVPROC glad_debug_glVertexAttrib4usv; +#define glVertexAttrib4usv glad_debug_glVertexAttrib4usv +typedef void (APIENTRYP PFNGLVERTEXATTRIBPOINTERPROC)(GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void *pointer); +GLAPI PFNGLVERTEXATTRIBPOINTERPROC glad_glVertexAttribPointer; +GLAPI PFNGLVERTEXATTRIBPOINTERPROC glad_debug_glVertexAttribPointer; +#define glVertexAttribPointer glad_debug_glVertexAttribPointer +#endif +#ifndef GL_VERSION_2_1 +#define GL_VERSION_2_1 1 +GLAPI int GLAD_GL_VERSION_2_1; +typedef void (APIENTRYP PFNGLUNIFORMMATRIX2X3FVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI PFNGLUNIFORMMATRIX2X3FVPROC glad_glUniformMatrix2x3fv; +GLAPI PFNGLUNIFORMMATRIX2X3FVPROC glad_debug_glUniformMatrix2x3fv; +#define glUniformMatrix2x3fv glad_debug_glUniformMatrix2x3fv +typedef void (APIENTRYP PFNGLUNIFORMMATRIX3X2FVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI PFNGLUNIFORMMATRIX3X2FVPROC glad_glUniformMatrix3x2fv; +GLAPI PFNGLUNIFORMMATRIX3X2FVPROC glad_debug_glUniformMatrix3x2fv; +#define glUniformMatrix3x2fv glad_debug_glUniformMatrix3x2fv +typedef void (APIENTRYP PFNGLUNIFORMMATRIX2X4FVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI PFNGLUNIFORMMATRIX2X4FVPROC glad_glUniformMatrix2x4fv; +GLAPI PFNGLUNIFORMMATRIX2X4FVPROC glad_debug_glUniformMatrix2x4fv; +#define glUniformMatrix2x4fv glad_debug_glUniformMatrix2x4fv +typedef void (APIENTRYP PFNGLUNIFORMMATRIX4X2FVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI PFNGLUNIFORMMATRIX4X2FVPROC glad_glUniformMatrix4x2fv; +GLAPI PFNGLUNIFORMMATRIX4X2FVPROC glad_debug_glUniformMatrix4x2fv; +#define glUniformMatrix4x2fv glad_debug_glUniformMatrix4x2fv +typedef void (APIENTRYP PFNGLUNIFORMMATRIX3X4FVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI PFNGLUNIFORMMATRIX3X4FVPROC glad_glUniformMatrix3x4fv; +GLAPI PFNGLUNIFORMMATRIX3X4FVPROC glad_debug_glUniformMatrix3x4fv; +#define glUniformMatrix3x4fv glad_debug_glUniformMatrix3x4fv +typedef void (APIENTRYP PFNGLUNIFORMMATRIX4X3FVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GLAPI PFNGLUNIFORMMATRIX4X3FVPROC glad_glUniformMatrix4x3fv; +GLAPI PFNGLUNIFORMMATRIX4X3FVPROC glad_debug_glUniformMatrix4x3fv; +#define glUniformMatrix4x3fv glad_debug_glUniformMatrix4x3fv +#endif +#ifndef GL_VERSION_3_0 +#define GL_VERSION_3_0 1 +GLAPI int GLAD_GL_VERSION_3_0; +typedef void (APIENTRYP PFNGLCOLORMASKIPROC)(GLuint index, GLboolean r, GLboolean g, GLboolean b, GLboolean a); +GLAPI PFNGLCOLORMASKIPROC glad_glColorMaski; +GLAPI PFNGLCOLORMASKIPROC glad_debug_glColorMaski; +#define glColorMaski glad_debug_glColorMaski +typedef void (APIENTRYP PFNGLGETBOOLEANI_VPROC)(GLenum target, GLuint index, GLboolean *data); +GLAPI PFNGLGETBOOLEANI_VPROC glad_glGetBooleani_v; +GLAPI 
PFNGLGETBOOLEANI_VPROC glad_debug_glGetBooleani_v; +#define glGetBooleani_v glad_debug_glGetBooleani_v +typedef void (APIENTRYP PFNGLGETINTEGERI_VPROC)(GLenum target, GLuint index, GLint *data); +GLAPI PFNGLGETINTEGERI_VPROC glad_glGetIntegeri_v; +GLAPI PFNGLGETINTEGERI_VPROC glad_debug_glGetIntegeri_v; +#define glGetIntegeri_v glad_debug_glGetIntegeri_v +typedef void (APIENTRYP PFNGLENABLEIPROC)(GLenum target, GLuint index); +GLAPI PFNGLENABLEIPROC glad_glEnablei; +GLAPI PFNGLENABLEIPROC glad_debug_glEnablei; +#define glEnablei glad_debug_glEnablei +typedef void (APIENTRYP PFNGLDISABLEIPROC)(GLenum target, GLuint index); +GLAPI PFNGLDISABLEIPROC glad_glDisablei; +GLAPI PFNGLDISABLEIPROC glad_debug_glDisablei; +#define glDisablei glad_debug_glDisablei +typedef GLboolean (APIENTRYP PFNGLISENABLEDIPROC)(GLenum target, GLuint index); +GLAPI PFNGLISENABLEDIPROC glad_glIsEnabledi; +GLAPI PFNGLISENABLEDIPROC glad_debug_glIsEnabledi; +#define glIsEnabledi glad_debug_glIsEnabledi +typedef void (APIENTRYP PFNGLBEGINTRANSFORMFEEDBACKPROC)(GLenum primitiveMode); +GLAPI PFNGLBEGINTRANSFORMFEEDBACKPROC glad_glBeginTransformFeedback; +GLAPI PFNGLBEGINTRANSFORMFEEDBACKPROC glad_debug_glBeginTransformFeedback; +#define glBeginTransformFeedback glad_debug_glBeginTransformFeedback +typedef void (APIENTRYP PFNGLENDTRANSFORMFEEDBACKPROC)(void); +GLAPI PFNGLENDTRANSFORMFEEDBACKPROC glad_glEndTransformFeedback; +GLAPI PFNGLENDTRANSFORMFEEDBACKPROC glad_debug_glEndTransformFeedback; +#define glEndTransformFeedback glad_debug_glEndTransformFeedback +typedef void (APIENTRYP PFNGLBINDBUFFERRANGEPROC)(GLenum target, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size); +GLAPI PFNGLBINDBUFFERRANGEPROC glad_glBindBufferRange; +GLAPI PFNGLBINDBUFFERRANGEPROC glad_debug_glBindBufferRange; +#define glBindBufferRange glad_debug_glBindBufferRange +typedef void (APIENTRYP PFNGLBINDBUFFERBASEPROC)(GLenum target, GLuint index, GLuint buffer); +GLAPI PFNGLBINDBUFFERBASEPROC glad_glBindBufferBase; +GLAPI PFNGLBINDBUFFERBASEPROC glad_debug_glBindBufferBase; +#define glBindBufferBase glad_debug_glBindBufferBase +typedef void (APIENTRYP PFNGLTRANSFORMFEEDBACKVARYINGSPROC)(GLuint program, GLsizei count, const GLchar *const*varyings, GLenum bufferMode); +GLAPI PFNGLTRANSFORMFEEDBACKVARYINGSPROC glad_glTransformFeedbackVaryings; +GLAPI PFNGLTRANSFORMFEEDBACKVARYINGSPROC glad_debug_glTransformFeedbackVaryings; +#define glTransformFeedbackVaryings glad_debug_glTransformFeedbackVaryings +typedef void (APIENTRYP PFNGLGETTRANSFORMFEEDBACKVARYINGPROC)(GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLsizei *size, GLenum *type, GLchar *name); +GLAPI PFNGLGETTRANSFORMFEEDBACKVARYINGPROC glad_glGetTransformFeedbackVarying; +GLAPI PFNGLGETTRANSFORMFEEDBACKVARYINGPROC glad_debug_glGetTransformFeedbackVarying; +#define glGetTransformFeedbackVarying glad_debug_glGetTransformFeedbackVarying +typedef void (APIENTRYP PFNGLCLAMPCOLORPROC)(GLenum target, GLenum clamp); +GLAPI PFNGLCLAMPCOLORPROC glad_glClampColor; +GLAPI PFNGLCLAMPCOLORPROC glad_debug_glClampColor; +#define glClampColor glad_debug_glClampColor +typedef void (APIENTRYP PFNGLBEGINCONDITIONALRENDERPROC)(GLuint id, GLenum mode); +GLAPI PFNGLBEGINCONDITIONALRENDERPROC glad_glBeginConditionalRender; +GLAPI PFNGLBEGINCONDITIONALRENDERPROC glad_debug_glBeginConditionalRender; +#define glBeginConditionalRender glad_debug_glBeginConditionalRender +typedef void (APIENTRYP PFNGLENDCONDITIONALRENDERPROC)(void); +GLAPI PFNGLENDCONDITIONALRENDERPROC 
glad_glEndConditionalRender; +GLAPI PFNGLENDCONDITIONALRENDERPROC glad_debug_glEndConditionalRender; +#define glEndConditionalRender glad_debug_glEndConditionalRender +typedef void (APIENTRYP PFNGLVERTEXATTRIBIPOINTERPROC)(GLuint index, GLint size, GLenum type, GLsizei stride, const void *pointer); +GLAPI PFNGLVERTEXATTRIBIPOINTERPROC glad_glVertexAttribIPointer; +GLAPI PFNGLVERTEXATTRIBIPOINTERPROC glad_debug_glVertexAttribIPointer; +#define glVertexAttribIPointer glad_debug_glVertexAttribIPointer +typedef void (APIENTRYP PFNGLGETVERTEXATTRIBIIVPROC)(GLuint index, GLenum pname, GLint *params); +GLAPI PFNGLGETVERTEXATTRIBIIVPROC glad_glGetVertexAttribIiv; +GLAPI PFNGLGETVERTEXATTRIBIIVPROC glad_debug_glGetVertexAttribIiv; +#define glGetVertexAttribIiv glad_debug_glGetVertexAttribIiv +typedef void (APIENTRYP PFNGLGETVERTEXATTRIBIUIVPROC)(GLuint index, GLenum pname, GLuint *params); +GLAPI PFNGLGETVERTEXATTRIBIUIVPROC glad_glGetVertexAttribIuiv; +GLAPI PFNGLGETVERTEXATTRIBIUIVPROC glad_debug_glGetVertexAttribIuiv; +#define glGetVertexAttribIuiv glad_debug_glGetVertexAttribIuiv +typedef void (APIENTRYP PFNGLVERTEXATTRIBI1IPROC)(GLuint index, GLint x); +GLAPI PFNGLVERTEXATTRIBI1IPROC glad_glVertexAttribI1i; +GLAPI PFNGLVERTEXATTRIBI1IPROC glad_debug_glVertexAttribI1i; +#define glVertexAttribI1i glad_debug_glVertexAttribI1i +typedef void (APIENTRYP PFNGLVERTEXATTRIBI2IPROC)(GLuint index, GLint x, GLint y); +GLAPI PFNGLVERTEXATTRIBI2IPROC glad_glVertexAttribI2i; +GLAPI PFNGLVERTEXATTRIBI2IPROC glad_debug_glVertexAttribI2i; +#define glVertexAttribI2i glad_debug_glVertexAttribI2i +typedef void (APIENTRYP PFNGLVERTEXATTRIBI3IPROC)(GLuint index, GLint x, GLint y, GLint z); +GLAPI PFNGLVERTEXATTRIBI3IPROC glad_glVertexAttribI3i; +GLAPI PFNGLVERTEXATTRIBI3IPROC glad_debug_glVertexAttribI3i; +#define glVertexAttribI3i glad_debug_glVertexAttribI3i +typedef void (APIENTRYP PFNGLVERTEXATTRIBI4IPROC)(GLuint index, GLint x, GLint y, GLint z, GLint w); +GLAPI PFNGLVERTEXATTRIBI4IPROC glad_glVertexAttribI4i; +GLAPI PFNGLVERTEXATTRIBI4IPROC glad_debug_glVertexAttribI4i; +#define glVertexAttribI4i glad_debug_glVertexAttribI4i +typedef void (APIENTRYP PFNGLVERTEXATTRIBI1UIPROC)(GLuint index, GLuint x); +GLAPI PFNGLVERTEXATTRIBI1UIPROC glad_glVertexAttribI1ui; +GLAPI PFNGLVERTEXATTRIBI1UIPROC glad_debug_glVertexAttribI1ui; +#define glVertexAttribI1ui glad_debug_glVertexAttribI1ui +typedef void (APIENTRYP PFNGLVERTEXATTRIBI2UIPROC)(GLuint index, GLuint x, GLuint y); +GLAPI PFNGLVERTEXATTRIBI2UIPROC glad_glVertexAttribI2ui; +GLAPI PFNGLVERTEXATTRIBI2UIPROC glad_debug_glVertexAttribI2ui; +#define glVertexAttribI2ui glad_debug_glVertexAttribI2ui +typedef void (APIENTRYP PFNGLVERTEXATTRIBI3UIPROC)(GLuint index, GLuint x, GLuint y, GLuint z); +GLAPI PFNGLVERTEXATTRIBI3UIPROC glad_glVertexAttribI3ui; +GLAPI PFNGLVERTEXATTRIBI3UIPROC glad_debug_glVertexAttribI3ui; +#define glVertexAttribI3ui glad_debug_glVertexAttribI3ui +typedef void (APIENTRYP PFNGLVERTEXATTRIBI4UIPROC)(GLuint index, GLuint x, GLuint y, GLuint z, GLuint w); +GLAPI PFNGLVERTEXATTRIBI4UIPROC glad_glVertexAttribI4ui; +GLAPI PFNGLVERTEXATTRIBI4UIPROC glad_debug_glVertexAttribI4ui; +#define glVertexAttribI4ui glad_debug_glVertexAttribI4ui +typedef void (APIENTRYP PFNGLVERTEXATTRIBI1IVPROC)(GLuint index, const GLint *v); +GLAPI PFNGLVERTEXATTRIBI1IVPROC glad_glVertexAttribI1iv; +GLAPI PFNGLVERTEXATTRIBI1IVPROC glad_debug_glVertexAttribI1iv; +#define glVertexAttribI1iv glad_debug_glVertexAttribI1iv +typedef void (APIENTRYP PFNGLVERTEXATTRIBI2IVPROC)(GLuint 
index, const GLint *v); +GLAPI PFNGLVERTEXATTRIBI2IVPROC glad_glVertexAttribI2iv; +GLAPI PFNGLVERTEXATTRIBI2IVPROC glad_debug_glVertexAttribI2iv; +#define glVertexAttribI2iv glad_debug_glVertexAttribI2iv +typedef void (APIENTRYP PFNGLVERTEXATTRIBI3IVPROC)(GLuint index, const GLint *v); +GLAPI PFNGLVERTEXATTRIBI3IVPROC glad_glVertexAttribI3iv; +GLAPI PFNGLVERTEXATTRIBI3IVPROC glad_debug_glVertexAttribI3iv; +#define glVertexAttribI3iv glad_debug_glVertexAttribI3iv +typedef void (APIENTRYP PFNGLVERTEXATTRIBI4IVPROC)(GLuint index, const GLint *v); +GLAPI PFNGLVERTEXATTRIBI4IVPROC glad_glVertexAttribI4iv; +GLAPI PFNGLVERTEXATTRIBI4IVPROC glad_debug_glVertexAttribI4iv; +#define glVertexAttribI4iv glad_debug_glVertexAttribI4iv +typedef void (APIENTRYP PFNGLVERTEXATTRIBI1UIVPROC)(GLuint index, const GLuint *v); +GLAPI PFNGLVERTEXATTRIBI1UIVPROC glad_glVertexAttribI1uiv; +GLAPI PFNGLVERTEXATTRIBI1UIVPROC glad_debug_glVertexAttribI1uiv; +#define glVertexAttribI1uiv glad_debug_glVertexAttribI1uiv +typedef void (APIENTRYP PFNGLVERTEXATTRIBI2UIVPROC)(GLuint index, const GLuint *v); +GLAPI PFNGLVERTEXATTRIBI2UIVPROC glad_glVertexAttribI2uiv; +GLAPI PFNGLVERTEXATTRIBI2UIVPROC glad_debug_glVertexAttribI2uiv; +#define glVertexAttribI2uiv glad_debug_glVertexAttribI2uiv +typedef void (APIENTRYP PFNGLVERTEXATTRIBI3UIVPROC)(GLuint index, const GLuint *v); +GLAPI PFNGLVERTEXATTRIBI3UIVPROC glad_glVertexAttribI3uiv; +GLAPI PFNGLVERTEXATTRIBI3UIVPROC glad_debug_glVertexAttribI3uiv; +#define glVertexAttribI3uiv glad_debug_glVertexAttribI3uiv +typedef void (APIENTRYP PFNGLVERTEXATTRIBI4UIVPROC)(GLuint index, const GLuint *v); +GLAPI PFNGLVERTEXATTRIBI4UIVPROC glad_glVertexAttribI4uiv; +GLAPI PFNGLVERTEXATTRIBI4UIVPROC glad_debug_glVertexAttribI4uiv; +#define glVertexAttribI4uiv glad_debug_glVertexAttribI4uiv +typedef void (APIENTRYP PFNGLVERTEXATTRIBI4BVPROC)(GLuint index, const GLbyte *v); +GLAPI PFNGLVERTEXATTRIBI4BVPROC glad_glVertexAttribI4bv; +GLAPI PFNGLVERTEXATTRIBI4BVPROC glad_debug_glVertexAttribI4bv; +#define glVertexAttribI4bv glad_debug_glVertexAttribI4bv +typedef void (APIENTRYP PFNGLVERTEXATTRIBI4SVPROC)(GLuint index, const GLshort *v); +GLAPI PFNGLVERTEXATTRIBI4SVPROC glad_glVertexAttribI4sv; +GLAPI PFNGLVERTEXATTRIBI4SVPROC glad_debug_glVertexAttribI4sv; +#define glVertexAttribI4sv glad_debug_glVertexAttribI4sv +typedef void (APIENTRYP PFNGLVERTEXATTRIBI4UBVPROC)(GLuint index, const GLubyte *v); +GLAPI PFNGLVERTEXATTRIBI4UBVPROC glad_glVertexAttribI4ubv; +GLAPI PFNGLVERTEXATTRIBI4UBVPROC glad_debug_glVertexAttribI4ubv; +#define glVertexAttribI4ubv glad_debug_glVertexAttribI4ubv +typedef void (APIENTRYP PFNGLVERTEXATTRIBI4USVPROC)(GLuint index, const GLushort *v); +GLAPI PFNGLVERTEXATTRIBI4USVPROC glad_glVertexAttribI4usv; +GLAPI PFNGLVERTEXATTRIBI4USVPROC glad_debug_glVertexAttribI4usv; +#define glVertexAttribI4usv glad_debug_glVertexAttribI4usv +typedef void (APIENTRYP PFNGLGETUNIFORMUIVPROC)(GLuint program, GLint location, GLuint *params); +GLAPI PFNGLGETUNIFORMUIVPROC glad_glGetUniformuiv; +GLAPI PFNGLGETUNIFORMUIVPROC glad_debug_glGetUniformuiv; +#define glGetUniformuiv glad_debug_glGetUniformuiv +typedef void (APIENTRYP PFNGLBINDFRAGDATALOCATIONPROC)(GLuint program, GLuint color, const GLchar *name); +GLAPI PFNGLBINDFRAGDATALOCATIONPROC glad_glBindFragDataLocation; +GLAPI PFNGLBINDFRAGDATALOCATIONPROC glad_debug_glBindFragDataLocation; +#define glBindFragDataLocation glad_debug_glBindFragDataLocation +typedef GLint (APIENTRYP PFNGLGETFRAGDATALOCATIONPROC)(GLuint program, const GLchar *name); 
+GLAPI PFNGLGETFRAGDATALOCATIONPROC glad_glGetFragDataLocation; +GLAPI PFNGLGETFRAGDATALOCATIONPROC glad_debug_glGetFragDataLocation; +#define glGetFragDataLocation glad_debug_glGetFragDataLocation +typedef void (APIENTRYP PFNGLUNIFORM1UIPROC)(GLint location, GLuint v0); +GLAPI PFNGLUNIFORM1UIPROC glad_glUniform1ui; +GLAPI PFNGLUNIFORM1UIPROC glad_debug_glUniform1ui; +#define glUniform1ui glad_debug_glUniform1ui +typedef void (APIENTRYP PFNGLUNIFORM2UIPROC)(GLint location, GLuint v0, GLuint v1); +GLAPI PFNGLUNIFORM2UIPROC glad_glUniform2ui; +GLAPI PFNGLUNIFORM2UIPROC glad_debug_glUniform2ui; +#define glUniform2ui glad_debug_glUniform2ui +typedef void (APIENTRYP PFNGLUNIFORM3UIPROC)(GLint location, GLuint v0, GLuint v1, GLuint v2); +GLAPI PFNGLUNIFORM3UIPROC glad_glUniform3ui; +GLAPI PFNGLUNIFORM3UIPROC glad_debug_glUniform3ui; +#define glUniform3ui glad_debug_glUniform3ui +typedef void (APIENTRYP PFNGLUNIFORM4UIPROC)(GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3); +GLAPI PFNGLUNIFORM4UIPROC glad_glUniform4ui; +GLAPI PFNGLUNIFORM4UIPROC glad_debug_glUniform4ui; +#define glUniform4ui glad_debug_glUniform4ui +typedef void (APIENTRYP PFNGLUNIFORM1UIVPROC)(GLint location, GLsizei count, const GLuint *value); +GLAPI PFNGLUNIFORM1UIVPROC glad_glUniform1uiv; +GLAPI PFNGLUNIFORM1UIVPROC glad_debug_glUniform1uiv; +#define glUniform1uiv glad_debug_glUniform1uiv +typedef void (APIENTRYP PFNGLUNIFORM2UIVPROC)(GLint location, GLsizei count, const GLuint *value); +GLAPI PFNGLUNIFORM2UIVPROC glad_glUniform2uiv; +GLAPI PFNGLUNIFORM2UIVPROC glad_debug_glUniform2uiv; +#define glUniform2uiv glad_debug_glUniform2uiv +typedef void (APIENTRYP PFNGLUNIFORM3UIVPROC)(GLint location, GLsizei count, const GLuint *value); +GLAPI PFNGLUNIFORM3UIVPROC glad_glUniform3uiv; +GLAPI PFNGLUNIFORM3UIVPROC glad_debug_glUniform3uiv; +#define glUniform3uiv glad_debug_glUniform3uiv +typedef void (APIENTRYP PFNGLUNIFORM4UIVPROC)(GLint location, GLsizei count, const GLuint *value); +GLAPI PFNGLUNIFORM4UIVPROC glad_glUniform4uiv; +GLAPI PFNGLUNIFORM4UIVPROC glad_debug_glUniform4uiv; +#define glUniform4uiv glad_debug_glUniform4uiv +typedef void (APIENTRYP PFNGLTEXPARAMETERIIVPROC)(GLenum target, GLenum pname, const GLint *params); +GLAPI PFNGLTEXPARAMETERIIVPROC glad_glTexParameterIiv; +GLAPI PFNGLTEXPARAMETERIIVPROC glad_debug_glTexParameterIiv; +#define glTexParameterIiv glad_debug_glTexParameterIiv +typedef void (APIENTRYP PFNGLTEXPARAMETERIUIVPROC)(GLenum target, GLenum pname, const GLuint *params); +GLAPI PFNGLTEXPARAMETERIUIVPROC glad_glTexParameterIuiv; +GLAPI PFNGLTEXPARAMETERIUIVPROC glad_debug_glTexParameterIuiv; +#define glTexParameterIuiv glad_debug_glTexParameterIuiv +typedef void (APIENTRYP PFNGLGETTEXPARAMETERIIVPROC)(GLenum target, GLenum pname, GLint *params); +GLAPI PFNGLGETTEXPARAMETERIIVPROC glad_glGetTexParameterIiv; +GLAPI PFNGLGETTEXPARAMETERIIVPROC glad_debug_glGetTexParameterIiv; +#define glGetTexParameterIiv glad_debug_glGetTexParameterIiv +typedef void (APIENTRYP PFNGLGETTEXPARAMETERIUIVPROC)(GLenum target, GLenum pname, GLuint *params); +GLAPI PFNGLGETTEXPARAMETERIUIVPROC glad_glGetTexParameterIuiv; +GLAPI PFNGLGETTEXPARAMETERIUIVPROC glad_debug_glGetTexParameterIuiv; +#define glGetTexParameterIuiv glad_debug_glGetTexParameterIuiv +typedef void (APIENTRYP PFNGLCLEARBUFFERIVPROC)(GLenum buffer, GLint drawbuffer, const GLint *value); +GLAPI PFNGLCLEARBUFFERIVPROC glad_glClearBufferiv; +GLAPI PFNGLCLEARBUFFERIVPROC glad_debug_glClearBufferiv; +#define glClearBufferiv glad_debug_glClearBufferiv 
+typedef void (APIENTRYP PFNGLCLEARBUFFERUIVPROC)(GLenum buffer, GLint drawbuffer, const GLuint *value); +GLAPI PFNGLCLEARBUFFERUIVPROC glad_glClearBufferuiv; +GLAPI PFNGLCLEARBUFFERUIVPROC glad_debug_glClearBufferuiv; +#define glClearBufferuiv glad_debug_glClearBufferuiv +typedef void (APIENTRYP PFNGLCLEARBUFFERFVPROC)(GLenum buffer, GLint drawbuffer, const GLfloat *value); +GLAPI PFNGLCLEARBUFFERFVPROC glad_glClearBufferfv; +GLAPI PFNGLCLEARBUFFERFVPROC glad_debug_glClearBufferfv; +#define glClearBufferfv glad_debug_glClearBufferfv +typedef void (APIENTRYP PFNGLCLEARBUFFERFIPROC)(GLenum buffer, GLint drawbuffer, GLfloat depth, GLint stencil); +GLAPI PFNGLCLEARBUFFERFIPROC glad_glClearBufferfi; +GLAPI PFNGLCLEARBUFFERFIPROC glad_debug_glClearBufferfi; +#define glClearBufferfi glad_debug_glClearBufferfi +typedef const GLubyte * (APIENTRYP PFNGLGETSTRINGIPROC)(GLenum name, GLuint index); +GLAPI PFNGLGETSTRINGIPROC glad_glGetStringi; +GLAPI PFNGLGETSTRINGIPROC glad_debug_glGetStringi; +#define glGetStringi glad_debug_glGetStringi +typedef GLboolean (APIENTRYP PFNGLISRENDERBUFFERPROC)(GLuint renderbuffer); +GLAPI PFNGLISRENDERBUFFERPROC glad_glIsRenderbuffer; +GLAPI PFNGLISRENDERBUFFERPROC glad_debug_glIsRenderbuffer; +#define glIsRenderbuffer glad_debug_glIsRenderbuffer +typedef void (APIENTRYP PFNGLBINDRENDERBUFFERPROC)(GLenum target, GLuint renderbuffer); +GLAPI PFNGLBINDRENDERBUFFERPROC glad_glBindRenderbuffer; +GLAPI PFNGLBINDRENDERBUFFERPROC glad_debug_glBindRenderbuffer; +#define glBindRenderbuffer glad_debug_glBindRenderbuffer +typedef void (APIENTRYP PFNGLDELETERENDERBUFFERSPROC)(GLsizei n, const GLuint *renderbuffers); +GLAPI PFNGLDELETERENDERBUFFERSPROC glad_glDeleteRenderbuffers; +GLAPI PFNGLDELETERENDERBUFFERSPROC glad_debug_glDeleteRenderbuffers; +#define glDeleteRenderbuffers glad_debug_glDeleteRenderbuffers +typedef void (APIENTRYP PFNGLGENRENDERBUFFERSPROC)(GLsizei n, GLuint *renderbuffers); +GLAPI PFNGLGENRENDERBUFFERSPROC glad_glGenRenderbuffers; +GLAPI PFNGLGENRENDERBUFFERSPROC glad_debug_glGenRenderbuffers; +#define glGenRenderbuffers glad_debug_glGenRenderbuffers +typedef void (APIENTRYP PFNGLRENDERBUFFERSTORAGEPROC)(GLenum target, GLenum internalformat, GLsizei width, GLsizei height); +GLAPI PFNGLRENDERBUFFERSTORAGEPROC glad_glRenderbufferStorage; +GLAPI PFNGLRENDERBUFFERSTORAGEPROC glad_debug_glRenderbufferStorage; +#define glRenderbufferStorage glad_debug_glRenderbufferStorage +typedef void (APIENTRYP PFNGLGETRENDERBUFFERPARAMETERIVPROC)(GLenum target, GLenum pname, GLint *params); +GLAPI PFNGLGETRENDERBUFFERPARAMETERIVPROC glad_glGetRenderbufferParameteriv; +GLAPI PFNGLGETRENDERBUFFERPARAMETERIVPROC glad_debug_glGetRenderbufferParameteriv; +#define glGetRenderbufferParameteriv glad_debug_glGetRenderbufferParameteriv +typedef GLboolean (APIENTRYP PFNGLISFRAMEBUFFERPROC)(GLuint framebuffer); +GLAPI PFNGLISFRAMEBUFFERPROC glad_glIsFramebuffer; +GLAPI PFNGLISFRAMEBUFFERPROC glad_debug_glIsFramebuffer; +#define glIsFramebuffer glad_debug_glIsFramebuffer +typedef void (APIENTRYP PFNGLBINDFRAMEBUFFERPROC)(GLenum target, GLuint framebuffer); +GLAPI PFNGLBINDFRAMEBUFFERPROC glad_glBindFramebuffer; +GLAPI PFNGLBINDFRAMEBUFFERPROC glad_debug_glBindFramebuffer; +#define glBindFramebuffer glad_debug_glBindFramebuffer +typedef void (APIENTRYP PFNGLDELETEFRAMEBUFFERSPROC)(GLsizei n, const GLuint *framebuffers); +GLAPI PFNGLDELETEFRAMEBUFFERSPROC glad_glDeleteFramebuffers; +GLAPI PFNGLDELETEFRAMEBUFFERSPROC glad_debug_glDeleteFramebuffers; +#define glDeleteFramebuffers 
glad_debug_glDeleteFramebuffers +typedef void (APIENTRYP PFNGLGENFRAMEBUFFERSPROC)(GLsizei n, GLuint *framebuffers); +GLAPI PFNGLGENFRAMEBUFFERSPROC glad_glGenFramebuffers; +GLAPI PFNGLGENFRAMEBUFFERSPROC glad_debug_glGenFramebuffers; +#define glGenFramebuffers glad_debug_glGenFramebuffers +typedef GLenum (APIENTRYP PFNGLCHECKFRAMEBUFFERSTATUSPROC)(GLenum target); +GLAPI PFNGLCHECKFRAMEBUFFERSTATUSPROC glad_glCheckFramebufferStatus; +GLAPI PFNGLCHECKFRAMEBUFFERSTATUSPROC glad_debug_glCheckFramebufferStatus; +#define glCheckFramebufferStatus glad_debug_glCheckFramebufferStatus +typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTURE1DPROC)(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level); +GLAPI PFNGLFRAMEBUFFERTEXTURE1DPROC glad_glFramebufferTexture1D; +GLAPI PFNGLFRAMEBUFFERTEXTURE1DPROC glad_debug_glFramebufferTexture1D; +#define glFramebufferTexture1D glad_debug_glFramebufferTexture1D +typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTURE2DPROC)(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level); +GLAPI PFNGLFRAMEBUFFERTEXTURE2DPROC glad_glFramebufferTexture2D; +GLAPI PFNGLFRAMEBUFFERTEXTURE2DPROC glad_debug_glFramebufferTexture2D; +#define glFramebufferTexture2D glad_debug_glFramebufferTexture2D +typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTURE3DPROC)(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint zoffset); +GLAPI PFNGLFRAMEBUFFERTEXTURE3DPROC glad_glFramebufferTexture3D; +GLAPI PFNGLFRAMEBUFFERTEXTURE3DPROC glad_debug_glFramebufferTexture3D; +#define glFramebufferTexture3D glad_debug_glFramebufferTexture3D +typedef void (APIENTRYP PFNGLFRAMEBUFFERRENDERBUFFERPROC)(GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer); +GLAPI PFNGLFRAMEBUFFERRENDERBUFFERPROC glad_glFramebufferRenderbuffer; +GLAPI PFNGLFRAMEBUFFERRENDERBUFFERPROC glad_debug_glFramebufferRenderbuffer; +#define glFramebufferRenderbuffer glad_debug_glFramebufferRenderbuffer +typedef void (APIENTRYP PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC)(GLenum target, GLenum attachment, GLenum pname, GLint *params); +GLAPI PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC glad_glGetFramebufferAttachmentParameteriv; +GLAPI PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC glad_debug_glGetFramebufferAttachmentParameteriv; +#define glGetFramebufferAttachmentParameteriv glad_debug_glGetFramebufferAttachmentParameteriv +typedef void (APIENTRYP PFNGLGENERATEMIPMAPPROC)(GLenum target); +GLAPI PFNGLGENERATEMIPMAPPROC glad_glGenerateMipmap; +GLAPI PFNGLGENERATEMIPMAPPROC glad_debug_glGenerateMipmap; +#define glGenerateMipmap glad_debug_glGenerateMipmap +typedef void (APIENTRYP PFNGLBLITFRAMEBUFFERPROC)(GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter); +GLAPI PFNGLBLITFRAMEBUFFERPROC glad_glBlitFramebuffer; +GLAPI PFNGLBLITFRAMEBUFFERPROC glad_debug_glBlitFramebuffer; +#define glBlitFramebuffer glad_debug_glBlitFramebuffer +typedef void (APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLEPROC)(GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +GLAPI PFNGLRENDERBUFFERSTORAGEMULTISAMPLEPROC glad_glRenderbufferStorageMultisample; +GLAPI PFNGLRENDERBUFFERSTORAGEMULTISAMPLEPROC glad_debug_glRenderbufferStorageMultisample; +#define glRenderbufferStorageMultisample glad_debug_glRenderbufferStorageMultisample +typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTURELAYERPROC)(GLenum target, GLenum attachment, GLuint texture, GLint level, 
GLint layer); +GLAPI PFNGLFRAMEBUFFERTEXTURELAYERPROC glad_glFramebufferTextureLayer; +GLAPI PFNGLFRAMEBUFFERTEXTURELAYERPROC glad_debug_glFramebufferTextureLayer; +#define glFramebufferTextureLayer glad_debug_glFramebufferTextureLayer +typedef void * (APIENTRYP PFNGLMAPBUFFERRANGEPROC)(GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access); +GLAPI PFNGLMAPBUFFERRANGEPROC glad_glMapBufferRange; +GLAPI PFNGLMAPBUFFERRANGEPROC glad_debug_glMapBufferRange; +#define glMapBufferRange glad_debug_glMapBufferRange +typedef void (APIENTRYP PFNGLFLUSHMAPPEDBUFFERRANGEPROC)(GLenum target, GLintptr offset, GLsizeiptr length); +GLAPI PFNGLFLUSHMAPPEDBUFFERRANGEPROC glad_glFlushMappedBufferRange; +GLAPI PFNGLFLUSHMAPPEDBUFFERRANGEPROC glad_debug_glFlushMappedBufferRange; +#define glFlushMappedBufferRange glad_debug_glFlushMappedBufferRange +typedef void (APIENTRYP PFNGLBINDVERTEXARRAYPROC)(GLuint array); +GLAPI PFNGLBINDVERTEXARRAYPROC glad_glBindVertexArray; +GLAPI PFNGLBINDVERTEXARRAYPROC glad_debug_glBindVertexArray; +#define glBindVertexArray glad_debug_glBindVertexArray +typedef void (APIENTRYP PFNGLDELETEVERTEXARRAYSPROC)(GLsizei n, const GLuint *arrays); +GLAPI PFNGLDELETEVERTEXARRAYSPROC glad_glDeleteVertexArrays; +GLAPI PFNGLDELETEVERTEXARRAYSPROC glad_debug_glDeleteVertexArrays; +#define glDeleteVertexArrays glad_debug_glDeleteVertexArrays +typedef void (APIENTRYP PFNGLGENVERTEXARRAYSPROC)(GLsizei n, GLuint *arrays); +GLAPI PFNGLGENVERTEXARRAYSPROC glad_glGenVertexArrays; +GLAPI PFNGLGENVERTEXARRAYSPROC glad_debug_glGenVertexArrays; +#define glGenVertexArrays glad_debug_glGenVertexArrays +typedef GLboolean (APIENTRYP PFNGLISVERTEXARRAYPROC)(GLuint array); +GLAPI PFNGLISVERTEXARRAYPROC glad_glIsVertexArray; +GLAPI PFNGLISVERTEXARRAYPROC glad_debug_glIsVertexArray; +#define glIsVertexArray glad_debug_glIsVertexArray +#endif +#ifndef GL_VERSION_3_1 +#define GL_VERSION_3_1 1 +GLAPI int GLAD_GL_VERSION_3_1; +typedef void (APIENTRYP PFNGLDRAWARRAYSINSTANCEDPROC)(GLenum mode, GLint first, GLsizei count, GLsizei instancecount); +GLAPI PFNGLDRAWARRAYSINSTANCEDPROC glad_glDrawArraysInstanced; +GLAPI PFNGLDRAWARRAYSINSTANCEDPROC glad_debug_glDrawArraysInstanced; +#define glDrawArraysInstanced glad_debug_glDrawArraysInstanced +typedef void (APIENTRYP PFNGLDRAWELEMENTSINSTANCEDPROC)(GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount); +GLAPI PFNGLDRAWELEMENTSINSTANCEDPROC glad_glDrawElementsInstanced; +GLAPI PFNGLDRAWELEMENTSINSTANCEDPROC glad_debug_glDrawElementsInstanced; +#define glDrawElementsInstanced glad_debug_glDrawElementsInstanced +typedef void (APIENTRYP PFNGLTEXBUFFERPROC)(GLenum target, GLenum internalformat, GLuint buffer); +GLAPI PFNGLTEXBUFFERPROC glad_glTexBuffer; +GLAPI PFNGLTEXBUFFERPROC glad_debug_glTexBuffer; +#define glTexBuffer glad_debug_glTexBuffer +typedef void (APIENTRYP PFNGLPRIMITIVERESTARTINDEXPROC)(GLuint index); +GLAPI PFNGLPRIMITIVERESTARTINDEXPROC glad_glPrimitiveRestartIndex; +GLAPI PFNGLPRIMITIVERESTARTINDEXPROC glad_debug_glPrimitiveRestartIndex; +#define glPrimitiveRestartIndex glad_debug_glPrimitiveRestartIndex +typedef void (APIENTRYP PFNGLCOPYBUFFERSUBDATAPROC)(GLenum readTarget, GLenum writeTarget, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size); +GLAPI PFNGLCOPYBUFFERSUBDATAPROC glad_glCopyBufferSubData; +GLAPI PFNGLCOPYBUFFERSUBDATAPROC glad_debug_glCopyBufferSubData; +#define glCopyBufferSubData glad_debug_glCopyBufferSubData +typedef void (APIENTRYP PFNGLGETUNIFORMINDICESPROC)(GLuint program, GLsizei 
uniformCount, const GLchar *const*uniformNames, GLuint *uniformIndices); +GLAPI PFNGLGETUNIFORMINDICESPROC glad_glGetUniformIndices; +GLAPI PFNGLGETUNIFORMINDICESPROC glad_debug_glGetUniformIndices; +#define glGetUniformIndices glad_debug_glGetUniformIndices +typedef void (APIENTRYP PFNGLGETACTIVEUNIFORMSIVPROC)(GLuint program, GLsizei uniformCount, const GLuint *uniformIndices, GLenum pname, GLint *params); +GLAPI PFNGLGETACTIVEUNIFORMSIVPROC glad_glGetActiveUniformsiv; +GLAPI PFNGLGETACTIVEUNIFORMSIVPROC glad_debug_glGetActiveUniformsiv; +#define glGetActiveUniformsiv glad_debug_glGetActiveUniformsiv +typedef void (APIENTRYP PFNGLGETACTIVEUNIFORMNAMEPROC)(GLuint program, GLuint uniformIndex, GLsizei bufSize, GLsizei *length, GLchar *uniformName); +GLAPI PFNGLGETACTIVEUNIFORMNAMEPROC glad_glGetActiveUniformName; +GLAPI PFNGLGETACTIVEUNIFORMNAMEPROC glad_debug_glGetActiveUniformName; +#define glGetActiveUniformName glad_debug_glGetActiveUniformName +typedef GLuint (APIENTRYP PFNGLGETUNIFORMBLOCKINDEXPROC)(GLuint program, const GLchar *uniformBlockName); +GLAPI PFNGLGETUNIFORMBLOCKINDEXPROC glad_glGetUniformBlockIndex; +GLAPI PFNGLGETUNIFORMBLOCKINDEXPROC glad_debug_glGetUniformBlockIndex; +#define glGetUniformBlockIndex glad_debug_glGetUniformBlockIndex +typedef void (APIENTRYP PFNGLGETACTIVEUNIFORMBLOCKIVPROC)(GLuint program, GLuint uniformBlockIndex, GLenum pname, GLint *params); +GLAPI PFNGLGETACTIVEUNIFORMBLOCKIVPROC glad_glGetActiveUniformBlockiv; +GLAPI PFNGLGETACTIVEUNIFORMBLOCKIVPROC glad_debug_glGetActiveUniformBlockiv; +#define glGetActiveUniformBlockiv glad_debug_glGetActiveUniformBlockiv +typedef void (APIENTRYP PFNGLGETACTIVEUNIFORMBLOCKNAMEPROC)(GLuint program, GLuint uniformBlockIndex, GLsizei bufSize, GLsizei *length, GLchar *uniformBlockName); +GLAPI PFNGLGETACTIVEUNIFORMBLOCKNAMEPROC glad_glGetActiveUniformBlockName; +GLAPI PFNGLGETACTIVEUNIFORMBLOCKNAMEPROC glad_debug_glGetActiveUniformBlockName; +#define glGetActiveUniformBlockName glad_debug_glGetActiveUniformBlockName +typedef void (APIENTRYP PFNGLUNIFORMBLOCKBINDINGPROC)(GLuint program, GLuint uniformBlockIndex, GLuint uniformBlockBinding); +GLAPI PFNGLUNIFORMBLOCKBINDINGPROC glad_glUniformBlockBinding; +GLAPI PFNGLUNIFORMBLOCKBINDINGPROC glad_debug_glUniformBlockBinding; +#define glUniformBlockBinding glad_debug_glUniformBlockBinding +#endif +#ifndef GL_VERSION_3_2 +#define GL_VERSION_3_2 1 +GLAPI int GLAD_GL_VERSION_3_2; +typedef void (APIENTRYP PFNGLDRAWELEMENTSBASEVERTEXPROC)(GLenum mode, GLsizei count, GLenum type, const void *indices, GLint basevertex); +GLAPI PFNGLDRAWELEMENTSBASEVERTEXPROC glad_glDrawElementsBaseVertex; +GLAPI PFNGLDRAWELEMENTSBASEVERTEXPROC glad_debug_glDrawElementsBaseVertex; +#define glDrawElementsBaseVertex glad_debug_glDrawElementsBaseVertex +typedef void (APIENTRYP PFNGLDRAWRANGEELEMENTSBASEVERTEXPROC)(GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices, GLint basevertex); +GLAPI PFNGLDRAWRANGEELEMENTSBASEVERTEXPROC glad_glDrawRangeElementsBaseVertex; +GLAPI PFNGLDRAWRANGEELEMENTSBASEVERTEXPROC glad_debug_glDrawRangeElementsBaseVertex; +#define glDrawRangeElementsBaseVertex glad_debug_glDrawRangeElementsBaseVertex +typedef void (APIENTRYP PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXPROC)(GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLint basevertex); +GLAPI PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXPROC glad_glDrawElementsInstancedBaseVertex; +GLAPI PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXPROC 
glad_debug_glDrawElementsInstancedBaseVertex; +#define glDrawElementsInstancedBaseVertex glad_debug_glDrawElementsInstancedBaseVertex +typedef void (APIENTRYP PFNGLMULTIDRAWELEMENTSBASEVERTEXPROC)(GLenum mode, const GLsizei *count, GLenum type, const void *const*indices, GLsizei drawcount, const GLint *basevertex); +GLAPI PFNGLMULTIDRAWELEMENTSBASEVERTEXPROC glad_glMultiDrawElementsBaseVertex; +GLAPI PFNGLMULTIDRAWELEMENTSBASEVERTEXPROC glad_debug_glMultiDrawElementsBaseVertex; +#define glMultiDrawElementsBaseVertex glad_debug_glMultiDrawElementsBaseVertex +typedef void (APIENTRYP PFNGLPROVOKINGVERTEXPROC)(GLenum mode); +GLAPI PFNGLPROVOKINGVERTEXPROC glad_glProvokingVertex; +GLAPI PFNGLPROVOKINGVERTEXPROC glad_debug_glProvokingVertex; +#define glProvokingVertex glad_debug_glProvokingVertex +typedef GLsync (APIENTRYP PFNGLFENCESYNCPROC)(GLenum condition, GLbitfield flags); +GLAPI PFNGLFENCESYNCPROC glad_glFenceSync; +GLAPI PFNGLFENCESYNCPROC glad_debug_glFenceSync; +#define glFenceSync glad_debug_glFenceSync +typedef GLboolean (APIENTRYP PFNGLISSYNCPROC)(GLsync sync); +GLAPI PFNGLISSYNCPROC glad_glIsSync; +GLAPI PFNGLISSYNCPROC glad_debug_glIsSync; +#define glIsSync glad_debug_glIsSync +typedef void (APIENTRYP PFNGLDELETESYNCPROC)(GLsync sync); +GLAPI PFNGLDELETESYNCPROC glad_glDeleteSync; +GLAPI PFNGLDELETESYNCPROC glad_debug_glDeleteSync; +#define glDeleteSync glad_debug_glDeleteSync +typedef GLenum (APIENTRYP PFNGLCLIENTWAITSYNCPROC)(GLsync sync, GLbitfield flags, GLuint64 timeout); +GLAPI PFNGLCLIENTWAITSYNCPROC glad_glClientWaitSync; +GLAPI PFNGLCLIENTWAITSYNCPROC glad_debug_glClientWaitSync; +#define glClientWaitSync glad_debug_glClientWaitSync +typedef void (APIENTRYP PFNGLWAITSYNCPROC)(GLsync sync, GLbitfield flags, GLuint64 timeout); +GLAPI PFNGLWAITSYNCPROC glad_glWaitSync; +GLAPI PFNGLWAITSYNCPROC glad_debug_glWaitSync; +#define glWaitSync glad_debug_glWaitSync +typedef void (APIENTRYP PFNGLGETINTEGER64VPROC)(GLenum pname, GLint64 *data); +GLAPI PFNGLGETINTEGER64VPROC glad_glGetInteger64v; +GLAPI PFNGLGETINTEGER64VPROC glad_debug_glGetInteger64v; +#define glGetInteger64v glad_debug_glGetInteger64v +typedef void (APIENTRYP PFNGLGETSYNCIVPROC)(GLsync sync, GLenum pname, GLsizei count, GLsizei *length, GLint *values); +GLAPI PFNGLGETSYNCIVPROC glad_glGetSynciv; +GLAPI PFNGLGETSYNCIVPROC glad_debug_glGetSynciv; +#define glGetSynciv glad_debug_glGetSynciv +typedef void (APIENTRYP PFNGLGETINTEGER64I_VPROC)(GLenum target, GLuint index, GLint64 *data); +GLAPI PFNGLGETINTEGER64I_VPROC glad_glGetInteger64i_v; +GLAPI PFNGLGETINTEGER64I_VPROC glad_debug_glGetInteger64i_v; +#define glGetInteger64i_v glad_debug_glGetInteger64i_v +typedef void (APIENTRYP PFNGLGETBUFFERPARAMETERI64VPROC)(GLenum target, GLenum pname, GLint64 *params); +GLAPI PFNGLGETBUFFERPARAMETERI64VPROC glad_glGetBufferParameteri64v; +GLAPI PFNGLGETBUFFERPARAMETERI64VPROC glad_debug_glGetBufferParameteri64v; +#define glGetBufferParameteri64v glad_debug_glGetBufferParameteri64v +typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTUREPROC)(GLenum target, GLenum attachment, GLuint texture, GLint level); +GLAPI PFNGLFRAMEBUFFERTEXTUREPROC glad_glFramebufferTexture; +GLAPI PFNGLFRAMEBUFFERTEXTUREPROC glad_debug_glFramebufferTexture; +#define glFramebufferTexture glad_debug_glFramebufferTexture +typedef void (APIENTRYP PFNGLTEXIMAGE2DMULTISAMPLEPROC)(GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLboolean fixedsamplelocations); +GLAPI PFNGLTEXIMAGE2DMULTISAMPLEPROC glad_glTexImage2DMultisample; 
+GLAPI PFNGLTEXIMAGE2DMULTISAMPLEPROC glad_debug_glTexImage2DMultisample; +#define glTexImage2DMultisample glad_debug_glTexImage2DMultisample +typedef void (APIENTRYP PFNGLTEXIMAGE3DMULTISAMPLEPROC)(GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedsamplelocations); +GLAPI PFNGLTEXIMAGE3DMULTISAMPLEPROC glad_glTexImage3DMultisample; +GLAPI PFNGLTEXIMAGE3DMULTISAMPLEPROC glad_debug_glTexImage3DMultisample; +#define glTexImage3DMultisample glad_debug_glTexImage3DMultisample +typedef void (APIENTRYP PFNGLGETMULTISAMPLEFVPROC)(GLenum pname, GLuint index, GLfloat *val); +GLAPI PFNGLGETMULTISAMPLEFVPROC glad_glGetMultisamplefv; +GLAPI PFNGLGETMULTISAMPLEFVPROC glad_debug_glGetMultisamplefv; +#define glGetMultisamplefv glad_debug_glGetMultisamplefv +typedef void (APIENTRYP PFNGLSAMPLEMASKIPROC)(GLuint maskNumber, GLbitfield mask); +GLAPI PFNGLSAMPLEMASKIPROC glad_glSampleMaski; +GLAPI PFNGLSAMPLEMASKIPROC glad_debug_glSampleMaski; +#define glSampleMaski glad_debug_glSampleMaski +#endif +#ifndef GL_VERSION_3_3 +#define GL_VERSION_3_3 1 +GLAPI int GLAD_GL_VERSION_3_3; +typedef void (APIENTRYP PFNGLBINDFRAGDATALOCATIONINDEXEDPROC)(GLuint program, GLuint colorNumber, GLuint index, const GLchar *name); +GLAPI PFNGLBINDFRAGDATALOCATIONINDEXEDPROC glad_glBindFragDataLocationIndexed; +GLAPI PFNGLBINDFRAGDATALOCATIONINDEXEDPROC glad_debug_glBindFragDataLocationIndexed; +#define glBindFragDataLocationIndexed glad_debug_glBindFragDataLocationIndexed +typedef GLint (APIENTRYP PFNGLGETFRAGDATAINDEXPROC)(GLuint program, const GLchar *name); +GLAPI PFNGLGETFRAGDATAINDEXPROC glad_glGetFragDataIndex; +GLAPI PFNGLGETFRAGDATAINDEXPROC glad_debug_glGetFragDataIndex; +#define glGetFragDataIndex glad_debug_glGetFragDataIndex +typedef void (APIENTRYP PFNGLGENSAMPLERSPROC)(GLsizei count, GLuint *samplers); +GLAPI PFNGLGENSAMPLERSPROC glad_glGenSamplers; +GLAPI PFNGLGENSAMPLERSPROC glad_debug_glGenSamplers; +#define glGenSamplers glad_debug_glGenSamplers +typedef void (APIENTRYP PFNGLDELETESAMPLERSPROC)(GLsizei count, const GLuint *samplers); +GLAPI PFNGLDELETESAMPLERSPROC glad_glDeleteSamplers; +GLAPI PFNGLDELETESAMPLERSPROC glad_debug_glDeleteSamplers; +#define glDeleteSamplers glad_debug_glDeleteSamplers +typedef GLboolean (APIENTRYP PFNGLISSAMPLERPROC)(GLuint sampler); +GLAPI PFNGLISSAMPLERPROC glad_glIsSampler; +GLAPI PFNGLISSAMPLERPROC glad_debug_glIsSampler; +#define glIsSampler glad_debug_glIsSampler +typedef void (APIENTRYP PFNGLBINDSAMPLERPROC)(GLuint unit, GLuint sampler); +GLAPI PFNGLBINDSAMPLERPROC glad_glBindSampler; +GLAPI PFNGLBINDSAMPLERPROC glad_debug_glBindSampler; +#define glBindSampler glad_debug_glBindSampler +typedef void (APIENTRYP PFNGLSAMPLERPARAMETERIPROC)(GLuint sampler, GLenum pname, GLint param); +GLAPI PFNGLSAMPLERPARAMETERIPROC glad_glSamplerParameteri; +GLAPI PFNGLSAMPLERPARAMETERIPROC glad_debug_glSamplerParameteri; +#define glSamplerParameteri glad_debug_glSamplerParameteri +typedef void (APIENTRYP PFNGLSAMPLERPARAMETERIVPROC)(GLuint sampler, GLenum pname, const GLint *param); +GLAPI PFNGLSAMPLERPARAMETERIVPROC glad_glSamplerParameteriv; +GLAPI PFNGLSAMPLERPARAMETERIVPROC glad_debug_glSamplerParameteriv; +#define glSamplerParameteriv glad_debug_glSamplerParameteriv +typedef void (APIENTRYP PFNGLSAMPLERPARAMETERFPROC)(GLuint sampler, GLenum pname, GLfloat param); +GLAPI PFNGLSAMPLERPARAMETERFPROC glad_glSamplerParameterf; +GLAPI PFNGLSAMPLERPARAMETERFPROC glad_debug_glSamplerParameterf; +#define glSamplerParameterf 
glad_debug_glSamplerParameterf +typedef void (APIENTRYP PFNGLSAMPLERPARAMETERFVPROC)(GLuint sampler, GLenum pname, const GLfloat *param); +GLAPI PFNGLSAMPLERPARAMETERFVPROC glad_glSamplerParameterfv; +GLAPI PFNGLSAMPLERPARAMETERFVPROC glad_debug_glSamplerParameterfv; +#define glSamplerParameterfv glad_debug_glSamplerParameterfv +typedef void (APIENTRYP PFNGLSAMPLERPARAMETERIIVPROC)(GLuint sampler, GLenum pname, const GLint *param); +GLAPI PFNGLSAMPLERPARAMETERIIVPROC glad_glSamplerParameterIiv; +GLAPI PFNGLSAMPLERPARAMETERIIVPROC glad_debug_glSamplerParameterIiv; +#define glSamplerParameterIiv glad_debug_glSamplerParameterIiv +typedef void (APIENTRYP PFNGLSAMPLERPARAMETERIUIVPROC)(GLuint sampler, GLenum pname, const GLuint *param); +GLAPI PFNGLSAMPLERPARAMETERIUIVPROC glad_glSamplerParameterIuiv; +GLAPI PFNGLSAMPLERPARAMETERIUIVPROC glad_debug_glSamplerParameterIuiv; +#define glSamplerParameterIuiv glad_debug_glSamplerParameterIuiv +typedef void (APIENTRYP PFNGLGETSAMPLERPARAMETERIVPROC)(GLuint sampler, GLenum pname, GLint *params); +GLAPI PFNGLGETSAMPLERPARAMETERIVPROC glad_glGetSamplerParameteriv; +GLAPI PFNGLGETSAMPLERPARAMETERIVPROC glad_debug_glGetSamplerParameteriv; +#define glGetSamplerParameteriv glad_debug_glGetSamplerParameteriv +typedef void (APIENTRYP PFNGLGETSAMPLERPARAMETERIIVPROC)(GLuint sampler, GLenum pname, GLint *params); +GLAPI PFNGLGETSAMPLERPARAMETERIIVPROC glad_glGetSamplerParameterIiv; +GLAPI PFNGLGETSAMPLERPARAMETERIIVPROC glad_debug_glGetSamplerParameterIiv; +#define glGetSamplerParameterIiv glad_debug_glGetSamplerParameterIiv +typedef void (APIENTRYP PFNGLGETSAMPLERPARAMETERFVPROC)(GLuint sampler, GLenum pname, GLfloat *params); +GLAPI PFNGLGETSAMPLERPARAMETERFVPROC glad_glGetSamplerParameterfv; +GLAPI PFNGLGETSAMPLERPARAMETERFVPROC glad_debug_glGetSamplerParameterfv; +#define glGetSamplerParameterfv glad_debug_glGetSamplerParameterfv +typedef void (APIENTRYP PFNGLGETSAMPLERPARAMETERIUIVPROC)(GLuint sampler, GLenum pname, GLuint *params); +GLAPI PFNGLGETSAMPLERPARAMETERIUIVPROC glad_glGetSamplerParameterIuiv; +GLAPI PFNGLGETSAMPLERPARAMETERIUIVPROC glad_debug_glGetSamplerParameterIuiv; +#define glGetSamplerParameterIuiv glad_debug_glGetSamplerParameterIuiv +typedef void (APIENTRYP PFNGLQUERYCOUNTERPROC)(GLuint id, GLenum target); +GLAPI PFNGLQUERYCOUNTERPROC glad_glQueryCounter; +GLAPI PFNGLQUERYCOUNTERPROC glad_debug_glQueryCounter; +#define glQueryCounter glad_debug_glQueryCounter +typedef void (APIENTRYP PFNGLGETQUERYOBJECTI64VPROC)(GLuint id, GLenum pname, GLint64 *params); +GLAPI PFNGLGETQUERYOBJECTI64VPROC glad_glGetQueryObjecti64v; +GLAPI PFNGLGETQUERYOBJECTI64VPROC glad_debug_glGetQueryObjecti64v; +#define glGetQueryObjecti64v glad_debug_glGetQueryObjecti64v +typedef void (APIENTRYP PFNGLGETQUERYOBJECTUI64VPROC)(GLuint id, GLenum pname, GLuint64 *params); +GLAPI PFNGLGETQUERYOBJECTUI64VPROC glad_glGetQueryObjectui64v; +GLAPI PFNGLGETQUERYOBJECTUI64VPROC glad_debug_glGetQueryObjectui64v; +#define glGetQueryObjectui64v glad_debug_glGetQueryObjectui64v +typedef void (APIENTRYP PFNGLVERTEXATTRIBDIVISORPROC)(GLuint index, GLuint divisor); +GLAPI PFNGLVERTEXATTRIBDIVISORPROC glad_glVertexAttribDivisor; +GLAPI PFNGLVERTEXATTRIBDIVISORPROC glad_debug_glVertexAttribDivisor; +#define glVertexAttribDivisor glad_debug_glVertexAttribDivisor +typedef void (APIENTRYP PFNGLVERTEXATTRIBP1UIPROC)(GLuint index, GLenum type, GLboolean normalized, GLuint value); +GLAPI PFNGLVERTEXATTRIBP1UIPROC glad_glVertexAttribP1ui; +GLAPI PFNGLVERTEXATTRIBP1UIPROC 
glad_debug_glVertexAttribP1ui; +#define glVertexAttribP1ui glad_debug_glVertexAttribP1ui +typedef void (APIENTRYP PFNGLVERTEXATTRIBP1UIVPROC)(GLuint index, GLenum type, GLboolean normalized, const GLuint *value); +GLAPI PFNGLVERTEXATTRIBP1UIVPROC glad_glVertexAttribP1uiv; +GLAPI PFNGLVERTEXATTRIBP1UIVPROC glad_debug_glVertexAttribP1uiv; +#define glVertexAttribP1uiv glad_debug_glVertexAttribP1uiv +typedef void (APIENTRYP PFNGLVERTEXATTRIBP2UIPROC)(GLuint index, GLenum type, GLboolean normalized, GLuint value); +GLAPI PFNGLVERTEXATTRIBP2UIPROC glad_glVertexAttribP2ui; +GLAPI PFNGLVERTEXATTRIBP2UIPROC glad_debug_glVertexAttribP2ui; +#define glVertexAttribP2ui glad_debug_glVertexAttribP2ui +typedef void (APIENTRYP PFNGLVERTEXATTRIBP2UIVPROC)(GLuint index, GLenum type, GLboolean normalized, const GLuint *value); +GLAPI PFNGLVERTEXATTRIBP2UIVPROC glad_glVertexAttribP2uiv; +GLAPI PFNGLVERTEXATTRIBP2UIVPROC glad_debug_glVertexAttribP2uiv; +#define glVertexAttribP2uiv glad_debug_glVertexAttribP2uiv +typedef void (APIENTRYP PFNGLVERTEXATTRIBP3UIPROC)(GLuint index, GLenum type, GLboolean normalized, GLuint value); +GLAPI PFNGLVERTEXATTRIBP3UIPROC glad_glVertexAttribP3ui; +GLAPI PFNGLVERTEXATTRIBP3UIPROC glad_debug_glVertexAttribP3ui; +#define glVertexAttribP3ui glad_debug_glVertexAttribP3ui +typedef void (APIENTRYP PFNGLVERTEXATTRIBP3UIVPROC)(GLuint index, GLenum type, GLboolean normalized, const GLuint *value); +GLAPI PFNGLVERTEXATTRIBP3UIVPROC glad_glVertexAttribP3uiv; +GLAPI PFNGLVERTEXATTRIBP3UIVPROC glad_debug_glVertexAttribP3uiv; +#define glVertexAttribP3uiv glad_debug_glVertexAttribP3uiv +typedef void (APIENTRYP PFNGLVERTEXATTRIBP4UIPROC)(GLuint index, GLenum type, GLboolean normalized, GLuint value); +GLAPI PFNGLVERTEXATTRIBP4UIPROC glad_glVertexAttribP4ui; +GLAPI PFNGLVERTEXATTRIBP4UIPROC glad_debug_glVertexAttribP4ui; +#define glVertexAttribP4ui glad_debug_glVertexAttribP4ui +typedef void (APIENTRYP PFNGLVERTEXATTRIBP4UIVPROC)(GLuint index, GLenum type, GLboolean normalized, const GLuint *value); +GLAPI PFNGLVERTEXATTRIBP4UIVPROC glad_glVertexAttribP4uiv; +GLAPI PFNGLVERTEXATTRIBP4UIVPROC glad_debug_glVertexAttribP4uiv; +#define glVertexAttribP4uiv glad_debug_glVertexAttribP4uiv +typedef void (APIENTRYP PFNGLVERTEXP2UIPROC)(GLenum type, GLuint value); +GLAPI PFNGLVERTEXP2UIPROC glad_glVertexP2ui; +GLAPI PFNGLVERTEXP2UIPROC glad_debug_glVertexP2ui; +#define glVertexP2ui glad_debug_glVertexP2ui +typedef void (APIENTRYP PFNGLVERTEXP2UIVPROC)(GLenum type, const GLuint *value); +GLAPI PFNGLVERTEXP2UIVPROC glad_glVertexP2uiv; +GLAPI PFNGLVERTEXP2UIVPROC glad_debug_glVertexP2uiv; +#define glVertexP2uiv glad_debug_glVertexP2uiv +typedef void (APIENTRYP PFNGLVERTEXP3UIPROC)(GLenum type, GLuint value); +GLAPI PFNGLVERTEXP3UIPROC glad_glVertexP3ui; +GLAPI PFNGLVERTEXP3UIPROC glad_debug_glVertexP3ui; +#define glVertexP3ui glad_debug_glVertexP3ui +typedef void (APIENTRYP PFNGLVERTEXP3UIVPROC)(GLenum type, const GLuint *value); +GLAPI PFNGLVERTEXP3UIVPROC glad_glVertexP3uiv; +GLAPI PFNGLVERTEXP3UIVPROC glad_debug_glVertexP3uiv; +#define glVertexP3uiv glad_debug_glVertexP3uiv +typedef void (APIENTRYP PFNGLVERTEXP4UIPROC)(GLenum type, GLuint value); +GLAPI PFNGLVERTEXP4UIPROC glad_glVertexP4ui; +GLAPI PFNGLVERTEXP4UIPROC glad_debug_glVertexP4ui; +#define glVertexP4ui glad_debug_glVertexP4ui +typedef void (APIENTRYP PFNGLVERTEXP4UIVPROC)(GLenum type, const GLuint *value); +GLAPI PFNGLVERTEXP4UIVPROC glad_glVertexP4uiv; +GLAPI PFNGLVERTEXP4UIVPROC glad_debug_glVertexP4uiv; +#define glVertexP4uiv 
glad_debug_glVertexP4uiv +typedef void (APIENTRYP PFNGLTEXCOORDP1UIPROC)(GLenum type, GLuint coords); +GLAPI PFNGLTEXCOORDP1UIPROC glad_glTexCoordP1ui; +GLAPI PFNGLTEXCOORDP1UIPROC glad_debug_glTexCoordP1ui; +#define glTexCoordP1ui glad_debug_glTexCoordP1ui +typedef void (APIENTRYP PFNGLTEXCOORDP1UIVPROC)(GLenum type, const GLuint *coords); +GLAPI PFNGLTEXCOORDP1UIVPROC glad_glTexCoordP1uiv; +GLAPI PFNGLTEXCOORDP1UIVPROC glad_debug_glTexCoordP1uiv; +#define glTexCoordP1uiv glad_debug_glTexCoordP1uiv +typedef void (APIENTRYP PFNGLTEXCOORDP2UIPROC)(GLenum type, GLuint coords); +GLAPI PFNGLTEXCOORDP2UIPROC glad_glTexCoordP2ui; +GLAPI PFNGLTEXCOORDP2UIPROC glad_debug_glTexCoordP2ui; +#define glTexCoordP2ui glad_debug_glTexCoordP2ui +typedef void (APIENTRYP PFNGLTEXCOORDP2UIVPROC)(GLenum type, const GLuint *coords); +GLAPI PFNGLTEXCOORDP2UIVPROC glad_glTexCoordP2uiv; +GLAPI PFNGLTEXCOORDP2UIVPROC glad_debug_glTexCoordP2uiv; +#define glTexCoordP2uiv glad_debug_glTexCoordP2uiv +typedef void (APIENTRYP PFNGLTEXCOORDP3UIPROC)(GLenum type, GLuint coords); +GLAPI PFNGLTEXCOORDP3UIPROC glad_glTexCoordP3ui; +GLAPI PFNGLTEXCOORDP3UIPROC glad_debug_glTexCoordP3ui; +#define glTexCoordP3ui glad_debug_glTexCoordP3ui +typedef void (APIENTRYP PFNGLTEXCOORDP3UIVPROC)(GLenum type, const GLuint *coords); +GLAPI PFNGLTEXCOORDP3UIVPROC glad_glTexCoordP3uiv; +GLAPI PFNGLTEXCOORDP3UIVPROC glad_debug_glTexCoordP3uiv; +#define glTexCoordP3uiv glad_debug_glTexCoordP3uiv +typedef void (APIENTRYP PFNGLTEXCOORDP4UIPROC)(GLenum type, GLuint coords); +GLAPI PFNGLTEXCOORDP4UIPROC glad_glTexCoordP4ui; +GLAPI PFNGLTEXCOORDP4UIPROC glad_debug_glTexCoordP4ui; +#define glTexCoordP4ui glad_debug_glTexCoordP4ui +typedef void (APIENTRYP PFNGLTEXCOORDP4UIVPROC)(GLenum type, const GLuint *coords); +GLAPI PFNGLTEXCOORDP4UIVPROC glad_glTexCoordP4uiv; +GLAPI PFNGLTEXCOORDP4UIVPROC glad_debug_glTexCoordP4uiv; +#define glTexCoordP4uiv glad_debug_glTexCoordP4uiv +typedef void (APIENTRYP PFNGLMULTITEXCOORDP1UIPROC)(GLenum texture, GLenum type, GLuint coords); +GLAPI PFNGLMULTITEXCOORDP1UIPROC glad_glMultiTexCoordP1ui; +GLAPI PFNGLMULTITEXCOORDP1UIPROC glad_debug_glMultiTexCoordP1ui; +#define glMultiTexCoordP1ui glad_debug_glMultiTexCoordP1ui +typedef void (APIENTRYP PFNGLMULTITEXCOORDP1UIVPROC)(GLenum texture, GLenum type, const GLuint *coords); +GLAPI PFNGLMULTITEXCOORDP1UIVPROC glad_glMultiTexCoordP1uiv; +GLAPI PFNGLMULTITEXCOORDP1UIVPROC glad_debug_glMultiTexCoordP1uiv; +#define glMultiTexCoordP1uiv glad_debug_glMultiTexCoordP1uiv +typedef void (APIENTRYP PFNGLMULTITEXCOORDP2UIPROC)(GLenum texture, GLenum type, GLuint coords); +GLAPI PFNGLMULTITEXCOORDP2UIPROC glad_glMultiTexCoordP2ui; +GLAPI PFNGLMULTITEXCOORDP2UIPROC glad_debug_glMultiTexCoordP2ui; +#define glMultiTexCoordP2ui glad_debug_glMultiTexCoordP2ui +typedef void (APIENTRYP PFNGLMULTITEXCOORDP2UIVPROC)(GLenum texture, GLenum type, const GLuint *coords); +GLAPI PFNGLMULTITEXCOORDP2UIVPROC glad_glMultiTexCoordP2uiv; +GLAPI PFNGLMULTITEXCOORDP2UIVPROC glad_debug_glMultiTexCoordP2uiv; +#define glMultiTexCoordP2uiv glad_debug_glMultiTexCoordP2uiv +typedef void (APIENTRYP PFNGLMULTITEXCOORDP3UIPROC)(GLenum texture, GLenum type, GLuint coords); +GLAPI PFNGLMULTITEXCOORDP3UIPROC glad_glMultiTexCoordP3ui; +GLAPI PFNGLMULTITEXCOORDP3UIPROC glad_debug_glMultiTexCoordP3ui; +#define glMultiTexCoordP3ui glad_debug_glMultiTexCoordP3ui +typedef void (APIENTRYP PFNGLMULTITEXCOORDP3UIVPROC)(GLenum texture, GLenum type, const GLuint *coords); +GLAPI PFNGLMULTITEXCOORDP3UIVPROC 
glad_glMultiTexCoordP3uiv; +GLAPI PFNGLMULTITEXCOORDP3UIVPROC glad_debug_glMultiTexCoordP3uiv; +#define glMultiTexCoordP3uiv glad_debug_glMultiTexCoordP3uiv +typedef void (APIENTRYP PFNGLMULTITEXCOORDP4UIPROC)(GLenum texture, GLenum type, GLuint coords); +GLAPI PFNGLMULTITEXCOORDP4UIPROC glad_glMultiTexCoordP4ui; +GLAPI PFNGLMULTITEXCOORDP4UIPROC glad_debug_glMultiTexCoordP4ui; +#define glMultiTexCoordP4ui glad_debug_glMultiTexCoordP4ui +typedef void (APIENTRYP PFNGLMULTITEXCOORDP4UIVPROC)(GLenum texture, GLenum type, const GLuint *coords); +GLAPI PFNGLMULTITEXCOORDP4UIVPROC glad_glMultiTexCoordP4uiv; +GLAPI PFNGLMULTITEXCOORDP4UIVPROC glad_debug_glMultiTexCoordP4uiv; +#define glMultiTexCoordP4uiv glad_debug_glMultiTexCoordP4uiv +typedef void (APIENTRYP PFNGLNORMALP3UIPROC)(GLenum type, GLuint coords); +GLAPI PFNGLNORMALP3UIPROC glad_glNormalP3ui; +GLAPI PFNGLNORMALP3UIPROC glad_debug_glNormalP3ui; +#define glNormalP3ui glad_debug_glNormalP3ui +typedef void (APIENTRYP PFNGLNORMALP3UIVPROC)(GLenum type, const GLuint *coords); +GLAPI PFNGLNORMALP3UIVPROC glad_glNormalP3uiv; +GLAPI PFNGLNORMALP3UIVPROC glad_debug_glNormalP3uiv; +#define glNormalP3uiv glad_debug_glNormalP3uiv +typedef void (APIENTRYP PFNGLCOLORP3UIPROC)(GLenum type, GLuint color); +GLAPI PFNGLCOLORP3UIPROC glad_glColorP3ui; +GLAPI PFNGLCOLORP3UIPROC glad_debug_glColorP3ui; +#define glColorP3ui glad_debug_glColorP3ui +typedef void (APIENTRYP PFNGLCOLORP3UIVPROC)(GLenum type, const GLuint *color); +GLAPI PFNGLCOLORP3UIVPROC glad_glColorP3uiv; +GLAPI PFNGLCOLORP3UIVPROC glad_debug_glColorP3uiv; +#define glColorP3uiv glad_debug_glColorP3uiv +typedef void (APIENTRYP PFNGLCOLORP4UIPROC)(GLenum type, GLuint color); +GLAPI PFNGLCOLORP4UIPROC glad_glColorP4ui; +GLAPI PFNGLCOLORP4UIPROC glad_debug_glColorP4ui; +#define glColorP4ui glad_debug_glColorP4ui +typedef void (APIENTRYP PFNGLCOLORP4UIVPROC)(GLenum type, const GLuint *color); +GLAPI PFNGLCOLORP4UIVPROC glad_glColorP4uiv; +GLAPI PFNGLCOLORP4UIVPROC glad_debug_glColorP4uiv; +#define glColorP4uiv glad_debug_glColorP4uiv +typedef void (APIENTRYP PFNGLSECONDARYCOLORP3UIPROC)(GLenum type, GLuint color); +GLAPI PFNGLSECONDARYCOLORP3UIPROC glad_glSecondaryColorP3ui; +GLAPI PFNGLSECONDARYCOLORP3UIPROC glad_debug_glSecondaryColorP3ui; +#define glSecondaryColorP3ui glad_debug_glSecondaryColorP3ui +typedef void (APIENTRYP PFNGLSECONDARYCOLORP3UIVPROC)(GLenum type, const GLuint *color); +GLAPI PFNGLSECONDARYCOLORP3UIVPROC glad_glSecondaryColorP3uiv; +GLAPI PFNGLSECONDARYCOLORP3UIVPROC glad_debug_glSecondaryColorP3uiv; +#define glSecondaryColorP3uiv glad_debug_glSecondaryColorP3uiv +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/gl/glfw3.h b/gl/glfw3.h new file mode 100755 index 0000000..671ba72 --- /dev/null +++ b/gl/glfw3.h @@ -0,0 +1,5873 @@ +/************************************************************************* + * GLFW 3.3 - www.glfw.org + * A library for OpenGL, window and input + *------------------------------------------------------------------------ + * Copyright (c) 2002-2006 Marcus Geelnard + * Copyright (c) 2006-2019 Camilla Löwy + * + * This software is provided 'as-is', without any express or implied + * warranty. In no event will the authors be held liable for any damages + * arising from the use of this software. + * + * Permission is granted to anyone to use this software for any purpose, + * including commercial applications, and to alter it and redistribute it + * freely, subject to the following restrictions: + * + * 1. 
The origin of this software must not be misrepresented; you must not + * claim that you wrote the original software. If you use this software + * in a product, an acknowledgment in the product documentation would + * be appreciated but is not required. + * + * 2. Altered source versions must be plainly marked as such, and must not + * be misrepresented as being the original software. + * + * 3. This notice may not be removed or altered from any source + * distribution. + * + *************************************************************************/ + +#ifndef _glfw3_h_ +#define _glfw3_h_ + +#ifdef __cplusplus +extern "C" { +#endif + + +/************************************************************************* + * Doxygen documentation + *************************************************************************/ + +/*! @file glfw3.h + * @brief The header of the GLFW 3 API. + * + * This is the header file of the GLFW 3 API. It defines all its types and + * declares all its functions. + * + * For more information about how to use this file, see @ref build_include. + */ +/*! @defgroup context Context reference + * @brief Functions and types related to OpenGL and OpenGL ES contexts. + * + * This is the reference documentation for OpenGL and OpenGL ES context related + * functions. For more task-oriented information, see the @ref context_guide. + */ +/*! @defgroup vulkan Vulkan reference + * @brief Functions and types related to Vulkan. + * + * This is the reference documentation for Vulkan related functions and types. + * For more task-oriented information, see the @ref vulkan_guide. + */ +/*! @defgroup init Initialization, version and error reference + * @brief Functions and types related to initialization and error handling. + * + * This is the reference documentation for initialization and termination of + * the library, version management and error handling. For more task-oriented + * information, see the @ref intro_guide. + */ +/*! @defgroup input Input reference + * @brief Functions and types related to input handling. + * + * This is the reference documentation for input related functions and types. + * For more task-oriented information, see the @ref input_guide. + */ +/*! @defgroup monitor Monitor reference + * @brief Functions and types related to monitors. + * + * This is the reference documentation for monitor related functions and types. + * For more task-oriented information, see the @ref monitor_guide. + */ +/*! @defgroup window Window reference + * @brief Functions and types related to windows. + * + * This is the reference documentation for window related functions and types, + * including creation, deletion and event polling. For more task-oriented + * information, see the @ref window_guide. + */ + + +/************************************************************************* + * Compiler- and platform-specific preprocessor work + *************************************************************************/ + +/* If we are on Windows, we want a single define for it. + */ +#if !defined(_WIN32) && (defined(__WIN32__) || defined(WIN32) || defined(__MINGW32__)) + #define _WIN32 +#endif /* _WIN32 */ + +/* Include <stddef.h> because most Windows GLU headers need wchar_t and + * the macOS OpenGL header blocks the definition of ptrdiff_t by glext.h. + * Include it unconditionally to avoid surprising side-effects. + */ +#include <stddef.h> + +/* Include <stdint.h> because it is needed by Vulkan and related functions. + * Include it unconditionally to avoid surprising side-effects.
+ */ +#include <stdint.h> + +#if defined(GLFW_INCLUDE_VULKAN) + #include <vulkan/vulkan.h> +#endif /* Vulkan header */ + +/* The Vulkan header may have indirectly included windows.h (because of + * VK_USE_PLATFORM_WIN32_KHR) so we offer our replacement symbols after it. + */ + +/* It is customary to use APIENTRY for OpenGL function pointer declarations on + * all platforms. Additionally, the Windows OpenGL header needs APIENTRY. + */ +#if !defined(APIENTRY) + #if defined(_WIN32) + #define APIENTRY __stdcall + #else + #define APIENTRY + #endif + #define GLFW_APIENTRY_DEFINED +#endif /* APIENTRY */ + +/* Some Windows OpenGL headers need this. + */ +#if !defined(WINGDIAPI) && defined(_WIN32) + #define WINGDIAPI __declspec(dllimport) + #define GLFW_WINGDIAPI_DEFINED +#endif /* WINGDIAPI */ + +/* Some Windows GLU headers need this. + */ +#if !defined(CALLBACK) && defined(_WIN32) + #define CALLBACK __stdcall + #define GLFW_CALLBACK_DEFINED +#endif /* CALLBACK */ + +/* Include the chosen OpenGL or OpenGL ES headers. + */ +#if defined(GLFW_INCLUDE_ES1) + + #include <GLES/gl.h> + #if defined(GLFW_INCLUDE_GLEXT) + #include <GLES/glext.h> + #endif + +#elif defined(GLFW_INCLUDE_ES2) + + #include <GLES2/gl2.h> + #if defined(GLFW_INCLUDE_GLEXT) + #include <GLES2/gl2ext.h> + #endif + +#elif defined(GLFW_INCLUDE_ES3) + + #include <GLES3/gl3.h> + #if defined(GLFW_INCLUDE_GLEXT) + #include <GLES2/gl2ext.h> + #endif + +#elif defined(GLFW_INCLUDE_ES31) + + #include <GLES3/gl31.h> + #if defined(GLFW_INCLUDE_GLEXT) + #include <GLES2/gl2ext.h> + #endif + +#elif defined(GLFW_INCLUDE_ES32) + + #include <GLES3/gl32.h> + #if defined(GLFW_INCLUDE_GLEXT) + #include <GLES2/gl2ext.h> + #endif + +#elif defined(GLFW_INCLUDE_GLCOREARB) + + #if defined(__APPLE__) + + #include <OpenGL/gl3.h> + #if defined(GLFW_INCLUDE_GLEXT) + #include <OpenGL/gl3ext.h> + #endif /*GLFW_INCLUDE_GLEXT*/ + + #else /*__APPLE__*/ + + #include <GL/glcorearb.h> + + #endif /*__APPLE__*/ + +#elif !defined(GLFW_INCLUDE_NONE) + + #if defined(__APPLE__) + + #if !defined(GLFW_INCLUDE_GLEXT) + #define GL_GLEXT_LEGACY + #endif + #include <OpenGL/gl.h> + #if defined(GLFW_INCLUDE_GLU) + #include <OpenGL/glu.h> + #endif + + #else /*__APPLE__*/ + + #include <GL/gl.h> + #if defined(GLFW_INCLUDE_GLEXT) + #include <GL/glext.h> + #endif + #if defined(GLFW_INCLUDE_GLU) + #include <GL/glu.h> + #endif + + #endif /*__APPLE__*/ + +#endif /* OpenGL and OpenGL ES headers */ + +#if defined(GLFW_DLL) && defined(_GLFW_BUILD_DLL) + /* GLFW_DLL must be defined by applications that are linking against the DLL + * version of the GLFW library. _GLFW_BUILD_DLL is defined by the GLFW + * configuration header when compiling the DLL version of the library. + */ + #error "You must not have both GLFW_DLL and _GLFW_BUILD_DLL defined" +#endif + +/* GLFWAPI is used to declare public API functions for export + * from the DLL / shared library / dynamic library. + */ +#if defined(_WIN32) && defined(_GLFW_BUILD_DLL) + /* We are building GLFW as a Win32 DLL */ + #define GLFWAPI __declspec(dllexport) +#elif defined(_WIN32) && defined(GLFW_DLL) + /* We are calling GLFW as a Win32 DLL */ + #define GLFWAPI __declspec(dllimport) +#elif defined(__GNUC__) && defined(_GLFW_BUILD_DLL) + /* We are building GLFW as a shared / dynamic library */ + #define GLFWAPI __attribute__((visibility("default"))) +#else + /* We are building or calling GLFW as a static library */ + #define GLFWAPI +#endif + + +/************************************************************************* + * GLFW API tokens + *************************************************************************/ + +/*! @name GLFW version macros + * @{ */ +/*! @brief The major version number of the GLFW library. + * + * This is incremented when the API is changed in non-compatible ways.
+ * @ingroup init + */ +#define GLFW_VERSION_MAJOR 3 +/*! @brief The minor version number of the GLFW library. + * + * This is incremented when features are added to the API but it remains + * backward-compatible. + * @ingroup init + */ +#define GLFW_VERSION_MINOR 3 +/*! @brief The revision number of the GLFW library. + * + * This is incremented when a bug fix release is made that does not contain any + * API changes. + * @ingroup init + */ +#define GLFW_VERSION_REVISION 1 +/*! @} */ + +/*! @brief One. + * + * This is only semantic sugar for the number 1. You can instead use `1` or + * `true` or `_True` or `GL_TRUE` or `VK_TRUE` or anything else that is equal + * to one. + * + * @ingroup init + */ +#define GLFW_TRUE 1 +/*! @brief Zero. + * + * This is only semantic sugar for the number 0. You can instead use `0` or + * `false` or `_False` or `GL_FALSE` or `VK_FALSE` or anything else that is + * equal to zero. + * + * @ingroup init + */ +#define GLFW_FALSE 0 + +/*! @name Key and button actions + * @{ */ +/*! @brief The key or mouse button was released. + * + * The key or mouse button was released. + * + * @ingroup input + */ +#define GLFW_RELEASE 0 +/*! @brief The key or mouse button was pressed. + * + * The key or mouse button was pressed. + * + * @ingroup input + */ +#define GLFW_PRESS 1 +/*! @brief The key was held down until it repeated. + * + * The key was held down until it repeated. + * + * @ingroup input + */ +#define GLFW_REPEAT 2 +/*! @} */ + +/*! @defgroup hat_state Joystick hat states + * @brief Joystick hat states. + * + * See [joystick hat input](@ref joystick_hat) for how these are used. + * + * @ingroup input + * @{ */ +#define GLFW_HAT_CENTERED 0 +#define GLFW_HAT_UP 1 +#define GLFW_HAT_RIGHT 2 +#define GLFW_HAT_DOWN 4 +#define GLFW_HAT_LEFT 8 +#define GLFW_HAT_RIGHT_UP (GLFW_HAT_RIGHT | GLFW_HAT_UP) +#define GLFW_HAT_RIGHT_DOWN (GLFW_HAT_RIGHT | GLFW_HAT_DOWN) +#define GLFW_HAT_LEFT_UP (GLFW_HAT_LEFT | GLFW_HAT_UP) +#define GLFW_HAT_LEFT_DOWN (GLFW_HAT_LEFT | GLFW_HAT_DOWN) +/*! @} */ + +/*! @defgroup keys Keyboard keys + * @brief Keyboard key IDs. + * + * See [key input](@ref input_key) for how these are used. + * + * These key codes are inspired by the _USB HID Usage Tables v1.12_ (p. 53-60), + * but re-arranged to map to 7-bit ASCII for printable keys (function keys are + * put in the 256+ range). + * + * The naming of the key codes follow these rules: + * - The US keyboard layout is used + * - Names of printable alpha-numeric characters are used (e.g. "A", "R", + * "3", etc.) + * - For non-alphanumeric characters, Unicode:ish names are used (e.g. + * "COMMA", "LEFT_SQUARE_BRACKET", etc.). Note that some names do not + * correspond to the Unicode standard (usually for brevity) + * - Keys that lack a clear US mapping are named "WORLD_x" + * - For non-printable keys, custom names are used (e.g. "F4", + * "BACKSPACE", etc.) + * + * @ingroup input + * @{ + */ + +/* The unknown key */ +#define GLFW_KEY_UNKNOWN -1 + +/* Printable keys */ +#define GLFW_KEY_SPACE 32 +#define GLFW_KEY_APOSTROPHE 39 /* ' */ +#define GLFW_KEY_COMMA 44 /* , */ +#define GLFW_KEY_MINUS 45 /* - */ +#define GLFW_KEY_PERIOD 46 /* . 
*/ +#define GLFW_KEY_SLASH 47 /* / */ +#define GLFW_KEY_0 48 +#define GLFW_KEY_1 49 +#define GLFW_KEY_2 50 +#define GLFW_KEY_3 51 +#define GLFW_KEY_4 52 +#define GLFW_KEY_5 53 +#define GLFW_KEY_6 54 +#define GLFW_KEY_7 55 +#define GLFW_KEY_8 56 +#define GLFW_KEY_9 57 +#define GLFW_KEY_SEMICOLON 59 /* ; */ +#define GLFW_KEY_EQUAL 61 /* = */ +#define GLFW_KEY_A 65 +#define GLFW_KEY_B 66 +#define GLFW_KEY_C 67 +#define GLFW_KEY_D 68 +#define GLFW_KEY_E 69 +#define GLFW_KEY_F 70 +#define GLFW_KEY_G 71 +#define GLFW_KEY_H 72 +#define GLFW_KEY_I 73 +#define GLFW_KEY_J 74 +#define GLFW_KEY_K 75 +#define GLFW_KEY_L 76 +#define GLFW_KEY_M 77 +#define GLFW_KEY_N 78 +#define GLFW_KEY_O 79 +#define GLFW_KEY_P 80 +#define GLFW_KEY_Q 81 +#define GLFW_KEY_R 82 +#define GLFW_KEY_S 83 +#define GLFW_KEY_T 84 +#define GLFW_KEY_U 85 +#define GLFW_KEY_V 86 +#define GLFW_KEY_W 87 +#define GLFW_KEY_X 88 +#define GLFW_KEY_Y 89 +#define GLFW_KEY_Z 90 +#define GLFW_KEY_LEFT_BRACKET 91 /* [ */ +#define GLFW_KEY_BACKSLASH 92 /* \ */ +#define GLFW_KEY_RIGHT_BRACKET 93 /* ] */ +#define GLFW_KEY_GRAVE_ACCENT 96 /* ` */ +#define GLFW_KEY_WORLD_1 161 /* non-US #1 */ +#define GLFW_KEY_WORLD_2 162 /* non-US #2 */ + +/* Function keys */ +#define GLFW_KEY_ESCAPE 256 +#define GLFW_KEY_ENTER 257 +#define GLFW_KEY_TAB 258 +#define GLFW_KEY_BACKSPACE 259 +#define GLFW_KEY_INSERT 260 +#define GLFW_KEY_DELETE 261 +#define GLFW_KEY_RIGHT 262 +#define GLFW_KEY_LEFT 263 +#define GLFW_KEY_DOWN 264 +#define GLFW_KEY_UP 265 +#define GLFW_KEY_PAGE_UP 266 +#define GLFW_KEY_PAGE_DOWN 267 +#define GLFW_KEY_HOME 268 +#define GLFW_KEY_END 269 +#define GLFW_KEY_CAPS_LOCK 280 +#define GLFW_KEY_SCROLL_LOCK 281 +#define GLFW_KEY_NUM_LOCK 282 +#define GLFW_KEY_PRINT_SCREEN 283 +#define GLFW_KEY_PAUSE 284 +#define GLFW_KEY_F1 290 +#define GLFW_KEY_F2 291 +#define GLFW_KEY_F3 292 +#define GLFW_KEY_F4 293 +#define GLFW_KEY_F5 294 +#define GLFW_KEY_F6 295 +#define GLFW_KEY_F7 296 +#define GLFW_KEY_F8 297 +#define GLFW_KEY_F9 298 +#define GLFW_KEY_F10 299 +#define GLFW_KEY_F11 300 +#define GLFW_KEY_F12 301 +#define GLFW_KEY_F13 302 +#define GLFW_KEY_F14 303 +#define GLFW_KEY_F15 304 +#define GLFW_KEY_F16 305 +#define GLFW_KEY_F17 306 +#define GLFW_KEY_F18 307 +#define GLFW_KEY_F19 308 +#define GLFW_KEY_F20 309 +#define GLFW_KEY_F21 310 +#define GLFW_KEY_F22 311 +#define GLFW_KEY_F23 312 +#define GLFW_KEY_F24 313 +#define GLFW_KEY_F25 314 +#define GLFW_KEY_KP_0 320 +#define GLFW_KEY_KP_1 321 +#define GLFW_KEY_KP_2 322 +#define GLFW_KEY_KP_3 323 +#define GLFW_KEY_KP_4 324 +#define GLFW_KEY_KP_5 325 +#define GLFW_KEY_KP_6 326 +#define GLFW_KEY_KP_7 327 +#define GLFW_KEY_KP_8 328 +#define GLFW_KEY_KP_9 329 +#define GLFW_KEY_KP_DECIMAL 330 +#define GLFW_KEY_KP_DIVIDE 331 +#define GLFW_KEY_KP_MULTIPLY 332 +#define GLFW_KEY_KP_SUBTRACT 333 +#define GLFW_KEY_KP_ADD 334 +#define GLFW_KEY_KP_ENTER 335 +#define GLFW_KEY_KP_EQUAL 336 +#define GLFW_KEY_LEFT_SHIFT 340 +#define GLFW_KEY_LEFT_CONTROL 341 +#define GLFW_KEY_LEFT_ALT 342 +#define GLFW_KEY_LEFT_SUPER 343 +#define GLFW_KEY_RIGHT_SHIFT 344 +#define GLFW_KEY_RIGHT_CONTROL 345 +#define GLFW_KEY_RIGHT_ALT 346 +#define GLFW_KEY_RIGHT_SUPER 347 +#define GLFW_KEY_MENU 348 + +#define GLFW_KEY_LAST GLFW_KEY_MENU + +/*! @} */ + +/*! @defgroup mods Modifier key flags + * @brief Modifier key flags. + * + * See [key input](@ref input_key) for how these are used. + * + * @ingroup input + * @{ */ + +/*! @brief If this bit is set one or more Shift keys were held down. 
+ * + * If this bit is set one or more Shift keys were held down. + */ +#define GLFW_MOD_SHIFT 0x0001 +/*! @brief If this bit is set one or more Control keys were held down. + * + * If this bit is set one or more Control keys were held down. + */ +#define GLFW_MOD_CONTROL 0x0002 +/*! @brief If this bit is set one or more Alt keys were held down. + * + * If this bit is set one or more Alt keys were held down. + */ +#define GLFW_MOD_ALT 0x0004 +/*! @brief If this bit is set one or more Super keys were held down. + * + * If this bit is set one or more Super keys were held down. + */ +#define GLFW_MOD_SUPER 0x0008 +/*! @brief If this bit is set the Caps Lock key is enabled. + * + * If this bit is set the Caps Lock key is enabled and the @ref + * GLFW_LOCK_KEY_MODS input mode is set. + */ +#define GLFW_MOD_CAPS_LOCK 0x0010 +/*! @brief If this bit is set the Num Lock key is enabled. + * + * If this bit is set the Num Lock key is enabled and the @ref + * GLFW_LOCK_KEY_MODS input mode is set. + */ +#define GLFW_MOD_NUM_LOCK 0x0020 + +/*! @} */ + +/*! @defgroup buttons Mouse buttons + * @brief Mouse button IDs. + * + * See [mouse button input](@ref input_mouse_button) for how these are used. + * + * @ingroup input + * @{ */ +#define GLFW_MOUSE_BUTTON_1 0 +#define GLFW_MOUSE_BUTTON_2 1 +#define GLFW_MOUSE_BUTTON_3 2 +#define GLFW_MOUSE_BUTTON_4 3 +#define GLFW_MOUSE_BUTTON_5 4 +#define GLFW_MOUSE_BUTTON_6 5 +#define GLFW_MOUSE_BUTTON_7 6 +#define GLFW_MOUSE_BUTTON_8 7 +#define GLFW_MOUSE_BUTTON_LAST GLFW_MOUSE_BUTTON_8 +#define GLFW_MOUSE_BUTTON_LEFT GLFW_MOUSE_BUTTON_1 +#define GLFW_MOUSE_BUTTON_RIGHT GLFW_MOUSE_BUTTON_2 +#define GLFW_MOUSE_BUTTON_MIDDLE GLFW_MOUSE_BUTTON_3 +/*! @} */ + +/*! @defgroup joysticks Joysticks + * @brief Joystick IDs. + * + * See [joystick input](@ref joystick) for how these are used. + * + * @ingroup input + * @{ */ +#define GLFW_JOYSTICK_1 0 +#define GLFW_JOYSTICK_2 1 +#define GLFW_JOYSTICK_3 2 +#define GLFW_JOYSTICK_4 3 +#define GLFW_JOYSTICK_5 4 +#define GLFW_JOYSTICK_6 5 +#define GLFW_JOYSTICK_7 6 +#define GLFW_JOYSTICK_8 7 +#define GLFW_JOYSTICK_9 8 +#define GLFW_JOYSTICK_10 9 +#define GLFW_JOYSTICK_11 10 +#define GLFW_JOYSTICK_12 11 +#define GLFW_JOYSTICK_13 12 +#define GLFW_JOYSTICK_14 13 +#define GLFW_JOYSTICK_15 14 +#define GLFW_JOYSTICK_16 15 +#define GLFW_JOYSTICK_LAST GLFW_JOYSTICK_16 +/*! @} */ + +/*! @defgroup gamepad_buttons Gamepad buttons + * @brief Gamepad buttons. + * + * See @ref gamepad for how these are used. + * + * @ingroup input + * @{ */ +#define GLFW_GAMEPAD_BUTTON_A 0 +#define GLFW_GAMEPAD_BUTTON_B 1 +#define GLFW_GAMEPAD_BUTTON_X 2 +#define GLFW_GAMEPAD_BUTTON_Y 3 +#define GLFW_GAMEPAD_BUTTON_LEFT_BUMPER 4 +#define GLFW_GAMEPAD_BUTTON_RIGHT_BUMPER 5 +#define GLFW_GAMEPAD_BUTTON_BACK 6 +#define GLFW_GAMEPAD_BUTTON_START 7 +#define GLFW_GAMEPAD_BUTTON_GUIDE 8 +#define GLFW_GAMEPAD_BUTTON_LEFT_THUMB 9 +#define GLFW_GAMEPAD_BUTTON_RIGHT_THUMB 10 +#define GLFW_GAMEPAD_BUTTON_DPAD_UP 11 +#define GLFW_GAMEPAD_BUTTON_DPAD_RIGHT 12 +#define GLFW_GAMEPAD_BUTTON_DPAD_DOWN 13 +#define GLFW_GAMEPAD_BUTTON_DPAD_LEFT 14 +#define GLFW_GAMEPAD_BUTTON_LAST GLFW_GAMEPAD_BUTTON_DPAD_LEFT + +#define GLFW_GAMEPAD_BUTTON_CROSS GLFW_GAMEPAD_BUTTON_A +#define GLFW_GAMEPAD_BUTTON_CIRCLE GLFW_GAMEPAD_BUTTON_B +#define GLFW_GAMEPAD_BUTTON_SQUARE GLFW_GAMEPAD_BUTTON_X +#define GLFW_GAMEPAD_BUTTON_TRIANGLE GLFW_GAMEPAD_BUTTON_Y +/*! @} */ + +/*! @defgroup gamepad_axes Gamepad axes + * @brief Gamepad axes. + * + * See @ref gamepad for how these are used. 
+ * + * @ingroup input + * @{ */ +#define GLFW_GAMEPAD_AXIS_LEFT_X 0 +#define GLFW_GAMEPAD_AXIS_LEFT_Y 1 +#define GLFW_GAMEPAD_AXIS_RIGHT_X 2 +#define GLFW_GAMEPAD_AXIS_RIGHT_Y 3 +#define GLFW_GAMEPAD_AXIS_LEFT_TRIGGER 4 +#define GLFW_GAMEPAD_AXIS_RIGHT_TRIGGER 5 +#define GLFW_GAMEPAD_AXIS_LAST GLFW_GAMEPAD_AXIS_RIGHT_TRIGGER +/*! @} */ + +/*! @defgroup errors Error codes + * @brief Error codes. + * + * See [error handling](@ref error_handling) for how these are used. + * + * @ingroup init + * @{ */ +/*! @brief No error has occurred. + * + * No error has occurred. + * + * @analysis Yay. + */ +#define GLFW_NO_ERROR 0 +/*! @brief GLFW has not been initialized. + * + * This occurs if a GLFW function was called that must not be called unless the + * library is [initialized](@ref intro_init). + * + * @analysis Application programmer error. Initialize GLFW before calling any + * function that requires initialization. + */ +#define GLFW_NOT_INITIALIZED 0x00010001 +/*! @brief No context is current for this thread. + * + * This occurs if a GLFW function was called that needs and operates on the + * current OpenGL or OpenGL ES context but no context is current on the calling + * thread. One such function is @ref glfwSwapInterval. + * + * @analysis Application programmer error. Ensure a context is current before + * calling functions that require a current context. + */ +#define GLFW_NO_CURRENT_CONTEXT 0x00010002 +/*! @brief One of the arguments to the function was an invalid enum value. + * + * One of the arguments to the function was an invalid enum value, for example + * requesting @ref GLFW_RED_BITS with @ref glfwGetWindowAttrib. + * + * @analysis Application programmer error. Fix the offending call. + */ +#define GLFW_INVALID_ENUM 0x00010003 +/*! @brief One of the arguments to the function was an invalid value. + * + * One of the arguments to the function was an invalid value, for example + * requesting a non-existent OpenGL or OpenGL ES version like 2.7. + * + * Requesting a valid but unavailable OpenGL or OpenGL ES version will instead + * result in a @ref GLFW_VERSION_UNAVAILABLE error. + * + * @analysis Application programmer error. Fix the offending call. + */ +#define GLFW_INVALID_VALUE 0x00010004 +/*! @brief A memory allocation failed. + * + * A memory allocation failed. + * + * @analysis A bug in GLFW or the underlying operating system. Report the bug + * to our [issue tracker](https://github.com/glfw/glfw/issues). + */ +#define GLFW_OUT_OF_MEMORY 0x00010005 +/*! @brief GLFW could not find support for the requested API on the system. + * + * GLFW could not find support for the requested API on the system. + * + * @analysis The installed graphics driver does not support the requested + * API, or does not support it via the chosen context creation backend. + * Below are a few examples. + * + * @par + * Some pre-installed Windows graphics drivers do not support OpenGL. AMD only + * supports OpenGL ES via EGL, while Nvidia and Intel only support it via + * a WGL or GLX extension. macOS does not provide OpenGL ES at all. The Mesa + * EGL, OpenGL and OpenGL ES libraries do not interface with the Nvidia binary + * driver. Older graphics drivers do not support Vulkan. + */ +#define GLFW_API_UNAVAILABLE 0x00010006 +/*! @brief The requested OpenGL or OpenGL ES version is not available. + * + * The requested OpenGL or OpenGL ES version (including any requested context + * or framebuffer hints) is not available on this machine. + * + * @analysis The machine does not support your requirements. 
If your + * application is sufficiently flexible, downgrade your requirements and try + * again. Otherwise, inform the user that their machine does not match your + * requirements. + * + * @par + * Future invalid OpenGL and OpenGL ES versions, for example OpenGL 4.8 if 5.0 + * comes out before the 4.x series gets that far, also fail with this error and + * not @ref GLFW_INVALID_VALUE, because GLFW cannot know what future versions + * will exist. + */ +#define GLFW_VERSION_UNAVAILABLE 0x00010007 +/*! @brief A platform-specific error occurred that does not match any of the + * more specific categories. + * + * A platform-specific error occurred that does not match any of the more + * specific categories. + * + * @analysis A bug or configuration error in GLFW, the underlying operating + * system or its drivers, or a lack of required resources. Report the issue to + * our [issue tracker](https://github.com/glfw/glfw/issues). + */ +#define GLFW_PLATFORM_ERROR 0x00010008 +/*! @brief The requested format is not supported or available. + * + * If emitted during window creation, the requested pixel format is not + * supported. + * + * If emitted when querying the clipboard, the contents of the clipboard could + * not be converted to the requested format. + * + * @analysis If emitted during window creation, one or more + * [hard constraints](@ref window_hints_hard) did not match any of the + * available pixel formats. If your application is sufficiently flexible, + * downgrade your requirements and try again. Otherwise, inform the user that + * their machine does not match your requirements. + * + * @par + * If emitted when querying the clipboard, ignore the error or report it to + * the user, as appropriate. + */ +#define GLFW_FORMAT_UNAVAILABLE 0x00010009 +/*! @brief The specified window does not have an OpenGL or OpenGL ES context. + * + * A window that does not have an OpenGL or OpenGL ES context was passed to + * a function that requires it to have one. + * + * @analysis Application programmer error. Fix the offending call. + */ +#define GLFW_NO_WINDOW_CONTEXT 0x0001000A +/*! @} */ + +/*! @addtogroup window + * @{ */ +/*! @brief Input focus window hint and attribute + * + * Input focus [window hint](@ref GLFW_FOCUSED_hint) or + * [window attribute](@ref GLFW_FOCUSED_attrib). + */ +#define GLFW_FOCUSED 0x00020001 +/*! @brief Window iconification window attribute + * + * Window iconification [window attribute](@ref GLFW_ICONIFIED_attrib). + */ +#define GLFW_ICONIFIED 0x00020002 +/*! @brief Window resize-ability window hint and attribute + * + * Window resize-ability [window hint](@ref GLFW_RESIZABLE_hint) and + * [window attribute](@ref GLFW_RESIZABLE_attrib). + */ +#define GLFW_RESIZABLE 0x00020003 +/*! @brief Window visibility window hint and attribute + * + * Window visibility [window hint](@ref GLFW_VISIBLE_hint) and + * [window attribute](@ref GLFW_VISIBLE_attrib). + */ +#define GLFW_VISIBLE 0x00020004 +/*! @brief Window decoration window hint and attribute + * + * Window decoration [window hint](@ref GLFW_DECORATED_hint) and + * [window attribute](@ref GLFW_DECORATED_attrib). + */ +#define GLFW_DECORATED 0x00020005 +/*! @brief Window auto-iconification window hint and attribute + * + * Window auto-iconification [window hint](@ref GLFW_AUTO_ICONIFY_hint) and + * [window attribute](@ref GLFW_AUTO_ICONIFY_attrib). + */ +#define GLFW_AUTO_ICONIFY 0x00020006 +/*! 
@brief Window decoration window hint and attribute + * + * Window decoration [window hint](@ref GLFW_FLOATING_hint) and + * [window attribute](@ref GLFW_FLOATING_attrib). + */ +#define GLFW_FLOATING 0x00020007 +/*! @brief Window maximization window hint and attribute + * + * Window maximization [window hint](@ref GLFW_MAXIMIZED_hint) and + * [window attribute](@ref GLFW_MAXIMIZED_attrib). + */ +#define GLFW_MAXIMIZED 0x00020008 +/*! @brief Cursor centering window hint + * + * Cursor centering [window hint](@ref GLFW_CENTER_CURSOR_hint). + */ +#define GLFW_CENTER_CURSOR 0x00020009 +/*! @brief Window framebuffer transparency hint and attribute + * + * Window framebuffer transparency + * [window hint](@ref GLFW_TRANSPARENT_FRAMEBUFFER_hint) and + * [window attribute](@ref GLFW_TRANSPARENT_FRAMEBUFFER_attrib). + */ +#define GLFW_TRANSPARENT_FRAMEBUFFER 0x0002000A +/*! @brief Mouse cursor hover window attribute. + * + * Mouse cursor hover [window attribute](@ref GLFW_HOVERED_attrib). + */ +#define GLFW_HOVERED 0x0002000B +/*! @brief Input focus on calling show window hint and attribute + * + * Input focus [window hint](@ref GLFW_FOCUS_ON_SHOW_hint) or + * [window attribute](@ref GLFW_FOCUS_ON_SHOW_attrib). + */ +#define GLFW_FOCUS_ON_SHOW 0x0002000C + +/*! @brief Framebuffer bit depth hint. + * + * Framebuffer bit depth [hint](@ref GLFW_RED_BITS). + */ +#define GLFW_RED_BITS 0x00021001 +/*! @brief Framebuffer bit depth hint. + * + * Framebuffer bit depth [hint](@ref GLFW_GREEN_BITS). + */ +#define GLFW_GREEN_BITS 0x00021002 +/*! @brief Framebuffer bit depth hint. + * + * Framebuffer bit depth [hint](@ref GLFW_BLUE_BITS). + */ +#define GLFW_BLUE_BITS 0x00021003 +/*! @brief Framebuffer bit depth hint. + * + * Framebuffer bit depth [hint](@ref GLFW_ALPHA_BITS). + */ +#define GLFW_ALPHA_BITS 0x00021004 +/*! @brief Framebuffer bit depth hint. + * + * Framebuffer bit depth [hint](@ref GLFW_DEPTH_BITS). + */ +#define GLFW_DEPTH_BITS 0x00021005 +/*! @brief Framebuffer bit depth hint. + * + * Framebuffer bit depth [hint](@ref GLFW_STENCIL_BITS). + */ +#define GLFW_STENCIL_BITS 0x00021006 +/*! @brief Framebuffer bit depth hint. + * + * Framebuffer bit depth [hint](@ref GLFW_ACCUM_RED_BITS). + */ +#define GLFW_ACCUM_RED_BITS 0x00021007 +/*! @brief Framebuffer bit depth hint. + * + * Framebuffer bit depth [hint](@ref GLFW_ACCUM_GREEN_BITS). + */ +#define GLFW_ACCUM_GREEN_BITS 0x00021008 +/*! @brief Framebuffer bit depth hint. + * + * Framebuffer bit depth [hint](@ref GLFW_ACCUM_BLUE_BITS). + */ +#define GLFW_ACCUM_BLUE_BITS 0x00021009 +/*! @brief Framebuffer bit depth hint. + * + * Framebuffer bit depth [hint](@ref GLFW_ACCUM_ALPHA_BITS). + */ +#define GLFW_ACCUM_ALPHA_BITS 0x0002100A +/*! @brief Framebuffer auxiliary buffer hint. + * + * Framebuffer auxiliary buffer [hint](@ref GLFW_AUX_BUFFERS). + */ +#define GLFW_AUX_BUFFERS 0x0002100B +/*! @brief OpenGL stereoscopic rendering hint. + * + * OpenGL stereoscopic rendering [hint](@ref GLFW_STEREO). + */ +#define GLFW_STEREO 0x0002100C +/*! @brief Framebuffer MSAA samples hint. + * + * Framebuffer MSAA samples [hint](@ref GLFW_SAMPLES). + */ +#define GLFW_SAMPLES 0x0002100D +/*! @brief Framebuffer sRGB hint. + * + * Framebuffer sRGB [hint](@ref GLFW_SRGB_CAPABLE). + */ +#define GLFW_SRGB_CAPABLE 0x0002100E +/*! @brief Monitor refresh rate hint. + * + * Monitor refresh rate [hint](@ref GLFW_REFRESH_RATE). + */ +#define GLFW_REFRESH_RATE 0x0002100F +/*! @brief Framebuffer double buffering hint. 
+ * + * Framebuffer double buffering [hint](@ref GLFW_DOUBLEBUFFER). + */ +#define GLFW_DOUBLEBUFFER 0x00021010 + +/*! @brief Context client API hint and attribute. + * + * Context client API [hint](@ref GLFW_CLIENT_API_hint) and + * [attribute](@ref GLFW_CLIENT_API_attrib). + */ +#define GLFW_CLIENT_API 0x00022001 +/*! @brief Context client API major version hint and attribute. + * + * Context client API major version [hint](@ref GLFW_CONTEXT_VERSION_MAJOR_hint) + * and [attribute](@ref GLFW_CONTEXT_VERSION_MAJOR_attrib). + */ +#define GLFW_CONTEXT_VERSION_MAJOR 0x00022002 +/*! @brief Context client API minor version hint and attribute. + * + * Context client API minor version [hint](@ref GLFW_CONTEXT_VERSION_MINOR_hint) + * and [attribute](@ref GLFW_CONTEXT_VERSION_MINOR_attrib). + */ +#define GLFW_CONTEXT_VERSION_MINOR 0x00022003 +/*! @brief Context client API revision number hint and attribute. + * + * Context client API revision number + * [attribute](@ref GLFW_CONTEXT_REVISION_attrib). + */ +#define GLFW_CONTEXT_REVISION 0x00022004 +/*! @brief Context robustness hint and attribute. + * + * Context client API revision number [hint](@ref GLFW_CONTEXT_ROBUSTNESS_hint) + * and [attribute](@ref GLFW_CONTEXT_ROBUSTNESS_attrib). + */ +#define GLFW_CONTEXT_ROBUSTNESS 0x00022005 +/*! @brief OpenGL forward-compatibility hint and attribute. + * + * OpenGL forward-compatibility [hint](@ref GLFW_OPENGL_FORWARD_COMPAT_hint) + * and [attribute](@ref GLFW_OPENGL_FORWARD_COMPAT_attrib). + */ +#define GLFW_OPENGL_FORWARD_COMPAT 0x00022006 +/*! @brief OpenGL debug context hint and attribute. + * + * OpenGL debug context [hint](@ref GLFW_OPENGL_DEBUG_CONTEXT_hint) and + * [attribute](@ref GLFW_OPENGL_DEBUG_CONTEXT_attrib). + */ +#define GLFW_OPENGL_DEBUG_CONTEXT 0x00022007 +/*! @brief OpenGL profile hint and attribute. + * + * OpenGL profile [hint](@ref GLFW_OPENGL_PROFILE_hint) and + * [attribute](@ref GLFW_OPENGL_PROFILE_attrib). + */ +#define GLFW_OPENGL_PROFILE 0x00022008 +/*! @brief Context flush-on-release hint and attribute. + * + * Context flush-on-release [hint](@ref GLFW_CONTEXT_RELEASE_BEHAVIOR_hint) and + * [attribute](@ref GLFW_CONTEXT_RELEASE_BEHAVIOR_attrib). + */ +#define GLFW_CONTEXT_RELEASE_BEHAVIOR 0x00022009 +/*! @brief Context error suppression hint and attribute. + * + * Context error suppression [hint](@ref GLFW_CONTEXT_NO_ERROR_hint) and + * [attribute](@ref GLFW_CONTEXT_NO_ERROR_attrib). + */ +#define GLFW_CONTEXT_NO_ERROR 0x0002200A +/*! @brief Context creation API hint and attribute. + * + * Context creation API [hint](@ref GLFW_CONTEXT_CREATION_API_hint) and + * [attribute](@ref GLFW_CONTEXT_CREATION_API_attrib). + */ +#define GLFW_CONTEXT_CREATION_API 0x0002200B +/*! @brief Window content area scaling window + * [window hint](@ref GLFW_SCALE_TO_MONITOR). + */ +#define GLFW_SCALE_TO_MONITOR 0x0002200C +/*! @brief macOS specific + * [window hint](@ref GLFW_COCOA_RETINA_FRAMEBUFFER_hint). + */ +#define GLFW_COCOA_RETINA_FRAMEBUFFER 0x00023001 +/*! @brief macOS specific + * [window hint](@ref GLFW_COCOA_FRAME_NAME_hint). + */ +#define GLFW_COCOA_FRAME_NAME 0x00023002 +/*! @brief macOS specific + * [window hint](@ref GLFW_COCOA_GRAPHICS_SWITCHING_hint). + */ +#define GLFW_COCOA_GRAPHICS_SWITCHING 0x00023003 +/*! @brief X11 specific + * [window hint](@ref GLFW_X11_CLASS_NAME_hint). + */ +#define GLFW_X11_CLASS_NAME 0x00024001 +/*! @brief X11 specific + * [window hint](@ref GLFW_X11_CLASS_NAME_hint). + */ +#define GLFW_X11_INSTANCE_NAME 0x00024002 +/*! 
@} */ + +#define GLFW_NO_API 0 +#define GLFW_OPENGL_API 0x00030001 +#define GLFW_OPENGL_ES_API 0x00030002 + +#define GLFW_NO_ROBUSTNESS 0 +#define GLFW_NO_RESET_NOTIFICATION 0x00031001 +#define GLFW_LOSE_CONTEXT_ON_RESET 0x00031002 + +#define GLFW_OPENGL_ANY_PROFILE 0 +#define GLFW_OPENGL_CORE_PROFILE 0x00032001 +#define GLFW_OPENGL_COMPAT_PROFILE 0x00032002 + +#define GLFW_CURSOR 0x00033001 +#define GLFW_STICKY_KEYS 0x00033002 +#define GLFW_STICKY_MOUSE_BUTTONS 0x00033003 +#define GLFW_LOCK_KEY_MODS 0x00033004 +#define GLFW_RAW_MOUSE_MOTION 0x00033005 + +#define GLFW_CURSOR_NORMAL 0x00034001 +#define GLFW_CURSOR_HIDDEN 0x00034002 +#define GLFW_CURSOR_DISABLED 0x00034003 + +#define GLFW_ANY_RELEASE_BEHAVIOR 0 +#define GLFW_RELEASE_BEHAVIOR_FLUSH 0x00035001 +#define GLFW_RELEASE_BEHAVIOR_NONE 0x00035002 + +#define GLFW_NATIVE_CONTEXT_API 0x00036001 +#define GLFW_EGL_CONTEXT_API 0x00036002 +#define GLFW_OSMESA_CONTEXT_API 0x00036003 + +/*! @defgroup shapes Standard cursor shapes + * @brief Standard system cursor shapes. + * + * See [standard cursor creation](@ref cursor_standard) for how these are used. + * + * @ingroup input + * @{ */ + +/*! @brief The regular arrow cursor shape. + * + * The regular arrow cursor. + */ +#define GLFW_ARROW_CURSOR 0x00036001 +/*! @brief The text input I-beam cursor shape. + * + * The text input I-beam cursor shape. + */ +#define GLFW_IBEAM_CURSOR 0x00036002 +/*! @brief The crosshair shape. + * + * The crosshair shape. + */ +#define GLFW_CROSSHAIR_CURSOR 0x00036003 +/*! @brief The hand shape. + * + * The hand shape. + */ +#define GLFW_HAND_CURSOR 0x00036004 +/*! @brief The horizontal resize arrow shape. + * + * The horizontal resize arrow shape. + */ +#define GLFW_HRESIZE_CURSOR 0x00036005 +/*! @brief The vertical resize arrow shape. + * + * The vertical resize arrow shape. + */ +#define GLFW_VRESIZE_CURSOR 0x00036006 +/*! @} */ + +#define GLFW_CONNECTED 0x00040001 +#define GLFW_DISCONNECTED 0x00040002 + +/*! @addtogroup init + * @{ */ +/*! @brief Joystick hat buttons init hint. + * + * Joystick hat buttons [init hint](@ref GLFW_JOYSTICK_HAT_BUTTONS). + */ +#define GLFW_JOYSTICK_HAT_BUTTONS 0x00050001 +/*! @brief macOS specific init hint. + * + * macOS specific [init hint](@ref GLFW_COCOA_CHDIR_RESOURCES_hint). + */ +#define GLFW_COCOA_CHDIR_RESOURCES 0x00051001 +/*! @brief macOS specific init hint. + * + * macOS specific [init hint](@ref GLFW_COCOA_MENUBAR_hint). + */ +#define GLFW_COCOA_MENUBAR 0x00051002 +/*! @} */ + +#define GLFW_DONT_CARE -1 + + +/************************************************************************* + * GLFW API types + *************************************************************************/ + +/*! @brief Client API function pointer type. + * + * Generic function pointer used for returning client API function pointers + * without forcing a cast from a regular pointer. + * + * @sa @ref context_glext + * @sa @ref glfwGetProcAddress + * + * @since Added in version 3.0. + * + * @ingroup context + */ +typedef void (*GLFWglproc)(void); + +/*! @brief Vulkan API function pointer type. + * + * Generic function pointer used for returning Vulkan API function pointers + * without forcing a cast from a regular pointer. + * + * @sa @ref vulkan_proc + * @sa @ref glfwGetInstanceProcAddress + * + * @since Added in version 3.2. + * + * @ingroup vulkan + */ +typedef void (*GLFWvkproc)(void); + +/*! @brief Opaque monitor object. + * + * Opaque monitor object. + * + * @see @ref monitor_object + * + * @since Added in version 3.0. 
+ * + * @ingroup monitor + */ +typedef struct GLFWmonitor GLFWmonitor; + +/*! @brief Opaque window object. + * + * Opaque window object. + * + * @see @ref window_object + * + * @since Added in version 3.0. + * + * @ingroup window + */ +typedef struct GLFWwindow GLFWwindow; + +/*! @brief Opaque cursor object. + * + * Opaque cursor object. + * + * @see @ref cursor_object + * + * @since Added in version 3.1. + * + * @ingroup input + */ +typedef struct GLFWcursor GLFWcursor; + +/*! @brief The function pointer type for error callbacks. + * + * This is the function pointer type for error callbacks. An error callback + * function has the following signature: + * @code + * void callback_name(int error_code, const char* description) + * @endcode + * + * @param[in] error_code An [error code](@ref errors). Future releases may add + * more error codes. + * @param[in] description A UTF-8 encoded string describing the error. + * + * @pointer_lifetime The error description string is valid until the callback + * function returns. + * + * @sa @ref error_handling + * @sa @ref glfwSetErrorCallback + * + * @since Added in version 3.0. + * + * @ingroup init + */ +typedef void (* GLFWerrorfun)(int,const char*); + +/*! @brief The function pointer type for window position callbacks. + * + * This is the function pointer type for window position callbacks. A window + * position callback function has the following signature: + * @code + * void callback_name(GLFWwindow* window, int xpos, int ypos) + * @endcode + * + * @param[in] window The window that was moved. + * @param[in] xpos The new x-coordinate, in screen coordinates, of the + * upper-left corner of the content area of the window. + * @param[in] ypos The new y-coordinate, in screen coordinates, of the + * upper-left corner of the content area of the window. + * + * @sa @ref window_pos + * @sa @ref glfwSetWindowPosCallback + * + * @since Added in version 3.0. + * + * @ingroup window + */ +typedef void (* GLFWwindowposfun)(GLFWwindow*,int,int); + +/*! @brief The function pointer type for window size callbacks. + * + * This is the function pointer type for window size callbacks. A window size + * callback function has the following signature: + * @code + * void callback_name(GLFWwindow* window, int width, int height) + * @endcode + * + * @param[in] window The window that was resized. + * @param[in] width The new width, in screen coordinates, of the window. + * @param[in] height The new height, in screen coordinates, of the window. + * + * @sa @ref window_size + * @sa @ref glfwSetWindowSizeCallback + * + * @since Added in version 1.0. + * @glfw3 Added window handle parameter. + * + * @ingroup window + */ +typedef void (* GLFWwindowsizefun)(GLFWwindow*,int,int); + +/*! @brief The function pointer type for window close callbacks. + * + * This is the function pointer type for window close callbacks. A window + * close callback function has the following signature: + * @code + * void function_name(GLFWwindow* window) + * @endcode + * + * @param[in] window The window that the user attempted to close. + * + * @sa @ref window_close + * @sa @ref glfwSetWindowCloseCallback + * + * @since Added in version 2.5. + * @glfw3 Added window handle parameter. + * + * @ingroup window + */ +typedef void (* GLFWwindowclosefun)(GLFWwindow*); + +/*! @brief The function pointer type for window content refresh callbacks. + * + * This is the function pointer type for window content refresh callbacks. 
+ * A window content refresh callback function has the following signature:
+ * @code
+ * void function_name(GLFWwindow* window);
+ * @endcode
+ *
+ * @param[in] window The window whose content needs to be refreshed.
+ *
+ * @sa @ref window_refresh
+ * @sa @ref glfwSetWindowRefreshCallback
+ *
+ * @since Added in version 2.5.
+ * @glfw3 Added window handle parameter.
+ *
+ * @ingroup window
+ */
+typedef void (* GLFWwindowrefreshfun)(GLFWwindow*);
+
+/*! @brief The function pointer type for window focus callbacks.
+ *
+ * This is the function pointer type for window focus callbacks. A window
+ * focus callback function has the following signature:
+ * @code
+ * void function_name(GLFWwindow* window, int focused)
+ * @endcode
+ *
+ * @param[in] window The window that gained or lost input focus.
+ * @param[in] focused `GLFW_TRUE` if the window was given input focus, or
+ * `GLFW_FALSE` if it lost it.
+ *
+ * @sa @ref window_focus
+ * @sa @ref glfwSetWindowFocusCallback
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup window
+ */
+typedef void (* GLFWwindowfocusfun)(GLFWwindow*,int);
+
+/*! @brief The function pointer type for window iconify callbacks.
+ *
+ * This is the function pointer type for window iconify callbacks. A window
+ * iconify callback function has the following signature:
+ * @code
+ * void function_name(GLFWwindow* window, int iconified)
+ * @endcode
+ *
+ * @param[in] window The window that was iconified or restored.
+ * @param[in] iconified `GLFW_TRUE` if the window was iconified, or
+ * `GLFW_FALSE` if it was restored.
+ *
+ * @sa @ref window_iconify
+ * @sa @ref glfwSetWindowIconifyCallback
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup window
+ */
+typedef void (* GLFWwindowiconifyfun)(GLFWwindow*,int);
+
+/*! @brief The function pointer type for window maximize callbacks.
+ *
+ * This is the function pointer type for window maximize callbacks. A window
+ * maximize callback function has the following signature:
+ * @code
+ * void function_name(GLFWwindow* window, int maximized)
+ * @endcode
+ *
+ * @param[in] window The window that was maximized or restored.
+ * @param[in] maximized `GLFW_TRUE` if the window was maximized, or
+ * `GLFW_FALSE` if it was restored.
+ *
+ * @sa @ref window_maximize
+ * @sa @ref glfwSetWindowMaximizeCallback
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup window
+ */
+typedef void (* GLFWwindowmaximizefun)(GLFWwindow*,int);
+
+/*! @brief The function pointer type for framebuffer size callbacks.
+ *
+ * This is the function pointer type for framebuffer size callbacks.
+ * A framebuffer size callback function has the following signature:
+ * @code
+ * void function_name(GLFWwindow* window, int width, int height)
+ * @endcode
+ *
+ * @param[in] window The window whose framebuffer was resized.
+ * @param[in] width The new width, in pixels, of the framebuffer.
+ * @param[in] height The new height, in pixels, of the framebuffer.
+ *
+ * @sa @ref window_fbsize
+ * @sa @ref glfwSetFramebufferSizeCallback
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup window
+ */
+typedef void (* GLFWframebuffersizefun)(GLFWwindow*,int,int);
+
+/*! @brief The function pointer type for window content scale callbacks.
+ *
+ * This is the function pointer type for window content scale callbacks.
+ * A window content scale callback function has the following signature:
+ * @code
+ * void function_name(GLFWwindow* window, float xscale, float yscale)
+ * @endcode
+ *
+ * @param[in] window The window whose content scale changed.
+ * @param[in] xscale The new x-axis content scale of the window. + * @param[in] yscale The new y-axis content scale of the window. + * + * @sa @ref window_scale + * @sa @ref glfwSetWindowContentScaleCallback + * + * @since Added in version 3.3. + * + * @ingroup window + */ +typedef void (* GLFWwindowcontentscalefun)(GLFWwindow*,float,float); + +/*! @brief The function pointer type for mouse button callbacks. + * + * This is the function pointer type for mouse button callback functions. + * A mouse button callback function has the following signature: + * @code + * void function_name(GLFWwindow* window, int button, int action, int mods) + * @endcode + * + * @param[in] window The window that received the event. + * @param[in] button The [mouse button](@ref buttons) that was pressed or + * released. + * @param[in] action One of `GLFW_PRESS` or `GLFW_RELEASE`. Future releases + * may add more actions. + * @param[in] mods Bit field describing which [modifier keys](@ref mods) were + * held down. + * + * @sa @ref input_mouse_button + * @sa @ref glfwSetMouseButtonCallback + * + * @since Added in version 1.0. + * @glfw3 Added window handle and modifier mask parameters. + * + * @ingroup input + */ +typedef void (* GLFWmousebuttonfun)(GLFWwindow*,int,int,int); + +/*! @brief The function pointer type for cursor position callbacks. + * + * This is the function pointer type for cursor position callbacks. A cursor + * position callback function has the following signature: + * @code + * void function_name(GLFWwindow* window, double xpos, double ypos); + * @endcode + * + * @param[in] window The window that received the event. + * @param[in] xpos The new cursor x-coordinate, relative to the left edge of + * the content area. + * @param[in] ypos The new cursor y-coordinate, relative to the top edge of the + * content area. + * + * @sa @ref cursor_pos + * @sa @ref glfwSetCursorPosCallback + * + * @since Added in version 3.0. Replaces `GLFWmouseposfun`. + * + * @ingroup input + */ +typedef void (* GLFWcursorposfun)(GLFWwindow*,double,double); + +/*! @brief The function pointer type for cursor enter/leave callbacks. + * + * This is the function pointer type for cursor enter/leave callbacks. + * A cursor enter/leave callback function has the following signature: + * @code + * void function_name(GLFWwindow* window, int entered) + * @endcode + * + * @param[in] window The window that received the event. + * @param[in] entered `GLFW_TRUE` if the cursor entered the window's content + * area, or `GLFW_FALSE` if it left it. + * + * @sa @ref cursor_enter + * @sa @ref glfwSetCursorEnterCallback + * + * @since Added in version 3.0. + * + * @ingroup input + */ +typedef void (* GLFWcursorenterfun)(GLFWwindow*,int); + +/*! @brief The function pointer type for scroll callbacks. + * + * This is the function pointer type for scroll callbacks. A scroll callback + * function has the following signature: + * @code + * void function_name(GLFWwindow* window, double xoffset, double yoffset) + * @endcode + * + * @param[in] window The window that received the event. + * @param[in] xoffset The scroll offset along the x-axis. + * @param[in] yoffset The scroll offset along the y-axis. + * + * @sa @ref scrolling + * @sa @ref glfwSetScrollCallback + * + * @since Added in version 3.0. Replaces `GLFWmousewheelfun`. + * + * @ingroup input + */ +typedef void (* GLFWscrollfun)(GLFWwindow*,double,double); + +/*! @brief The function pointer type for keyboard key callbacks. + * + * This is the function pointer type for keyboard key callbacks. 
A keyboard + * key callback function has the following signature: + * @code + * void function_name(GLFWwindow* window, int key, int scancode, int action, int mods) + * @endcode + * + * @param[in] window The window that received the event. + * @param[in] key The [keyboard key](@ref keys) that was pressed or released. + * @param[in] scancode The system-specific scancode of the key. + * @param[in] action `GLFW_PRESS`, `GLFW_RELEASE` or `GLFW_REPEAT`. Future + * releases may add more actions. + * @param[in] mods Bit field describing which [modifier keys](@ref mods) were + * held down. + * + * @sa @ref input_key + * @sa @ref glfwSetKeyCallback + * + * @since Added in version 1.0. + * @glfw3 Added window handle, scancode and modifier mask parameters. + * + * @ingroup input + */ +typedef void (* GLFWkeyfun)(GLFWwindow*,int,int,int,int); + +/*! @brief The function pointer type for Unicode character callbacks. + * + * This is the function pointer type for Unicode character callbacks. + * A Unicode character callback function has the following signature: + * @code + * void function_name(GLFWwindow* window, unsigned int codepoint) + * @endcode + * + * @param[in] window The window that received the event. + * @param[in] codepoint The Unicode code point of the character. + * + * @sa @ref input_char + * @sa @ref glfwSetCharCallback + * + * @since Added in version 2.4. + * @glfw3 Added window handle parameter. + * + * @ingroup input + */ +typedef void (* GLFWcharfun)(GLFWwindow*,unsigned int); + +/*! @brief The function pointer type for Unicode character with modifiers + * callbacks. + * + * This is the function pointer type for Unicode character with modifiers + * callbacks. It is called for each input character, regardless of what + * modifier keys are held down. A Unicode character with modifiers callback + * function has the following signature: + * @code + * void function_name(GLFWwindow* window, unsigned int codepoint, int mods) + * @endcode + * + * @param[in] window The window that received the event. + * @param[in] codepoint The Unicode code point of the character. + * @param[in] mods Bit field describing which [modifier keys](@ref mods) were + * held down. + * + * @sa @ref input_char + * @sa @ref glfwSetCharModsCallback + * + * @deprecated Scheduled for removal in version 4.0. + * + * @since Added in version 3.1. + * + * @ingroup input + */ +typedef void (* GLFWcharmodsfun)(GLFWwindow*,unsigned int,int); + +/*! @brief The function pointer type for path drop callbacks. + * + * This is the function pointer type for path drop callbacks. A path drop + * callback function has the following signature: + * @code + * void function_name(GLFWwindow* window, int path_count, const char* paths[]) + * @endcode + * + * @param[in] window The window that received the event. + * @param[in] path_count The number of dropped paths. + * @param[in] paths The UTF-8 encoded file and/or directory path names. + * + * @pointer_lifetime The path array and its strings are valid until the + * callback function returns. + * + * @sa @ref path_drop + * @sa @ref glfwSetDropCallback + * + * @since Added in version 3.1. + * + * @ingroup input + */ +typedef void (* GLFWdropfun)(GLFWwindow*,int,const char*[]); + +/*! @brief The function pointer type for monitor configuration callbacks. + * + * This is the function pointer type for monitor configuration callbacks. 
+ * A monitor callback function has the following signature: + * @code + * void function_name(GLFWmonitor* monitor, int event) + * @endcode + * + * @param[in] monitor The monitor that was connected or disconnected. + * @param[in] event One of `GLFW_CONNECTED` or `GLFW_DISCONNECTED`. Future + * releases may add more events. + * + * @sa @ref monitor_event + * @sa @ref glfwSetMonitorCallback + * + * @since Added in version 3.0. + * + * @ingroup monitor + */ +typedef void (* GLFWmonitorfun)(GLFWmonitor*,int); + +/*! @brief The function pointer type for joystick configuration callbacks. + * + * This is the function pointer type for joystick configuration callbacks. + * A joystick configuration callback function has the following signature: + * @code + * void function_name(int jid, int event) + * @endcode + * + * @param[in] jid The joystick that was connected or disconnected. + * @param[in] event One of `GLFW_CONNECTED` or `GLFW_DISCONNECTED`. Future + * releases may add more events. + * + * @sa @ref joystick_event + * @sa @ref glfwSetJoystickCallback + * + * @since Added in version 3.2. + * + * @ingroup input + */ +typedef void (* GLFWjoystickfun)(int,int); + +/*! @brief Video mode type. + * + * This describes a single video mode. + * + * @sa @ref monitor_modes + * @sa @ref glfwGetVideoMode + * @sa @ref glfwGetVideoModes + * + * @since Added in version 1.0. + * @glfw3 Added refresh rate member. + * + * @ingroup monitor + */ +typedef struct GLFWvidmode +{ + /*! The width, in screen coordinates, of the video mode. + */ + int width; + /*! The height, in screen coordinates, of the video mode. + */ + int height; + /*! The bit depth of the red channel of the video mode. + */ + int redBits; + /*! The bit depth of the green channel of the video mode. + */ + int greenBits; + /*! The bit depth of the blue channel of the video mode. + */ + int blueBits; + /*! The refresh rate, in Hz, of the video mode. + */ + int refreshRate; +} GLFWvidmode; + +/*! @brief Gamma ramp. + * + * This describes the gamma ramp for a monitor. + * + * @sa @ref monitor_gamma + * @sa @ref glfwGetGammaRamp + * @sa @ref glfwSetGammaRamp + * + * @since Added in version 3.0. + * + * @ingroup monitor + */ +typedef struct GLFWgammaramp +{ + /*! An array of value describing the response of the red channel. + */ + unsigned short* red; + /*! An array of value describing the response of the green channel. + */ + unsigned short* green; + /*! An array of value describing the response of the blue channel. + */ + unsigned short* blue; + /*! The number of elements in each array. + */ + unsigned int size; +} GLFWgammaramp; + +/*! @brief Image data. + * + * This describes a single 2D image. See the documentation for each related + * function what the expected pixel format is. + * + * @sa @ref cursor_custom + * @sa @ref window_icon + * + * @since Added in version 2.1. + * @glfw3 Removed format and bytes-per-pixel members. + * + * @ingroup window + */ +typedef struct GLFWimage +{ + /*! The width, in pixels, of this image. + */ + int width; + /*! The height, in pixels, of this image. + */ + int height; + /*! The pixel data of this image, arranged left-to-right, top-to-bottom. + */ + unsigned char* pixels; +} GLFWimage; + +/*! @brief Gamepad input state + * + * This describes the input state of a gamepad. + * + * @sa @ref gamepad + * @sa @ref glfwGetGamepadState + * + * @since Added in version 3.3. + * + * @ingroup input + */ +typedef struct GLFWgamepadstate +{ + /*! 
The states of each [gamepad button](@ref gamepad_buttons), `GLFW_PRESS` + * or `GLFW_RELEASE`. + */ + unsigned char buttons[15]; + /*! The states of each [gamepad axis](@ref gamepad_axes), in the range -1.0 + * to 1.0 inclusive. + */ + float axes[6]; +} GLFWgamepadstate; + + +/************************************************************************* + * GLFW API functions + *************************************************************************/ + +/*! @brief Initializes the GLFW library. + * + * This function initializes the GLFW library. Before most GLFW functions can + * be used, GLFW must be initialized, and before an application terminates GLFW + * should be terminated in order to free any resources allocated during or + * after initialization. + * + * If this function fails, it calls @ref glfwTerminate before returning. If it + * succeeds, you should call @ref glfwTerminate before the application exits. + * + * Additional calls to this function after successful initialization but before + * termination will return `GLFW_TRUE` immediately. + * + * @return `GLFW_TRUE` if successful, or `GLFW_FALSE` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_PLATFORM_ERROR. + * + * @remark @macos This function will change the current directory of the + * application to the `Contents/Resources` subdirectory of the application's + * bundle, if present. This can be disabled with the @ref + * GLFW_COCOA_CHDIR_RESOURCES init hint. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref intro_init + * @sa @ref glfwTerminate + * + * @since Added in version 1.0. + * + * @ingroup init + */ +GLFWAPI int glfwInit(void); + +/*! @brief Terminates the GLFW library. + * + * This function destroys all remaining windows and cursors, restores any + * modified gamma ramps and frees any other allocated resources. Once this + * function is called, you must again call @ref glfwInit successfully before + * you will be able to use most GLFW functions. + * + * If GLFW has been successfully initialized, this function should be called + * before the application exits. If initialization fails, there is no need to + * call this function, as it is called by @ref glfwInit before it returns + * failure. + * + * @errors Possible errors include @ref GLFW_PLATFORM_ERROR. + * + * @remark This function may be called before @ref glfwInit. + * + * @warning The contexts of any remaining windows must not be current on any + * other thread when this function is called. + * + * @reentrancy This function must not be called from a callback. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref intro_init + * @sa @ref glfwInit + * + * @since Added in version 1.0. + * + * @ingroup init + */ +GLFWAPI void glfwTerminate(void); + +/*! @brief Sets the specified init hint to the desired value. + * + * This function sets hints for the next initialization of GLFW. + * + * The values you set hints to are never reset by GLFW, but they only take + * effect during initialization. Once GLFW has been initialized, any values + * you set will be ignored until the library is terminated and initialized + * again. + * + * Some hints are platform specific. These may be set on any platform but they + * will only affect their specific platform. Other platforms will ignore them. + * Setting these hints requires no platform specific headers or functions. + * + * @param[in] hint The [init hint](@ref init_hints) to set. 
+ * @param[in] value The new value of the init hint. + * + * @errors Possible errors include @ref GLFW_INVALID_ENUM and @ref + * GLFW_INVALID_VALUE. + * + * @remarks This function may be called before @ref glfwInit. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa init_hints + * @sa glfwInit + * + * @since Added in version 3.3. + * + * @ingroup init + */ +GLFWAPI void glfwInitHint(int hint, int value); + +/*! @brief Retrieves the version of the GLFW library. + * + * This function retrieves the major, minor and revision numbers of the GLFW + * library. It is intended for when you are using GLFW as a shared library and + * want to ensure that you are using the minimum required version. + * + * Any or all of the version arguments may be `NULL`. + * + * @param[out] major Where to store the major version number, or `NULL`. + * @param[out] minor Where to store the minor version number, or `NULL`. + * @param[out] rev Where to store the revision number, or `NULL`. + * + * @errors None. + * + * @remark This function may be called before @ref glfwInit. + * + * @thread_safety This function may be called from any thread. + * + * @sa @ref intro_version + * @sa @ref glfwGetVersionString + * + * @since Added in version 1.0. + * + * @ingroup init + */ +GLFWAPI void glfwGetVersion(int* major, int* minor, int* rev); + +/*! @brief Returns a string describing the compile-time configuration. + * + * This function returns the compile-time generated + * [version string](@ref intro_version_string) of the GLFW library binary. It + * describes the version, platform, compiler and any platform-specific + * compile-time options. It should not be confused with the OpenGL or OpenGL + * ES version string, queried with `glGetString`. + * + * __Do not use the version string__ to parse the GLFW library version. The + * @ref glfwGetVersion function provides the version of the running library + * binary in numerical format. + * + * @return The ASCII encoded GLFW version string. + * + * @errors None. + * + * @remark This function may be called before @ref glfwInit. + * + * @pointer_lifetime The returned string is static and compile-time generated. + * + * @thread_safety This function may be called from any thread. + * + * @sa @ref intro_version + * @sa @ref glfwGetVersion + * + * @since Added in version 3.0. + * + * @ingroup init + */ +GLFWAPI const char* glfwGetVersionString(void); + +/*! @brief Returns and clears the last error for the calling thread. + * + * This function returns and clears the [error code](@ref errors) of the last + * error that occurred on the calling thread, and optionally a UTF-8 encoded + * human-readable description of it. If no error has occurred since the last + * call, it returns @ref GLFW_NO_ERROR (zero) and the description pointer is + * set to `NULL`. + * + * @param[in] description Where to store the error description pointer, or `NULL`. + * @return The last error code for the calling thread, or @ref GLFW_NO_ERROR + * (zero). + * + * @errors None. + * + * @pointer_lifetime The returned string is allocated and freed by GLFW. You + * should not free it yourself. It is guaranteed to be valid only until the + * next error occurs or the library is terminated. + * + * @remark This function may be called before @ref glfwInit. + * + * @thread_safety This function may be called from any thread. + * + * @sa @ref error_handling + * @sa @ref glfwSetErrorCallback + * + * @since Added in version 3.3. 
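+ *
+ * A minimal usage sketch; the printf-based reporting and the variable names
+ * are illustrative only, not part of the GLFW API:
+ * @code
+ * const char* description;
+ * int code = glfwGetError(&description);
+ * if (code != GLFW_NO_ERROR)
+ *     printf("GLFW error 0x%08X: %s\n", code, description);
+ * @endcode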
+ * + * @ingroup init + */ +GLFWAPI int glfwGetError(const char** description); + +/*! @brief Sets the error callback. + * + * This function sets the error callback, which is called with an error code + * and a human-readable description each time a GLFW error occurs. + * + * The error code is set before the callback is called. Calling @ref + * glfwGetError from the error callback will return the same value as the error + * code argument. + * + * The error callback is called on the thread where the error occurred. If you + * are using GLFW from multiple threads, your error callback needs to be + * written accordingly. + * + * Because the description string may have been generated specifically for that + * error, it is not guaranteed to be valid after the callback has returned. If + * you wish to use it after the callback returns, you need to make a copy. + * + * Once set, the error callback remains set even after the library has been + * terminated. + * + * @param[in] callback The new callback, or `NULL` to remove the currently set + * callback. + * @return The previously set callback, or `NULL` if no callback was set. + * + * @callback_signature + * @code + * void callback_name(int error_code, const char* description) + * @endcode + * For more information about the callback parameters, see the + * [callback pointer type](@ref GLFWerrorfun). + * + * @errors None. + * + * @remark This function may be called before @ref glfwInit. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref error_handling + * @sa @ref glfwGetError + * + * @since Added in version 3.0. + * + * @ingroup init + */ +GLFWAPI GLFWerrorfun glfwSetErrorCallback(GLFWerrorfun callback); + +/*! @brief Returns the currently connected monitors. + * + * This function returns an array of handles for all currently connected + * monitors. The primary monitor is always first in the returned array. If no + * monitors were found, this function returns `NULL`. + * + * @param[out] count Where to store the number of monitors in the returned + * array. This is set to zero if an error occurred. + * @return An array of monitor handles, or `NULL` if no monitors were found or + * if an [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @pointer_lifetime The returned array is allocated and freed by GLFW. You + * should not free it yourself. It is guaranteed to be valid only until the + * monitor configuration changes or the library is terminated. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref monitor_monitors + * @sa @ref monitor_event + * @sa @ref glfwGetPrimaryMonitor + * + * @since Added in version 3.0. + * + * @ingroup monitor + */ +GLFWAPI GLFWmonitor** glfwGetMonitors(int* count); + +/*! @brief Returns the primary monitor. + * + * This function returns the primary monitor. This is usually the monitor + * where elements like the task bar or global menu bar are located. + * + * @return The primary monitor, or `NULL` if no monitors were found or if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function must only be called from the main thread. + * + * @remark The primary monitor is always first in the array returned by @ref + * glfwGetMonitors. + * + * @sa @ref monitor_monitors + * @sa @ref glfwGetMonitors + * + * @since Added in version 3.0. 
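+ *
+ * A short selection sketch (purely illustrative; a real application may want
+ * to let the user pick a monitor):
+ * @code
+ * int count;
+ * GLFWmonitor** monitors = glfwGetMonitors(&count);
+ * GLFWmonitor* primary = glfwGetPrimaryMonitor();  // same as monitors[0] when count > 0
+ * @endcode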
+ * + * @ingroup monitor + */ +GLFWAPI GLFWmonitor* glfwGetPrimaryMonitor(void); + +/*! @brief Returns the position of the monitor's viewport on the virtual screen. + * + * This function returns the position, in screen coordinates, of the upper-left + * corner of the specified monitor. + * + * Any or all of the position arguments may be `NULL`. If an error occurs, all + * non-`NULL` position arguments will be set to zero. + * + * @param[in] monitor The monitor to query. + * @param[out] xpos Where to store the monitor x-coordinate, or `NULL`. + * @param[out] ypos Where to store the monitor y-coordinate, or `NULL`. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref monitor_properties + * + * @since Added in version 3.0. + * + * @ingroup monitor + */ +GLFWAPI void glfwGetMonitorPos(GLFWmonitor* monitor, int* xpos, int* ypos); + +/*! @brief Retrieves the work area of the monitor. + * + * This function returns the position, in screen coordinates, of the upper-left + * corner of the work area of the specified monitor along with the work area + * size in screen coordinates. The work area is defined as the area of the + * monitor not occluded by the operating system task bar where present. If no + * task bar exists then the work area is the monitor resolution in screen + * coordinates. + * + * Any or all of the position and size arguments may be `NULL`. If an error + * occurs, all non-`NULL` position and size arguments will be set to zero. + * + * @param[in] monitor The monitor to query. + * @param[out] xpos Where to store the monitor x-coordinate, or `NULL`. + * @param[out] ypos Where to store the monitor y-coordinate, or `NULL`. + * @param[out] width Where to store the monitor width, or `NULL`. + * @param[out] height Where to store the monitor height, or `NULL`. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref monitor_workarea + * + * @since Added in version 3.3. + * + * @ingroup monitor + */ +GLFWAPI void glfwGetMonitorWorkarea(GLFWmonitor* monitor, int* xpos, int* ypos, int* width, int* height); + +/*! @brief Returns the physical size of the monitor. + * + * This function returns the size, in millimetres, of the display area of the + * specified monitor. + * + * Some systems do not provide accurate monitor size information, either + * because the monitor + * [EDID](https://en.wikipedia.org/wiki/Extended_display_identification_data) + * data is incorrect or because the driver does not report it accurately. + * + * Any or all of the size arguments may be `NULL`. If an error occurs, all + * non-`NULL` size arguments will be set to zero. + * + * @param[in] monitor The monitor to query. + * @param[out] widthMM Where to store the width, in millimetres, of the + * monitor's display area, or `NULL`. + * @param[out] heightMM Where to store the height, in millimetres, of the + * monitor's display area, or `NULL`. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @remark @win32 calculates the returned physical size from the + * current resolution and system DPI instead of querying the monitor EDID data. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref monitor_properties + * + * @since Added in version 3.0. 
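+ *
+ * As a rough illustration, assuming `monitor` is a valid monitor handle, an
+ * approximate DPI can be derived from the physical size and the current
+ * video mode, subject to the accuracy caveats above (25.4 millimetres per
+ * inch):
+ * @code
+ * int widthMM, heightMM;
+ * glfwGetMonitorPhysicalSize(monitor, &widthMM, &heightMM);
+ * const GLFWvidmode* mode = glfwGetVideoMode(monitor);
+ * float dpi = (mode && widthMM > 0) ? mode->width / (widthMM / 25.4f) : 0.0f;
+ * @endcode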
+ * + * @ingroup monitor + */ +GLFWAPI void glfwGetMonitorPhysicalSize(GLFWmonitor* monitor, int* widthMM, int* heightMM); + +/*! @brief Retrieves the content scale for the specified monitor. + * + * This function retrieves the content scale for the specified monitor. The + * content scale is the ratio between the current DPI and the platform's + * default DPI. This is especially important for text and any UI elements. If + * the pixel dimensions of your UI scaled by this look appropriate on your + * machine then it should appear at a reasonable size on other machines + * regardless of their DPI and scaling settings. This relies on the system DPI + * and scaling settings being somewhat correct. + * + * The content scale may depend on both the monitor resolution and pixel + * density and on user settings. It may be very different from the raw DPI + * calculated from the physical size and current resolution. + * + * @param[in] monitor The monitor to query. + * @param[out] xscale Where to store the x-axis content scale, or `NULL`. + * @param[out] yscale Where to store the y-axis content scale, or `NULL`. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref monitor_scale + * @sa @ref glfwGetWindowContentScale + * + * @since Added in version 3.3. + * + * @ingroup monitor + */ +GLFWAPI void glfwGetMonitorContentScale(GLFWmonitor* monitor, float* xscale, float* yscale); + +/*! @brief Returns the name of the specified monitor. + * + * This function returns a human-readable name, encoded as UTF-8, of the + * specified monitor. The name typically reflects the make and model of the + * monitor and is not guaranteed to be unique among the connected monitors. + * + * @param[in] monitor The monitor to query. + * @return The UTF-8 encoded name of the monitor, or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @pointer_lifetime The returned string is allocated and freed by GLFW. You + * should not free it yourself. It is valid until the specified monitor is + * disconnected or the library is terminated. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref monitor_properties + * + * @since Added in version 3.0. + * + * @ingroup monitor + */ +GLFWAPI const char* glfwGetMonitorName(GLFWmonitor* monitor); + +/*! @brief Sets the user pointer of the specified monitor. + * + * This function sets the user-defined pointer of the specified monitor. The + * current value is retained until the monitor is disconnected. The initial + * value is `NULL`. + * + * This function may be called from the monitor callback, even for a monitor + * that is being disconnected. + * + * @param[in] monitor The monitor whose pointer to set. + * @param[in] pointer The new value. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @sa @ref monitor_userptr + * @sa @ref glfwGetMonitorUserPointer + * + * @since Added in version 3.3. + * + * @ingroup monitor + */ +GLFWAPI void glfwSetMonitorUserPointer(GLFWmonitor* monitor, void* pointer); + +/*! @brief Returns the user pointer of the specified monitor. + * + * This function returns the current value of the user-defined pointer of the + * specified monitor. The initial value is `NULL`. 
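+ *
+ * A typical pairing with @ref glfwSetMonitorUserPointer is sketched below;
+ * the `MyMonitorData` type and `data` variable are purely illustrative:
+ * @code
+ * MyMonitorData data = {0};
+ * glfwSetMonitorUserPointer(monitor, &data);
+ * // ... later, for example inside the monitor callback:
+ * MyMonitorData* stored = glfwGetMonitorUserPointer(monitor);
+ * @endcode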
+ * + * This function may be called from the monitor callback, even for a monitor + * that is being disconnected. + * + * @param[in] monitor The monitor whose pointer to return. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @sa @ref monitor_userptr + * @sa @ref glfwSetMonitorUserPointer + * + * @since Added in version 3.3. + * + * @ingroup monitor + */ +GLFWAPI void* glfwGetMonitorUserPointer(GLFWmonitor* monitor); + +/*! @brief Sets the monitor configuration callback. + * + * This function sets the monitor configuration callback, or removes the + * currently set callback. This is called when a monitor is connected to or + * disconnected from the system. + * + * @param[in] callback The new callback, or `NULL` to remove the currently set + * callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). + * + * @callback_signature + * @code + * void function_name(GLFWmonitor* monitor, int event) + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWmonitorfun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref monitor_event + * + * @since Added in version 3.0. + * + * @ingroup monitor + */ +GLFWAPI GLFWmonitorfun glfwSetMonitorCallback(GLFWmonitorfun callback); + +/*! @brief Returns the available video modes for the specified monitor. + * + * This function returns an array of all video modes supported by the specified + * monitor. The returned array is sorted in ascending order, first by color + * bit depth (the sum of all channel depths) and then by resolution area (the + * product of width and height). + * + * @param[in] monitor The monitor to query. + * @param[out] count Where to store the number of video modes in the returned + * array. This is set to zero if an error occurred. + * @return An array of video modes, or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @pointer_lifetime The returned array is allocated and freed by GLFW. You + * should not free it yourself. It is valid until the specified monitor is + * disconnected, this function is called again for that monitor or the library + * is terminated. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref monitor_modes + * @sa @ref glfwGetVideoMode + * + * @since Added in version 1.0. + * @glfw3 Changed to return an array of modes for a specific monitor. + * + * @ingroup monitor + */ +GLFWAPI const GLFWvidmode* glfwGetVideoModes(GLFWmonitor* monitor, int* count); + +/*! @brief Returns the current mode of the specified monitor. + * + * This function returns the current video mode of the specified monitor. If + * you have created a full screen window for that monitor, the return value + * will depend on whether that window is iconified. + * + * @param[in] monitor The monitor to query. + * @return The current mode of the monitor, or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @pointer_lifetime The returned array is allocated and freed by GLFW. You + * should not free it yourself. 
It is valid until the specified monitor is + * disconnected or the library is terminated. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref monitor_modes + * @sa @ref glfwGetVideoModes + * + * @since Added in version 3.0. Replaces `glfwGetDesktopMode`. + * + * @ingroup monitor + */ +GLFWAPI const GLFWvidmode* glfwGetVideoMode(GLFWmonitor* monitor); + +/*! @brief Generates a gamma ramp and sets it for the specified monitor. + * + * This function generates an appropriately sized gamma ramp from the specified + * exponent and then calls @ref glfwSetGammaRamp with it. The value must be + * a finite number greater than zero. + * + * The software controlled gamma ramp is applied _in addition_ to the hardware + * gamma correction, which today is usually an approximation of sRGB gamma. + * This means that setting a perfectly linear ramp, or gamma 1.0, will produce + * the default (usually sRGB-like) behavior. + * + * For gamma correct rendering with OpenGL or OpenGL ES, see the @ref + * GLFW_SRGB_CAPABLE hint. + * + * @param[in] monitor The monitor whose gamma ramp to set. + * @param[in] gamma The desired exponent. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_INVALID_VALUE and @ref GLFW_PLATFORM_ERROR. + * + * @remark @wayland Gamma handling is a privileged protocol, this function + * will thus never be implemented and emits @ref GLFW_PLATFORM_ERROR. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref monitor_gamma + * + * @since Added in version 3.0. + * + * @ingroup monitor + */ +GLFWAPI void glfwSetGamma(GLFWmonitor* monitor, float gamma); + +/*! @brief Returns the current gamma ramp for the specified monitor. + * + * This function returns the current gamma ramp of the specified monitor. + * + * @param[in] monitor The monitor to query. + * @return The current gamma ramp, or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @remark @wayland Gamma handling is a privileged protocol, this function + * will thus never be implemented and emits @ref GLFW_PLATFORM_ERROR while + * returning `NULL`. + * + * @pointer_lifetime The returned structure and its arrays are allocated and + * freed by GLFW. You should not free them yourself. They are valid until the + * specified monitor is disconnected, this function is called again for that + * monitor or the library is terminated. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref monitor_gamma + * + * @since Added in version 3.0. + * + * @ingroup monitor + */ +GLFWAPI const GLFWgammaramp* glfwGetGammaRamp(GLFWmonitor* monitor); + +/*! @brief Sets the current gamma ramp for the specified monitor. + * + * This function sets the current gamma ramp for the specified monitor. The + * original gamma ramp for that monitor is saved by GLFW the first time this + * function is called and is restored by @ref glfwTerminate. + * + * The software controlled gamma ramp is applied _in addition_ to the hardware + * gamma correction, which today is usually an approximation of sRGB gamma. + * This means that setting a perfectly linear ramp, or gamma 1.0, will produce + * the default (usually sRGB-like) behavior. + * + * For gamma correct rendering with OpenGL or OpenGL ES, see the @ref + * GLFW_SRGB_CAPABLE hint. + * + * @param[in] monitor The monitor whose gamma ramp to set. 
+ * @param[in] ramp The gamma ramp to use. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @remark The size of the specified gamma ramp should match the size of the + * current ramp for that monitor. + * + * @remark @win32 The gamma ramp size must be 256. + * + * @remark @wayland Gamma handling is a privileged protocol, this function + * will thus never be implemented and emits @ref GLFW_PLATFORM_ERROR. + * + * @pointer_lifetime The specified gamma ramp is copied before this function + * returns. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref monitor_gamma + * + * @since Added in version 3.0. + * + * @ingroup monitor + */ +GLFWAPI void glfwSetGammaRamp(GLFWmonitor* monitor, const GLFWgammaramp* ramp); + +/*! @brief Resets all window hints to their default values. + * + * This function resets all window hints to their + * [default values](@ref window_hints_values). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_hints + * @sa @ref glfwWindowHint + * @sa @ref glfwWindowHintString + * + * @since Added in version 3.0. + * + * @ingroup window + */ +GLFWAPI void glfwDefaultWindowHints(void); + +/*! @brief Sets the specified window hint to the desired value. + * + * This function sets hints for the next call to @ref glfwCreateWindow. The + * hints, once set, retain their values until changed by a call to this + * function or @ref glfwDefaultWindowHints, or until the library is terminated. + * + * Only integer value hints can be set with this function. String value hints + * are set with @ref glfwWindowHintString. + * + * This function does not check whether the specified hint values are valid. + * If you set hints to invalid values this will instead be reported by the next + * call to @ref glfwCreateWindow. + * + * Some hints are platform specific. These may be set on any platform but they + * will only affect their specific platform. Other platforms will ignore them. + * Setting these hints requires no platform specific headers or functions. + * + * @param[in] hint The [window hint](@ref window_hints) to set. + * @param[in] value The new value of the window hint. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_INVALID_ENUM. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_hints + * @sa @ref glfwWindowHintString + * @sa @ref glfwDefaultWindowHints + * + * @since Added in version 3.0. Replaces `glfwOpenWindowHint`. + * + * @ingroup window + */ +GLFWAPI void glfwWindowHint(int hint, int value); + +/*! @brief Sets the specified window hint to the desired value. + * + * This function sets hints for the next call to @ref glfwCreateWindow. The + * hints, once set, retain their values until changed by a call to this + * function or @ref glfwDefaultWindowHints, or until the library is terminated. + * + * Only string type hints can be set with this function. Integer value hints + * are set with @ref glfwWindowHint. + * + * This function does not check whether the specified hint values are valid. + * If you set hints to invalid values this will instead be reported by the next + * call to @ref glfwCreateWindow. + * + * Some hints are platform specific. These may be set on any platform but they + * will only affect their specific platform. Other platforms will ignore them. 
+ * Setting these hints requires no platform specific headers or functions. + * + * @param[in] hint The [window hint](@ref window_hints) to set. + * @param[in] value The new value of the window hint. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_INVALID_ENUM. + * + * @pointer_lifetime The specified string is copied before this function + * returns. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_hints + * @sa @ref glfwWindowHint + * @sa @ref glfwDefaultWindowHints + * + * @since Added in version 3.3. + * + * @ingroup window + */ +GLFWAPI void glfwWindowHintString(int hint, const char* value); + +/*! @brief Creates a window and its associated context. + * + * This function creates a window and its associated OpenGL or OpenGL ES + * context. Most of the options controlling how the window and its context + * should be created are specified with [window hints](@ref window_hints). + * + * Successful creation does not change which context is current. Before you + * can use the newly created context, you need to + * [make it current](@ref context_current). For information about the `share` + * parameter, see @ref context_sharing. + * + * The created window, framebuffer and context may differ from what you + * requested, as not all parameters and hints are + * [hard constraints](@ref window_hints_hard). This includes the size of the + * window, especially for full screen windows. To query the actual attributes + * of the created window, framebuffer and context, see @ref + * glfwGetWindowAttrib, @ref glfwGetWindowSize and @ref glfwGetFramebufferSize. + * + * To create a full screen window, you need to specify the monitor the window + * will cover. If no monitor is specified, the window will be windowed mode. + * Unless you have a way for the user to choose a specific monitor, it is + * recommended that you pick the primary monitor. For more information on how + * to query connected monitors, see @ref monitor_monitors. + * + * For full screen windows, the specified size becomes the resolution of the + * window's _desired video mode_. As long as a full screen window is not + * iconified, the supported video mode most closely matching the desired video + * mode is set for the specified monitor. For more information about full + * screen windows, including the creation of so called _windowed full screen_ + * or _borderless full screen_ windows, see @ref window_windowed_full_screen. + * + * Once you have created the window, you can switch it between windowed and + * full screen mode with @ref glfwSetWindowMonitor. This will not affect its + * OpenGL or OpenGL ES context. + * + * By default, newly created windows use the placement recommended by the + * window system. To create the window at a specific position, make it + * initially invisible using the [GLFW_VISIBLE](@ref GLFW_VISIBLE_hint) window + * hint, set its [position](@ref window_pos) and then [show](@ref window_hide) + * it. + * + * As long as at least one full screen window is not iconified, the screensaver + * is prohibited from starting. + * + * Window systems put limits on window sizes. Very large or very small window + * dimensions may be overridden by the window system on creation. Check the + * actual [size](@ref window_size) after creation. + * + * The [swap interval](@ref buffer_swap) is not set during window creation and + * the initial value may vary depending on driver settings and defaults. 
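+ *
+ * A bare-bones creation sketch follows; it assumes @ref glfwInit has already
+ * succeeded, and the hint values, size and title are examples rather than
+ * recommended defaults:
+ * @code
+ * glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
+ * glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
+ * glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
+ * GLFWwindow* window = glfwCreateWindow(640, 480, "My Title", NULL, NULL);
+ * if (window)
+ *     glfwMakeContextCurrent(window);
+ * @endcode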
+ * + * @param[in] width The desired width, in screen coordinates, of the window. + * This must be greater than zero. + * @param[in] height The desired height, in screen coordinates, of the window. + * This must be greater than zero. + * @param[in] title The initial, UTF-8 encoded window title. + * @param[in] monitor The monitor to use for full screen mode, or `NULL` for + * windowed mode. + * @param[in] share The window whose context to share resources with, or `NULL` + * to not share resources. + * @return The handle of the created window, or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_INVALID_ENUM, @ref GLFW_INVALID_VALUE, @ref GLFW_API_UNAVAILABLE, @ref + * GLFW_VERSION_UNAVAILABLE, @ref GLFW_FORMAT_UNAVAILABLE and @ref + * GLFW_PLATFORM_ERROR. + * + * @remark @win32 Window creation will fail if the Microsoft GDI software + * OpenGL implementation is the only one available. + * + * @remark @win32 If the executable has an icon resource named `GLFW_ICON,` it + * will be set as the initial icon for the window. If no such icon is present, + * the `IDI_APPLICATION` icon will be used instead. To set a different icon, + * see @ref glfwSetWindowIcon. + * + * @remark @win32 The context to share resources with must not be current on + * any other thread. + * + * @remark @macos The OS only supports forward-compatible core profile contexts + * for OpenGL versions 3.2 and later. Before creating an OpenGL context of + * version 3.2 or later you must set the + * [GLFW_OPENGL_FORWARD_COMPAT](@ref GLFW_OPENGL_FORWARD_COMPAT_hint) and + * [GLFW_OPENGL_PROFILE](@ref GLFW_OPENGL_PROFILE_hint) hints accordingly. + * OpenGL 3.0 and 3.1 contexts are not supported at all on macOS. + * + * @remark @macos The GLFW window has no icon, as it is not a document + * window, but the dock icon will be the same as the application bundle's icon. + * For more information on bundles, see the + * [Bundle Programming Guide](https://developer.apple.com/library/mac/documentation/CoreFoundation/Conceptual/CFBundles/) + * in the Mac Developer Library. + * + * @remark @macos The first time a window is created the menu bar is created. + * If GLFW finds a `MainMenu.nib` it is loaded and assumed to contain a menu + * bar. Otherwise a minimal menu bar is created manually with common commands + * like Hide, Quit and About. The About entry opens a minimal about dialog + * with information from the application's bundle. Menu bar creation can be + * disabled entirely with the @ref GLFW_COCOA_MENUBAR init hint. + * + * @remark @macos On OS X 10.10 and later the window frame will not be rendered + * at full resolution on Retina displays unless the + * [GLFW_COCOA_RETINA_FRAMEBUFFER](@ref GLFW_COCOA_RETINA_FRAMEBUFFER_hint) + * hint is `GLFW_TRUE` and the `NSHighResolutionCapable` key is enabled in the + * application bundle's `Info.plist`. For more information, see + * [High Resolution Guidelines for OS X](https://developer.apple.com/library/mac/documentation/GraphicsAnimation/Conceptual/HighResolutionOSX/Explained/Explained.html) + * in the Mac Developer Library. The GLFW test and example programs use + * a custom `Info.plist` template for this, which can be found as + * `CMake/MacOSXBundleInfo.plist.in` in the source tree. + * + * @remark @macos When activating frame autosaving with + * [GLFW_COCOA_FRAME_NAME](@ref GLFW_COCOA_FRAME_NAME_hint), the specified + * window size and position may be overridden by previously saved values. 
+ * + * @remark @x11 Some window managers will not respect the placement of + * initially hidden windows. + * + * @remark @x11 Due to the asynchronous nature of X11, it may take a moment for + * a window to reach its requested state. This means you may not be able to + * query the final size, position or other attributes directly after window + * creation. + * + * @remark @x11 The class part of the `WM_CLASS` window property will by + * default be set to the window title passed to this function. The instance + * part will use the contents of the `RESOURCE_NAME` environment variable, if + * present and not empty, or fall back to the window title. Set the + * [GLFW_X11_CLASS_NAME](@ref GLFW_X11_CLASS_NAME_hint) and + * [GLFW_X11_INSTANCE_NAME](@ref GLFW_X11_INSTANCE_NAME_hint) window hints to + * override this. + * + * @remark @wayland Compositors should implement the xdg-decoration protocol + * for GLFW to decorate the window properly. If this protocol isn't + * supported, or if the compositor prefers client-side decorations, a very + * simple fallback frame will be drawn using the wp_viewporter protocol. A + * compositor can still emit close, maximize or fullscreen events, using for + * instance a keybind mechanism. If neither of these protocols is supported, + * the window won't be decorated. + * + * @remark @wayland A full screen window will not attempt to change the mode, + * no matter what the requested size or refresh rate. + * + * @remark @wayland Screensaver inhibition requires the idle-inhibit protocol + * to be implemented in the user's compositor. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_creation + * @sa @ref glfwDestroyWindow + * + * @since Added in version 3.0. Replaces `glfwOpenWindow`. + * + * @ingroup window + */ +GLFWAPI GLFWwindow* glfwCreateWindow(int width, int height, const char* title, GLFWmonitor* monitor, GLFWwindow* share); + +/*! @brief Destroys the specified window and its context. + * + * This function destroys the specified window and its context. On calling + * this function, no further callbacks will be called for that window. + * + * If the context of the specified window is current on the main thread, it is + * detached before being destroyed. + * + * @param[in] window The window to destroy. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @note The context of the specified window must not be current on any other + * thread when this function is called. + * + * @reentrancy This function must not be called from a callback. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_creation + * @sa @ref glfwCreateWindow + * + * @since Added in version 3.0. Replaces `glfwCloseWindow`. + * + * @ingroup window + */ +GLFWAPI void glfwDestroyWindow(GLFWwindow* window); + +/*! @brief Checks the close flag of the specified window. + * + * This function returns the value of the close flag of the specified window. + * + * @param[in] window The window to query. + * @return The value of the close flag. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @sa @ref window_close + * + * @since Added in version 3.0. + * + * @ingroup window + */ +GLFWAPI int glfwWindowShouldClose(GLFWwindow* window); + +/*! @brief Sets the close flag of the specified window. 
+ * + * This function sets the value of the close flag of the specified window. + * This can be used to override the user's attempt to close the window, or + * to signal that it should be closed. + * + * @param[in] window The window whose flag to change. + * @param[in] value The new value. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @sa @ref window_close + * + * @since Added in version 3.0. + * + * @ingroup window + */ +GLFWAPI void glfwSetWindowShouldClose(GLFWwindow* window, int value); + +/*! @brief Sets the title of the specified window. + * + * This function sets the window title, encoded as UTF-8, of the specified + * window. + * + * @param[in] window The window whose title to change. + * @param[in] title The UTF-8 encoded window title. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @remark @macos The window title will not be updated until the next time you + * process events. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_title + * + * @since Added in version 1.0. + * @glfw3 Added window handle parameter. + * + * @ingroup window + */ +GLFWAPI void glfwSetWindowTitle(GLFWwindow* window, const char* title); + +/*! @brief Sets the icon for the specified window. + * + * This function sets the icon of the specified window. If passed an array of + * candidate images, those of or closest to the sizes desired by the system are + * selected. If no images are specified, the window reverts to its default + * icon. + * + * The pixels are 32-bit, little-endian, non-premultiplied RGBA, i.e. eight + * bits per channel with the red channel first. They are arranged canonically + * as packed sequential rows, starting from the top-left corner. + * + * The desired image sizes varies depending on platform and system settings. + * The selected images will be rescaled as needed. Good sizes include 16x16, + * 32x32 and 48x48. + * + * @param[in] window The window whose icon to set. + * @param[in] count The number of images in the specified array, or zero to + * revert to the default window icon. + * @param[in] images The images to create the icon from. This is ignored if + * count is zero. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @pointer_lifetime The specified image data is copied before this function + * returns. + * + * @remark @macos The GLFW window has no icon, as it is not a document + * window, so this function does nothing. The dock icon will be the same as + * the application bundle's icon. For more information on bundles, see the + * [Bundle Programming Guide](https://developer.apple.com/library/mac/documentation/CoreFoundation/Conceptual/CFBundles/) + * in the Mac Developer Library. + * + * @remark @wayland There is no existing protocol to change an icon, the + * window will thus inherit the one defined in the application's desktop file. + * This function always emits @ref GLFW_PLATFORM_ERROR. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_icon + * + * @since Added in version 3.2. + * + * @ingroup window + */ +GLFWAPI void glfwSetWindowIcon(GLFWwindow* window, int count, const GLFWimage* images); + +/*! @brief Retrieves the position of the content area of the specified window. 
+ * + * This function retrieves the position, in screen coordinates, of the + * upper-left corner of the content area of the specified window. + * + * Any or all of the position arguments may be `NULL`. If an error occurs, all + * non-`NULL` position arguments will be set to zero. + * + * @param[in] window The window to query. + * @param[out] xpos Where to store the x-coordinate of the upper-left corner of + * the content area, or `NULL`. + * @param[out] ypos Where to store the y-coordinate of the upper-left corner of + * the content area, or `NULL`. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @remark @wayland There is no way for an application to retrieve the global + * position of its windows, this function will always emit @ref + * GLFW_PLATFORM_ERROR. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_pos + * @sa @ref glfwSetWindowPos + * + * @since Added in version 3.0. + * + * @ingroup window + */ +GLFWAPI void glfwGetWindowPos(GLFWwindow* window, int* xpos, int* ypos); + +/*! @brief Sets the position of the content area of the specified window. + * + * This function sets the position, in screen coordinates, of the upper-left + * corner of the content area of the specified windowed mode window. If the + * window is a full screen window, this function does nothing. + * + * __Do not use this function__ to move an already visible window unless you + * have very good reasons for doing so, as it will confuse and annoy the user. + * + * The window manager may put limits on what positions are allowed. GLFW + * cannot and should not override these limits. + * + * @param[in] window The window to query. + * @param[in] xpos The x-coordinate of the upper-left corner of the content area. + * @param[in] ypos The y-coordinate of the upper-left corner of the content area. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @remark @wayland There is no way for an application to set the global + * position of its windows, this function will always emit @ref + * GLFW_PLATFORM_ERROR. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_pos + * @sa @ref glfwGetWindowPos + * + * @since Added in version 1.0. + * @glfw3 Added window handle parameter. + * + * @ingroup window + */ +GLFWAPI void glfwSetWindowPos(GLFWwindow* window, int xpos, int ypos); + +/*! @brief Retrieves the size of the content area of the specified window. + * + * This function retrieves the size, in screen coordinates, of the content area + * of the specified window. If you wish to retrieve the size of the + * framebuffer of the window in pixels, see @ref glfwGetFramebufferSize. + * + * Any or all of the size arguments may be `NULL`. If an error occurs, all + * non-`NULL` size arguments will be set to zero. + * + * @param[in] window The window whose size to retrieve. + * @param[out] width Where to store the width, in screen coordinates, of the + * content area, or `NULL`. + * @param[out] height Where to store the height, in screen coordinates, of the + * content area, or `NULL`. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_size + * @sa @ref glfwSetWindowSize + * + * @since Added in version 1.0. + * @glfw3 Added window handle parameter. 
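+ *
+ * As an illustrative sketch (assuming a windowed mode window and ignoring
+ * the monitor's position in the virtual desktop), the content area size can
+ * be combined with the primary monitor's video mode to roughly centre the
+ * window:
+ * @code
+ * int width, height;
+ * glfwGetWindowSize(window, &width, &height);
+ *
+ * const GLFWvidmode* mode = glfwGetVideoMode(glfwGetPrimaryMonitor());
+ * glfwSetWindowPos(window, (mode->width - width) / 2,
+ *                          (mode->height - height) / 2);
+ * @endcode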
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwGetWindowSize(GLFWwindow* window, int* width, int* height);
+
+/*! @brief Sets the size limits of the specified window.
+ *
+ * This function sets the size limits of the content area of the specified
+ * window. If the window is full screen, the size limits only take effect
+ * once it is made windowed. If the window is not resizable, this function
+ * does nothing.
+ *
+ * The size limits are applied immediately to a windowed mode window and may
+ * cause it to be resized.
+ *
+ * The maximum dimensions must be greater than or equal to the minimum
+ * dimensions and all must be greater than or equal to zero.
+ *
+ * @param[in] window The window to set limits for.
+ * @param[in] minwidth The minimum width, in screen coordinates, of the content
+ * area, or `GLFW_DONT_CARE`.
+ * @param[in] minheight The minimum height, in screen coordinates, of the
+ * content area, or `GLFW_DONT_CARE`.
+ * @param[in] maxwidth The maximum width, in screen coordinates, of the content
+ * area, or `GLFW_DONT_CARE`.
+ * @param[in] maxheight The maximum height, in screen coordinates, of the
+ * content area, or `GLFW_DONT_CARE`.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref
+ * GLFW_INVALID_VALUE and @ref GLFW_PLATFORM_ERROR.
+ *
+ * @remark If you set size limits and an aspect ratio that conflict, the
+ * results are undefined.
+ *
+ * @remark @wayland The size limits will not be applied until the window is
+ * actually resized, either by the user or by the compositor.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_sizelimits
+ * @sa @ref glfwSetWindowAspectRatio
+ *
+ * @since Added in version 3.2.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwSetWindowSizeLimits(GLFWwindow* window, int minwidth, int minheight, int maxwidth, int maxheight);
+
+/*! @brief Sets the aspect ratio of the specified window.
+ *
+ * This function sets the required aspect ratio of the content area of the
+ * specified window. If the window is full screen, the aspect ratio only takes
+ * effect once it is made windowed. If the window is not resizable, this
+ * function does nothing.
+ *
+ * The aspect ratio is specified as a numerator and a denominator and both
+ * values must be greater than zero. For example, the common 16:9 aspect ratio
+ * is specified as 16 and 9, respectively.
+ *
+ * If the numerator and denominator are set to `GLFW_DONT_CARE` then the aspect
+ * ratio limit is disabled.
+ *
+ * The aspect ratio is applied immediately to a windowed mode window and may
+ * cause it to be resized.
+ *
+ * @param[in] window The window to set limits for.
+ * @param[in] numer The numerator of the desired aspect ratio, or
+ * `GLFW_DONT_CARE`.
+ * @param[in] denom The denominator of the desired aspect ratio, or
+ * `GLFW_DONT_CARE`.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref
+ * GLFW_INVALID_VALUE and @ref GLFW_PLATFORM_ERROR.
+ *
+ * @remark If you set size limits and an aspect ratio that conflict, the
+ * results are undefined.
+ *
+ * @remark @wayland The aspect ratio will not be applied until the window is
+ * actually resized, either by the user or by the compositor.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_sizelimits
+ * @sa @ref glfwSetWindowSizeLimits
+ *
+ * @since Added in version 3.2.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwSetWindowAspectRatio(GLFWwindow* window, int numer, int denom);
+
+/*!
@brief Sets the size of the content area of the specified window. + * + * This function sets the size, in screen coordinates, of the content area of + * the specified window. + * + * For full screen windows, this function updates the resolution of its desired + * video mode and switches to the video mode closest to it, without affecting + * the window's context. As the context is unaffected, the bit depths of the + * framebuffer remain unchanged. + * + * If you wish to update the refresh rate of the desired video mode in addition + * to its resolution, see @ref glfwSetWindowMonitor. + * + * The window manager may put limits on what sizes are allowed. GLFW cannot + * and should not override these limits. + * + * @param[in] window The window to resize. + * @param[in] width The desired width, in screen coordinates, of the window + * content area. + * @param[in] height The desired height, in screen coordinates, of the window + * content area. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @remark @wayland A full screen window will not attempt to change the mode, + * no matter what the requested size. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_size + * @sa @ref glfwGetWindowSize + * @sa @ref glfwSetWindowMonitor + * + * @since Added in version 1.0. + * @glfw3 Added window handle parameter. + * + * @ingroup window + */ +GLFWAPI void glfwSetWindowSize(GLFWwindow* window, int width, int height); + +/*! @brief Retrieves the size of the framebuffer of the specified window. + * + * This function retrieves the size, in pixels, of the framebuffer of the + * specified window. If you wish to retrieve the size of the window in screen + * coordinates, see @ref glfwGetWindowSize. + * + * Any or all of the size arguments may be `NULL`. If an error occurs, all + * non-`NULL` size arguments will be set to zero. + * + * @param[in] window The window whose framebuffer to query. + * @param[out] width Where to store the width, in pixels, of the framebuffer, + * or `NULL`. + * @param[out] height Where to store the height, in pixels, of the framebuffer, + * or `NULL`. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_fbsize + * @sa @ref glfwSetFramebufferSizeCallback + * + * @since Added in version 3.0. + * + * @ingroup window + */ +GLFWAPI void glfwGetFramebufferSize(GLFWwindow* window, int* width, int* height); + +/*! @brief Retrieves the size of the frame of the window. + * + * This function retrieves the size, in screen coordinates, of each edge of the + * frame of the specified window. This size includes the title bar, if the + * window has one. The size of the frame may vary depending on the + * [window-related hints](@ref window_hints_wnd) used to create it. + * + * Because this function retrieves the size of each window frame edge and not + * the offset along a particular coordinate axis, the retrieved values will + * always be zero or positive. + * + * Any or all of the size arguments may be `NULL`. If an error occurs, all + * non-`NULL` size arguments will be set to zero. + * + * @param[in] window The window whose frame size to query. + * @param[out] left Where to store the size, in screen coordinates, of the left + * edge of the window frame, or `NULL`. 
+ * @param[out] top Where to store the size, in screen coordinates, of the top
+ * edge of the window frame, or `NULL`.
+ * @param[out] right Where to store the size, in screen coordinates, of the
+ * right edge of the window frame, or `NULL`.
+ * @param[out] bottom Where to store the size, in screen coordinates, of the
+ * bottom edge of the window frame, or `NULL`.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_size
+ *
+ * @since Added in version 3.1.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwGetWindowFrameSize(GLFWwindow* window, int* left, int* top, int* right, int* bottom);
+
+/*! @brief Retrieves the content scale for the specified window.
+ *
+ * This function retrieves the content scale for the specified window. The
+ * content scale is the ratio between the current DPI and the platform's
+ * default DPI. This is especially important for text and any UI elements. If
+ * the pixel dimensions of your UI scaled by this look appropriate on your
+ * machine then it should appear at a reasonable size on other machines
+ * regardless of their DPI and scaling settings. This relies on the system DPI
+ * and scaling settings being somewhat correct.
+ *
+ * On systems where each monitor can have its own content scale, the window
+ * content scale will depend on which monitor the system considers the window
+ * to be on.
+ *
+ * @param[in] window The window to query.
+ * @param[out] xscale Where to store the x-axis content scale, or `NULL`.
+ * @param[out] yscale Where to store the y-axis content scale, or `NULL`.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_scale
+ * @sa @ref glfwSetWindowContentScaleCallback
+ * @sa @ref glfwGetMonitorContentScale
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwGetWindowContentScale(GLFWwindow* window, float* xscale, float* yscale);
+
+/*! @brief Returns the opacity of the whole window.
+ *
+ * This function returns the opacity of the window, including any decorations.
+ *
+ * The opacity (or alpha) value is a positive finite number between zero and
+ * one, where zero is fully transparent and one is fully opaque. If the system
+ * does not support whole window transparency, this function always returns one.
+ *
+ * The initial opacity value for newly created windows is one.
+ *
+ * @param[in] window The window to query.
+ * @return The opacity value of the specified window.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_transparency
+ * @sa @ref glfwSetWindowOpacity
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup window
+ */
+GLFWAPI float glfwGetWindowOpacity(GLFWwindow* window);
+
+/*! @brief Sets the opacity of the whole window.
+ *
+ * This function sets the opacity of the window, including any decorations.
+ *
+ * The opacity (or alpha) value is a positive finite number between zero and
+ * one, where zero is fully transparent and one is fully opaque.
+ *
+ * The initial opacity value for newly created windows is one.
+ *
+ * A window created with framebuffer transparency may not use whole window
+ * transparency.
The results of doing this are undefined. + * + * @param[in] window The window to set the opacity for. + * @param[in] opacity The desired opacity of the specified window. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_transparency + * @sa @ref glfwGetWindowOpacity + * + * @since Added in version 3.3. + * + * @ingroup window + */ +GLFWAPI void glfwSetWindowOpacity(GLFWwindow* window, float opacity); + +/*! @brief Iconifies the specified window. + * + * This function iconifies (minimizes) the specified window if it was + * previously restored. If the window is already iconified, this function does + * nothing. + * + * If the specified window is a full screen window, the original monitor + * resolution is restored until the window is restored. + * + * @param[in] window The window to iconify. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @remark @wayland There is no concept of iconification in wl_shell, this + * function will emit @ref GLFW_PLATFORM_ERROR when using this deprecated + * protocol. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_iconify + * @sa @ref glfwRestoreWindow + * @sa @ref glfwMaximizeWindow + * + * @since Added in version 2.1. + * @glfw3 Added window handle parameter. + * + * @ingroup window + */ +GLFWAPI void glfwIconifyWindow(GLFWwindow* window); + +/*! @brief Restores the specified window. + * + * This function restores the specified window if it was previously iconified + * (minimized) or maximized. If the window is already restored, this function + * does nothing. + * + * If the specified window is a full screen window, the resolution chosen for + * the window is restored on the selected monitor. + * + * @param[in] window The window to restore. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_iconify + * @sa @ref glfwIconifyWindow + * @sa @ref glfwMaximizeWindow + * + * @since Added in version 2.1. + * @glfw3 Added window handle parameter. + * + * @ingroup window + */ +GLFWAPI void glfwRestoreWindow(GLFWwindow* window); + +/*! @brief Maximizes the specified window. + * + * This function maximizes the specified window if it was previously not + * maximized. If the window is already maximized, this function does nothing. + * + * If the specified window is a full screen window, this function does nothing. + * + * @param[in] window The window to maximize. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @par Thread Safety + * This function may only be called from the main thread. + * + * @sa @ref window_iconify + * @sa @ref glfwIconifyWindow + * @sa @ref glfwRestoreWindow + * + * @since Added in GLFW 3.2. + * + * @ingroup window + */ +GLFWAPI void glfwMaximizeWindow(GLFWwindow* window); + +/*! @brief Makes the specified window visible. + * + * This function makes the specified window visible if it was previously + * hidden. If the window is already visible or is in full screen mode, this + * function does nothing. 
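+ *
+ * A common pattern, sketched here with `load_resources` standing in for
+ * application setup, is to create the window hidden, finish initialization,
+ * and only then show it:
+ * @code
+ * glfwWindowHint(GLFW_VISIBLE, GLFW_FALSE);
+ * GLFWwindow* window = glfwCreateWindow(640, 480, "My Title", NULL, NULL);
+ *
+ * load_resources();
+ *
+ * glfwShowWindow(window);
+ * @endcode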
+ *
+ * By default, windowed mode windows are focused when shown.
+ * Set the [GLFW_FOCUS_ON_SHOW](@ref GLFW_FOCUS_ON_SHOW_hint) window hint
+ * to change this behavior for all newly created windows, or change the
+ * behavior for an existing window with @ref glfwSetWindowAttrib.
+ *
+ * @param[in] window The window to make visible.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_hide
+ * @sa @ref glfwHideWindow
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwShowWindow(GLFWwindow* window);
+
+/*! @brief Hides the specified window.
+ *
+ * This function hides the specified window if it was previously visible. If
+ * the window is already hidden or is in full screen mode, this function does
+ * nothing.
+ *
+ * @param[in] window The window to hide.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_hide
+ * @sa @ref glfwShowWindow
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwHideWindow(GLFWwindow* window);
+
+/*! @brief Brings the specified window to front and sets input focus.
+ *
+ * This function brings the specified window to front and sets input focus.
+ * The window should already be visible and not iconified.
+ *
+ * By default, both windowed and full screen mode windows are focused when
+ * initially created. Set the [GLFW_FOCUSED](@ref GLFW_FOCUSED_hint) to
+ * disable this behavior.
+ *
+ * Also by default, windowed mode windows are focused when shown
+ * with @ref glfwShowWindow. Set the
+ * [GLFW_FOCUS_ON_SHOW](@ref GLFW_FOCUS_ON_SHOW_hint) to disable this behavior.
+ *
+ * __Do not use this function__ to steal focus from other applications unless
+ * you are certain that is what the user wants. Focus stealing can be
+ * extremely disruptive.
+ *
+ * For a less disruptive way of getting the user's attention, see
+ * [attention requests](@ref window_attention).
+ *
+ * @param[in] window The window to give input focus.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @remark @wayland It is not possible for an application to bring its windows
+ * to front, this function will always emit @ref GLFW_PLATFORM_ERROR.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_focus
+ * @sa @ref window_attention
+ *
+ * @since Added in version 3.2.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwFocusWindow(GLFWwindow* window);
+
+/*! @brief Requests user attention to the specified window.
+ *
+ * This function requests user attention to the specified window. On
+ * platforms where this is not supported, attention is requested to the
+ * application as a whole.
+ *
+ * Once the user has given attention, usually by focusing the window or
+ * application, the system will end the request automatically.
+ *
+ * @param[in] window The window to request attention to.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @remark @macos Attention is requested to the application as a whole, not the
+ * specific window.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_attention
+ *
+ * @since Added in version 3.3.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwRequestWindowAttention(GLFWwindow* window);
+
+/*! @brief Returns the monitor that the window uses for full screen mode.
+ *
+ * This function returns the handle of the monitor that the specified window is
+ * in full screen on.
+ *
+ * @param[in] window The window to query.
+ * @return The monitor, or `NULL` if the window is in windowed mode or an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_monitor
+ * @sa @ref glfwSetWindowMonitor
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup window
+ */
+GLFWAPI GLFWmonitor* glfwGetWindowMonitor(GLFWwindow* window);
+
+/*! @brief Sets the mode, monitor, video mode and placement of a window.
+ *
+ * This function sets the monitor that the window uses for full screen mode or,
+ * if the monitor is `NULL`, makes it windowed mode.
+ *
+ * When setting a monitor, this function updates the width, height and refresh
+ * rate of the desired video mode and switches to the video mode closest to it.
+ * The window position is ignored when setting a monitor.
+ *
+ * When the monitor is `NULL`, the position, width and height are used to
+ * place the window content area. The refresh rate is ignored when no monitor
+ * is specified.
+ *
+ * If you only wish to update the resolution of a full screen window or the
+ * size of a windowed mode window, see @ref glfwSetWindowSize.
+ *
+ * When a window transitions from full screen to windowed mode, this function
+ * restores any previous window settings such as whether it is decorated,
+ * floating, resizable, has size or aspect ratio limits, etc.
+ *
+ * @param[in] window The window whose monitor, size or video mode to set.
+ * @param[in] monitor The desired monitor, or `NULL` to set windowed mode.
+ * @param[in] xpos The desired x-coordinate of the upper-left corner of the
+ * content area.
+ * @param[in] ypos The desired y-coordinate of the upper-left corner of the
+ * content area.
+ * @param[in] width The desired width, in screen coordinates, of the content
+ * area or video mode.
+ * @param[in] height The desired height, in screen coordinates, of the content
+ * area or video mode.
+ * @param[in] refreshRate The desired refresh rate, in Hz, of the video mode,
+ * or `GLFW_DONT_CARE`.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @remark The OpenGL or OpenGL ES context will not be destroyed or otherwise
+ * affected by any resizing or mode switching, although you may need to update
+ * your viewport if the framebuffer size has changed.
+ *
+ * @remark @wayland The desired window position is ignored, as there is no way
+ * for an application to set this property.
+ *
+ * @remark @wayland Setting the window to full screen will not attempt to
+ * change the mode, no matter what the requested size or refresh rate.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref window_monitor
+ * @sa @ref window_full_screen
+ * @sa @ref glfwGetWindowMonitor
+ * @sa @ref glfwSetWindowSize
+ *
+ * @since Added in version 3.2.
+ *
+ * @ingroup window
+ */
+GLFWAPI void glfwSetWindowMonitor(GLFWwindow* window, GLFWmonitor* monitor, int xpos, int ypos, int width, int height, int refreshRate);
+
+/*! @brief Returns an attribute of the specified window.
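+ *
+ * For example, a minimal sketch querying whether the window currently has
+ * input focus:
+ * @code
+ * if (glfwGetWindowAttrib(window, GLFW_FOCUSED))
+ * {
+ *     // The window has input focus.
+ * }
+ * @endcode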
+ * + * This function returns the value of an attribute of the specified window or + * its OpenGL or OpenGL ES context. + * + * @param[in] window The window to query. + * @param[in] attrib The [window attribute](@ref window_attribs) whose value to + * return. + * @return The value of the attribute, or zero if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_INVALID_ENUM and @ref GLFW_PLATFORM_ERROR. + * + * @remark Framebuffer related hints are not window attributes. See @ref + * window_attribs_fb for more information. + * + * @remark Zero is a valid value for many window and context related + * attributes so you cannot use a return value of zero as an indication of + * errors. However, this function should not fail as long as it is passed + * valid arguments and the library has been [initialized](@ref intro_init). + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_attribs + * @sa @ref glfwSetWindowAttrib + * + * @since Added in version 3.0. Replaces `glfwGetWindowParam` and + * `glfwGetGLVersion`. + * + * @ingroup window + */ +GLFWAPI int glfwGetWindowAttrib(GLFWwindow* window, int attrib); + +/*! @brief Sets an attribute of the specified window. + * + * This function sets the value of an attribute of the specified window. + * + * The supported attributes are [GLFW_DECORATED](@ref GLFW_DECORATED_attrib), + * [GLFW_RESIZABLE](@ref GLFW_RESIZABLE_attrib), + * [GLFW_FLOATING](@ref GLFW_FLOATING_attrib), + * [GLFW_AUTO_ICONIFY](@ref GLFW_AUTO_ICONIFY_attrib) and + * [GLFW_FOCUS_ON_SHOW](@ref GLFW_FOCUS_ON_SHOW_attrib). + * + * Some of these attributes are ignored for full screen windows. The new + * value will take effect if the window is later made windowed. + * + * Some of these attributes are ignored for windowed mode windows. The new + * value will take effect if the window is later made full screen. + * + * @param[in] window The window to set the attribute for. + * @param[in] attrib A supported window attribute. + * @param[in] value `GLFW_TRUE` or `GLFW_FALSE`. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_INVALID_ENUM, @ref GLFW_INVALID_VALUE and @ref GLFW_PLATFORM_ERROR. + * + * @remark Calling @ref glfwGetWindowAttrib will always return the latest + * value, even if that value is ignored by the current mode of the window. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_attribs + * @sa @ref glfwGetWindowAttrib + * + * @since Added in version 3.3. + * + * @ingroup window + */ +GLFWAPI void glfwSetWindowAttrib(GLFWwindow* window, int attrib, int value); + +/*! @brief Sets the user pointer of the specified window. + * + * This function sets the user-defined pointer of the specified window. The + * current value is retained until the window is destroyed. The initial value + * is `NULL`. + * + * @param[in] window The window whose pointer to set. + * @param[in] pointer The new value. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @sa @ref window_userptr + * @sa @ref glfwGetWindowUserPointer + * + * @since Added in version 3.0. + * + * @ingroup window + */ +GLFWAPI void glfwSetWindowUserPointer(GLFWwindow* window, void* pointer); + +/*! @brief Returns the user pointer of the specified window. 
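+ *
+ * A typical pattern, sketched here with a hypothetical application struct,
+ * is to attach per-window state before registering callbacks and to fetch
+ * it again inside them:
+ * @code
+ * struct app_state state = {0};
+ * glfwSetWindowUserPointer(window, &state);
+ *
+ * // Later, inside any window or input callback:
+ * struct app_state* app = glfwGetWindowUserPointer(window);
+ * @endcode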
+ * + * This function returns the current value of the user-defined pointer of the + * specified window. The initial value is `NULL`. + * + * @param[in] window The window whose pointer to return. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @sa @ref window_userptr + * @sa @ref glfwSetWindowUserPointer + * + * @since Added in version 3.0. + * + * @ingroup window + */ +GLFWAPI void* glfwGetWindowUserPointer(GLFWwindow* window); + +/*! @brief Sets the position callback for the specified window. + * + * This function sets the position callback of the specified window, which is + * called when the window is moved. The callback is provided with the + * position, in screen coordinates, of the upper-left corner of the content + * area of the window. + * + * @param[in] window The window whose callback to set. + * @param[in] callback The new callback, or `NULL` to remove the currently set + * callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). + * + * @callback_signature + * @code + * void function_name(GLFWwindow* window, int xpos, int ypos) + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWwindowposfun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @remark @wayland This callback will never be called, as there is no way for + * an application to know its global position. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_pos + * + * @since Added in version 3.0. + * + * @ingroup window + */ +GLFWAPI GLFWwindowposfun glfwSetWindowPosCallback(GLFWwindow* window, GLFWwindowposfun callback); + +/*! @brief Sets the size callback for the specified window. + * + * This function sets the size callback of the specified window, which is + * called when the window is resized. The callback is provided with the size, + * in screen coordinates, of the content area of the window. + * + * @param[in] window The window whose callback to set. + * @param[in] callback The new callback, or `NULL` to remove the currently set + * callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). + * + * @callback_signature + * @code + * void function_name(GLFWwindow* window, int width, int height) + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWwindowsizefun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_size + * + * @since Added in version 1.0. + * @glfw3 Added window handle parameter and return value. + * + * @ingroup window + */ +GLFWAPI GLFWwindowsizefun glfwSetWindowSizeCallback(GLFWwindow* window, GLFWwindowsizefun callback); + +/*! @brief Sets the close callback for the specified window. + * + * This function sets the close callback of the specified window, which is + * called when the user attempts to close the window, for example by clicking + * the close widget in the title bar. + * + * The close flag is set before this callback is called, but you can modify it + * at any time with @ref glfwSetWindowShouldClose. + * + * The close callback is not triggered by @ref glfwDestroyWindow. 
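+ *
+ * As an illustrative sketch (`user_confirmed_quit` is a placeholder for
+ * application logic), a close callback can veto the request by clearing the
+ * close flag again:
+ * @code
+ * void close_callback(GLFWwindow* window)
+ * {
+ *     if (!user_confirmed_quit())
+ *         glfwSetWindowShouldClose(window, GLFW_FALSE);
+ * }
+ *
+ * glfwSetWindowCloseCallback(window, close_callback);
+ * @endcode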
+ * + * @param[in] window The window whose callback to set. + * @param[in] callback The new callback, or `NULL` to remove the currently set + * callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). + * + * @callback_signature + * @code + * void function_name(GLFWwindow* window) + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWwindowclosefun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @remark @macos Selecting Quit from the application menu will trigger the + * close callback for all windows. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_close + * + * @since Added in version 2.5. + * @glfw3 Added window handle parameter and return value. + * + * @ingroup window + */ +GLFWAPI GLFWwindowclosefun glfwSetWindowCloseCallback(GLFWwindow* window, GLFWwindowclosefun callback); + +/*! @brief Sets the refresh callback for the specified window. + * + * This function sets the refresh callback of the specified window, which is + * called when the content area of the window needs to be redrawn, for example + * if the window has been exposed after having been covered by another window. + * + * On compositing window systems such as Aero, Compiz, Aqua or Wayland, where + * the window contents are saved off-screen, this callback may be called only + * very infrequently or never at all. + * + * @param[in] window The window whose callback to set. + * @param[in] callback The new callback, or `NULL` to remove the currently set + * callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). + * + * @callback_signature + * @code + * void function_name(GLFWwindow* window); + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWwindowrefreshfun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_refresh + * + * @since Added in version 2.5. + * @glfw3 Added window handle parameter and return value. + * + * @ingroup window + */ +GLFWAPI GLFWwindowrefreshfun glfwSetWindowRefreshCallback(GLFWwindow* window, GLFWwindowrefreshfun callback); + +/*! @brief Sets the focus callback for the specified window. + * + * This function sets the focus callback of the specified window, which is + * called when the window gains or loses input focus. + * + * After the focus callback is called for a window that lost input focus, + * synthetic key and mouse button release events will be generated for all such + * that had been pressed. For more information, see @ref glfwSetKeyCallback + * and @ref glfwSetMouseButtonCallback. + * + * @param[in] window The window whose callback to set. + * @param[in] callback The new callback, or `NULL` to remove the currently set + * callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). + * + * @callback_signature + * @code + * void function_name(GLFWwindow* window, int focused) + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWwindowfocusfun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. 
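+ *
+ * A brief sketch (`app_set_paused` is a placeholder for application code)
+ * of a focus callback that pauses work while the window is in the
+ * background:
+ * @code
+ * void focus_callback(GLFWwindow* window, int focused)
+ * {
+ *     app_set_paused(window, !focused);
+ * }
+ *
+ * glfwSetWindowFocusCallback(window, focus_callback);
+ * @endcode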
+ * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_focus + * + * @since Added in version 3.0. + * + * @ingroup window + */ +GLFWAPI GLFWwindowfocusfun glfwSetWindowFocusCallback(GLFWwindow* window, GLFWwindowfocusfun callback); + +/*! @brief Sets the iconify callback for the specified window. + * + * This function sets the iconification callback of the specified window, which + * is called when the window is iconified or restored. + * + * @param[in] window The window whose callback to set. + * @param[in] callback The new callback, or `NULL` to remove the currently set + * callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). + * + * @callback_signature + * @code + * void function_name(GLFWwindow* window, int iconified) + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWwindowiconifyfun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @remark @wayland The wl_shell protocol has no concept of iconification, + * this callback will never be called when using this deprecated protocol. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_iconify + * + * @since Added in version 3.0. + * + * @ingroup window + */ +GLFWAPI GLFWwindowiconifyfun glfwSetWindowIconifyCallback(GLFWwindow* window, GLFWwindowiconifyfun callback); + +/*! @brief Sets the maximize callback for the specified window. + * + * This function sets the maximization callback of the specified window, which + * is called when the window is maximized or restored. + * + * @param[in] window The window whose callback to set. + * @param[in] callback The new callback, or `NULL` to remove the currently set + * callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). + * + * @callback_signature + * @code + * void function_name(GLFWwindow* window, int maximized) + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWwindowmaximizefun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_maximize + * + * @since Added in version 3.3. + * + * @ingroup window + */ +GLFWAPI GLFWwindowmaximizefun glfwSetWindowMaximizeCallback(GLFWwindow* window, GLFWwindowmaximizefun callback); + +/*! @brief Sets the framebuffer resize callback for the specified window. + * + * This function sets the framebuffer resize callback of the specified window, + * which is called when the framebuffer of the specified window is resized. + * + * @param[in] window The window whose callback to set. + * @param[in] callback The new callback, or `NULL` to remove the currently set + * callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). + * + * @callback_signature + * @code + * void function_name(GLFWwindow* window, int width, int height) + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWframebuffersizefun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function must only be called from the main thread. 
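+ *
+ * As an illustrative sketch (assuming an OpenGL context that is current on
+ * the main thread), this callback is commonly used to keep the viewport in
+ * sync with the framebuffer:
+ * @code
+ * void framebuffer_size_callback(GLFWwindow* window, int width, int height)
+ * {
+ *     glViewport(0, 0, width, height);
+ * }
+ *
+ * glfwSetFramebufferSizeCallback(window, framebuffer_size_callback);
+ * @endcode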
+ * + * @sa @ref window_fbsize + * + * @since Added in version 3.0. + * + * @ingroup window + */ +GLFWAPI GLFWframebuffersizefun glfwSetFramebufferSizeCallback(GLFWwindow* window, GLFWframebuffersizefun callback); + +/*! @brief Sets the window content scale callback for the specified window. + * + * This function sets the window content scale callback of the specified window, + * which is called when the content scale of the specified window changes. + * + * @param[in] window The window whose callback to set. + * @param[in] callback The new callback, or `NULL` to remove the currently set + * callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). + * + * @callback_signature + * @code + * void function_name(GLFWwindow* window, float xscale, float yscale) + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWwindowcontentscalefun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref window_scale + * @sa @ref glfwGetWindowContentScale + * + * @since Added in version 3.3. + * + * @ingroup window + */ +GLFWAPI GLFWwindowcontentscalefun glfwSetWindowContentScaleCallback(GLFWwindow* window, GLFWwindowcontentscalefun callback); + +/*! @brief Processes all pending events. + * + * This function processes only those events that are already in the event + * queue and then returns immediately. Processing events will cause the window + * and input callbacks associated with those events to be called. + * + * On some platforms, a window move, resize or menu operation will cause event + * processing to block. This is due to how event processing is designed on + * those platforms. You can use the + * [window refresh callback](@ref window_refresh) to redraw the contents of + * your window when necessary during such operations. + * + * Do not assume that callbacks you set will _only_ be called in response to + * event processing functions like this one. While it is necessary to poll for + * events, window systems that require GLFW to register callbacks of its own + * can pass events to GLFW in response to many window system function calls. + * GLFW will pass those events on to the application callbacks before + * returning. + * + * Event processing is not required for joystick input to work. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @reentrancy This function must not be called from a callback. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref events + * @sa @ref glfwWaitEvents + * @sa @ref glfwWaitEventsTimeout + * + * @since Added in version 1.0. + * + * @ingroup window + */ +GLFWAPI void glfwPollEvents(void); + +/*! @brief Waits until events are queued and processes them. + * + * This function puts the calling thread to sleep until at least one event is + * available in the event queue. Once one or more events are available, + * it behaves exactly like @ref glfwPollEvents, i.e. the events in the queue + * are processed and the function then returns immediately. Processing events + * will cause the window and input callbacks associated with those events to be + * called. + * + * Since not all events are associated with callbacks, this function may return + * without a callback having been called even if you are monitoring all + * callbacks. 
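+ *
+ * A minimal sketch of an event-driven loop built on this function, where
+ * `redraw_ui` stands in for application code:
+ * @code
+ * while (!glfwWindowShouldClose(window))
+ * {
+ *     glfwWaitEvents();
+ *     redraw_ui(window);
+ * }
+ * @endcode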
+ * + * On some platforms, a window move, resize or menu operation will cause event + * processing to block. This is due to how event processing is designed on + * those platforms. You can use the + * [window refresh callback](@ref window_refresh) to redraw the contents of + * your window when necessary during such operations. + * + * Do not assume that callbacks you set will _only_ be called in response to + * event processing functions like this one. While it is necessary to poll for + * events, window systems that require GLFW to register callbacks of its own + * can pass events to GLFW in response to many window system function calls. + * GLFW will pass those events on to the application callbacks before + * returning. + * + * Event processing is not required for joystick input to work. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @reentrancy This function must not be called from a callback. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref events + * @sa @ref glfwPollEvents + * @sa @ref glfwWaitEventsTimeout + * + * @since Added in version 2.5. + * + * @ingroup window + */ +GLFWAPI void glfwWaitEvents(void); + +/*! @brief Waits with timeout until events are queued and processes them. + * + * This function puts the calling thread to sleep until at least one event is + * available in the event queue, or until the specified timeout is reached. If + * one or more events are available, it behaves exactly like @ref + * glfwPollEvents, i.e. the events in the queue are processed and the function + * then returns immediately. Processing events will cause the window and input + * callbacks associated with those events to be called. + * + * The timeout value must be a positive finite number. + * + * Since not all events are associated with callbacks, this function may return + * without a callback having been called even if you are monitoring all + * callbacks. + * + * On some platforms, a window move, resize or menu operation will cause event + * processing to block. This is due to how event processing is designed on + * those platforms. You can use the + * [window refresh callback](@ref window_refresh) to redraw the contents of + * your window when necessary during such operations. + * + * Do not assume that callbacks you set will _only_ be called in response to + * event processing functions like this one. While it is necessary to poll for + * events, window systems that require GLFW to register callbacks of its own + * can pass events to GLFW in response to many window system function calls. + * GLFW will pass those events on to the application callbacks before + * returning. + * + * Event processing is not required for joystick input to work. + * + * @param[in] timeout The maximum amount of time, in seconds, to wait. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_INVALID_VALUE and @ref GLFW_PLATFORM_ERROR. + * + * @reentrancy This function must not be called from a callback. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref events + * @sa @ref glfwPollEvents + * @sa @ref glfwWaitEvents + * + * @since Added in version 3.2. + * + * @ingroup window + */ +GLFWAPI void glfwWaitEventsTimeout(double timeout); + +/*! @brief Posts an empty event to the event queue. 
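+ *
+ * As an illustrative sketch (`finish_background_work` is a placeholder for
+ * application code), a worker thread can use this to wake a main thread
+ * that is sleeping in @ref glfwWaitEvents:
+ * @code
+ * // On a worker thread:
+ * finish_background_work();
+ * glfwPostEmptyEvent();
+ *
+ * // The main thread, blocked in glfwWaitEvents, wakes up and can pick up
+ * // the result during normal event processing.
+ * @endcode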
+ * + * This function posts an empty event from the current thread to the event + * queue, causing @ref glfwWaitEvents or @ref glfwWaitEventsTimeout to return. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @thread_safety This function may be called from any thread. + * + * @sa @ref events + * @sa @ref glfwWaitEvents + * @sa @ref glfwWaitEventsTimeout + * + * @since Added in version 3.1. + * + * @ingroup window + */ +GLFWAPI void glfwPostEmptyEvent(void); + +/*! @brief Returns the value of an input option for the specified window. + * + * This function returns the value of an input option for the specified window. + * The mode must be one of @ref GLFW_CURSOR, @ref GLFW_STICKY_KEYS, + * @ref GLFW_STICKY_MOUSE_BUTTONS, @ref GLFW_LOCK_KEY_MODS or + * @ref GLFW_RAW_MOUSE_MOTION. + * + * @param[in] window The window to query. + * @param[in] mode One of `GLFW_CURSOR`, `GLFW_STICKY_KEYS`, + * `GLFW_STICKY_MOUSE_BUTTONS`, `GLFW_LOCK_KEY_MODS` or + * `GLFW_RAW_MOUSE_MOTION`. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_INVALID_ENUM. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref glfwSetInputMode + * + * @since Added in version 3.0. + * + * @ingroup input + */ +GLFWAPI int glfwGetInputMode(GLFWwindow* window, int mode); + +/*! @brief Sets an input option for the specified window. + * + * This function sets an input mode option for the specified window. The mode + * must be one of @ref GLFW_CURSOR, @ref GLFW_STICKY_KEYS, + * @ref GLFW_STICKY_MOUSE_BUTTONS, @ref GLFW_LOCK_KEY_MODS or + * @ref GLFW_RAW_MOUSE_MOTION. + * + * If the mode is `GLFW_CURSOR`, the value must be one of the following cursor + * modes: + * - `GLFW_CURSOR_NORMAL` makes the cursor visible and behaving normally. + * - `GLFW_CURSOR_HIDDEN` makes the cursor invisible when it is over the + * content area of the window but does not restrict the cursor from leaving. + * - `GLFW_CURSOR_DISABLED` hides and grabs the cursor, providing virtual + * and unlimited cursor movement. This is useful for implementing for + * example 3D camera controls. + * + * If the mode is `GLFW_STICKY_KEYS`, the value must be either `GLFW_TRUE` to + * enable sticky keys, or `GLFW_FALSE` to disable it. If sticky keys are + * enabled, a key press will ensure that @ref glfwGetKey returns `GLFW_PRESS` + * the next time it is called even if the key had been released before the + * call. This is useful when you are only interested in whether keys have been + * pressed but not when or in which order. + * + * If the mode is `GLFW_STICKY_MOUSE_BUTTONS`, the value must be either + * `GLFW_TRUE` to enable sticky mouse buttons, or `GLFW_FALSE` to disable it. + * If sticky mouse buttons are enabled, a mouse button press will ensure that + * @ref glfwGetMouseButton returns `GLFW_PRESS` the next time it is called even + * if the mouse button had been released before the call. This is useful when + * you are only interested in whether mouse buttons have been pressed but not + * when or in which order. + * + * If the mode is `GLFW_LOCK_KEY_MODS`, the value must be either `GLFW_TRUE` to + * enable lock key modifier bits, or `GLFW_FALSE` to disable them. If enabled, + * callbacks that receive modifier bits will also have the @ref + * GLFW_MOD_CAPS_LOCK bit set when the event was generated with Caps Lock on, + * and the @ref GLFW_MOD_NUM_LOCK bit when Num Lock was on. 
+ * + * If the mode is `GLFW_RAW_MOUSE_MOTION`, the value must be either `GLFW_TRUE` + * to enable raw (unscaled and unaccelerated) mouse motion when the cursor is + * disabled, or `GLFW_FALSE` to disable it. If raw motion is not supported, + * attempting to set this will emit @ref GLFW_PLATFORM_ERROR. Call @ref + * glfwRawMouseMotionSupported to check for support. + * + * @param[in] window The window whose input mode to set. + * @param[in] mode One of `GLFW_CURSOR`, `GLFW_STICKY_KEYS`, + * `GLFW_STICKY_MOUSE_BUTTONS`, `GLFW_LOCK_KEY_MODS` or + * `GLFW_RAW_MOUSE_MOTION`. + * @param[in] value The new value of the specified input mode. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_INVALID_ENUM and @ref GLFW_PLATFORM_ERROR. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref glfwGetInputMode + * + * @since Added in version 3.0. Replaces `glfwEnable` and `glfwDisable`. + * + * @ingroup input + */ +GLFWAPI void glfwSetInputMode(GLFWwindow* window, int mode, int value); + +/*! @brief Returns whether raw mouse motion is supported. + * + * This function returns whether raw mouse motion is supported on the current + * system. This status does not change after GLFW has been initialized so you + * only need to check this once. If you attempt to enable raw motion on + * a system that does not support it, @ref GLFW_PLATFORM_ERROR will be emitted. + * + * Raw mouse motion is closer to the actual motion of the mouse across + * a surface. It is not affected by the scaling and acceleration applied to + * the motion of the desktop cursor. That processing is suitable for a cursor + * while raw motion is better for controlling for example a 3D camera. Because + * of this, raw mouse motion is only provided when the cursor is disabled. + * + * @return `GLFW_TRUE` if raw mouse motion is supported on the current machine, + * or `GLFW_FALSE` otherwise. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref raw_mouse_motion + * @sa @ref glfwSetInputMode + * + * @since Added in version 3.3. + * + * @ingroup input + */ +GLFWAPI int glfwRawMouseMotionSupported(void); + +/*! @brief Returns the layout-specific name of the specified printable key. + * + * This function returns the name of the specified printable key, encoded as + * UTF-8. This is typically the character that key would produce without any + * modifier keys, intended for displaying key bindings to the user. For dead + * keys, it is typically the diacritic it would add to a character. + * + * __Do not use this function__ for [text input](@ref input_char). You will + * break text input for many languages even if it happens to work for yours. + * + * If the key is `GLFW_KEY_UNKNOWN`, the scancode is used to identify the key, + * otherwise the scancode is ignored. If you specify a non-printable key, or + * `GLFW_KEY_UNKNOWN` and a scancode that maps to a non-printable key, this + * function returns `NULL` but does not emit an error. + * + * This behavior allows you to always pass in the arguments in the + * [key callback](@ref input_key) without modification. 
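+ *
+ * For example, a minimal sketch of a key callback that passes its arguments
+ * straight through (`show_binding_prompt` is a placeholder for application
+ * code):
+ * @code
+ * void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods)
+ * {
+ *     const char* name = glfwGetKeyName(key, scancode);
+ *     if (name)
+ *         show_binding_prompt(name);
+ * }
+ * @endcode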
+ * + * The printable keys are: + * - `GLFW_KEY_APOSTROPHE` + * - `GLFW_KEY_COMMA` + * - `GLFW_KEY_MINUS` + * - `GLFW_KEY_PERIOD` + * - `GLFW_KEY_SLASH` + * - `GLFW_KEY_SEMICOLON` + * - `GLFW_KEY_EQUAL` + * - `GLFW_KEY_LEFT_BRACKET` + * - `GLFW_KEY_RIGHT_BRACKET` + * - `GLFW_KEY_BACKSLASH` + * - `GLFW_KEY_WORLD_1` + * - `GLFW_KEY_WORLD_2` + * - `GLFW_KEY_0` to `GLFW_KEY_9` + * - `GLFW_KEY_A` to `GLFW_KEY_Z` + * - `GLFW_KEY_KP_0` to `GLFW_KEY_KP_9` + * - `GLFW_KEY_KP_DECIMAL` + * - `GLFW_KEY_KP_DIVIDE` + * - `GLFW_KEY_KP_MULTIPLY` + * - `GLFW_KEY_KP_SUBTRACT` + * - `GLFW_KEY_KP_ADD` + * - `GLFW_KEY_KP_EQUAL` + * + * Names for printable keys depend on keyboard layout, while names for + * non-printable keys are the same across layouts but depend on the application + * language and should be localized along with other user interface text. + * + * @param[in] key The key to query, or `GLFW_KEY_UNKNOWN`. + * @param[in] scancode The scancode of the key to query. + * @return The UTF-8 encoded, layout-specific name of the key, or `NULL`. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @remark The contents of the returned string may change when a keyboard + * layout change event is received. + * + * @pointer_lifetime The returned string is allocated and freed by GLFW. You + * should not free it yourself. It is valid until the library is terminated. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref input_key_name + * + * @since Added in version 3.2. + * + * @ingroup input + */ +GLFWAPI const char* glfwGetKeyName(int key, int scancode); + +/*! @brief Returns the platform-specific scancode of the specified key. + * + * This function returns the platform-specific scancode of the specified key. + * + * If the key is `GLFW_KEY_UNKNOWN` or does not exist on the keyboard this + * method will return `-1`. + * + * @param[in] key Any [named key](@ref keys). + * @return The platform-specific scancode for the key, or `-1` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_INVALID_ENUM and @ref GLFW_PLATFORM_ERROR. + * + * @thread_safety This function may be called from any thread. + * + * @sa @ref input_key + * + * @since Added in version 3.3. + * + * @ingroup input + */ +GLFWAPI int glfwGetKeyScancode(int key); + +/*! @brief Returns the last reported state of a keyboard key for the specified + * window. + * + * This function returns the last state reported for the specified key to the + * specified window. The returned state is one of `GLFW_PRESS` or + * `GLFW_RELEASE`. The higher-level action `GLFW_REPEAT` is only reported to + * the key callback. + * + * If the @ref GLFW_STICKY_KEYS input mode is enabled, this function returns + * `GLFW_PRESS` the first time you call it for a key that was pressed, even if + * that key has already been released. + * + * The key functions deal with physical keys, with [key tokens](@ref keys) + * named after their use on the standard US keyboard layout. If you want to + * input text, use the Unicode character callback instead. + * + * The [modifier key bit masks](@ref mods) are not key tokens and cannot be + * used with this function. + * + * __Do not use this function__ to implement [text input](@ref input_char). + * + * @param[in] window The desired window. + * @param[in] key The desired [keyboard key](@ref keys). `GLFW_KEY_UNKNOWN` is + * not a valid key for this function. 
+ * @return One of `GLFW_PRESS` or `GLFW_RELEASE`.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_INVALID_ENUM.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref input_key
+ *
+ * @since Added in version 1.0.
+ * @glfw3 Added window handle parameter.
+ *
+ * @ingroup input
+ */
+GLFWAPI int glfwGetKey(GLFWwindow* window, int key);
+
+/*! @brief Returns the last reported state of a mouse button for the specified
+ * window.
+ *
+ * This function returns the last state reported for the specified mouse button
+ * to the specified window. The returned state is one of `GLFW_PRESS` or
+ * `GLFW_RELEASE`.
+ *
+ * If the @ref GLFW_STICKY_MOUSE_BUTTONS input mode is enabled, this function
+ * returns `GLFW_PRESS` the first time you call it for a mouse button that was
+ * pressed, even if that mouse button has already been released.
+ *
+ * @param[in] window The desired window.
+ * @param[in] button The desired [mouse button](@ref buttons).
+ * @return One of `GLFW_PRESS` or `GLFW_RELEASE`.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_INVALID_ENUM.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref input_mouse_button
+ *
+ * @since Added in version 1.0.
+ * @glfw3 Added window handle parameter.
+ *
+ * @ingroup input
+ */
+GLFWAPI int glfwGetMouseButton(GLFWwindow* window, int button);
+
+/*! @brief Retrieves the position of the cursor relative to the content area of
+ * the window.
+ *
+ * This function returns the position of the cursor, in screen coordinates,
+ * relative to the upper-left corner of the content area of the specified
+ * window.
+ *
+ * If the cursor is disabled (with `GLFW_CURSOR_DISABLED`) then the cursor
+ * position is unbounded and limited only by the minimum and maximum values of
+ * a `double`.
+ *
+ * The coordinates can be converted to their integer equivalents with the
+ * `floor` function. Casting directly to an integer type works for positive
+ * coordinates, but fails for negative ones.
+ *
+ * Any or all of the position arguments may be `NULL`. If an error occurs, all
+ * non-`NULL` position arguments will be set to zero.
+ *
+ * @param[in] window The desired window.
+ * @param[out] xpos Where to store the cursor x-coordinate, relative to the
+ * left edge of the content area, or `NULL`.
+ * @param[out] ypos Where to store the cursor y-coordinate, relative to the
+ * top edge of the content area, or `NULL`.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref cursor_pos
+ * @sa @ref glfwSetCursorPos
+ *
+ * @since Added in version 3.0. Replaces `glfwGetMousePos`.
+ *
+ * @ingroup input
+ */
+GLFWAPI void glfwGetCursorPos(GLFWwindow* window, double* xpos, double* ypos);
+
+/*! @brief Sets the position of the cursor, relative to the content area of the
+ * window.
+ *
+ * This function sets the position, in screen coordinates, of the cursor
+ * relative to the upper-left corner of the content area of the specified
+ * window. The window must have input focus. If the window does not have
+ * input focus when this function is called, it fails silently.
+ *
+ * __Do not use this function__ to implement things like camera controls.
GLFW + * already provides the `GLFW_CURSOR_DISABLED` cursor mode that hides the + * cursor, transparently re-centers it and provides unconstrained cursor + * motion. See @ref glfwSetInputMode for more information. + * + * If the cursor mode is `GLFW_CURSOR_DISABLED` then the cursor position is + * unconstrained and limited only by the minimum and maximum values of + * a `double`. + * + * @param[in] window The desired window. + * @param[in] xpos The desired x-coordinate, relative to the left edge of the + * content area. + * @param[in] ypos The desired y-coordinate, relative to the top edge of the + * content area. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @remark @wayland This function will only work when the cursor mode is + * `GLFW_CURSOR_DISABLED`, otherwise it will do nothing. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref cursor_pos + * @sa @ref glfwGetCursorPos + * + * @since Added in version 3.0. Replaces `glfwSetMousePos`. + * + * @ingroup input + */ +GLFWAPI void glfwSetCursorPos(GLFWwindow* window, double xpos, double ypos); + +/*! @brief Creates a custom cursor. + * + * Creates a new custom cursor image that can be set for a window with @ref + * glfwSetCursor. The cursor can be destroyed with @ref glfwDestroyCursor. + * Any remaining cursors are destroyed by @ref glfwTerminate. + * + * The pixels are 32-bit, little-endian, non-premultiplied RGBA, i.e. eight + * bits per channel with the red channel first. They are arranged canonically + * as packed sequential rows, starting from the top-left corner. + * + * The cursor hotspot is specified in pixels, relative to the upper-left corner + * of the cursor image. Like all other coordinate systems in GLFW, the X-axis + * points to the right and the Y-axis points down. + * + * @param[in] image The desired cursor image. + * @param[in] xhot The desired x-coordinate, in pixels, of the cursor hotspot. + * @param[in] yhot The desired y-coordinate, in pixels, of the cursor hotspot. + * @return The handle of the created cursor, or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @pointer_lifetime The specified image data is copied before this function + * returns. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref cursor_object + * @sa @ref glfwDestroyCursor + * @sa @ref glfwCreateStandardCursor + * + * @since Added in version 3.1. + * + * @ingroup input + */ +GLFWAPI GLFWcursor* glfwCreateCursor(const GLFWimage* image, int xhot, int yhot); + +/*! @brief Creates a cursor with a standard shape. + * + * Returns a cursor with a [standard shape](@ref shapes), that can be set for + * a window with @ref glfwSetCursor. + * + * @param[in] shape One of the [standard shapes](@ref shapes). + * @return A new cursor ready to use or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_INVALID_ENUM and @ref GLFW_PLATFORM_ERROR. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref cursor_object + * @sa @ref glfwCreateCursor + * + * @since Added in version 3.1. + * + * @ingroup input + */ +GLFWAPI GLFWcursor* glfwCreateStandardCursor(int shape); + +/*! @brief Destroys a cursor. 
+ * + * This function destroys a cursor previously created with @ref + * glfwCreateCursor. Any remaining cursors will be destroyed by @ref + * glfwTerminate. + * + * If the specified cursor is current for any window, that window will be + * reverted to the default cursor. This does not affect the cursor mode. + * + * @param[in] cursor The cursor object to destroy. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @reentrancy This function must not be called from a callback. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref cursor_object + * @sa @ref glfwCreateCursor + * + * @since Added in version 3.1. + * + * @ingroup input + */ +GLFWAPI void glfwDestroyCursor(GLFWcursor* cursor); + +/*! @brief Sets the cursor for the window. + * + * This function sets the cursor image to be used when the cursor is over the + * content area of the specified window. The set cursor will only be visible + * when the [cursor mode](@ref cursor_mode) of the window is + * `GLFW_CURSOR_NORMAL`. + * + * On some platforms, the set cursor may not be visible unless the window also + * has input focus. + * + * @param[in] window The window to set the cursor for. + * @param[in] cursor The cursor to set, or `NULL` to switch back to the default + * arrow cursor. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref cursor_object + * + * @since Added in version 3.1. + * + * @ingroup input + */ +GLFWAPI void glfwSetCursor(GLFWwindow* window, GLFWcursor* cursor); + +/*! @brief Sets the key callback. + * + * This function sets the key callback of the specified window, which is called + * when a key is pressed, repeated or released. + * + * The key functions deal with physical keys, with layout independent + * [key tokens](@ref keys) named after their values in the standard US keyboard + * layout. If you want to input text, use the + * [character callback](@ref glfwSetCharCallback) instead. + * + * When a window loses input focus, it will generate synthetic key release + * events for all pressed keys. You can tell these events from user-generated + * events by the fact that the synthetic ones are generated after the focus + * loss event has been processed, i.e. after the + * [window focus callback](@ref glfwSetWindowFocusCallback) has been called. + * + * The scancode of a key is specific to that platform or sometimes even to that + * machine. Scancodes are intended to allow users to bind keys that don't have + * a GLFW key token. Such keys have `key` set to `GLFW_KEY_UNKNOWN`, their + * state is not saved and so it cannot be queried with @ref glfwGetKey. + * + * Sometimes GLFW needs to generate synthetic key events, in which case the + * scancode may be zero. + * + * @param[in] window The window whose callback to set. + * @param[in] callback The new key callback, or `NULL` to remove the currently + * set callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). + * + * @callback_signature + * @code + * void function_name(GLFWwindow* window, int key, int scancode, int action, int mods) + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWkeyfun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. 
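+ *
+ * For illustration only, a minimal sketch (assuming `window` is a valid
+ * window handle) of a key callback that requests window closure when
+ * Escape is pressed:
+ * @code
+ * static void key_callback(GLFWwindow* window, int key, int scancode,
+ *                          int action, int mods)
+ * {
+ *     if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS)
+ *         glfwSetWindowShouldClose(window, GLFW_TRUE);
+ * }
+ *
+ * glfwSetKeyCallback(window, key_callback);
+ * @endcode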
+ * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref input_key + * + * @since Added in version 1.0. + * @glfw3 Added window handle parameter and return value. + * + * @ingroup input + */ +GLFWAPI GLFWkeyfun glfwSetKeyCallback(GLFWwindow* window, GLFWkeyfun callback); + +/*! @brief Sets the Unicode character callback. + * + * This function sets the character callback of the specified window, which is + * called when a Unicode character is input. + * + * The character callback is intended for Unicode text input. As it deals with + * characters, it is keyboard layout dependent, whereas the + * [key callback](@ref glfwSetKeyCallback) is not. Characters do not map 1:1 + * to physical keys, as a key may produce zero, one or more characters. If you + * want to know whether a specific physical key was pressed or released, see + * the key callback instead. + * + * The character callback behaves as system text input normally does and will + * not be called if modifier keys are held down that would prevent normal text + * input on that platform, for example a Super (Command) key on macOS or Alt key + * on Windows. + * + * @param[in] window The window whose callback to set. + * @param[in] callback The new callback, or `NULL` to remove the currently set + * callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). + * + * @callback_signature + * @code + * void function_name(GLFWwindow* window, unsigned int codepoint) + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWcharfun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref input_char + * + * @since Added in version 2.4. + * @glfw3 Added window handle parameter and return value. + * + * @ingroup input + */ +GLFWAPI GLFWcharfun glfwSetCharCallback(GLFWwindow* window, GLFWcharfun callback); + +/*! @brief Sets the Unicode character with modifiers callback. + * + * This function sets the character with modifiers callback of the specified + * window, which is called when a Unicode character is input regardless of what + * modifier keys are used. + * + * The character with modifiers callback is intended for implementing custom + * Unicode character input. For regular Unicode text input, see the + * [character callback](@ref glfwSetCharCallback). Like the character + * callback, the character with modifiers callback deals with characters and is + * keyboard layout dependent. Characters do not map 1:1 to physical keys, as + * a key may produce zero, one or more characters. If you want to know whether + * a specific physical key was pressed or released, see the + * [key callback](@ref glfwSetKeyCallback) instead. + * + * @param[in] window The window whose callback to set. + * @param[in] callback The new callback, or `NULL` to remove the currently set + * callback. + * @return The previously set callback, or `NULL` if no callback was set or an + * [error](@ref error_handling) occurred. + * + * @callback_signature + * @code + * void function_name(GLFWwindow* window, unsigned int codepoint, int mods) + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWcharmodsfun). + * + * @deprecated Scheduled for removal in version 4.0. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. 
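+ *
+ * As a rough sketch only, regular text input is normally collected with the
+ * non-deprecated character callback described above, for example
+ * (`text_buffer_add_codepoint` is a hypothetical application helper, not
+ * part of GLFW):
+ * @code
+ * static void character_callback(GLFWwindow* window, unsigned int codepoint)
+ * {
+ *     // Encode the codepoint and append it to an application text buffer
+ *     text_buffer_add_codepoint(codepoint);
+ * }
+ *
+ * glfwSetCharCallback(window, character_callback);
+ * @endcode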
+ * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref input_char + * + * @since Added in version 3.1. + * + * @ingroup input + */ +GLFWAPI GLFWcharmodsfun glfwSetCharModsCallback(GLFWwindow* window, GLFWcharmodsfun callback); + +/*! @brief Sets the mouse button callback. + * + * This function sets the mouse button callback of the specified window, which + * is called when a mouse button is pressed or released. + * + * When a window loses input focus, it will generate synthetic mouse button + * release events for all pressed mouse buttons. You can tell these events + * from user-generated events by the fact that the synthetic ones are generated + * after the focus loss event has been processed, i.e. after the + * [window focus callback](@ref glfwSetWindowFocusCallback) has been called. + * + * @param[in] window The window whose callback to set. + * @param[in] callback The new callback, or `NULL` to remove the currently set + * callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). + * + * @callback_signature + * @code + * void function_name(GLFWwindow* window, int button, int action, int mods) + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWmousebuttonfun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref input_mouse_button + * + * @since Added in version 1.0. + * @glfw3 Added window handle parameter and return value. + * + * @ingroup input + */ +GLFWAPI GLFWmousebuttonfun glfwSetMouseButtonCallback(GLFWwindow* window, GLFWmousebuttonfun callback); + +/*! @brief Sets the cursor position callback. + * + * This function sets the cursor position callback of the specified window, + * which is called when the cursor is moved. The callback is provided with the + * position, in screen coordinates, relative to the upper-left corner of the + * content area of the window. + * + * @param[in] window The window whose callback to set. + * @param[in] callback The new callback, or `NULL` to remove the currently set + * callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). + * + * @callback_signature + * @code + * void function_name(GLFWwindow* window, double xpos, double ypos); + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWcursorposfun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref cursor_pos + * + * @since Added in version 3.0. Replaces `glfwSetMousePosCallback`. + * + * @ingroup input + */ +GLFWAPI GLFWcursorposfun glfwSetCursorPosCallback(GLFWwindow* window, GLFWcursorposfun callback); + +/*! @brief Sets the cursor enter/leave callback. + * + * This function sets the cursor boundary crossing callback of the specified + * window, which is called when the cursor enters or leaves the content area of + * the window. + * + * @param[in] window The window whose callback to set. + * @param[in] callback The new callback, or `NULL` to remove the currently set + * callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). 
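+ *
+ * For illustration only, a minimal sketch (assuming `window` is a valid
+ * window handle):
+ * @code
+ * static void cursor_enter_callback(GLFWwindow* window, int entered)
+ * {
+ *     if (entered)
+ *     {
+ *         // The cursor entered the content area of the window
+ *     }
+ *     else
+ *     {
+ *         // The cursor left the content area of the window
+ *     }
+ * }
+ *
+ * glfwSetCursorEnterCallback(window, cursor_enter_callback);
+ * @endcode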
+ * + * @callback_signature + * @code + * void function_name(GLFWwindow* window, int entered) + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWcursorenterfun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref cursor_enter + * + * @since Added in version 3.0. + * + * @ingroup input + */ +GLFWAPI GLFWcursorenterfun glfwSetCursorEnterCallback(GLFWwindow* window, GLFWcursorenterfun callback); + +/*! @brief Sets the scroll callback. + * + * This function sets the scroll callback of the specified window, which is + * called when a scrolling device is used, such as a mouse wheel or scrolling + * area of a touchpad. + * + * The scroll callback receives all scrolling input, like that from a mouse + * wheel or a touchpad scrolling area. + * + * @param[in] window The window whose callback to set. + * @param[in] callback The new scroll callback, or `NULL` to remove the + * currently set callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). + * + * @callback_signature + * @code + * void function_name(GLFWwindow* window, double xoffset, double yoffset) + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWscrollfun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref scrolling + * + * @since Added in version 3.0. Replaces `glfwSetMouseWheelCallback`. + * + * @ingroup input + */ +GLFWAPI GLFWscrollfun glfwSetScrollCallback(GLFWwindow* window, GLFWscrollfun callback); + +/*! @brief Sets the path drop callback. + * + * This function sets the path drop callback of the specified window, which is + * called when one or more dragged paths are dropped on the window. + * + * Because the path array and its strings may have been generated specifically + * for that event, they are not guaranteed to be valid after the callback has + * returned. If you wish to use them after the callback returns, you need to + * make a deep copy. + * + * @param[in] window The window whose callback to set. + * @param[in] callback The new file drop callback, or `NULL` to remove the + * currently set callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). + * + * @callback_signature + * @code + * void function_name(GLFWwindow* window, int path_count, const char* paths[]) + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWdropfun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @remark @wayland File drop is currently unimplemented. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref path_drop + * + * @since Added in version 3.1. + * + * @ingroup input + */ +GLFWAPI GLFWdropfun glfwSetDropCallback(GLFWwindow* window, GLFWdropfun callback); + +/*! @brief Returns whether the specified joystick is present. + * + * This function returns whether the specified joystick is present. + * + * There is no need to call this function before other functions that accept + * a joystick ID, as they all check for presence before performing any other + * work. 
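+ *
+ * For illustration only, a minimal sketch that lists the connected
+ * joysticks (assuming `<stdio.h>` has been included):
+ * @code
+ * for (int jid = GLFW_JOYSTICK_1; jid <= GLFW_JOYSTICK_LAST; jid++)
+ * {
+ *     if (glfwJoystickPresent(jid))
+ *         printf("Joystick %i: %s\n", jid, glfwGetJoystickName(jid));
+ * }
+ * @endcode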
+ * + * @param[in] jid The [joystick](@ref joysticks) to query. + * @return `GLFW_TRUE` if the joystick is present, or `GLFW_FALSE` otherwise. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_INVALID_ENUM and @ref GLFW_PLATFORM_ERROR. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref joystick + * + * @since Added in version 3.0. Replaces `glfwGetJoystickParam`. + * + * @ingroup input + */ +GLFWAPI int glfwJoystickPresent(int jid); + +/*! @brief Returns the values of all axes of the specified joystick. + * + * This function returns the values of all axes of the specified joystick. + * Each element in the array is a value between -1.0 and 1.0. + * + * If the specified joystick is not present this function will return `NULL` + * but will not generate an error. This can be used instead of first calling + * @ref glfwJoystickPresent. + * + * @param[in] jid The [joystick](@ref joysticks) to query. + * @param[out] count Where to store the number of axis values in the returned + * array. This is set to zero if the joystick is not present or an error + * occurred. + * @return An array of axis values, or `NULL` if the joystick is not present or + * an [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_INVALID_ENUM and @ref GLFW_PLATFORM_ERROR. + * + * @pointer_lifetime The returned array is allocated and freed by GLFW. You + * should not free it yourself. It is valid until the specified joystick is + * disconnected or the library is terminated. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref joystick_axis + * + * @since Added in version 3.0. Replaces `glfwGetJoystickPos`. + * + * @ingroup input + */ +GLFWAPI const float* glfwGetJoystickAxes(int jid, int* count); + +/*! @brief Returns the state of all buttons of the specified joystick. + * + * This function returns the state of all buttons of the specified joystick. + * Each element in the array is either `GLFW_PRESS` or `GLFW_RELEASE`. + * + * For backward compatibility with earlier versions that did not have @ref + * glfwGetJoystickHats, the button array also includes all hats, each + * represented as four buttons. The hats are in the same order as returned by + * __glfwGetJoystickHats__ and are in the order _up_, _right_, _down_ and + * _left_. To disable these extra buttons, set the @ref + * GLFW_JOYSTICK_HAT_BUTTONS init hint before initialization. + * + * If the specified joystick is not present this function will return `NULL` + * but will not generate an error. This can be used instead of first calling + * @ref glfwJoystickPresent. + * + * @param[in] jid The [joystick](@ref joysticks) to query. + * @param[out] count Where to store the number of button states in the returned + * array. This is set to zero if the joystick is not present or an error + * occurred. + * @return An array of button states, or `NULL` if the joystick is not present + * or an [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_INVALID_ENUM and @ref GLFW_PLATFORM_ERROR. + * + * @pointer_lifetime The returned array is allocated and freed by GLFW. You + * should not free it yourself. It is valid until the specified joystick is + * disconnected or the library is terminated. + * + * @thread_safety This function must only be called from the main thread. 
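+ *
+ * For illustration only, a minimal per-frame polling sketch:
+ * @code
+ * int count;
+ * const unsigned char* buttons = glfwGetJoystickButtons(GLFW_JOYSTICK_1, &count);
+ *
+ * if (buttons && count > 0 && buttons[0] == GLFW_PRESS)
+ * {
+ *     // The first button of joystick 1 is currently pressed
+ * }
+ * @endcode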
+ * + * @sa @ref joystick_button + * + * @since Added in version 2.2. + * @glfw3 Changed to return a dynamic array. + * + * @ingroup input + */ +GLFWAPI const unsigned char* glfwGetJoystickButtons(int jid, int* count); + +/*! @brief Returns the state of all hats of the specified joystick. + * + * This function returns the state of all hats of the specified joystick. + * Each element in the array is one of the following values: + * + * Name | Value + * ---- | ----- + * `GLFW_HAT_CENTERED` | 0 + * `GLFW_HAT_UP` | 1 + * `GLFW_HAT_RIGHT` | 2 + * `GLFW_HAT_DOWN` | 4 + * `GLFW_HAT_LEFT` | 8 + * `GLFW_HAT_RIGHT_UP` | `GLFW_HAT_RIGHT` \| `GLFW_HAT_UP` + * `GLFW_HAT_RIGHT_DOWN` | `GLFW_HAT_RIGHT` \| `GLFW_HAT_DOWN` + * `GLFW_HAT_LEFT_UP` | `GLFW_HAT_LEFT` \| `GLFW_HAT_UP` + * `GLFW_HAT_LEFT_DOWN` | `GLFW_HAT_LEFT` \| `GLFW_HAT_DOWN` + * + * The diagonal directions are bitwise combinations of the primary (up, right, + * down and left) directions and you can test for these individually by ANDing + * it with the corresponding direction. + * + * @code + * if (hats[2] & GLFW_HAT_RIGHT) + * { + * // State of hat 2 could be right-up, right or right-down + * } + * @endcode + * + * If the specified joystick is not present this function will return `NULL` + * but will not generate an error. This can be used instead of first calling + * @ref glfwJoystickPresent. + * + * @param[in] jid The [joystick](@ref joysticks) to query. + * @param[out] count Where to store the number of hat states in the returned + * array. This is set to zero if the joystick is not present or an error + * occurred. + * @return An array of hat states, or `NULL` if the joystick is not present + * or an [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_INVALID_ENUM and @ref GLFW_PLATFORM_ERROR. + * + * @pointer_lifetime The returned array is allocated and freed by GLFW. You + * should not free it yourself. It is valid until the specified joystick is + * disconnected, this function is called again for that joystick or the library + * is terminated. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref joystick_hat + * + * @since Added in version 3.3. + * + * @ingroup input + */ +GLFWAPI const unsigned char* glfwGetJoystickHats(int jid, int* count); + +/*! @brief Returns the name of the specified joystick. + * + * This function returns the name, encoded as UTF-8, of the specified joystick. + * The returned string is allocated and freed by GLFW. You should not free it + * yourself. + * + * If the specified joystick is not present this function will return `NULL` + * but will not generate an error. This can be used instead of first calling + * @ref glfwJoystickPresent. + * + * @param[in] jid The [joystick](@ref joysticks) to query. + * @return The UTF-8 encoded name of the joystick, or `NULL` if the joystick + * is not present or an [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_INVALID_ENUM and @ref GLFW_PLATFORM_ERROR. + * + * @pointer_lifetime The returned string is allocated and freed by GLFW. You + * should not free it yourself. It is valid until the specified joystick is + * disconnected or the library is terminated. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref joystick_name + * + * @since Added in version 3.0. + * + * @ingroup input + */ +GLFWAPI const char* glfwGetJoystickName(int jid); + +/*! 
@brief Returns the SDL compatible GUID of the specified joystick. + * + * This function returns the SDL compatible GUID, as a UTF-8 encoded + * hexadecimal string, of the specified joystick. The returned string is + * allocated and freed by GLFW. You should not free it yourself. + * + * The GUID is what connects a joystick to a gamepad mapping. A connected + * joystick will always have a GUID even if there is no gamepad mapping + * assigned to it. + * + * If the specified joystick is not present this function will return `NULL` + * but will not generate an error. This can be used instead of first calling + * @ref glfwJoystickPresent. + * + * The GUID uses the format introduced in SDL 2.0.5. This GUID tries to + * uniquely identify the make and model of a joystick but does not identify + * a specific unit, e.g. all wired Xbox 360 controllers will have the same + * GUID on that platform. The GUID for a unit may vary between platforms + * depending on what hardware information the platform specific APIs provide. + * + * @param[in] jid The [joystick](@ref joysticks) to query. + * @return The UTF-8 encoded GUID of the joystick, or `NULL` if the joystick + * is not present or an [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_INVALID_ENUM and @ref GLFW_PLATFORM_ERROR. + * + * @pointer_lifetime The returned string is allocated and freed by GLFW. You + * should not free it yourself. It is valid until the specified joystick is + * disconnected or the library is terminated. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref gamepad + * + * @since Added in version 3.3. + * + * @ingroup input + */ +GLFWAPI const char* glfwGetJoystickGUID(int jid); + +/*! @brief Sets the user pointer of the specified joystick. + * + * This function sets the user-defined pointer of the specified joystick. The + * current value is retained until the joystick is disconnected. The initial + * value is `NULL`. + * + * This function may be called from the joystick callback, even for a joystick + * that is being disconnected. + * + * @param[in] jid The joystick whose pointer to set. + * @param[in] pointer The new value. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @sa @ref joystick_userptr + * @sa @ref glfwGetJoystickUserPointer + * + * @since Added in version 3.3. + * + * @ingroup input + */ +GLFWAPI void glfwSetJoystickUserPointer(int jid, void* pointer); + +/*! @brief Returns the user pointer of the specified joystick. + * + * This function returns the current value of the user-defined pointer of the + * specified joystick. The initial value is `NULL`. + * + * This function may be called from the joystick callback, even for a joystick + * that is being disconnected. + * + * @param[in] jid The joystick whose pointer to return. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @sa @ref joystick_userptr + * @sa @ref glfwSetJoystickUserPointer + * + * @since Added in version 3.3. + * + * @ingroup input + */ +GLFWAPI void* glfwGetJoystickUserPointer(int jid); + +/*! @brief Returns whether the specified joystick has a gamepad mapping. + * + * This function returns whether the specified joystick is both present and has + * a gamepad mapping. 
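+ *
+ * For illustration only, a minimal per-frame sketch that reads gamepad
+ * input when a mapping is available:
+ * @code
+ * if (glfwJoystickIsGamepad(GLFW_JOYSTICK_1))
+ * {
+ *     GLFWgamepadstate state;
+ *
+ *     if (glfwGetGamepadState(GLFW_JOYSTICK_1, &state) &&
+ *         state.buttons[GLFW_GAMEPAD_BUTTON_A] == GLFW_PRESS)
+ *     {
+ *         // The A button is currently pressed
+ *     }
+ * }
+ * @endcode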
+ * + * If the specified joystick is present but does not have a gamepad mapping + * this function will return `GLFW_FALSE` but will not generate an error. Call + * @ref glfwJoystickPresent to check if a joystick is present regardless of + * whether it has a mapping. + * + * @param[in] jid The [joystick](@ref joysticks) to query. + * @return `GLFW_TRUE` if a joystick is both present and has a gamepad mapping, + * or `GLFW_FALSE` otherwise. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_INVALID_ENUM. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref gamepad + * @sa @ref glfwGetGamepadState + * + * @since Added in version 3.3. + * + * @ingroup input + */ +GLFWAPI int glfwJoystickIsGamepad(int jid); + +/*! @brief Sets the joystick configuration callback. + * + * This function sets the joystick configuration callback, or removes the + * currently set callback. This is called when a joystick is connected to or + * disconnected from the system. + * + * For joystick connection and disconnection events to be delivered on all + * platforms, you need to call one of the [event processing](@ref events) + * functions. Joystick disconnection may also be detected and the callback + * called by joystick functions. The function will then return whatever it + * returns if the joystick is not present. + * + * @param[in] callback The new callback, or `NULL` to remove the currently set + * callback. + * @return The previously set callback, or `NULL` if no callback was set or the + * library had not been [initialized](@ref intro_init). + * + * @callback_signature + * @code + * void function_name(int jid, int event) + * @endcode + * For more information about the callback parameters, see the + * [function pointer type](@ref GLFWjoystickfun). + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref joystick_event + * + * @since Added in version 3.2. + * + * @ingroup input + */ +GLFWAPI GLFWjoystickfun glfwSetJoystickCallback(GLFWjoystickfun callback); + +/*! @brief Adds the specified SDL_GameControllerDB gamepad mappings. + * + * This function parses the specified ASCII encoded string and updates the + * internal list with any gamepad mappings it finds. This string may + * contain either a single gamepad mapping or many mappings separated by + * newlines. The parser supports the full format of the `gamecontrollerdb.txt` + * source file including empty lines and comments. + * + * See @ref gamepad_mapping for a description of the format. + * + * If there is already a gamepad mapping for a given GUID in the internal list, + * it will be replaced by the one passed to this function. If the library is + * terminated and re-initialized the internal list will revert to the built-in + * default. + * + * @param[in] string The string containing the gamepad mappings. + * @return `GLFW_TRUE` if successful, or `GLFW_FALSE` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_INVALID_VALUE. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref gamepad + * @sa @ref glfwJoystickIsGamepad + * @sa @ref glfwGetGamepadName + * + * @since Added in version 3.3. + * + * @ingroup input + */ +GLFWAPI int glfwUpdateGamepadMappings(const char* string); + +/*! @brief Returns the human-readable gamepad name for the specified joystick. 
+ * + * This function returns the human-readable name of the gamepad from the + * gamepad mapping assigned to the specified joystick. + * + * If the specified joystick is not present or does not have a gamepad mapping + * this function will return `NULL` but will not generate an error. Call + * @ref glfwJoystickPresent to check whether it is present regardless of + * whether it has a mapping. + * + * @param[in] jid The [joystick](@ref joysticks) to query. + * @return The UTF-8 encoded name of the gamepad, or `NULL` if the + * joystick is not present, does not have a mapping or an + * [error](@ref error_handling) occurred. + * + * @pointer_lifetime The returned string is allocated and freed by GLFW. You + * should not free it yourself. It is valid until the specified joystick is + * disconnected, the gamepad mappings are updated or the library is terminated. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref gamepad + * @sa @ref glfwJoystickIsGamepad + * + * @since Added in version 3.3. + * + * @ingroup input + */ +GLFWAPI const char* glfwGetGamepadName(int jid); + +/*! @brief Retrieves the state of the specified joystick remapped as a gamepad. + * + * This function retrieves the state of the specified joystick remapped to + * an Xbox-like gamepad. + * + * If the specified joystick is not present or does not have a gamepad mapping + * this function will return `GLFW_FALSE` but will not generate an error. Call + * @ref glfwJoystickPresent to check whether it is present regardless of + * whether it has a mapping. + * + * The Guide button may not be available for input as it is often hooked by the + * system or the Steam client. + * + * Not all devices have all the buttons or axes provided by @ref + * GLFWgamepadstate. Unavailable buttons and axes will always report + * `GLFW_RELEASE` and 0.0 respectively. + * + * @param[in] jid The [joystick](@ref joysticks) to query. + * @param[out] state The gamepad input state of the joystick. + * @return `GLFW_TRUE` if successful, or `GLFW_FALSE` if no joystick is + * connected, it has no gamepad mapping or an [error](@ref error_handling) + * occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_INVALID_ENUM. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref gamepad + * @sa @ref glfwUpdateGamepadMappings + * @sa @ref glfwJoystickIsGamepad + * + * @since Added in version 3.3. + * + * @ingroup input + */ +GLFWAPI int glfwGetGamepadState(int jid, GLFWgamepadstate* state); + +/*! @brief Sets the clipboard to the specified string. + * + * This function sets the system clipboard to the specified, UTF-8 encoded + * string. + * + * @param[in] window Deprecated. Any valid window or `NULL`. + * @param[in] string A UTF-8 encoded string. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @pointer_lifetime The specified string is copied before this function + * returns. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref clipboard + * @sa @ref glfwGetClipboardString + * + * @since Added in version 3.0. + * + * @ingroup input + */ +GLFWAPI void glfwSetClipboardString(GLFWwindow* window, const char* string); + +/*! @brief Returns the contents of the clipboard as a string. + * + * This function returns the contents of the system clipboard, if it contains + * or is convertible to a UTF-8 encoded string. 
If the clipboard is empty or
+ * if its contents cannot be converted, `NULL` is returned and a @ref
+ * GLFW_FORMAT_UNAVAILABLE error is generated.
+ *
+ * @param[in] window Deprecated. Any valid window or `NULL`.
+ * @return The contents of the clipboard as a UTF-8 encoded string, or `NULL`
+ * if an [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_PLATFORM_ERROR.
+ *
+ * @pointer_lifetime The returned string is allocated and freed by GLFW. You
+ * should not free it yourself. It is valid until the next call to @ref
+ * glfwGetClipboardString or @ref glfwSetClipboardString, or until the library
+ * is terminated.
+ *
+ * @thread_safety This function must only be called from the main thread.
+ *
+ * @sa @ref clipboard
+ * @sa @ref glfwSetClipboardString
+ *
+ * @since Added in version 3.0.
+ *
+ * @ingroup input
+ */
+GLFWAPI const char* glfwGetClipboardString(GLFWwindow* window);
+
+/*! @brief Returns the GLFW time.
+ *
+ * This function returns the current GLFW time, in seconds. Unless the time
+ * has been set using @ref glfwSetTime it measures time elapsed since GLFW was
+ * initialized.
+ *
+ * This function and @ref glfwSetTime are helper functions on top of @ref
+ * glfwGetTimerFrequency and @ref glfwGetTimerValue.
+ *
+ * The resolution of the timer is system dependent, but is usually on the order
+ * of a few micro- or nanoseconds. It uses the highest-resolution monotonic
+ * time source on each supported platform.
+ *
+ * @return The current time, in seconds, or zero if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function may be called from any thread. Reading and
+ * writing of the internal base time is not atomic, so it needs to be
+ * externally synchronized with calls to @ref glfwSetTime.
+ *
+ * @sa @ref time
+ *
+ * @since Added in version 1.0.
+ *
+ * @ingroup input
+ */
+GLFWAPI double glfwGetTime(void);
+
+/*! @brief Sets the GLFW time.
+ *
+ * This function sets the current GLFW time, in seconds. The value must be
+ * a positive finite number less than or equal to 18446744073.0, which is
+ * approximately 584.5 years.
+ *
+ * This function and @ref glfwGetTime are helper functions on top of @ref
+ * glfwGetTimerFrequency and @ref glfwGetTimerValue.
+ *
+ * @param[in] time The new value, in seconds.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref
+ * GLFW_INVALID_VALUE.
+ *
+ * @remark The upper limit of GLFW time is calculated as
+ * floor((2^64 - 1) / 10^9) and is due to implementations
+ * storing nanoseconds in 64 bits. The limit may be increased in the future.
+ *
+ * @thread_safety This function may be called from any thread. Reading and
+ * writing of the internal base time is not atomic, so it needs to be
+ * externally synchronized with calls to @ref glfwGetTime.
+ *
+ * @sa @ref time
+ *
+ * @since Added in version 2.2.
+ *
+ * @ingroup input
+ */
+GLFWAPI void glfwSetTime(double time);
+
+/*! @brief Returns the current value of the raw timer.
+ *
+ * This function returns the current value of the raw timer, measured in
+ * 1 / frequency seconds. To get the frequency, call @ref
+ * glfwGetTimerFrequency.
+ *
+ * @return The value of the timer, or zero if an
+ * [error](@ref error_handling) occurred.
+ *
+ * @errors Possible errors include @ref GLFW_NOT_INITIALIZED.
+ *
+ * @thread_safety This function may be called from any thread. 
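+ *
+ * For illustration only, a minimal sketch that measures elapsed time with
+ * the raw timer:
+ * @code
+ * uint64_t start = glfwGetTimerValue();
+ *
+ * // ... do some work ...
+ *
+ * uint64_t end = glfwGetTimerValue();
+ * double seconds = (double) (end - start) / (double) glfwGetTimerFrequency();
+ * @endcode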
+ * + * @sa @ref time + * @sa @ref glfwGetTimerFrequency + * + * @since Added in version 3.2. + * + * @ingroup input + */ +GLFWAPI uint64_t glfwGetTimerValue(void); + +/*! @brief Returns the frequency, in Hz, of the raw timer. + * + * This function returns the frequency, in Hz, of the raw timer. + * + * @return The frequency of the timer, in Hz, or zero if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. + * + * @sa @ref time + * @sa @ref glfwGetTimerValue + * + * @since Added in version 3.2. + * + * @ingroup input + */ +GLFWAPI uint64_t glfwGetTimerFrequency(void); + +/*! @brief Makes the context of the specified window current for the calling + * thread. + * + * This function makes the OpenGL or OpenGL ES context of the specified window + * current on the calling thread. A context must only be made current on + * a single thread at a time and each thread can have only a single current + * context at a time. + * + * When moving a context between threads, you must make it non-current on the + * old thread before making it current on the new one. + * + * By default, making a context non-current implicitly forces a pipeline flush. + * On machines that support `GL_KHR_context_flush_control`, you can control + * whether a context performs this flush by setting the + * [GLFW_CONTEXT_RELEASE_BEHAVIOR](@ref GLFW_CONTEXT_RELEASE_BEHAVIOR_hint) + * hint. + * + * The specified window must have an OpenGL or OpenGL ES context. Specifying + * a window without a context will generate a @ref GLFW_NO_WINDOW_CONTEXT + * error. + * + * @param[in] window The window whose context to make current, or `NULL` to + * detach the current context. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_NO_WINDOW_CONTEXT and @ref GLFW_PLATFORM_ERROR. + * + * @thread_safety This function may be called from any thread. + * + * @sa @ref context_current + * @sa @ref glfwGetCurrentContext + * + * @since Added in version 3.0. + * + * @ingroup context + */ +GLFWAPI void glfwMakeContextCurrent(GLFWwindow* window); + +/*! @brief Returns the window whose context is current on the calling thread. + * + * This function returns the window whose OpenGL or OpenGL ES context is + * current on the calling thread. + * + * @return The window whose context is current, or `NULL` if no window's + * context is current. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. + * + * @sa @ref context_current + * @sa @ref glfwMakeContextCurrent + * + * @since Added in version 3.0. + * + * @ingroup context + */ +GLFWAPI GLFWwindow* glfwGetCurrentContext(void); + +/*! @brief Swaps the front and back buffers of the specified window. + * + * This function swaps the front and back buffers of the specified window when + * rendering with OpenGL or OpenGL ES. If the swap interval is greater than + * zero, the GPU driver waits the specified number of screen updates before + * swapping the buffers. + * + * The specified window must have an OpenGL or OpenGL ES context. Specifying + * a window without a context will generate a @ref GLFW_NO_WINDOW_CONTEXT + * error. + * + * This function does not apply to Vulkan. If you are rendering with Vulkan, + * see `vkQueuePresentKHR` instead. + * + * @param[in] window The window whose buffers to swap. 
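+ *
+ * For illustration only, a minimal main-loop sketch (assuming `window` is a
+ * valid window handle with an OpenGL or OpenGL ES context):
+ * @code
+ * while (!glfwWindowShouldClose(window))
+ * {
+ *     // ... render the frame here ...
+ *
+ *     glfwSwapBuffers(window);
+ *     glfwPollEvents();
+ * }
+ * @endcode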
+ * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_NO_WINDOW_CONTEXT and @ref GLFW_PLATFORM_ERROR. + * + * @remark __EGL:__ The context of the specified window must be current on the + * calling thread. + * + * @thread_safety This function may be called from any thread. + * + * @sa @ref buffer_swap + * @sa @ref glfwSwapInterval + * + * @since Added in version 1.0. + * @glfw3 Added window handle parameter. + * + * @ingroup window + */ +GLFWAPI void glfwSwapBuffers(GLFWwindow* window); + +/*! @brief Sets the swap interval for the current context. + * + * This function sets the swap interval for the current OpenGL or OpenGL ES + * context, i.e. the number of screen updates to wait from the time @ref + * glfwSwapBuffers was called before swapping the buffers and returning. This + * is sometimes called _vertical synchronization_, _vertical retrace + * synchronization_ or just _vsync_. + * + * A context that supports either of the `WGL_EXT_swap_control_tear` and + * `GLX_EXT_swap_control_tear` extensions also accepts _negative_ swap + * intervals, which allows the driver to swap immediately even if a frame + * arrives a little bit late. You can check for these extensions with @ref + * glfwExtensionSupported. + * + * A context must be current on the calling thread. Calling this function + * without a current context will cause a @ref GLFW_NO_CURRENT_CONTEXT error. + * + * This function does not apply to Vulkan. If you are rendering with Vulkan, + * see the present mode of your swapchain instead. + * + * @param[in] interval The minimum number of screen updates to wait for + * until the buffers are swapped by @ref glfwSwapBuffers. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_NO_CURRENT_CONTEXT and @ref GLFW_PLATFORM_ERROR. + * + * @remark This function is not called during context creation, leaving the + * swap interval set to whatever is the default on that platform. This is done + * because some swap interval extensions used by GLFW do not allow the swap + * interval to be reset to zero once it has been set to a non-zero value. + * + * @remark Some GPU drivers do not honor the requested swap interval, either + * because of a user setting that overrides the application's request or due to + * bugs in the driver. + * + * @thread_safety This function may be called from any thread. + * + * @sa @ref buffer_swap + * @sa @ref glfwSwapBuffers + * + * @since Added in version 1.0. + * + * @ingroup context + */ +GLFWAPI void glfwSwapInterval(int interval); + +/*! @brief Returns whether the specified extension is available. + * + * This function returns whether the specified + * [API extension](@ref context_glext) is supported by the current OpenGL or + * OpenGL ES context. It searches both for client API extension and context + * creation API extensions. + * + * A context must be current on the calling thread. Calling this function + * without a current context will cause a @ref GLFW_NO_CURRENT_CONTEXT error. + * + * As this functions retrieves and searches one or more extension strings each + * call, it is recommended that you cache its results if it is going to be used + * frequently. The extension strings will not change during the lifetime of + * a context, so there is no danger in doing this. + * + * This function does not apply to Vulkan. If you are using Vulkan, see @ref + * glfwGetRequiredInstanceExtensions, `vkEnumerateInstanceExtensionProperties` + * and `vkEnumerateDeviceExtensionProperties` instead. 
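+ *
+ * For illustration only, a minimal sketch that checks once and caches the
+ * result in the application:
+ * @code
+ * static int checked, has_debug_output;
+ *
+ * if (!checked)
+ * {
+ *     has_debug_output = glfwExtensionSupported("GL_ARB_debug_output");
+ *     checked = GLFW_TRUE;
+ * }
+ * @endcode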
+ * + * @param[in] extension The ASCII encoded name of the extension. + * @return `GLFW_TRUE` if the extension is available, or `GLFW_FALSE` + * otherwise. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_NO_CURRENT_CONTEXT, @ref GLFW_INVALID_VALUE and @ref + * GLFW_PLATFORM_ERROR. + * + * @thread_safety This function may be called from any thread. + * + * @sa @ref context_glext + * @sa @ref glfwGetProcAddress + * + * @since Added in version 1.0. + * + * @ingroup context + */ +GLFWAPI int glfwExtensionSupported(const char* extension); + +/*! @brief Returns the address of the specified function for the current + * context. + * + * This function returns the address of the specified OpenGL or OpenGL ES + * [core or extension function](@ref context_glext), if it is supported + * by the current context. + * + * A context must be current on the calling thread. Calling this function + * without a current context will cause a @ref GLFW_NO_CURRENT_CONTEXT error. + * + * This function does not apply to Vulkan. If you are rendering with Vulkan, + * see @ref glfwGetInstanceProcAddress, `vkGetInstanceProcAddr` and + * `vkGetDeviceProcAddr` instead. + * + * @param[in] procname The ASCII encoded name of the function. + * @return The address of the function, or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_NO_CURRENT_CONTEXT and @ref GLFW_PLATFORM_ERROR. + * + * @remark The address of a given function is not guaranteed to be the same + * between contexts. + * + * @remark This function may return a non-`NULL` address despite the + * associated version or extension not being available. Always check the + * context version or extension string first. + * + * @pointer_lifetime The returned function pointer is valid until the context + * is destroyed or the library is terminated. + * + * @thread_safety This function may be called from any thread. + * + * @sa @ref context_glext + * @sa @ref glfwExtensionSupported + * + * @since Added in version 1.0. + * + * @ingroup context + */ +GLFWAPI GLFWglproc glfwGetProcAddress(const char* procname); + +/*! @brief Returns whether the Vulkan loader and an ICD have been found. + * + * This function returns whether the Vulkan loader and any minimally functional + * ICD have been found. + * + * The availability of a Vulkan loader and even an ICD does not by itself + * guarantee that surface creation or even instance creation is possible. + * For example, on Fermi systems Nvidia will install an ICD that provides no + * actual Vulkan support. Call @ref glfwGetRequiredInstanceExtensions to check + * whether the extensions necessary for Vulkan surface creation are available + * and @ref glfwGetPhysicalDevicePresentationSupport to check whether a queue + * family of a physical device supports image presentation. + * + * @return `GLFW_TRUE` if Vulkan is minimally available, or `GLFW_FALSE` + * otherwise. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED. + * + * @thread_safety This function may be called from any thread. + * + * @sa @ref vulkan_support + * + * @since Added in version 3.2. + * + * @ingroup vulkan + */ +GLFWAPI int glfwVulkanSupported(void); + +/*! @brief Returns the Vulkan instance extensions required by GLFW. + * + * This function returns an array of names of Vulkan instance extensions required + * by GLFW for creating Vulkan surfaces for GLFW windows. 
If successful, the + * list will always contain `VK_KHR_surface`, so if you don't require any + * additional extensions you can pass this list directly to the + * `VkInstanceCreateInfo` struct. + * + * If Vulkan is not available on the machine, this function returns `NULL` and + * generates a @ref GLFW_API_UNAVAILABLE error. Call @ref glfwVulkanSupported + * to check whether Vulkan is at least minimally available. + * + * If Vulkan is available but no set of extensions allowing window surface + * creation was found, this function returns `NULL`. You may still use Vulkan + * for off-screen rendering and compute work. + * + * @param[out] count Where to store the number of extensions in the returned + * array. This is set to zero if an error occurred. + * @return An array of ASCII encoded extension names, or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_API_UNAVAILABLE. + * + * @remark Additional extensions may be required by future versions of GLFW. + * You should check if any extensions you wish to enable are already in the + * returned array, as it is an error to specify an extension more than once in + * the `VkInstanceCreateInfo` struct. + * + * @remark @macos This function currently only supports the + * `VK_MVK_macos_surface` extension from MoltenVK. + * + * @pointer_lifetime The returned array is allocated and freed by GLFW. You + * should not free it yourself. It is guaranteed to be valid only until the + * library is terminated. + * + * @thread_safety This function may be called from any thread. + * + * @sa @ref vulkan_ext + * @sa @ref glfwCreateWindowSurface + * + * @since Added in version 3.2. + * + * @ingroup vulkan + */ +GLFWAPI const char** glfwGetRequiredInstanceExtensions(uint32_t* count); + +#if defined(VK_VERSION_1_0) + +/*! @brief Returns the address of the specified Vulkan instance function. + * + * This function returns the address of the specified Vulkan core or extension + * function for the specified instance. If instance is set to `NULL` it can + * return any function exported from the Vulkan loader, including at least the + * following functions: + * + * - `vkEnumerateInstanceExtensionProperties` + * - `vkEnumerateInstanceLayerProperties` + * - `vkCreateInstance` + * - `vkGetInstanceProcAddr` + * + * If Vulkan is not available on the machine, this function returns `NULL` and + * generates a @ref GLFW_API_UNAVAILABLE error. Call @ref glfwVulkanSupported + * to check whether Vulkan is at least minimally available. + * + * This function is equivalent to calling `vkGetInstanceProcAddr` with + * a platform-specific query of the Vulkan loader as a fallback. + * + * @param[in] instance The Vulkan instance to query, or `NULL` to retrieve + * functions related to instance creation. + * @param[in] procname The ASCII encoded name of the function. + * @return The address of the function, or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_API_UNAVAILABLE. + * + * @pointer_lifetime The returned function pointer is valid until the library + * is terminated. + * + * @thread_safety This function may be called from any thread. + * + * @sa @ref vulkan_proc + * + * @since Added in version 3.2. + * + * @ingroup vulkan + */ +GLFWAPI GLFWvkproc glfwGetInstanceProcAddress(VkInstance instance, const char* procname); + +/*! @brief Returns whether the specified queue family can present images. 
+ * + * This function returns whether the specified queue family of the specified + * physical device supports presentation to the platform GLFW was built for. + * + * If Vulkan or the required window surface creation instance extensions are + * not available on the machine, or if the specified instance was not created + * with the required extensions, this function returns `GLFW_FALSE` and + * generates a @ref GLFW_API_UNAVAILABLE error. Call @ref glfwVulkanSupported + * to check whether Vulkan is at least minimally available and @ref + * glfwGetRequiredInstanceExtensions to check what instance extensions are + * required. + * + * @param[in] instance The instance that the physical device belongs to. + * @param[in] device The physical device that the queue family belongs to. + * @param[in] queuefamily The index of the queue family to query. + * @return `GLFW_TRUE` if the queue family supports presentation, or + * `GLFW_FALSE` otherwise. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_API_UNAVAILABLE and @ref GLFW_PLATFORM_ERROR. + * + * @remark @macos This function currently always returns `GLFW_TRUE`, as the + * `VK_MVK_macos_surface` extension does not provide + * a `vkGetPhysicalDevice*PresentationSupport` type function. + * + * @thread_safety This function may be called from any thread. For + * synchronization details of Vulkan objects, see the Vulkan specification. + * + * @sa @ref vulkan_present + * + * @since Added in version 3.2. + * + * @ingroup vulkan + */ +GLFWAPI int glfwGetPhysicalDevicePresentationSupport(VkInstance instance, VkPhysicalDevice device, uint32_t queuefamily); + +/*! @brief Creates a Vulkan surface for the specified window. + * + * This function creates a Vulkan surface for the specified window. + * + * If the Vulkan loader or at least one minimally functional ICD were not found, + * this function returns `VK_ERROR_INITIALIZATION_FAILED` and generates a @ref + * GLFW_API_UNAVAILABLE error. Call @ref glfwVulkanSupported to check whether + * Vulkan is at least minimally available. + * + * If the required window surface creation instance extensions are not + * available or if the specified instance was not created with these extensions + * enabled, this function returns `VK_ERROR_EXTENSION_NOT_PRESENT` and + * generates a @ref GLFW_API_UNAVAILABLE error. Call @ref + * glfwGetRequiredInstanceExtensions to check what instance extensions are + * required. + * + * The window surface cannot be shared with another API so the window must + * have been created with the [client api hint](@ref GLFW_CLIENT_API_attrib) + * set to `GLFW_NO_API` otherwise it generates a @ref GLFW_INVALID_VALUE error + * and returns `VK_ERROR_NATIVE_WINDOW_IN_USE_KHR`. + * + * The window surface must be destroyed before the specified Vulkan instance. + * It is the responsibility of the caller to destroy the window surface. GLFW + * does not destroy it for you. Call `vkDestroySurfaceKHR` to destroy the + * surface. + * + * @param[in] instance The Vulkan instance to create the surface in. + * @param[in] window The window to create the surface for. + * @param[in] allocator The allocator to use, or `NULL` to use the default + * allocator. + * @param[out] surface Where to store the handle of the surface. This is set + * to `VK_NULL_HANDLE` if an error occurred. + * @return `VK_SUCCESS` if successful, or a Vulkan error code if an + * [error](@ref error_handling) occurred. 
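+ *
+ * For illustration only, a minimal sketch (assuming `instance` is a valid
+ * `VkInstance` and `window` was created with the client API hint set to
+ * `GLFW_NO_API`):
+ * @code
+ * VkSurfaceKHR surface;
+ * VkResult err = glfwCreateWindowSurface(instance, window, NULL, &surface);
+ * if (err != VK_SUCCESS)
+ * {
+ *     // Window surface creation failed
+ * }
+ * @endcode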
+ * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED, @ref + * GLFW_API_UNAVAILABLE, @ref GLFW_PLATFORM_ERROR and @ref GLFW_INVALID_VALUE + * + * @remark If an error occurs before the creation call is made, GLFW returns + * the Vulkan error code most appropriate for the error. Appropriate use of + * @ref glfwVulkanSupported and @ref glfwGetRequiredInstanceExtensions should + * eliminate almost all occurrences of these errors. + * + * @remark @macos This function currently only supports the + * `VK_MVK_macos_surface` extension from MoltenVK. + * + * @remark @macos This function creates and sets a `CAMetalLayer` instance for + * the window content view, which is required for MoltenVK to function. + * + * @thread_safety This function may be called from any thread. For + * synchronization details of Vulkan objects, see the Vulkan specification. + * + * @sa @ref vulkan_surface + * @sa @ref glfwGetRequiredInstanceExtensions + * + * @since Added in version 3.2. + * + * @ingroup vulkan + */ +GLFWAPI VkResult glfwCreateWindowSurface(VkInstance instance, GLFWwindow* window, const VkAllocationCallbacks* allocator, VkSurfaceKHR* surface); + +#endif /*VK_VERSION_1_0*/ + + +/************************************************************************* + * Global definition cleanup + *************************************************************************/ + +/* ------------------- BEGIN SYSTEM/COMPILER SPECIFIC -------------------- */ + +#ifdef GLFW_WINGDIAPI_DEFINED + #undef WINGDIAPI + #undef GLFW_WINGDIAPI_DEFINED +#endif + +#ifdef GLFW_CALLBACK_DEFINED + #undef CALLBACK + #undef GLFW_CALLBACK_DEFINED +#endif + +/* Some OpenGL related headers need GLAPIENTRY, but it is unconditionally + * defined by some gl.h variants (OpenBSD) so define it after if needed. + */ +#ifndef GLAPIENTRY + #define GLAPIENTRY APIENTRY +#endif + +/* -------------------- END SYSTEM/COMPILER SPECIFIC --------------------- */ + + +#ifdef __cplusplus +} +#endif + +#endif /* _glfw3_h_ */ + diff --git a/gl/glfw3native.h b/gl/glfw3native.h new file mode 100755 index 0000000..267e75c --- /dev/null +++ b/gl/glfw3native.h @@ -0,0 +1,525 @@ +/************************************************************************* + * GLFW 3.3 - www.glfw.org + * A library for OpenGL, window and input + *------------------------------------------------------------------------ + * Copyright (c) 2002-2006 Marcus Geelnard + * Copyright (c) 2006-2018 Camilla Löwy + * + * This software is provided 'as-is', without any express or implied + * warranty. In no event will the authors be held liable for any damages + * arising from the use of this software. + * + * Permission is granted to anyone to use this software for any purpose, + * including commercial applications, and to alter it and redistribute it + * freely, subject to the following restrictions: + * + * 1. The origin of this software must not be misrepresented; you must not + * claim that you wrote the original software. If you use this software + * in a product, an acknowledgment in the product documentation would + * be appreciated but is not required. + * + * 2. Altered source versions must be plainly marked as such, and must not + * be misrepresented as being the original software. + * + * 3. This notice may not be removed or altered from any source + * distribution. 
+ *
+ *************************************************************************/
+
+#ifndef _glfw3_native_h_
+#define _glfw3_native_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/*************************************************************************
+ * Doxygen documentation
+ *************************************************************************/
+
+/*! @file glfw3native.h
+ * @brief The header of the native access functions.
+ *
+ * This is the header file of the native access functions. See @ref native for
+ * more information.
+ */
+/*! @defgroup native Native access
+ * @brief Functions related to accessing native handles.
+ *
+ * **By using the native access functions you assert that you know what you're
+ * doing and how to fix problems caused by using them. If you don't, you
+ * shouldn't be using them.**
+ *
+ * Before the inclusion of @ref glfw3native.h, you may define zero or more
+ * window system API macros and zero or more context creation API macros.
+ *
+ * The chosen backends must match those the library was compiled for. Failure
+ * to do this will cause a link-time error.
+ *
+ * The available window API macros are:
+ * * `GLFW_EXPOSE_NATIVE_WIN32`
+ * * `GLFW_EXPOSE_NATIVE_COCOA`
+ * * `GLFW_EXPOSE_NATIVE_X11`
+ * * `GLFW_EXPOSE_NATIVE_WAYLAND`
+ *
+ * The available context API macros are:
+ * * `GLFW_EXPOSE_NATIVE_WGL`
+ * * `GLFW_EXPOSE_NATIVE_NSGL`
+ * * `GLFW_EXPOSE_NATIVE_GLX`
+ * * `GLFW_EXPOSE_NATIVE_EGL`
+ * * `GLFW_EXPOSE_NATIVE_OSMESA`
+ *
+ * These macros select which of the native access functions are declared
+ * and which platform-specific headers to include. It is then up to your (by
+ * definition platform-specific) code to handle which of these should be
+ * defined.
+ */
+
+
+/*************************************************************************
+ * System headers and types
+ *************************************************************************/
+
+#if defined(GLFW_EXPOSE_NATIVE_WIN32) || defined(GLFW_EXPOSE_NATIVE_WGL)
+ // This is a workaround for the fact that glfw3.h needs to export APIENTRY (for
+ // example to allow applications to correctly declare a GL_ARB_debug_output
+ // callback) but windows.h assumes no one will define APIENTRY before it does
+ #if defined(GLFW_APIENTRY_DEFINED)
+ #undef APIENTRY
+ #undef GLFW_APIENTRY_DEFINED
+ #endif
+ #include <windows.h>
+#elif defined(GLFW_EXPOSE_NATIVE_COCOA) || defined(GLFW_EXPOSE_NATIVE_NSGL)
+ #if defined(__OBJC__)
+ #import <Cocoa/Cocoa.h>
+ #else
+ #include <ApplicationServices/ApplicationServices.h>
+ typedef void* id;
+ #endif
+#elif defined(GLFW_EXPOSE_NATIVE_X11) || defined(GLFW_EXPOSE_NATIVE_GLX)
+ #include <X11/Xlib.h>
+ #include <X11/extensions/Xrandr.h>
+#elif defined(GLFW_EXPOSE_NATIVE_WAYLAND)
+ #include <wayland-client.h>
+#endif
+
+#if defined(GLFW_EXPOSE_NATIVE_WGL)
+ /* WGL is declared by windows.h */
+#endif
+#if defined(GLFW_EXPOSE_NATIVE_NSGL)
+ /* NSGL is declared by Cocoa.h */
+#endif
+#if defined(GLFW_EXPOSE_NATIVE_GLX)
+ #include <GL/glx.h>
+#endif
+#if defined(GLFW_EXPOSE_NATIVE_EGL)
+ #include <EGL/egl.h>
+#endif
+#if defined(GLFW_EXPOSE_NATIVE_OSMESA)
+ #include <GL/osmesa.h>
+#endif
+
+
+/*************************************************************************
+ * Functions
+ *************************************************************************/
+
+#if defined(GLFW_EXPOSE_NATIVE_WIN32)
+/*! @brief Returns the adapter device name of the specified monitor.
+ *
+ * @return The UTF-8 encoded adapter device name (for example `\\.\DISPLAY1`)
+ * of the specified monitor, or `NULL` if an [error](@ref error_handling)
+ * occurred.
+ *
+ * @thread_safety This function may be called from any thread.
Access is not + * synchronized. + * + * @since Added in version 3.1. + * + * @ingroup native + */ +GLFWAPI const char* glfwGetWin32Adapter(GLFWmonitor* monitor); + +/*! @brief Returns the display device name of the specified monitor. + * + * @return The UTF-8 encoded display device name (for example + * `\\.\DISPLAY1\Monitor0`) of the specified monitor, or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.1. + * + * @ingroup native + */ +GLFWAPI const char* glfwGetWin32Monitor(GLFWmonitor* monitor); + +/*! @brief Returns the `HWND` of the specified window. + * + * @return The `HWND` of the specified window, or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.0. + * + * @ingroup native + */ +GLFWAPI HWND glfwGetWin32Window(GLFWwindow* window); +#endif + +#if defined(GLFW_EXPOSE_NATIVE_WGL) +/*! @brief Returns the `HGLRC` of the specified window. + * + * @return The `HGLRC` of the specified window, or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.0. + * + * @ingroup native + */ +GLFWAPI HGLRC glfwGetWGLContext(GLFWwindow* window); +#endif + +#if defined(GLFW_EXPOSE_NATIVE_COCOA) +/*! @brief Returns the `CGDirectDisplayID` of the specified monitor. + * + * @return The `CGDirectDisplayID` of the specified monitor, or + * `kCGNullDirectDisplay` if an [error](@ref error_handling) occurred. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.1. + * + * @ingroup native + */ +GLFWAPI CGDirectDisplayID glfwGetCocoaMonitor(GLFWmonitor* monitor); + +/*! @brief Returns the `NSWindow` of the specified window. + * + * @return The `NSWindow` of the specified window, or `nil` if an + * [error](@ref error_handling) occurred. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.0. + * + * @ingroup native + */ +GLFWAPI id glfwGetCocoaWindow(GLFWwindow* window); +#endif + +#if defined(GLFW_EXPOSE_NATIVE_NSGL) +/*! @brief Returns the `NSOpenGLContext` of the specified window. + * + * @return The `NSOpenGLContext` of the specified window, or `nil` if an + * [error](@ref error_handling) occurred. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.0. + * + * @ingroup native + */ +GLFWAPI id glfwGetNSGLContext(GLFWwindow* window); +#endif + +#if defined(GLFW_EXPOSE_NATIVE_X11) +/*! @brief Returns the `Display` used by GLFW. + * + * @return The `Display` used by GLFW, or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.0. + * + * @ingroup native + */ +GLFWAPI Display* glfwGetX11Display(void); + +/*! @brief Returns the `RRCrtc` of the specified monitor. + * + * @return The `RRCrtc` of the specified monitor, or `None` if an + * [error](@ref error_handling) occurred. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.1. 
+ * + * @ingroup native + */ +GLFWAPI RRCrtc glfwGetX11Adapter(GLFWmonitor* monitor); + +/*! @brief Returns the `RROutput` of the specified monitor. + * + * @return The `RROutput` of the specified monitor, or `None` if an + * [error](@ref error_handling) occurred. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.1. + * + * @ingroup native + */ +GLFWAPI RROutput glfwGetX11Monitor(GLFWmonitor* monitor); + +/*! @brief Returns the `Window` of the specified window. + * + * @return The `Window` of the specified window, or `None` if an + * [error](@ref error_handling) occurred. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.0. + * + * @ingroup native + */ +GLFWAPI Window glfwGetX11Window(GLFWwindow* window); + +/*! @brief Sets the current primary selection to the specified string. + * + * @param[in] string A UTF-8 encoded string. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @pointer_lifetime The specified string is copied before this function + * returns. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref clipboard + * @sa glfwGetX11SelectionString + * @sa glfwSetClipboardString + * + * @since Added in version 3.3. + * + * @ingroup native + */ +GLFWAPI void glfwSetX11SelectionString(const char* string); + +/*! @brief Returns the contents of the current primary selection as a string. + * + * If the selection is empty or if its contents cannot be converted, `NULL` + * is returned and a @ref GLFW_FORMAT_UNAVAILABLE error is generated. + * + * @return The contents of the selection as a UTF-8 encoded string, or `NULL` + * if an [error](@ref error_handling) occurred. + * + * @errors Possible errors include @ref GLFW_NOT_INITIALIZED and @ref + * GLFW_PLATFORM_ERROR. + * + * @pointer_lifetime The returned string is allocated and freed by GLFW. You + * should not free it yourself. It is valid until the next call to @ref + * glfwGetX11SelectionString or @ref glfwSetX11SelectionString, or until the + * library is terminated. + * + * @thread_safety This function must only be called from the main thread. + * + * @sa @ref clipboard + * @sa glfwSetX11SelectionString + * @sa glfwGetClipboardString + * + * @since Added in version 3.3. + * + * @ingroup native + */ +GLFWAPI const char* glfwGetX11SelectionString(void); +#endif + +#if defined(GLFW_EXPOSE_NATIVE_GLX) +/*! @brief Returns the `GLXContext` of the specified window. + * + * @return The `GLXContext` of the specified window, or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.0. + * + * @ingroup native + */ +GLFWAPI GLXContext glfwGetGLXContext(GLFWwindow* window); + +/*! @brief Returns the `GLXWindow` of the specified window. + * + * @return The `GLXWindow` of the specified window, or `None` if an + * [error](@ref error_handling) occurred. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.2. + * + * @ingroup native + */ +GLFWAPI GLXWindow glfwGetGLXWindow(GLFWwindow* window); +#endif + +#if defined(GLFW_EXPOSE_NATIVE_WAYLAND) +/*! @brief Returns the `struct wl_display*` used by GLFW. 
+ * + * @return The `struct wl_display*` used by GLFW, or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.2. + * + * @ingroup native + */ +GLFWAPI struct wl_display* glfwGetWaylandDisplay(void); + +/*! @brief Returns the `struct wl_output*` of the specified monitor. + * + * @return The `struct wl_output*` of the specified monitor, or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.2. + * + * @ingroup native + */ +GLFWAPI struct wl_output* glfwGetWaylandMonitor(GLFWmonitor* monitor); + +/*! @brief Returns the main `struct wl_surface*` of the specified window. + * + * @return The main `struct wl_surface*` of the specified window, or `NULL` if + * an [error](@ref error_handling) occurred. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.2. + * + * @ingroup native + */ +GLFWAPI struct wl_surface* glfwGetWaylandWindow(GLFWwindow* window); +#endif + +#if defined(GLFW_EXPOSE_NATIVE_EGL) +/*! @brief Returns the `EGLDisplay` used by GLFW. + * + * @return The `EGLDisplay` used by GLFW, or `EGL_NO_DISPLAY` if an + * [error](@ref error_handling) occurred. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.0. + * + * @ingroup native + */ +GLFWAPI EGLDisplay glfwGetEGLDisplay(void); + +/*! @brief Returns the `EGLContext` of the specified window. + * + * @return The `EGLContext` of the specified window, or `EGL_NO_CONTEXT` if an + * [error](@ref error_handling) occurred. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.0. + * + * @ingroup native + */ +GLFWAPI EGLContext glfwGetEGLContext(GLFWwindow* window); + +/*! @brief Returns the `EGLSurface` of the specified window. + * + * @return The `EGLSurface` of the specified window, or `EGL_NO_SURFACE` if an + * [error](@ref error_handling) occurred. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.0. + * + * @ingroup native + */ +GLFWAPI EGLSurface glfwGetEGLSurface(GLFWwindow* window); +#endif + +#if defined(GLFW_EXPOSE_NATIVE_OSMESA) +/*! @brief Retrieves the color buffer associated with the specified window. + * + * @param[in] window The window whose color buffer to retrieve. + * @param[out] width Where to store the width of the color buffer, or `NULL`. + * @param[out] height Where to store the height of the color buffer, or `NULL`. + * @param[out] format Where to store the OSMesa pixel format of the color + * buffer, or `NULL`. + * @param[out] buffer Where to store the address of the color buffer, or + * `NULL`. + * @return `GLFW_TRUE` if successful, or `GLFW_FALSE` if an + * [error](@ref error_handling) occurred. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.3. + * + * @ingroup native + */ +GLFWAPI int glfwGetOSMesaColorBuffer(GLFWwindow* window, int* width, int* height, int* format, void** buffer); + +/*! @brief Retrieves the depth buffer associated with the specified window. + * + * @param[in] window The window whose depth buffer to retrieve. 
+ * @param[out] width Where to store the width of the depth buffer, or `NULL`. + * @param[out] height Where to store the height of the depth buffer, or `NULL`. + * @param[out] bytesPerValue Where to store the number of bytes per depth + * buffer element, or `NULL`. + * @param[out] buffer Where to store the address of the depth buffer, or + * `NULL`. + * @return `GLFW_TRUE` if successful, or `GLFW_FALSE` if an + * [error](@ref error_handling) occurred. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.3. + * + * @ingroup native + */ +GLFWAPI int glfwGetOSMesaDepthBuffer(GLFWwindow* window, int* width, int* height, int* bytesPerValue, void** buffer); + +/*! @brief Returns the `OSMesaContext` of the specified window. + * + * @return The `OSMesaContext` of the specified window, or `NULL` if an + * [error](@ref error_handling) occurred. + * + * @thread_safety This function may be called from any thread. Access is not + * synchronized. + * + * @since Added in version 3.3. + * + * @ingroup native + */ +GLFWAPI OSMesaContext glfwGetOSMesaContext(GLFWwindow* window); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* _glfw3_native_h_ */ + diff --git a/stb/stb_vorbis.h b/stb/stb_vorbis.h new file mode 100644 index 0000000..2f62db2 --- /dev/null +++ b/stb/stb_vorbis.h @@ -0,0 +1,5558 @@ +// Ogg Vorbis audio decoder - v1.19 - public domain +// http://nothings.org/stb_vorbis/ +// +// Original version written by Sean Barrett in 2007. +// +// Originally sponsored by RAD Game Tools. Seeking implementation +// sponsored by Phillip Bennefall, Marc Andersen, Aaron Baker, +// Elias Software, Aras Pranckevicius, and Sean Barrett. +// +// LICENSE +// +// See end of file for license information. +// +// Limitations: +// +// - floor 0 not supported (used in old ogg vorbis files pre-2004) +// - lossless sample-truncation at beginning ignored +// - cannot concatenate multiple vorbis streams +// - sample positions are 32-bit, limiting seekable 192Khz +// files to around 6 hours (Ogg supports 64-bit) +// +// Feature contributors: +// Dougall Johnson (sample-exact seeking) +// +// Bugfix/warning contributors: +// Terje Mathisen Niklas Frykholm Andy Hill +// Casey Muratori John Bolton Gargaj +// Laurent Gomila Marc LeBlanc Ronny Chevalier +// Bernhard Wodo Evan Balster github:alxprd +// Tom Beaumont Ingo Leitgeb Nicolas Guillemot +// Phillip Bennefall Rohit Thiago Goulart +// github:manxorist saga musix github:infatum +// Timur Gagiev Maxwell Koo Peter Waller +// github:audinowho Dougall Johnson +// +// Partial history: +// 1.19 - 2020-02-05 - warnings +// 1.18 - 2020-02-02 - fix seek bugs; parse header comments; misc warnings etc. 
+// 1.17 - 2019-07-08 - fix CVE-2019-13217..CVE-2019-13223 (by ForAllSecure)
+// 1.16 - 2019-03-04 - fix warnings
+// 1.15 - 2019-02-07 - explicit failure if Ogg Skeleton data is found
+// 1.14 - 2018-02-11 - delete bogus dealloca usage
+// 1.13 - 2018-01-29 - fix truncation of last frame (hopefully)
+// 1.12 - 2017-11-21 - limit residue begin/end to blocksize/2 to avoid large temp allocs in bad/corrupt files
+// 1.11 - 2017-07-23 - fix MinGW compilation
+// 1.10 - 2017-03-03 - more robust seeking; fix negative ilog(); clear error in open_memory
+// 1.09 - 2016-04-04 - back out 'truncation of last frame' fix from previous version
+// 1.08 - 2016-04-02 - warnings; setup memory leaks; truncation of last frame
+// 1.07 - 2015-01-16 - fixes for crashes on invalid files; warning fixes; const
+// 1.06 - 2015-08-31 - full, correct support for seeking API (Dougall Johnson)
+// some crash fixes when out of memory or with corrupt files
+// fix some inappropriately signed shifts
+// 1.05 - 2015-04-19 - don't define __forceinline if it's redundant
+// 1.04 - 2014-08-27 - fix missing const-correct case in API
+// 1.03 - 2014-08-07 - warning fixes
+// 1.02 - 2014-07-09 - declare qsort comparison as explicitly _cdecl in Windows
+// 1.01 - 2014-06-18 - fix stb_vorbis_get_samples_float (interleaved was correct)
+// 1.0 - 2014-05-26 - fix memory leaks; fix warnings; fix bugs in >2-channel;
+// (API change) report sample rate for decode-full-file funcs
+//
+// See end of file for full version history.
+
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// HEADER BEGINS HERE
+//
+
+#ifndef STB_VORBIS_INCLUDE_STB_VORBIS_H
+#define STB_VORBIS_INCLUDE_STB_VORBIS_H
+
+#if defined(STB_VORBIS_NO_CRT) && !defined(STB_VORBIS_NO_STDIO)
+#define STB_VORBIS_NO_STDIO 1
+#endif
+
+#ifndef STB_VORBIS_NO_STDIO
+#include <stdio.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/////////// THREAD SAFETY
+
+// Individual stb_vorbis* handles are not thread-safe; you cannot decode from
+// them from multiple threads at the same time. However, you can have multiple
+// stb_vorbis* handles and decode from them independently in multiple threads.
+
+
+/////////// MEMORY ALLOCATION
+
+// normally stb_vorbis uses malloc() to allocate memory at startup,
+// and alloca() to allocate temporary memory during a frame on the
+// stack. (Memory consumption will depend on the amount of setup
+// data in the file and how you set the compile flags for speed
+// vs. size. In my test files the maximal-size usage is ~150KB.)
+//
+// You can modify the wrapper functions in the source (setup_malloc,
+// setup_temp_malloc, temp_malloc) to change this behavior, or you
+// can use a simpler allocation model: you pass in a buffer from
+// which stb_vorbis will allocate _all_ its memory (including the
+// temp memory). "open" may fail with a VORBIS_outofmem if you
+// do not pass in enough data; there is no way to determine how
+// much you do need except to succeed (at which point you can
+// query get_info to find the exact amount required. yes I know
+// this is lame).
+//
+// If you pass in a non-NULL buffer of the type below, allocation
+// will occur from it as described above.
Otherwise just pass NULL +// to use malloc()/alloca() + +typedef struct +{ + char *alloc_buffer; + int alloc_buffer_length_in_bytes; +} stb_vorbis_alloc; + + +/////////// FUNCTIONS USEABLE WITH ALL INPUT MODES + +typedef struct stb_vorbis stb_vorbis; + +typedef struct +{ + unsigned int sample_rate; + int channels; + + unsigned int setup_memory_required; + unsigned int setup_temp_memory_required; + unsigned int temp_memory_required; + + int max_frame_size; +} stb_vorbis_info; + +typedef struct +{ + char *vendor; + + int comment_list_length; + char **comment_list; +} stb_vorbis_comment; + +// get general information about the file +extern stb_vorbis_info stb_vorbis_get_info(stb_vorbis *f); + +// get ogg comments +extern stb_vorbis_comment stb_vorbis_get_comment(stb_vorbis *f); + +// get the last error detected (clears it, too) +extern int stb_vorbis_get_error(stb_vorbis *f); + +// close an ogg vorbis file and free all memory in use +extern void stb_vorbis_close(stb_vorbis *f); + +// this function returns the offset (in samples) from the beginning of the +// file that will be returned by the next decode, if it is known, or -1 +// otherwise. after a flush_pushdata() call, this may take a while before +// it becomes valid again. +// NOT WORKING YET after a seek with PULLDATA API +extern int stb_vorbis_get_sample_offset(stb_vorbis *f); + +// returns the current seek point within the file, or offset from the beginning +// of the memory buffer. In pushdata mode it returns 0. +extern unsigned int stb_vorbis_get_file_offset(stb_vorbis *f); + +/////////// PUSHDATA API + +#ifndef STB_VORBIS_NO_PUSHDATA_API + +// this API allows you to get blocks of data from any source and hand +// them to stb_vorbis. you have to buffer them; stb_vorbis will tell +// you how much it used, and you have to give it the rest next time; +// and stb_vorbis may not have enough data to work with and you will +// need to give it the same data again PLUS more. Note that the Vorbis +// specification does not bound the size of an individual frame. 
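+
+// A rough usage sketch (not part of the original header; refill() and the 64KB
+// staging buffer are placeholder assumptions -- only the stb_vorbis_*_pushdata()
+// calls and the VORBIS_need_more_data retry pattern come from the API notes
+// below):
+//
+//    unsigned char buf[65536];                   // hypothetical staging buffer
+//    int filled = refill(buf, 0, sizeof(buf));   // refill() stands in for your I/O
+//    int error = 0, used = 0;
+//    stb_vorbis *v = stb_vorbis_open_pushdata(buf, filled, &used, &error, NULL);
+//    if (!v && error == VORBIS_need_more_data) {
+//       // header block was incomplete: pass a larger block from the file start
+//    }
+//    for (;;) {   // decode until your own end-of-stream condition
+//       float **output; int channels, samples;
+//       int n = stb_vorbis_decode_frame_pushdata(v, buf + used, filled - used,
+//                                                &channels, &output, &samples);
+//       if (n == 0) { /* need more data: move the unused tail down and refill */ }
+//       used += n;
+//       if (samples > 0) { /* consume output[ch][0..samples-1] here */ }
+//    }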
+ +extern stb_vorbis *stb_vorbis_open_pushdata( + const unsigned char * datablock, int datablock_length_in_bytes, + int *datablock_memory_consumed_in_bytes, + int *error, + const stb_vorbis_alloc *alloc_buffer); +// create a vorbis decoder by passing in the initial data block containing +// the ogg&vorbis headers (you don't need to do parse them, just provide +// the first N bytes of the file--you're told if it's not enough, see below) +// on success, returns an stb_vorbis *, does not set error, returns the amount of +// data parsed/consumed on this call in *datablock_memory_consumed_in_bytes; +// on failure, returns NULL on error and sets *error, does not change *datablock_memory_consumed +// if returns NULL and *error is VORBIS_need_more_data, then the input block was +// incomplete and you need to pass in a larger block from the start of the file + +extern int stb_vorbis_decode_frame_pushdata( + stb_vorbis *f, + const unsigned char *datablock, int datablock_length_in_bytes, + int *channels, // place to write number of float * buffers + float ***output, // place to write float ** array of float * buffers + int *samples // place to write number of output samples + ); +// decode a frame of audio sample data if possible from the passed-in data block +// +// return value: number of bytes we used from datablock +// +// possible cases: +// 0 bytes used, 0 samples output (need more data) +// N bytes used, 0 samples output (resynching the stream, keep going) +// N bytes used, M samples output (one frame of data) +// note that after opening a file, you will ALWAYS get one N-bytes,0-sample +// frame, because Vorbis always "discards" the first frame. +// +// Note that on resynch, stb_vorbis will rarely consume all of the buffer, +// instead only datablock_length_in_bytes-3 or less. This is because it wants +// to avoid missing parts of a page header if they cross a datablock boundary, +// without writing state-machiney code to record a partial detection. +// +// The number of channels returned are stored in *channels (which can be +// NULL--it is always the same as the number of channels reported by +// get_info). *output will contain an array of float* buffers, one per +// channel. In other words, (*output)[0][0] contains the first sample from +// the first channel, and (*output)[1][0] contains the first sample from +// the second channel. + +extern void stb_vorbis_flush_pushdata(stb_vorbis *f); +// inform stb_vorbis that your next datablock will not be contiguous with +// previous ones (e.g. you've seeked in the data); future attempts to decode +// frames will cause stb_vorbis to resynchronize (as noted above), and +// once it sees a valid Ogg page (typically 4-8KB, as large as 64KB), it +// will begin decoding the _next_ frame. +// +// if you want to seek using pushdata, you need to seek in your file, then +// call stb_vorbis_flush_pushdata(), then start calling decoding, then once +// decoding is returning you data, call stb_vorbis_get_sample_offset, and +// if you don't like the result, seek your file again and repeat. +#endif + + +////////// PULLING INPUT API + +#ifndef STB_VORBIS_NO_PULLDATA_API +// This API assumes stb_vorbis is allowed to pull data from a source-- +// either a block of memory containing the _entire_ vorbis stream, or a +// FILE * that you or it create, or possibly some other reading mechanism +// if you go modify the source to replace the FILE * case with some kind +// of callback to your code. 
(But if you don't support seeking, you may +// just want to go ahead and use pushdata.) + +#if !defined(STB_VORBIS_NO_STDIO) && !defined(STB_VORBIS_NO_INTEGER_CONVERSION) +extern int stb_vorbis_decode_filename(const char *filename, int *channels, int *sample_rate, short **output); +#endif +#if !defined(STB_VORBIS_NO_INTEGER_CONVERSION) +extern int stb_vorbis_decode_memory(const unsigned char *mem, int len, int *channels, int *sample_rate, short **output); +#endif +// decode an entire file and output the data interleaved into a malloc()ed +// buffer stored in *output. The return value is the number of samples +// decoded, or -1 if the file could not be opened or was not an ogg vorbis file. +// When you're done with it, just free() the pointer returned in *output. + +extern stb_vorbis * stb_vorbis_open_memory(const unsigned char *data, int len, + int *error, const stb_vorbis_alloc *alloc_buffer); +// create an ogg vorbis decoder from an ogg vorbis stream in memory (note +// this must be the entire stream!). on failure, returns NULL and sets *error + +#ifndef STB_VORBIS_NO_STDIO +extern stb_vorbis * stb_vorbis_open_filename(const char *filename, + int *error, const stb_vorbis_alloc *alloc_buffer); +// create an ogg vorbis decoder from a filename via fopen(). on failure, +// returns NULL and sets *error (possibly to VORBIS_file_open_failure). + +extern stb_vorbis * stb_vorbis_open_file(FILE *f, int close_handle_on_close, + int *error, const stb_vorbis_alloc *alloc_buffer); +// create an ogg vorbis decoder from an open FILE *, looking for a stream at +// the _current_ seek point (ftell). on failure, returns NULL and sets *error. +// note that stb_vorbis must "own" this stream; if you seek it in between +// calls to stb_vorbis, it will become confused. Moreover, if you attempt to +// perform stb_vorbis_seek_*() operations on this file, it will assume it +// owns the _entire_ rest of the file after the start point. Use the next +// function, stb_vorbis_open_file_section(), to limit it. + +extern stb_vorbis * stb_vorbis_open_file_section(FILE *f, int close_handle_on_close, + int *error, const stb_vorbis_alloc *alloc_buffer, unsigned int len); +// create an ogg vorbis decoder from an open FILE *, looking for a stream at +// the _current_ seek point (ftell); the stream will be of length 'len' bytes. +// on failure, returns NULL and sets *error. note that stb_vorbis must "own" +// this stream; if you seek it in between calls to stb_vorbis, it will become +// confused. +#endif + +extern int stb_vorbis_seek_frame(stb_vorbis *f, unsigned int sample_number); +extern int stb_vorbis_seek(stb_vorbis *f, unsigned int sample_number); +// these functions seek in the Vorbis file to (approximately) 'sample_number'. +// after calling seek_frame(), the next call to get_frame_*() will include +// the specified sample. after calling stb_vorbis_seek(), the next call to +// stb_vorbis_get_samples_* will start with the specified sample. If you +// do not need to seek to EXACTLY the target sample when using get_samples_*, +// you can also use seek_frame(). + +extern int stb_vorbis_seek_start(stb_vorbis *f); +// this function is equivalent to stb_vorbis_seek(f,0) + +extern unsigned int stb_vorbis_stream_length_in_samples(stb_vorbis *f); +extern float stb_vorbis_stream_length_in_seconds(stb_vorbis *f); +// these functions return the total length of the vorbis stream + +extern int stb_vorbis_get_frame_float(stb_vorbis *f, int *channels, float ***output); +// decode the next frame and return the number of samples. 
the number of +// channels returned are stored in *channels (which can be NULL--it is always +// the same as the number of channels reported by get_info). *output will +// contain an array of float* buffers, one per channel. These outputs will +// be overwritten on the next call to stb_vorbis_get_frame_*. +// +// You generally should not intermix calls to stb_vorbis_get_frame_*() +// and stb_vorbis_get_samples_*(), since the latter calls the former. + +#ifndef STB_VORBIS_NO_INTEGER_CONVERSION +extern int stb_vorbis_get_frame_short_interleaved(stb_vorbis *f, int num_c, short *buffer, int num_shorts); +extern int stb_vorbis_get_frame_short (stb_vorbis *f, int num_c, short **buffer, int num_samples); +#endif +// decode the next frame and return the number of *samples* per channel. +// Note that for interleaved data, you pass in the number of shorts (the +// size of your array), but the return value is the number of samples per +// channel, not the total number of samples. +// +// The data is coerced to the number of channels you request according to the +// channel coercion rules (see below). You must pass in the size of your +// buffer(s) so that stb_vorbis will not overwrite the end of the buffer. +// The maximum buffer size needed can be gotten from get_info(); however, +// the Vorbis I specification implies an absolute maximum of 4096 samples +// per channel. + +// Channel coercion rules: +// Let M be the number of channels requested, and N the number of channels present, +// and Cn be the nth channel; let stereo L be the sum of all L and center channels, +// and stereo R be the sum of all R and center channels (channel assignment from the +// vorbis spec). +// M N output +// 1 k sum(Ck) for all k +// 2 * stereo L, stereo R +// k l k > l, the first l channels, then 0s +// k l k <= l, the first k channels +// Note that this is not _good_ surround etc. mixing at all! It's just so +// you get something useful. + +extern int stb_vorbis_get_samples_float_interleaved(stb_vorbis *f, int channels, float *buffer, int num_floats); +extern int stb_vorbis_get_samples_float(stb_vorbis *f, int channels, float **buffer, int num_samples); +// gets num_samples samples, not necessarily on a frame boundary--this requires +// buffering so you have to supply the buffers. DOES NOT APPLY THE COERCION RULES. +// Returns the number of samples stored per channel; it may be less than requested +// at the end of the file. If there are no more samples in the file, returns 0. + +#ifndef STB_VORBIS_NO_INTEGER_CONVERSION +extern int stb_vorbis_get_samples_short_interleaved(stb_vorbis *f, int channels, short *buffer, int num_shorts); +extern int stb_vorbis_get_samples_short(stb_vorbis *f, int channels, short **buffer, int num_samples); +#endif +// gets num_samples samples, not necessarily on a frame boundary--this requires +// buffering so you have to supply the buffers. Applies the coercion rules above +// to produce 'channels' channels. Returns the number of samples stored per channel; +// it may be less than requested at the end of the file. If there are no more +// samples in the file, returns 0. 
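+
+// A rough end-to-end sketch of the pull API (not part of the original header;
+// "song.ogg" and the 4096-short chunk size are arbitrary placeholders):
+//
+//    int error = 0;
+//    stb_vorbis *v = stb_vorbis_open_filename("song.ogg", &error, NULL);
+//    if (v) {
+//       stb_vorbis_info info = stb_vorbis_get_info(v);
+//       short chunk[4096];
+//       for (;;) {
+//          int per_chan = stb_vorbis_get_samples_short_interleaved(
+//                            v, info.channels, chunk, 4096);
+//          if (per_chan == 0) break;   // no samples left in the file
+//          // feed per_chan * info.channels interleaved shorts to your output here
+//       }
+//       stb_vorbis_close(v);
+//    }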
+ +#endif + +//////// ERROR CODES + +enum STBVorbisError +{ + VORBIS__no_error, + + VORBIS_need_more_data=1, // not a real error + + VORBIS_invalid_api_mixing, // can't mix API modes + VORBIS_outofmem, // not enough memory + VORBIS_feature_not_supported, // uses floor 0 + VORBIS_too_many_channels, // STB_VORBIS_MAX_CHANNELS is too small + VORBIS_file_open_failure, // fopen() failed + VORBIS_seek_without_length, // can't seek in unknown-length file + + VORBIS_unexpected_eof=10, // file is truncated? + VORBIS_seek_invalid, // seek past EOF + + // decoding errors (corrupt/invalid stream) -- you probably + // don't care about the exact details of these + + // vorbis errors: + VORBIS_invalid_setup=20, + VORBIS_invalid_stream, + + // ogg errors: + VORBIS_missing_capture_pattern=30, + VORBIS_invalid_stream_structure_version, + VORBIS_continued_packet_flag_invalid, + VORBIS_incorrect_stream_serial_number, + VORBIS_invalid_first_page, + VORBIS_bad_packet_type, + VORBIS_cant_find_last_page, + VORBIS_seek_failed, + VORBIS_ogg_skeleton_not_supported +}; + + +#ifdef __cplusplus +} +#endif + +#endif // STB_VORBIS_INCLUDE_STB_VORBIS_H +// +// HEADER ENDS HERE +// +////////////////////////////////////////////////////////////////////////////// + +#ifndef STB_VORBIS_HEADER_ONLY + +// global configuration settings (e.g. set these in the project/makefile), +// or just set them in this file at the top (although ideally the first few +// should be visible when the header file is compiled too, although it's not +// crucial) + +// STB_VORBIS_NO_PUSHDATA_API +// does not compile the code for the various stb_vorbis_*_pushdata() +// functions +// #define STB_VORBIS_NO_PUSHDATA_API + +// STB_VORBIS_NO_PULLDATA_API +// does not compile the code for the non-pushdata APIs +// #define STB_VORBIS_NO_PULLDATA_API + +// STB_VORBIS_NO_STDIO +// does not compile the code for the APIs that use FILE *s internally +// or externally (implied by STB_VORBIS_NO_PULLDATA_API) +// #define STB_VORBIS_NO_STDIO + +// STB_VORBIS_NO_INTEGER_CONVERSION +// does not compile the code for converting audio sample data from +// float to integer (implied by STB_VORBIS_NO_PULLDATA_API) +// #define STB_VORBIS_NO_INTEGER_CONVERSION + +// STB_VORBIS_NO_FAST_SCALED_FLOAT +// does not use a fast float-to-int trick to accelerate float-to-int on +// most platforms which requires endianness be defined correctly. +//#define STB_VORBIS_NO_FAST_SCALED_FLOAT + + +// STB_VORBIS_MAX_CHANNELS [number] +// globally define this to the maximum number of channels you need. +// The spec does not put a restriction on channels except that +// the count is stored in a byte, so 255 is the hard limit. +// Reducing this saves about 16 bytes per value, so using 16 saves +// (255-16)*16 or around 4KB. Plus anything other memory usage +// I forgot to account for. Can probably go as low as 8 (7.1 audio), +// 6 (5.1 audio), or 2 (stereo only). +#ifndef STB_VORBIS_MAX_CHANNELS +#define STB_VORBIS_MAX_CHANNELS 16 // enough for anyone? +#endif + +// STB_VORBIS_PUSHDATA_CRC_COUNT [number] +// after a flush_pushdata(), stb_vorbis begins scanning for the +// next valid page, without backtracking. when it finds something +// that looks like a page, it streams through it and verifies its +// CRC32. Should that validation fail, it keeps scanning. But it's +// possible that _while_ streaming through to check the CRC32 of +// one candidate page, it sees another candidate page. This #define +// determines how many "overlapping" candidate pages it can search +// at once. 
Note that "real" pages are typically ~4KB to ~8KB, whereas +// garbage pages could be as big as 64KB, but probably average ~16KB. +// So don't hose ourselves by scanning an apparent 64KB page and +// missing a ton of real ones in the interim; so minimum of 2 +#ifndef STB_VORBIS_PUSHDATA_CRC_COUNT +#define STB_VORBIS_PUSHDATA_CRC_COUNT 4 +#endif + +// STB_VORBIS_FAST_HUFFMAN_LENGTH [number] +// sets the log size of the huffman-acceleration table. Maximum +// supported value is 24. with larger numbers, more decodings are O(1), +// but the table size is larger so worse cache missing, so you'll have +// to probe (and try multiple ogg vorbis files) to find the sweet spot. +#ifndef STB_VORBIS_FAST_HUFFMAN_LENGTH +#define STB_VORBIS_FAST_HUFFMAN_LENGTH 10 +#endif + +// STB_VORBIS_FAST_BINARY_LENGTH [number] +// sets the log size of the binary-search acceleration table. this +// is used in similar fashion to the fast-huffman size to set initial +// parameters for the binary search + +// STB_VORBIS_FAST_HUFFMAN_INT +// The fast huffman tables are much more efficient if they can be +// stored as 16-bit results instead of 32-bit results. This restricts +// the codebooks to having only 65535 possible outcomes, though. +// (At least, accelerated by the huffman table.) +#ifndef STB_VORBIS_FAST_HUFFMAN_INT +#define STB_VORBIS_FAST_HUFFMAN_SHORT +#endif + +// STB_VORBIS_NO_HUFFMAN_BINARY_SEARCH +// If the 'fast huffman' search doesn't succeed, then stb_vorbis falls +// back on binary searching for the correct one. This requires storing +// extra tables with the huffman codes in sorted order. Defining this +// symbol trades off space for speed by forcing a linear search in the +// non-fast case, except for "sparse" codebooks. +// #define STB_VORBIS_NO_HUFFMAN_BINARY_SEARCH + +// STB_VORBIS_DIVIDES_IN_RESIDUE +// stb_vorbis precomputes the result of the scalar residue decoding +// that would otherwise require a divide per chunk. you can trade off +// space for time by defining this symbol. +// #define STB_VORBIS_DIVIDES_IN_RESIDUE + +// STB_VORBIS_DIVIDES_IN_CODEBOOK +// vorbis VQ codebooks can be encoded two ways: with every case explicitly +// stored, or with all elements being chosen from a small range of values, +// and all values possible in all elements. By default, stb_vorbis expands +// this latter kind out to look like the former kind for ease of decoding, +// because otherwise an integer divide-per-vector-element is required to +// unpack the index. If you define STB_VORBIS_DIVIDES_IN_CODEBOOK, you can +// trade off storage for speed. +//#define STB_VORBIS_DIVIDES_IN_CODEBOOK + +#ifdef STB_VORBIS_CODEBOOK_SHORTS +#error "STB_VORBIS_CODEBOOK_SHORTS is no longer supported as it produced incorrect results for some input formats" +#endif + +// STB_VORBIS_DIVIDE_TABLE +// this replaces small integer divides in the floor decode loop with +// table lookups. made less than 1% difference, so disabled by default. + +// STB_VORBIS_NO_INLINE_DECODE +// disables the inlining of the scalar codebook fast-huffman decode. +// might save a little codespace; useful for debugging +// #define STB_VORBIS_NO_INLINE_DECODE + +// STB_VORBIS_NO_DEFER_FLOOR +// Normally we only decode the floor without synthesizing the actual +// full curve. We can instead synthesize the curve immediately. This +// requires more memory and is very likely slower, so I don't think +// you'd ever want to do it except for debugging. 
+// #define STB_VORBIS_NO_DEFER_FLOOR
+
+
+
+
+//////////////////////////////////////////////////////////////////////////////
+
+#ifdef STB_VORBIS_NO_PULLDATA_API
+ #define STB_VORBIS_NO_INTEGER_CONVERSION
+ #define STB_VORBIS_NO_STDIO
+#endif
+
+#if defined(STB_VORBIS_NO_CRT) && !defined(STB_VORBIS_NO_STDIO)
+ #define STB_VORBIS_NO_STDIO 1
+#endif
+
+#ifndef STB_VORBIS_NO_INTEGER_CONVERSION
+#ifndef STB_VORBIS_NO_FAST_SCALED_FLOAT
+
+ // only need endianness for fast-float-to-int, which we don't
+ // use for pushdata
+
+ #ifndef STB_VORBIS_BIG_ENDIAN
+ #define STB_VORBIS_ENDIAN 0
+ #else
+ #define STB_VORBIS_ENDIAN 1
+ #endif
+
+#endif
+#endif
+
+
+#ifndef STB_VORBIS_NO_STDIO
+#include <stdio.h>
+#endif
+
+#ifndef STB_VORBIS_NO_CRT
+ #include <stdlib.h>
+ #include <string.h>
+ #include <assert.h>
+ #include <math.h>
+
+ // find definition of alloca if it's not in stdlib.h:
+ #if defined(_MSC_VER) || defined(__MINGW32__)
+ #include <malloc.h>
+ #endif
+ #if defined(__linux__) || defined(__linux) || defined(__EMSCRIPTEN__)
+ #include <alloca.h>
+ #endif
+#else // STB_VORBIS_NO_CRT
+ #define NULL 0
+ #define malloc(s) 0
+ #define free(s) ((void) 0)
+ #define realloc(s) 0
+#endif // STB_VORBIS_NO_CRT
+
+#include <limits.h>
+
+#ifdef __MINGW32__
+ // eff you mingw:
+ // "fixed":
+ // http://sourceforge.net/p/mingw-w64/mailman/message/32882927/
+ // "no that broke the build, reverted, who cares about C":
+ // http://sourceforge.net/p/mingw-w64/mailman/message/32890381/
+ #ifdef __forceinline
+ #undef __forceinline
+ #endif
+ #define __forceinline
+ #ifdef alloca
+ #undef alloca
+ #endif
+ #define alloca __builtin_alloca
+#elif !defined(_MSC_VER)
+ #if __GNUC__
+ #define __forceinline inline
+ #else
+ #define __forceinline
+ #endif
+#endif
+
+#if STB_VORBIS_MAX_CHANNELS > 256
+#error "Value of STB_VORBIS_MAX_CHANNELS outside of allowed range"
+#endif
+
+#if STB_VORBIS_FAST_HUFFMAN_LENGTH > 24
+#error "Value of STB_VORBIS_FAST_HUFFMAN_LENGTH outside of allowed range"
+#endif
+
+
+#if 0
+#include <crtdbg.h>
+#define CHECK(f) _CrtIsValidHeapPointer(f->channel_buffers[1])
+#else
+#define CHECK(f) ((void) 0)
+#endif
+
+#define MAX_BLOCKSIZE_LOG 13 // from specification
+#define MAX_BLOCKSIZE (1 << MAX_BLOCKSIZE_LOG)
+
+
+typedef unsigned char uint8;
+typedef signed char int8;
+typedef unsigned short uint16;
+typedef signed short int16;
+typedef unsigned int uint32;
+typedef signed int int32;
+
+#ifndef TRUE
+#define TRUE 1
+#define FALSE 0
+#endif
+
+typedef float codetype;
+
+// @NOTE
+//
+// Some arrays below are tagged "//varies", which means it's actually
+// a variable-sized piece of data, but rather than malloc I assume it's
+// small enough it's better to just allocate it all together with the
+// main thing
+//
+// Most of the variables are specified with the smallest size I could pack
+// them into. It might give better performance to make them all full-sized
+// integers. It should be safe to freely rearrange the structures or change
+// the sizes larger--nothing relies on silently truncating etc., nor the
+// order of variables.
+ +#define FAST_HUFFMAN_TABLE_SIZE (1 << STB_VORBIS_FAST_HUFFMAN_LENGTH) +#define FAST_HUFFMAN_TABLE_MASK (FAST_HUFFMAN_TABLE_SIZE - 1) + +typedef struct +{ + int dimensions, entries; + uint8 *codeword_lengths; + float minimum_value; + float delta_value; + uint8 value_bits; + uint8 lookup_type; + uint8 sequence_p; + uint8 sparse; + uint32 lookup_values; + codetype *multiplicands; + uint32 *codewords; + #ifdef STB_VORBIS_FAST_HUFFMAN_SHORT + int16 fast_huffman[FAST_HUFFMAN_TABLE_SIZE]; + #else + int32 fast_huffman[FAST_HUFFMAN_TABLE_SIZE]; + #endif + uint32 *sorted_codewords; + int *sorted_values; + int sorted_entries; +} Codebook; + +typedef struct +{ + uint8 order; + uint16 rate; + uint16 bark_map_size; + uint8 amplitude_bits; + uint8 amplitude_offset; + uint8 number_of_books; + uint8 book_list[16]; // varies +} Floor0; + +typedef struct +{ + uint8 partitions; + uint8 partition_class_list[32]; // varies + uint8 class_dimensions[16]; // varies + uint8 class_subclasses[16]; // varies + uint8 class_masterbooks[16]; // varies + int16 subclass_books[16][8]; // varies + uint16 Xlist[31*8+2]; // varies + uint8 sorted_order[31*8+2]; + uint8 neighbors[31*8+2][2]; + uint8 floor1_multiplier; + uint8 rangebits; + int values; +} Floor1; + +typedef union +{ + Floor0 floor0; + Floor1 floor1; +} Floor; + +typedef struct +{ + uint32 begin, end; + uint32 part_size; + uint8 classifications; + uint8 classbook; + uint8 **classdata; + int16 (*residue_books)[8]; +} Residue; + +typedef struct +{ + uint8 magnitude; + uint8 angle; + uint8 mux; +} MappingChannel; + +typedef struct +{ + uint16 coupling_steps; + MappingChannel *chan; + uint8 submaps; + uint8 submap_floor[15]; // varies + uint8 submap_residue[15]; // varies +} Mapping; + +typedef struct +{ + uint8 blockflag; + uint8 mapping; + uint16 windowtype; + uint16 transformtype; +} Mode; + +typedef struct +{ + uint32 goal_crc; // expected crc if match + int bytes_left; // bytes left in packet + uint32 crc_so_far; // running crc + int bytes_done; // bytes processed in _current_ chunk + uint32 sample_loc; // granule pos encoded in page +} CRCscan; + +typedef struct +{ + uint32 page_start, page_end; + uint32 last_decoded_sample; +} ProbedPage; + +struct stb_vorbis +{ + // user-accessible info + unsigned int sample_rate; + int channels; + + unsigned int setup_memory_required; + unsigned int temp_memory_required; + unsigned int setup_temp_memory_required; + + char *vendor; + int comment_list_length; + char **comment_list; + + // input config +#ifndef STB_VORBIS_NO_STDIO + FILE *f; + uint32 f_start; + int close_on_free; +#endif + + uint8 *stream; + uint8 *stream_start; + uint8 *stream_end; + + uint32 stream_len; + + uint8 push_mode; + + // the page to seek to when seeking to start, may be zero + uint32 first_audio_page_offset; + + // p_first is the page on which the first audio packet ends + // (but not necessarily the page on which it starts) + ProbedPage p_first, p_last; + + // memory management + stb_vorbis_alloc alloc; + int setup_offset; + int temp_offset; + + // run-time results + int eof; + enum STBVorbisError error; + + // user-useful data + + // header info + int blocksize[2]; + int blocksize_0, blocksize_1; + int codebook_count; + Codebook *codebooks; + int floor_count; + uint16 floor_types[64]; // varies + Floor *floor_config; + int residue_count; + uint16 residue_types[64]; // varies + Residue *residue_config; + int mapping_count; + Mapping *mapping; + int mode_count; + Mode mode_config[64]; // varies + + uint32 total_samples; + + // decode buffer + float 
*channel_buffers[STB_VORBIS_MAX_CHANNELS]; + float *outputs [STB_VORBIS_MAX_CHANNELS]; + + float *previous_window[STB_VORBIS_MAX_CHANNELS]; + int previous_length; + + #ifndef STB_VORBIS_NO_DEFER_FLOOR + int16 *finalY[STB_VORBIS_MAX_CHANNELS]; + #else + float *floor_buffers[STB_VORBIS_MAX_CHANNELS]; + #endif + + uint32 current_loc; // sample location of next frame to decode + int current_loc_valid; + + // per-blocksize precomputed data + + // twiddle factors + float *A[2],*B[2],*C[2]; + float *window[2]; + uint16 *bit_reverse[2]; + + // current page/packet/segment streaming info + uint32 serial; // stream serial number for verification + int last_page; + int segment_count; + uint8 segments[255]; + uint8 page_flag; + uint8 bytes_in_seg; + uint8 first_decode; + int next_seg; + int last_seg; // flag that we're on the last segment + int last_seg_which; // what was the segment number of the last seg? + uint32 acc; + int valid_bits; + int packet_bytes; + int end_seg_with_known_loc; + uint32 known_loc_for_packet; + int discard_samples_deferred; + uint32 samples_output; + + // push mode scanning + int page_crc_tests; // only in push_mode: number of tests active; -1 if not searching +#ifndef STB_VORBIS_NO_PUSHDATA_API + CRCscan scan[STB_VORBIS_PUSHDATA_CRC_COUNT]; +#endif + + // sample-access + int channel_buffer_start; + int channel_buffer_end; +}; + +#if defined(STB_VORBIS_NO_PUSHDATA_API) + #define IS_PUSH_MODE(f) FALSE +#elif defined(STB_VORBIS_NO_PULLDATA_API) + #define IS_PUSH_MODE(f) TRUE +#else + #define IS_PUSH_MODE(f) ((f)->push_mode) +#endif + +typedef struct stb_vorbis vorb; + +static int error(vorb *f, enum STBVorbisError e) +{ + f->error = e; + if (!f->eof && e != VORBIS_need_more_data) { + f->error=e; // breakpoint for debugging + } + return 0; +} + + +// these functions are used for allocating temporary memory +// while decoding. if you can afford the stack space, use +// alloca(); otherwise, provide a temp buffer and it will +// allocate out of those. + +#define array_size_required(count,size) (count*(sizeof(void *)+(size))) + +#define temp_alloc(f,size) (f->alloc.alloc_buffer ? setup_temp_malloc(f,size) : alloca(size)) +#define temp_free(f,p) (void)0 +#define temp_alloc_save(f) ((f)->temp_offset) +#define temp_alloc_restore(f,p) ((f)->temp_offset = (p)) + +#define temp_block_array(f,count,size) make_block_array(temp_alloc(f,array_size_required(count,size)), count, size) + +// given a sufficiently large block of memory, make an array of pointers to subblocks of it +static void *make_block_array(void *mem, int count, int size) +{ + int i; + void ** p = (void **) mem; + char *q = (char *) (p + count); + for (i=0; i < count; ++i) { + p[i] = q; + q += size; + } + return p; +} + +static void *setup_malloc(vorb *f, int sz) +{ + sz = (sz+7) & ~7; // round up to nearest 8 for alignment of future allocs. + f->setup_memory_required += sz; + if (f->alloc.alloc_buffer) { + void *p = (char *) f->alloc.alloc_buffer + f->setup_offset; + if (f->setup_offset + sz > f->temp_offset) return NULL; + f->setup_offset += sz; + return p; + } + return sz ? malloc(sz) : NULL; +} + +static void setup_free(vorb *f, void *p) +{ + if (f->alloc.alloc_buffer) return; // do nothing; setup mem is a stack + free(p); +} + +static void *setup_temp_malloc(vorb *f, int sz) +{ + sz = (sz+7) & ~7; // round up to nearest 8 for alignment of future allocs. 
+ if (f->alloc.alloc_buffer) { + if (f->temp_offset - sz < f->setup_offset) return NULL; + f->temp_offset -= sz; + return (char *) f->alloc.alloc_buffer + f->temp_offset; + } + return malloc(sz); +} + +static void setup_temp_free(vorb *f, void *p, int sz) +{ + if (f->alloc.alloc_buffer) { + f->temp_offset += (sz+3)&~3; + return; + } + free(p); +} + +#define CRC32_POLY 0x04c11db7 // from spec + +static uint32 crc_table[256]; +static void crc32_init(void) +{ + int i,j; + uint32 s; + for(i=0; i < 256; i++) { + for (s=(uint32) i << 24, j=0; j < 8; ++j) + s = (s << 1) ^ (s >= (1U<<31) ? CRC32_POLY : 0); + crc_table[i] = s; + } +} + +static __forceinline uint32 crc32_update(uint32 crc, uint8 byte) +{ + return (crc << 8) ^ crc_table[byte ^ (crc >> 24)]; +} + + +// used in setup, and for huffman that doesn't go fast path +static unsigned int bit_reverse(unsigned int n) +{ + n = ((n & 0xAAAAAAAA) >> 1) | ((n & 0x55555555) << 1); + n = ((n & 0xCCCCCCCC) >> 2) | ((n & 0x33333333) << 2); + n = ((n & 0xF0F0F0F0) >> 4) | ((n & 0x0F0F0F0F) << 4); + n = ((n & 0xFF00FF00) >> 8) | ((n & 0x00FF00FF) << 8); + return (n >> 16) | (n << 16); +} + +static float square(float x) +{ + return x*x; +} + +// this is a weird definition of log2() for which log2(1) = 1, log2(2) = 2, log2(4) = 3 +// as required by the specification. fast(?) implementation from stb.h +// @OPTIMIZE: called multiple times per-packet with "constants"; move to setup +static int ilog(int32 n) +{ + static signed char log2_4[16] = { 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4 }; + + if (n < 0) return 0; // signed n returns 0 + + // 2 compares if n < 16, 3 compares otherwise (4 if signed or n > 1<<29) + if (n < (1 << 14)) + if (n < (1 << 4)) return 0 + log2_4[n ]; + else if (n < (1 << 9)) return 5 + log2_4[n >> 5]; + else return 10 + log2_4[n >> 10]; + else if (n < (1 << 24)) + if (n < (1 << 19)) return 15 + log2_4[n >> 15]; + else return 20 + log2_4[n >> 20]; + else if (n < (1 << 29)) return 25 + log2_4[n >> 25]; + else return 30 + log2_4[n >> 30]; +} + +#ifndef M_PI + #define M_PI 3.14159265358979323846264f // from CRC +#endif + +// code length assigned to a value with no huffman encoding +#define NO_CODE 255 + +/////////////////////// LEAF SETUP FUNCTIONS ////////////////////////// +// +// these functions are only called at setup, and only a few times +// per file + +static float float32_unpack(uint32 x) +{ + // from the specification + uint32 mantissa = x & 0x1fffff; + uint32 sign = x & 0x80000000; + uint32 exp = (x & 0x7fe00000) >> 21; + double res = sign ? -(double)mantissa : (double)mantissa; + return (float) ldexp((float)res, exp-788); +} + + +// zlib & jpeg huffman tables assume that the output symbols +// can either be arbitrarily arranged, or have monotonically +// increasing frequencies--they rely on the lengths being sorted; +// this makes for a very simple generation algorithm. +// vorbis allows a huffman table with non-sorted lengths. This +// requires a more sophisticated construction, since symbols in +// order do not map to huffman codes "in order". 
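+
+// A small worked example of the construction described above (not part of the
+// original source): for codeword lengths {2,4,4,4,4,2,3,3}, giving each symbol,
+// in order, the lowest available leaf of its length yields
+//
+//    entry 0: 00      entry 2: 0101    entry 4: 0111    entry 6: 110
+//    entry 1: 0100    entry 3: 0110    entry 5: 10      entry 7: 111
+//
+// compute_codewords() below produces exactly this assignment, storing each code
+// bit-reversed (via bit_reverse above) to match the order in which the decoder
+// reads bits from the stream.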
+static void add_entry(Codebook *c, uint32 huff_code, int symbol, int count, int len, uint32 *values) +{ + if (!c->sparse) { + c->codewords [symbol] = huff_code; + } else { + c->codewords [count] = huff_code; + c->codeword_lengths[count] = len; + values [count] = symbol; + } +} + +static int compute_codewords(Codebook *c, uint8 *len, int n, uint32 *values) +{ + int i,k,m=0; + uint32 available[32]; + + memset(available, 0, sizeof(available)); + // find the first entry + for (k=0; k < n; ++k) if (len[k] < NO_CODE) break; + if (k == n) { assert(c->sorted_entries == 0); return TRUE; } + // add to the list + add_entry(c, 0, k, m++, len[k], values); + // add all available leaves + for (i=1; i <= len[k]; ++i) + available[i] = 1U << (32-i); + // note that the above code treats the first case specially, + // but it's really the same as the following code, so they + // could probably be combined (except the initial code is 0, + // and I use 0 in available[] to mean 'empty') + for (i=k+1; i < n; ++i) { + uint32 res; + int z = len[i], y; + if (z == NO_CODE) continue; + // find lowest available leaf (should always be earliest, + // which is what the specification calls for) + // note that this property, and the fact we can never have + // more than one free leaf at a given level, isn't totally + // trivial to prove, but it seems true and the assert never + // fires, so! + while (z > 0 && !available[z]) --z; + if (z == 0) { return FALSE; } + res = available[z]; + assert(z >= 0 && z < 32); + available[z] = 0; + add_entry(c, bit_reverse(res), i, m++, len[i], values); + // propagate availability up the tree + if (z != len[i]) { + assert(len[i] >= 0 && len[i] < 32); + for (y=len[i]; y > z; --y) { + assert(available[y] == 0); + available[y] = res + (1 << (32-y)); + } + } + } + return TRUE; +} + +// accelerated huffman table allows fast O(1) match of all symbols +// of length <= STB_VORBIS_FAST_HUFFMAN_LENGTH +static void compute_accelerated_huffman(Codebook *c) +{ + int i, len; + for (i=0; i < FAST_HUFFMAN_TABLE_SIZE; ++i) + c->fast_huffman[i] = -1; + + len = c->sparse ? c->sorted_entries : c->entries; + #ifdef STB_VORBIS_FAST_HUFFMAN_SHORT + if (len > 32767) len = 32767; // largest possible value we can encode! + #endif + for (i=0; i < len; ++i) { + if (c->codeword_lengths[i] <= STB_VORBIS_FAST_HUFFMAN_LENGTH) { + uint32 z = c->sparse ? bit_reverse(c->sorted_codewords[i]) : c->codewords[i]; + // set table entries for all bit combinations in the higher bits + while (z < FAST_HUFFMAN_TABLE_SIZE) { + c->fast_huffman[z] = i; + z += 1 << c->codeword_lengths[i]; + } + } + } +} + +#ifdef _MSC_VER +#define STBV_CDECL __cdecl +#else +#define STBV_CDECL +#endif + +static int STBV_CDECL uint32_compare(const void *p, const void *q) +{ + uint32 x = * (uint32 *) p; + uint32 y = * (uint32 *) q; + return x < y ? -1 : x > y; +} + +static int include_in_sort(Codebook *c, uint8 len) +{ + if (c->sparse) { assert(len != NO_CODE); return TRUE; } + if (len == NO_CODE) return FALSE; + if (len > STB_VORBIS_FAST_HUFFMAN_LENGTH) return TRUE; + return FALSE; +} + +// if the fast table above doesn't work, we want to binary +// search them... need to reverse the bits +static void compute_sorted_huffman(Codebook *c, uint8 *lengths, uint32 *values) +{ + int i, len; + // build a list of all the entries + // OPTIMIZATION: don't include the short ones, since they'll be caught by FAST_HUFFMAN. + // this is kind of a frivolous optimization--I don't see any performance improvement, + // but it's like 4 extra lines of code, so. 
+ if (!c->sparse) { + int k = 0; + for (i=0; i < c->entries; ++i) + if (include_in_sort(c, lengths[i])) + c->sorted_codewords[k++] = bit_reverse(c->codewords[i]); + assert(k == c->sorted_entries); + } else { + for (i=0; i < c->sorted_entries; ++i) + c->sorted_codewords[i] = bit_reverse(c->codewords[i]); + } + + qsort(c->sorted_codewords, c->sorted_entries, sizeof(c->sorted_codewords[0]), uint32_compare); + c->sorted_codewords[c->sorted_entries] = 0xffffffff; + + len = c->sparse ? c->sorted_entries : c->entries; + // now we need to indicate how they correspond; we could either + // #1: sort a different data structure that says who they correspond to + // #2: for each sorted entry, search the original list to find who corresponds + // #3: for each original entry, find the sorted entry + // #1 requires extra storage, #2 is slow, #3 can use binary search! + for (i=0; i < len; ++i) { + int huff_len = c->sparse ? lengths[values[i]] : lengths[i]; + if (include_in_sort(c,huff_len)) { + uint32 code = bit_reverse(c->codewords[i]); + int x=0, n=c->sorted_entries; + while (n > 1) { + // invariant: sc[x] <= code < sc[x+n] + int m = x + (n >> 1); + if (c->sorted_codewords[m] <= code) { + x = m; + n -= (n>>1); + } else { + n >>= 1; + } + } + assert(c->sorted_codewords[x] == code); + if (c->sparse) { + c->sorted_values[x] = values[i]; + c->codeword_lengths[x] = huff_len; + } else { + c->sorted_values[x] = i; + } + } + } +} + +// only run while parsing the header (3 times) +static int vorbis_validate(uint8 *data) +{ + static uint8 vorbis[6] = { 'v', 'o', 'r', 'b', 'i', 's' }; + return memcmp(data, vorbis, 6) == 0; +} + +// called from setup only, once per code book +// (formula implied by specification) +static int lookup1_values(int entries, int dim) +{ + int r = (int) floor(exp((float) log((float) entries) / dim)); + if ((int) floor(pow((float) r+1, dim)) <= entries) // (int) cast for MinGW warning; + ++r; // floor() to avoid _ftol() when non-CRT + if (pow((float) r+1, dim) <= entries) + return -1; + if ((int) floor(pow((float) r, dim)) > entries) + return -1; + return r; +} + +// called twice per file +static void compute_twiddle_factors(int n, float *A, float *B, float *C) +{ + int n4 = n >> 2, n8 = n >> 3; + int k,k2; + + for (k=k2=0; k < n4; ++k,k2+=2) { + A[k2 ] = (float) cos(4*k*M_PI/n); + A[k2+1] = (float) -sin(4*k*M_PI/n); + B[k2 ] = (float) cos((k2+1)*M_PI/n/2) * 0.5f; + B[k2+1] = (float) sin((k2+1)*M_PI/n/2) * 0.5f; + } + for (k=k2=0; k < n8; ++k,k2+=2) { + C[k2 ] = (float) cos(2*(k2+1)*M_PI/n); + C[k2+1] = (float) -sin(2*(k2+1)*M_PI/n); + } +} + +static void compute_window(int n, float *window) +{ + int n2 = n >> 1, i; + for (i=0; i < n2; ++i) + window[i] = (float) sin(0.5 * M_PI * square((float) sin((i - 0 + 0.5) / n2 * 0.5 * M_PI))); +} + +static void compute_bitreverse(int n, uint16 *rev) +{ + int ld = ilog(n) - 1; // ilog is off-by-one from normal definitions + int i, n8 = n >> 3; + for (i=0; i < n8; ++i) + rev[i] = (bit_reverse(i) >> (32-ld+3)) << 2; +} + +static int init_blocksize(vorb *f, int b, int n) +{ + int n2 = n >> 1, n4 = n >> 2, n8 = n >> 3; + f->A[b] = (float *) setup_malloc(f, sizeof(float) * n2); + f->B[b] = (float *) setup_malloc(f, sizeof(float) * n2); + f->C[b] = (float *) setup_malloc(f, sizeof(float) * n4); + if (!f->A[b] || !f->B[b] || !f->C[b]) return error(f, VORBIS_outofmem); + compute_twiddle_factors(n, f->A[b], f->B[b], f->C[b]); + f->window[b] = (float *) setup_malloc(f, sizeof(float) * n2); + if (!f->window[b]) return error(f, VORBIS_outofmem); + 
compute_window(n, f->window[b]); + f->bit_reverse[b] = (uint16 *) setup_malloc(f, sizeof(uint16) * n8); + if (!f->bit_reverse[b]) return error(f, VORBIS_outofmem); + compute_bitreverse(n, f->bit_reverse[b]); + return TRUE; +} + +static void neighbors(uint16 *x, int n, int *plow, int *phigh) +{ + int low = -1; + int high = 65536; + int i; + for (i=0; i < n; ++i) { + if (x[i] > low && x[i] < x[n]) { *plow = i; low = x[i]; } + if (x[i] < high && x[i] > x[n]) { *phigh = i; high = x[i]; } + } +} + +// this has been repurposed so y is now the original index instead of y +typedef struct +{ + uint16 x,id; +} stbv__floor_ordering; + +static int STBV_CDECL point_compare(const void *p, const void *q) +{ + stbv__floor_ordering *a = (stbv__floor_ordering *) p; + stbv__floor_ordering *b = (stbv__floor_ordering *) q; + return a->x < b->x ? -1 : a->x > b->x; +} + +// +/////////////////////// END LEAF SETUP FUNCTIONS ////////////////////////// + + +#if defined(STB_VORBIS_NO_STDIO) + #define USE_MEMORY(z) TRUE +#else + #define USE_MEMORY(z) ((z)->stream) +#endif + +static uint8 get8(vorb *z) +{ + if (USE_MEMORY(z)) { + if (z->stream >= z->stream_end) { z->eof = TRUE; return 0; } + return *z->stream++; + } + + #ifndef STB_VORBIS_NO_STDIO + { + int c = fgetc(z->f); + if (c == EOF) { z->eof = TRUE; return 0; } + return c; + } + #endif +} + +static uint32 get32(vorb *f) +{ + uint32 x; + x = get8(f); + x += get8(f) << 8; + x += get8(f) << 16; + x += (uint32) get8(f) << 24; + return x; +} + +static int getn(vorb *z, uint8 *data, int n) +{ + if (USE_MEMORY(z)) { + if (z->stream+n > z->stream_end) { z->eof = 1; return 0; } + memcpy(data, z->stream, n); + z->stream += n; + return 1; + } + + #ifndef STB_VORBIS_NO_STDIO + if (fread(data, n, 1, z->f) == 1) + return 1; + else { + z->eof = 1; + return 0; + } + #endif +} + +static void skip(vorb *z, int n) +{ + if (USE_MEMORY(z)) { + z->stream += n; + if (z->stream >= z->stream_end) z->eof = 1; + return; + } + #ifndef STB_VORBIS_NO_STDIO + { + long x = ftell(z->f); + fseek(z->f, x+n, SEEK_SET); + } + #endif +} + +static int set_file_offset(stb_vorbis *f, unsigned int loc) +{ + #ifndef STB_VORBIS_NO_PUSHDATA_API + if (f->push_mode) return 0; + #endif + f->eof = 0; + if (USE_MEMORY(f)) { + if (f->stream_start + loc >= f->stream_end || f->stream_start + loc < f->stream_start) { + f->stream = f->stream_end; + f->eof = 1; + return 0; + } else { + f->stream = f->stream_start + loc; + return 1; + } + } + #ifndef STB_VORBIS_NO_STDIO + if (loc + f->f_start < loc || loc >= 0x80000000) { + loc = 0x7fffffff; + f->eof = 1; + } else { + loc += f->f_start; + } + if (!fseek(f->f, loc, SEEK_SET)) + return 1; + f->eof = 1; + fseek(f->f, f->f_start, SEEK_END); + return 0; + #endif +} + + +static uint8 ogg_page_header[4] = { 0x4f, 0x67, 0x67, 0x53 }; + +static int capture_pattern(vorb *f) +{ + if (0x4f != get8(f)) return FALSE; + if (0x67 != get8(f)) return FALSE; + if (0x67 != get8(f)) return FALSE; + if (0x53 != get8(f)) return FALSE; + return TRUE; +} + +#define PAGEFLAG_continued_packet 1 +#define PAGEFLAG_first_page 2 +#define PAGEFLAG_last_page 4 + +static int start_page_no_capturepattern(vorb *f) +{ + uint32 loc0,loc1,n; + if (f->first_decode && !IS_PUSH_MODE(f)) { + f->p_first.page_start = stb_vorbis_get_file_offset(f) - 4; + } + // stream structure version + if (0 != get8(f)) return error(f, VORBIS_invalid_stream_structure_version); + // header flag + f->page_flag = get8(f); + // absolute granule position + loc0 = get32(f); + loc1 = get32(f); + // @TODO: validate loc0,loc1 as valid 
positions? + // stream serial number -- vorbis doesn't interleave, so discard + get32(f); + //if (f->serial != get32(f)) return error(f, VORBIS_incorrect_stream_serial_number); + // page sequence number + n = get32(f); + f->last_page = n; + // CRC32 + get32(f); + // page_segments + f->segment_count = get8(f); + if (!getn(f, f->segments, f->segment_count)) + return error(f, VORBIS_unexpected_eof); + // assume we _don't_ know any the sample position of any segments + f->end_seg_with_known_loc = -2; + if (loc0 != ~0U || loc1 != ~0U) { + int i; + // determine which packet is the last one that will complete + for (i=f->segment_count-1; i >= 0; --i) + if (f->segments[i] < 255) + break; + // 'i' is now the index of the _last_ segment of a packet that ends + if (i >= 0) { + f->end_seg_with_known_loc = i; + f->known_loc_for_packet = loc0; + } + } + if (f->first_decode) { + int i,len; + len = 0; + for (i=0; i < f->segment_count; ++i) + len += f->segments[i]; + len += 27 + f->segment_count; + f->p_first.page_end = f->p_first.page_start + len; + f->p_first.last_decoded_sample = loc0; + } + f->next_seg = 0; + return TRUE; +} + +static int start_page(vorb *f) +{ + if (!capture_pattern(f)) return error(f, VORBIS_missing_capture_pattern); + return start_page_no_capturepattern(f); +} + +static int start_packet(vorb *f) +{ + while (f->next_seg == -1) { + if (!start_page(f)) return FALSE; + if (f->page_flag & PAGEFLAG_continued_packet) + return error(f, VORBIS_continued_packet_flag_invalid); + } + f->last_seg = FALSE; + f->valid_bits = 0; + f->packet_bytes = 0; + f->bytes_in_seg = 0; + // f->next_seg is now valid + return TRUE; +} + +static int maybe_start_packet(vorb *f) +{ + if (f->next_seg == -1) { + int x = get8(f); + if (f->eof) return FALSE; // EOF at page boundary is not an error! + if (0x4f != x ) return error(f, VORBIS_missing_capture_pattern); + if (0x67 != get8(f)) return error(f, VORBIS_missing_capture_pattern); + if (0x67 != get8(f)) return error(f, VORBIS_missing_capture_pattern); + if (0x53 != get8(f)) return error(f, VORBIS_missing_capture_pattern); + if (!start_page_no_capturepattern(f)) return FALSE; + if (f->page_flag & PAGEFLAG_continued_packet) { + // set up enough state that we can read this packet if we want, + // e.g. during recovery + f->last_seg = FALSE; + f->bytes_in_seg = 0; + return error(f, VORBIS_continued_packet_flag_invalid); + } + } + return start_packet(f); +} + +static int next_segment(vorb *f) +{ + int len; + if (f->last_seg) return 0; + if (f->next_seg == -1) { + f->last_seg_which = f->segment_count-1; // in case start_page fails + if (!start_page(f)) { f->last_seg = 1; return 0; } + if (!(f->page_flag & PAGEFLAG_continued_packet)) return error(f, VORBIS_continued_packet_flag_invalid); + } + len = f->segments[f->next_seg++]; + if (len < 255) { + f->last_seg = TRUE; + f->last_seg_which = f->next_seg-1; + } + if (f->next_seg >= f->segment_count) + f->next_seg = -1; + assert(f->bytes_in_seg == 0); + f->bytes_in_seg = len; + return len; +} + +#define EOP (-1) +#define INVALID_BITS (-1) + +static int get8_packet_raw(vorb *f) +{ + if (!f->bytes_in_seg) { // CLANG! 
+ if (f->last_seg) return EOP; + else if (!next_segment(f)) return EOP; + } + assert(f->bytes_in_seg > 0); + --f->bytes_in_seg; + ++f->packet_bytes; + return get8(f); +} + +static int get8_packet(vorb *f) +{ + int x = get8_packet_raw(f); + f->valid_bits = 0; + return x; +} + +static int get32_packet(vorb *f) +{ + uint32 x; + x = get8_packet(f); + x += get8_packet(f) << 8; + x += get8_packet(f) << 16; + x += (uint32) get8_packet(f) << 24; + return x; +} + +static void flush_packet(vorb *f) +{ + while (get8_packet_raw(f) != EOP); +} + +// @OPTIMIZE: this is the secondary bit decoder, so it's probably not as important +// as the huffman decoder? +static uint32 get_bits(vorb *f, int n) +{ + uint32 z; + + if (f->valid_bits < 0) return 0; + if (f->valid_bits < n) { + if (n > 24) { + // the accumulator technique below would not work correctly in this case + z = get_bits(f, 24); + z += get_bits(f, n-24) << 24; + return z; + } + if (f->valid_bits == 0) f->acc = 0; + while (f->valid_bits < n) { + int z = get8_packet_raw(f); + if (z == EOP) { + f->valid_bits = INVALID_BITS; + return 0; + } + f->acc += z << f->valid_bits; + f->valid_bits += 8; + } + } + if (f->valid_bits < 0) return 0; + z = f->acc & ((1 << n)-1); + f->acc >>= n; + f->valid_bits -= n; + return z; +} + +// @OPTIMIZE: primary accumulator for huffman +// expand the buffer to as many bits as possible without reading off end of packet +// it might be nice to allow f->valid_bits and f->acc to be stored in registers, +// e.g. cache them locally and decode locally +static __forceinline void prep_huffman(vorb *f) +{ + if (f->valid_bits <= 24) { + if (f->valid_bits == 0) f->acc = 0; + do { + int z; + if (f->last_seg && !f->bytes_in_seg) return; + z = get8_packet_raw(f); + if (z == EOP) return; + f->acc += (unsigned) z << f->valid_bits; + f->valid_bits += 8; + } while (f->valid_bits <= 24); + } +} + +enum +{ + VORBIS_packet_id = 1, + VORBIS_packet_comment = 3, + VORBIS_packet_setup = 5 +}; + +static int codebook_decode_scalar_raw(vorb *f, Codebook *c) +{ + int i; + prep_huffman(f); + + if (c->codewords == NULL && c->sorted_codewords == NULL) + return -1; + + // cases to use binary search: sorted_codewords && !c->codewords + // sorted_codewords && c->entries > 8 + if (c->entries > 8 ? 
c->sorted_codewords!=NULL : !c->codewords) { + // binary search + uint32 code = bit_reverse(f->acc); + int x=0, n=c->sorted_entries, len; + + while (n > 1) { + // invariant: sc[x] <= code < sc[x+n] + int m = x + (n >> 1); + if (c->sorted_codewords[m] <= code) { + x = m; + n -= (n>>1); + } else { + n >>= 1; + } + } + // x is now the sorted index + if (!c->sparse) x = c->sorted_values[x]; + // x is now sorted index if sparse, or symbol otherwise + len = c->codeword_lengths[x]; + if (f->valid_bits >= len) { + f->acc >>= len; + f->valid_bits -= len; + return x; + } + + f->valid_bits = 0; + return -1; + } + + // if small, linear search + assert(!c->sparse); + for (i=0; i < c->entries; ++i) { + if (c->codeword_lengths[i] == NO_CODE) continue; + if (c->codewords[i] == (f->acc & ((1 << c->codeword_lengths[i])-1))) { + if (f->valid_bits >= c->codeword_lengths[i]) { + f->acc >>= c->codeword_lengths[i]; + f->valid_bits -= c->codeword_lengths[i]; + return i; + } + f->valid_bits = 0; + return -1; + } + } + + error(f, VORBIS_invalid_stream); + f->valid_bits = 0; + return -1; +} + +#ifndef STB_VORBIS_NO_INLINE_DECODE + +#define DECODE_RAW(var, f,c) \ + if (f->valid_bits < STB_VORBIS_FAST_HUFFMAN_LENGTH) \ + prep_huffman(f); \ + var = f->acc & FAST_HUFFMAN_TABLE_MASK; \ + var = c->fast_huffman[var]; \ + if (var >= 0) { \ + int n = c->codeword_lengths[var]; \ + f->acc >>= n; \ + f->valid_bits -= n; \ + if (f->valid_bits < 0) { f->valid_bits = 0; var = -1; } \ + } else { \ + var = codebook_decode_scalar_raw(f,c); \ + } + +#else + +static int codebook_decode_scalar(vorb *f, Codebook *c) +{ + int i; + if (f->valid_bits < STB_VORBIS_FAST_HUFFMAN_LENGTH) + prep_huffman(f); + // fast huffman table lookup + i = f->acc & FAST_HUFFMAN_TABLE_MASK; + i = c->fast_huffman[i]; + if (i >= 0) { + f->acc >>= c->codeword_lengths[i]; + f->valid_bits -= c->codeword_lengths[i]; + if (f->valid_bits < 0) { f->valid_bits = 0; return -1; } + return i; + } + return codebook_decode_scalar_raw(f,c); +} + +#define DECODE_RAW(var,f,c) var = codebook_decode_scalar(f,c); + +#endif + +#define DECODE(var,f,c) \ + DECODE_RAW(var,f,c) \ + if (c->sparse) var = c->sorted_values[var]; + +#ifndef STB_VORBIS_DIVIDES_IN_CODEBOOK + #define DECODE_VQ(var,f,c) DECODE_RAW(var,f,c) +#else + #define DECODE_VQ(var,f,c) DECODE(var,f,c) +#endif + + + + + + +// CODEBOOK_ELEMENT_FAST is an optimization for the CODEBOOK_FLOATS case +// where we avoid one addition +#define CODEBOOK_ELEMENT(c,off) (c->multiplicands[off]) +#define CODEBOOK_ELEMENT_FAST(c,off) (c->multiplicands[off]) +#define CODEBOOK_ELEMENT_BASE(c) (0) + +static int codebook_decode_start(vorb *f, Codebook *c) +{ + int z = -1; + + // type 0 is only legal in a scalar context + if (c->lookup_type == 0) + error(f, VORBIS_invalid_stream); + else { + DECODE_VQ(z,f,c); + if (c->sparse) assert(z < c->sorted_entries); + if (z < 0) { // check for EOP + if (!f->bytes_in_seg) + if (f->last_seg) + return z; + error(f, VORBIS_invalid_stream); + } + } + return z; +} + +static int codebook_decode(vorb *f, Codebook *c, float *output, int len) +{ + int i,z = codebook_decode_start(f,c); + if (z < 0) return FALSE; + if (len > c->dimensions) len = c->dimensions; + +#ifdef STB_VORBIS_DIVIDES_IN_CODEBOOK + if (c->lookup_type == 1) { + float last = CODEBOOK_ELEMENT_BASE(c); + int div = 1; + for (i=0; i < len; ++i) { + int off = (z / div) % c->lookup_values; + float val = CODEBOOK_ELEMENT_FAST(c,off) + last; + output[i] += val; + if (c->sequence_p) last = val + c->minimum_value; + div *= c->lookup_values; + } + return 
TRUE; + } +#endif + + z *= c->dimensions; + if (c->sequence_p) { + float last = CODEBOOK_ELEMENT_BASE(c); + for (i=0; i < len; ++i) { + float val = CODEBOOK_ELEMENT_FAST(c,z+i) + last; + output[i] += val; + last = val + c->minimum_value; + } + } else { + float last = CODEBOOK_ELEMENT_BASE(c); + for (i=0; i < len; ++i) { + output[i] += CODEBOOK_ELEMENT_FAST(c,z+i) + last; + } + } + + return TRUE; +} + +static int codebook_decode_step(vorb *f, Codebook *c, float *output, int len, int step) +{ + int i,z = codebook_decode_start(f,c); + float last = CODEBOOK_ELEMENT_BASE(c); + if (z < 0) return FALSE; + if (len > c->dimensions) len = c->dimensions; + +#ifdef STB_VORBIS_DIVIDES_IN_CODEBOOK + if (c->lookup_type == 1) { + int div = 1; + for (i=0; i < len; ++i) { + int off = (z / div) % c->lookup_values; + float val = CODEBOOK_ELEMENT_FAST(c,off) + last; + output[i*step] += val; + if (c->sequence_p) last = val; + div *= c->lookup_values; + } + return TRUE; + } +#endif + + z *= c->dimensions; + for (i=0; i < len; ++i) { + float val = CODEBOOK_ELEMENT_FAST(c,z+i) + last; + output[i*step] += val; + if (c->sequence_p) last = val; + } + + return TRUE; +} + +static int codebook_decode_deinterleave_repeat(vorb *f, Codebook *c, float **outputs, int ch, int *c_inter_p, int *p_inter_p, int len, int total_decode) +{ + int c_inter = *c_inter_p; + int p_inter = *p_inter_p; + int i,z, effective = c->dimensions; + + // type 0 is only legal in a scalar context + if (c->lookup_type == 0) return error(f, VORBIS_invalid_stream); + + while (total_decode > 0) { + float last = CODEBOOK_ELEMENT_BASE(c); + DECODE_VQ(z,f,c); + #ifndef STB_VORBIS_DIVIDES_IN_CODEBOOK + assert(!c->sparse || z < c->sorted_entries); + #endif + if (z < 0) { + if (!f->bytes_in_seg) + if (f->last_seg) return FALSE; + return error(f, VORBIS_invalid_stream); + } + + // if this will take us off the end of the buffers, stop short! + // we check by computing the length of the virtual interleaved + // buffer (len*ch), our current offset within it (p_inter*ch)+(c_inter), + // and the length we'll be using (effective) + if (c_inter + p_inter*ch + effective > len * ch) { + effective = len*ch - (p_inter*ch - c_inter); + } + + #ifdef STB_VORBIS_DIVIDES_IN_CODEBOOK + if (c->lookup_type == 1) { + int div = 1; + for (i=0; i < effective; ++i) { + int off = (z / div) % c->lookup_values; + float val = CODEBOOK_ELEMENT_FAST(c,off) + last; + if (outputs[c_inter]) + outputs[c_inter][p_inter] += val; + if (++c_inter == ch) { c_inter = 0; ++p_inter; } + if (c->sequence_p) last = val; + div *= c->lookup_values; + } + } else + #endif + { + z *= c->dimensions; + if (c->sequence_p) { + for (i=0; i < effective; ++i) { + float val = CODEBOOK_ELEMENT_FAST(c,z+i) + last; + if (outputs[c_inter]) + outputs[c_inter][p_inter] += val; + if (++c_inter == ch) { c_inter = 0; ++p_inter; } + last = val; + } + } else { + for (i=0; i < effective; ++i) { + float val = CODEBOOK_ELEMENT_FAST(c,z+i) + last; + if (outputs[c_inter]) + outputs[c_inter][p_inter] += val; + if (++c_inter == ch) { c_inter = 0; ++p_inter; } + } + } + } + + total_decode -= effective; + } + *c_inter_p = c_inter; + *p_inter_p = p_inter; + return TRUE; +} + +static int predict_point(int x, int x0, int x1, int y0, int y1) +{ + int dy = y1 - y0; + int adx = x1 - x0; + // @OPTIMIZE: force int division to round in the right direction... is this necessary on x86? + int err = abs(dy) * (x - x0); + int off = err / adx; + return dy < 0 ? 
y0 - off : y0 + off; +} + +// the following table is block-copied from the specification +static float inverse_db_table[256] = +{ + 1.0649863e-07f, 1.1341951e-07f, 1.2079015e-07f, 1.2863978e-07f, + 1.3699951e-07f, 1.4590251e-07f, 1.5538408e-07f, 1.6548181e-07f, + 1.7623575e-07f, 1.8768855e-07f, 1.9988561e-07f, 2.1287530e-07f, + 2.2670913e-07f, 2.4144197e-07f, 2.5713223e-07f, 2.7384213e-07f, + 2.9163793e-07f, 3.1059021e-07f, 3.3077411e-07f, 3.5226968e-07f, + 3.7516214e-07f, 3.9954229e-07f, 4.2550680e-07f, 4.5315863e-07f, + 4.8260743e-07f, 5.1396998e-07f, 5.4737065e-07f, 5.8294187e-07f, + 6.2082472e-07f, 6.6116941e-07f, 7.0413592e-07f, 7.4989464e-07f, + 7.9862701e-07f, 8.5052630e-07f, 9.0579828e-07f, 9.6466216e-07f, + 1.0273513e-06f, 1.0941144e-06f, 1.1652161e-06f, 1.2409384e-06f, + 1.3215816e-06f, 1.4074654e-06f, 1.4989305e-06f, 1.5963394e-06f, + 1.7000785e-06f, 1.8105592e-06f, 1.9282195e-06f, 2.0535261e-06f, + 2.1869758e-06f, 2.3290978e-06f, 2.4804557e-06f, 2.6416497e-06f, + 2.8133190e-06f, 2.9961443e-06f, 3.1908506e-06f, 3.3982101e-06f, + 3.6190449e-06f, 3.8542308e-06f, 4.1047004e-06f, 4.3714470e-06f, + 4.6555282e-06f, 4.9580707e-06f, 5.2802740e-06f, 5.6234160e-06f, + 5.9888572e-06f, 6.3780469e-06f, 6.7925283e-06f, 7.2339451e-06f, + 7.7040476e-06f, 8.2047000e-06f, 8.7378876e-06f, 9.3057248e-06f, + 9.9104632e-06f, 1.0554501e-05f, 1.1240392e-05f, 1.1970856e-05f, + 1.2748789e-05f, 1.3577278e-05f, 1.4459606e-05f, 1.5399272e-05f, + 1.6400004e-05f, 1.7465768e-05f, 1.8600792e-05f, 1.9809576e-05f, + 2.1096914e-05f, 2.2467911e-05f, 2.3928002e-05f, 2.5482978e-05f, + 2.7139006e-05f, 2.8902651e-05f, 3.0780908e-05f, 3.2781225e-05f, + 3.4911534e-05f, 3.7180282e-05f, 3.9596466e-05f, 4.2169667e-05f, + 4.4910090e-05f, 4.7828601e-05f, 5.0936773e-05f, 5.4246931e-05f, + 5.7772202e-05f, 6.1526565e-05f, 6.5524908e-05f, 6.9783085e-05f, + 7.4317983e-05f, 7.9147585e-05f, 8.4291040e-05f, 8.9768747e-05f, + 9.5602426e-05f, 0.00010181521f, 0.00010843174f, 0.00011547824f, + 0.00012298267f, 0.00013097477f, 0.00013948625f, 0.00014855085f, + 0.00015820453f, 0.00016848555f, 0.00017943469f, 0.00019109536f, + 0.00020351382f, 0.00021673929f, 0.00023082423f, 0.00024582449f, + 0.00026179955f, 0.00027881276f, 0.00029693158f, 0.00031622787f, + 0.00033677814f, 0.00035866388f, 0.00038197188f, 0.00040679456f, + 0.00043323036f, 0.00046138411f, 0.00049136745f, 0.00052329927f, + 0.00055730621f, 0.00059352311f, 0.00063209358f, 0.00067317058f, + 0.00071691700f, 0.00076350630f, 0.00081312324f, 0.00086596457f, + 0.00092223983f, 0.00098217216f, 0.0010459992f, 0.0011139742f, + 0.0011863665f, 0.0012634633f, 0.0013455702f, 0.0014330129f, + 0.0015261382f, 0.0016253153f, 0.0017309374f, 0.0018434235f, + 0.0019632195f, 0.0020908006f, 0.0022266726f, 0.0023713743f, + 0.0025254795f, 0.0026895994f, 0.0028643847f, 0.0030505286f, + 0.0032487691f, 0.0034598925f, 0.0036847358f, 0.0039241906f, + 0.0041792066f, 0.0044507950f, 0.0047400328f, 0.0050480668f, + 0.0053761186f, 0.0057254891f, 0.0060975636f, 0.0064938176f, + 0.0069158225f, 0.0073652516f, 0.0078438871f, 0.0083536271f, + 0.0088964928f, 0.009474637f, 0.010090352f, 0.010746080f, + 0.011444421f, 0.012188144f, 0.012980198f, 0.013823725f, + 0.014722068f, 0.015678791f, 0.016697687f, 0.017782797f, + 0.018938423f, 0.020169149f, 0.021479854f, 0.022875735f, + 0.024362330f, 0.025945531f, 0.027631618f, 0.029427276f, + 0.031339626f, 0.033376252f, 0.035545228f, 0.037855157f, + 0.040315199f, 0.042935108f, 0.045725273f, 0.048696758f, + 0.051861348f, 0.055231591f, 0.058820850f, 0.062643361f, + 0.066714279f, 
0.071049749f, 0.075666962f, 0.080584227f, + 0.085821044f, 0.091398179f, 0.097337747f, 0.10366330f, + 0.11039993f, 0.11757434f, 0.12521498f, 0.13335215f, + 0.14201813f, 0.15124727f, 0.16107617f, 0.17154380f, + 0.18269168f, 0.19456402f, 0.20720788f, 0.22067342f, + 0.23501402f, 0.25028656f, 0.26655159f, 0.28387361f, + 0.30232132f, 0.32196786f, 0.34289114f, 0.36517414f, + 0.38890521f, 0.41417847f, 0.44109412f, 0.46975890f, + 0.50028648f, 0.53279791f, 0.56742212f, 0.60429640f, + 0.64356699f, 0.68538959f, 0.72993007f, 0.77736504f, + 0.82788260f, 0.88168307f, 0.9389798f, 1.0f +}; + + +// @OPTIMIZE: if you want to replace this bresenham line-drawing routine, +// note that you must produce bit-identical output to decode correctly; +// this specific sequence of operations is specified in the spec (it's +// drawing integer-quantized frequency-space lines that the encoder +// expects to be exactly the same) +// ... also, isn't the whole point of Bresenham's algorithm to NOT +// have to divide in the setup? sigh. +#ifndef STB_VORBIS_NO_DEFER_FLOOR +#define LINE_OP(a,b) a *= b +#else +#define LINE_OP(a,b) a = b +#endif + +#ifdef STB_VORBIS_DIVIDE_TABLE +#define DIVTAB_NUMER 32 +#define DIVTAB_DENOM 64 +int8 integer_divide_table[DIVTAB_NUMER][DIVTAB_DENOM]; // 2KB +#endif + +static __forceinline void draw_line(float *output, int x0, int y0, int x1, int y1, int n) +{ + int dy = y1 - y0; + int adx = x1 - x0; + int ady = abs(dy); + int base; + int x=x0,y=y0; + int err = 0; + int sy; + +#ifdef STB_VORBIS_DIVIDE_TABLE + if (adx < DIVTAB_DENOM && ady < DIVTAB_NUMER) { + if (dy < 0) { + base = -integer_divide_table[ady][adx]; + sy = base-1; + } else { + base = integer_divide_table[ady][adx]; + sy = base+1; + } + } else { + base = dy / adx; + if (dy < 0) + sy = base - 1; + else + sy = base+1; + } +#else + base = dy / adx; + if (dy < 0) + sy = base - 1; + else + sy = base+1; +#endif + ady -= abs(base) * adx; + if (x1 > n) x1 = n; + if (x < x1) { + LINE_OP(output[x], inverse_db_table[y&255]); + for (++x; x < x1; ++x) { + err += ady; + if (err >= adx) { + err -= adx; + y += sy; + } else + y += base; + LINE_OP(output[x], inverse_db_table[y&255]); + } + } +} + +static int residue_decode(vorb *f, Codebook *book, float *target, int offset, int n, int rtype) +{ + int k; + if (rtype == 0) { + int step = n / book->dimensions; + for (k=0; k < step; ++k) + if (!codebook_decode_step(f, book, target+offset+k, n-offset-k, step)) + return FALSE; + } else { + for (k=0; k < n; ) { + if (!codebook_decode(f, book, target+offset, n-k)) + return FALSE; + k += book->dimensions; + offset += book->dimensions; + } + } + return TRUE; +} + +// n is 1/2 of the blocksize -- +// specification: "Correct per-vector decode length is [n]/2" +static void decode_residue(vorb *f, float *residue_buffers[], int ch, int n, int rn, uint8 *do_not_decode) +{ + int i,j,pass; + Residue *r = f->residue_config + rn; + int rtype = f->residue_types[rn]; + int c = r->classbook; + int classwords = f->codebooks[c].dimensions; + unsigned int actual_size = rtype == 2 ? n*2 : n; + unsigned int limit_r_begin = (r->begin < actual_size ? r->begin : actual_size); + unsigned int limit_r_end = (r->end < actual_size ? 
r->end : actual_size); + int n_read = limit_r_end - limit_r_begin; + int part_read = n_read / r->part_size; + int temp_alloc_point = temp_alloc_save(f); + #ifndef STB_VORBIS_DIVIDES_IN_RESIDUE + uint8 ***part_classdata = (uint8 ***) temp_block_array(f,f->channels, part_read * sizeof(**part_classdata)); + #else + int **classifications = (int **) temp_block_array(f,f->channels, part_read * sizeof(**classifications)); + #endif + + CHECK(f); + + for (i=0; i < ch; ++i) + if (!do_not_decode[i]) + memset(residue_buffers[i], 0, sizeof(float) * n); + + if (rtype == 2 && ch != 1) { + for (j=0; j < ch; ++j) + if (!do_not_decode[j]) + break; + if (j == ch) + goto done; + + for (pass=0; pass < 8; ++pass) { + int pcount = 0, class_set = 0; + if (ch == 2) { + while (pcount < part_read) { + int z = r->begin + pcount*r->part_size; + int c_inter = (z & 1), p_inter = z>>1; + if (pass == 0) { + Codebook *c = f->codebooks+r->classbook; + int q; + DECODE(q,f,c); + if (q == EOP) goto done; + #ifndef STB_VORBIS_DIVIDES_IN_RESIDUE + part_classdata[0][class_set] = r->classdata[q]; + #else + for (i=classwords-1; i >= 0; --i) { + classifications[0][i+pcount] = q % r->classifications; + q /= r->classifications; + } + #endif + } + for (i=0; i < classwords && pcount < part_read; ++i, ++pcount) { + int z = r->begin + pcount*r->part_size; + #ifndef STB_VORBIS_DIVIDES_IN_RESIDUE + int c = part_classdata[0][class_set][i]; + #else + int c = classifications[0][pcount]; + #endif + int b = r->residue_books[c][pass]; + if (b >= 0) { + Codebook *book = f->codebooks + b; + #ifdef STB_VORBIS_DIVIDES_IN_CODEBOOK + if (!codebook_decode_deinterleave_repeat(f, book, residue_buffers, ch, &c_inter, &p_inter, n, r->part_size)) + goto done; + #else + // saves 1% + if (!codebook_decode_deinterleave_repeat(f, book, residue_buffers, ch, &c_inter, &p_inter, n, r->part_size)) + goto done; + #endif + } else { + z += r->part_size; + c_inter = z & 1; + p_inter = z >> 1; + } + } + #ifndef STB_VORBIS_DIVIDES_IN_RESIDUE + ++class_set; + #endif + } + } else if (ch > 2) { + while (pcount < part_read) { + int z = r->begin + pcount*r->part_size; + int c_inter = z % ch, p_inter = z/ch; + if (pass == 0) { + Codebook *c = f->codebooks+r->classbook; + int q; + DECODE(q,f,c); + if (q == EOP) goto done; + #ifndef STB_VORBIS_DIVIDES_IN_RESIDUE + part_classdata[0][class_set] = r->classdata[q]; + #else + for (i=classwords-1; i >= 0; --i) { + classifications[0][i+pcount] = q % r->classifications; + q /= r->classifications; + } + #endif + } + for (i=0; i < classwords && pcount < part_read; ++i, ++pcount) { + int z = r->begin + pcount*r->part_size; + #ifndef STB_VORBIS_DIVIDES_IN_RESIDUE + int c = part_classdata[0][class_set][i]; + #else + int c = classifications[0][pcount]; + #endif + int b = r->residue_books[c][pass]; + if (b >= 0) { + Codebook *book = f->codebooks + b; + if (!codebook_decode_deinterleave_repeat(f, book, residue_buffers, ch, &c_inter, &p_inter, n, r->part_size)) + goto done; + } else { + z += r->part_size; + c_inter = z % ch; + p_inter = z / ch; + } + } + #ifndef STB_VORBIS_DIVIDES_IN_RESIDUE + ++class_set; + #endif + } + } + } + goto done; + } + CHECK(f); + + for (pass=0; pass < 8; ++pass) { + int pcount = 0, class_set=0; + while (pcount < part_read) { + if (pass == 0) { + for (j=0; j < ch; ++j) { + if (!do_not_decode[j]) { + Codebook *c = f->codebooks+r->classbook; + int temp; + DECODE(temp,f,c); + if (temp == EOP) goto done; + #ifndef STB_VORBIS_DIVIDES_IN_RESIDUE + part_classdata[j][class_set] = r->classdata[temp]; + #else + for 
(i=classwords-1; i >= 0; --i) { + classifications[j][i+pcount] = temp % r->classifications; + temp /= r->classifications; + } + #endif + } + } + } + for (i=0; i < classwords && pcount < part_read; ++i, ++pcount) { + for (j=0; j < ch; ++j) { + if (!do_not_decode[j]) { + #ifndef STB_VORBIS_DIVIDES_IN_RESIDUE + int c = part_classdata[j][class_set][i]; + #else + int c = classifications[j][pcount]; + #endif + int b = r->residue_books[c][pass]; + if (b >= 0) { + float *target = residue_buffers[j]; + int offset = r->begin + pcount * r->part_size; + int n = r->part_size; + Codebook *book = f->codebooks + b; + if (!residue_decode(f, book, target, offset, n, rtype)) + goto done; + } + } + } + } + #ifndef STB_VORBIS_DIVIDES_IN_RESIDUE + ++class_set; + #endif + } + } + done: + CHECK(f); + #ifndef STB_VORBIS_DIVIDES_IN_RESIDUE + temp_free(f,part_classdata); + #else + temp_free(f,classifications); + #endif + temp_alloc_restore(f,temp_alloc_point); +} + + +#if 0 +// slow way for debugging +void inverse_mdct_slow(float *buffer, int n) +{ + int i,j; + int n2 = n >> 1; + float *x = (float *) malloc(sizeof(*x) * n2); + memcpy(x, buffer, sizeof(*x) * n2); + for (i=0; i < n; ++i) { + float acc = 0; + for (j=0; j < n2; ++j) + // formula from paper: + //acc += n/4.0f * x[j] * (float) cos(M_PI / 2 / n * (2 * i + 1 + n/2.0)*(2*j+1)); + // formula from wikipedia + //acc += 2.0f / n2 * x[j] * (float) cos(M_PI/n2 * (i + 0.5 + n2/2)*(j + 0.5)); + // these are equivalent, except the formula from the paper inverts the multiplier! + // however, what actually works is NO MULTIPLIER!?! + //acc += 64 * 2.0f / n2 * x[j] * (float) cos(M_PI/n2 * (i + 0.5 + n2/2)*(j + 0.5)); + acc += x[j] * (float) cos(M_PI / 2 / n * (2 * i + 1 + n/2.0)*(2*j+1)); + buffer[i] = acc; + } + free(x); +} +#elif 0 +// same as above, but just barely able to run in real time on modern machines +void inverse_mdct_slow(float *buffer, int n, vorb *f, int blocktype) +{ + float mcos[16384]; + int i,j; + int n2 = n >> 1, nmask = (n << 2) -1; + float *x = (float *) malloc(sizeof(*x) * n2); + memcpy(x, buffer, sizeof(*x) * n2); + for (i=0; i < 4*n; ++i) + mcos[i] = (float) cos(M_PI / 2 * i / n); + + for (i=0; i < n; ++i) { + float acc = 0; + for (j=0; j < n2; ++j) + acc += x[j] * mcos[(2 * i + 1 + n2)*(2*j+1) & nmask]; + buffer[i] = acc; + } + free(x); +} +#elif 0 +// transform to use a slow dct-iv; this is STILL basically trivial, +// but only requires half as many ops +void dct_iv_slow(float *buffer, int n) +{ + float mcos[16384]; + float x[2048]; + int i,j; + int n2 = n >> 1, nmask = (n << 3) - 1; + memcpy(x, buffer, sizeof(*x) * n); + for (i=0; i < 8*n; ++i) + mcos[i] = (float) cos(M_PI / 4 * i / n); + for (i=0; i < n; ++i) { + float acc = 0; + for (j=0; j < n; ++j) + acc += x[j] * mcos[((2 * i + 1)*(2*j+1)) & nmask]; + buffer[i] = acc; + } +} + +void inverse_mdct_slow(float *buffer, int n, vorb *f, int blocktype) +{ + int i, n4 = n >> 2, n2 = n >> 1, n3_4 = n - n4; + float temp[4096]; + + memcpy(temp, buffer, n2 * sizeof(float)); + dct_iv_slow(temp, n2); // returns -c'-d, a-b' + + for (i=0; i < n4 ; ++i) buffer[i] = temp[i+n4]; // a-b' + for ( ; i < n3_4; ++i) buffer[i] = -temp[n3_4 - i - 1]; // b-a', c+d' + for ( ; i < n ; ++i) buffer[i] = -temp[i - n3_4]; // c'+d +} +#endif + +#ifndef LIBVORBIS_MDCT +#define LIBVORBIS_MDCT 0 +#endif + +#if LIBVORBIS_MDCT +// directly call the vorbis MDCT using an interface documented +// by Jeff Roberts... 
useful for performance comparison +typedef struct +{ + int n; + int log2n; + + float *trig; + int *bitrev; + + float scale; +} mdct_lookup; + +extern void mdct_init(mdct_lookup *lookup, int n); +extern void mdct_clear(mdct_lookup *l); +extern void mdct_backward(mdct_lookup *init, float *in, float *out); + +mdct_lookup M1,M2; + +void inverse_mdct(float *buffer, int n, vorb *f, int blocktype) +{ + mdct_lookup *M; + if (M1.n == n) M = &M1; + else if (M2.n == n) M = &M2; + else if (M1.n == 0) { mdct_init(&M1, n); M = &M1; } + else { + if (M2.n) __asm int 3; + mdct_init(&M2, n); + M = &M2; + } + + mdct_backward(M, buffer, buffer); +} +#endif + + +// the following were split out into separate functions while optimizing; +// they could be pushed back up but eh. __forceinline showed no change; +// they're probably already being inlined. +static void imdct_step3_iter0_loop(int n, float *e, int i_off, int k_off, float *A) +{ + float *ee0 = e + i_off; + float *ee2 = ee0 + k_off; + int i; + + assert((n & 3) == 0); + for (i=(n>>2); i > 0; --i) { + float k00_20, k01_21; + k00_20 = ee0[ 0] - ee2[ 0]; + k01_21 = ee0[-1] - ee2[-1]; + ee0[ 0] += ee2[ 0];//ee0[ 0] = ee0[ 0] + ee2[ 0]; + ee0[-1] += ee2[-1];//ee0[-1] = ee0[-1] + ee2[-1]; + ee2[ 0] = k00_20 * A[0] - k01_21 * A[1]; + ee2[-1] = k01_21 * A[0] + k00_20 * A[1]; + A += 8; + + k00_20 = ee0[-2] - ee2[-2]; + k01_21 = ee0[-3] - ee2[-3]; + ee0[-2] += ee2[-2];//ee0[-2] = ee0[-2] + ee2[-2]; + ee0[-3] += ee2[-3];//ee0[-3] = ee0[-3] + ee2[-3]; + ee2[-2] = k00_20 * A[0] - k01_21 * A[1]; + ee2[-3] = k01_21 * A[0] + k00_20 * A[1]; + A += 8; + + k00_20 = ee0[-4] - ee2[-4]; + k01_21 = ee0[-5] - ee2[-5]; + ee0[-4] += ee2[-4];//ee0[-4] = ee0[-4] + ee2[-4]; + ee0[-5] += ee2[-5];//ee0[-5] = ee0[-5] + ee2[-5]; + ee2[-4] = k00_20 * A[0] - k01_21 * A[1]; + ee2[-5] = k01_21 * A[0] + k00_20 * A[1]; + A += 8; + + k00_20 = ee0[-6] - ee2[-6]; + k01_21 = ee0[-7] - ee2[-7]; + ee0[-6] += ee2[-6];//ee0[-6] = ee0[-6] + ee2[-6]; + ee0[-7] += ee2[-7];//ee0[-7] = ee0[-7] + ee2[-7]; + ee2[-6] = k00_20 * A[0] - k01_21 * A[1]; + ee2[-7] = k01_21 * A[0] + k00_20 * A[1]; + A += 8; + ee0 -= 8; + ee2 -= 8; + } +} + +static void imdct_step3_inner_r_loop(int lim, float *e, int d0, int k_off, float *A, int k1) +{ + int i; + float k00_20, k01_21; + + float *e0 = e + d0; + float *e2 = e0 + k_off; + + for (i=lim >> 2; i > 0; --i) { + k00_20 = e0[-0] - e2[-0]; + k01_21 = e0[-1] - e2[-1]; + e0[-0] += e2[-0];//e0[-0] = e0[-0] + e2[-0]; + e0[-1] += e2[-1];//e0[-1] = e0[-1] + e2[-1]; + e2[-0] = (k00_20)*A[0] - (k01_21) * A[1]; + e2[-1] = (k01_21)*A[0] + (k00_20) * A[1]; + + A += k1; + + k00_20 = e0[-2] - e2[-2]; + k01_21 = e0[-3] - e2[-3]; + e0[-2] += e2[-2];//e0[-2] = e0[-2] + e2[-2]; + e0[-3] += e2[-3];//e0[-3] = e0[-3] + e2[-3]; + e2[-2] = (k00_20)*A[0] - (k01_21) * A[1]; + e2[-3] = (k01_21)*A[0] + (k00_20) * A[1]; + + A += k1; + + k00_20 = e0[-4] - e2[-4]; + k01_21 = e0[-5] - e2[-5]; + e0[-4] += e2[-4];//e0[-4] = e0[-4] + e2[-4]; + e0[-5] += e2[-5];//e0[-5] = e0[-5] + e2[-5]; + e2[-4] = (k00_20)*A[0] - (k01_21) * A[1]; + e2[-5] = (k01_21)*A[0] + (k00_20) * A[1]; + + A += k1; + + k00_20 = e0[-6] - e2[-6]; + k01_21 = e0[-7] - e2[-7]; + e0[-6] += e2[-6];//e0[-6] = e0[-6] + e2[-6]; + e0[-7] += e2[-7];//e0[-7] = e0[-7] + e2[-7]; + e2[-6] = (k00_20)*A[0] - (k01_21) * A[1]; + e2[-7] = (k01_21)*A[0] + (k00_20) * A[1]; + + e0 -= 8; + e2 -= 8; + + A += k1; + } +} + +static void imdct_step3_inner_s_loop(int n, float *e, int i_off, int k_off, float *A, int a_off, int k0) +{ + int i; + float A0 = A[0]; + float 
A1 = A[0+1]; + float A2 = A[0+a_off]; + float A3 = A[0+a_off+1]; + float A4 = A[0+a_off*2+0]; + float A5 = A[0+a_off*2+1]; + float A6 = A[0+a_off*3+0]; + float A7 = A[0+a_off*3+1]; + + float k00,k11; + + float *ee0 = e +i_off; + float *ee2 = ee0+k_off; + + for (i=n; i > 0; --i) { + k00 = ee0[ 0] - ee2[ 0]; + k11 = ee0[-1] - ee2[-1]; + ee0[ 0] = ee0[ 0] + ee2[ 0]; + ee0[-1] = ee0[-1] + ee2[-1]; + ee2[ 0] = (k00) * A0 - (k11) * A1; + ee2[-1] = (k11) * A0 + (k00) * A1; + + k00 = ee0[-2] - ee2[-2]; + k11 = ee0[-3] - ee2[-3]; + ee0[-2] = ee0[-2] + ee2[-2]; + ee0[-3] = ee0[-3] + ee2[-3]; + ee2[-2] = (k00) * A2 - (k11) * A3; + ee2[-3] = (k11) * A2 + (k00) * A3; + + k00 = ee0[-4] - ee2[-4]; + k11 = ee0[-5] - ee2[-5]; + ee0[-4] = ee0[-4] + ee2[-4]; + ee0[-5] = ee0[-5] + ee2[-5]; + ee2[-4] = (k00) * A4 - (k11) * A5; + ee2[-5] = (k11) * A4 + (k00) * A5; + + k00 = ee0[-6] - ee2[-6]; + k11 = ee0[-7] - ee2[-7]; + ee0[-6] = ee0[-6] + ee2[-6]; + ee0[-7] = ee0[-7] + ee2[-7]; + ee2[-6] = (k00) * A6 - (k11) * A7; + ee2[-7] = (k11) * A6 + (k00) * A7; + + ee0 -= k0; + ee2 -= k0; + } +} + +static __forceinline void iter_54(float *z) +{ + float k00,k11,k22,k33; + float y0,y1,y2,y3; + + k00 = z[ 0] - z[-4]; + y0 = z[ 0] + z[-4]; + y2 = z[-2] + z[-6]; + k22 = z[-2] - z[-6]; + + z[-0] = y0 + y2; // z0 + z4 + z2 + z6 + z[-2] = y0 - y2; // z0 + z4 - z2 - z6 + + // done with y0,y2 + + k33 = z[-3] - z[-7]; + + z[-4] = k00 + k33; // z0 - z4 + z3 - z7 + z[-6] = k00 - k33; // z0 - z4 - z3 + z7 + + // done with k33 + + k11 = z[-1] - z[-5]; + y1 = z[-1] + z[-5]; + y3 = z[-3] + z[-7]; + + z[-1] = y1 + y3; // z1 + z5 + z3 + z7 + z[-3] = y1 - y3; // z1 + z5 - z3 - z7 + z[-5] = k11 - k22; // z1 - z5 + z2 - z6 + z[-7] = k11 + k22; // z1 - z5 - z2 + z6 +} + +static void imdct_step3_inner_s_loop_ld654(int n, float *e, int i_off, float *A, int base_n) +{ + int a_off = base_n >> 3; + float A2 = A[0+a_off]; + float *z = e + i_off; + float *base = z - 16 * n; + + while (z > base) { + float k00,k11; + + k00 = z[-0] - z[-8]; + k11 = z[-1] - z[-9]; + z[-0] = z[-0] + z[-8]; + z[-1] = z[-1] + z[-9]; + z[-8] = k00; + z[-9] = k11 ; + + k00 = z[ -2] - z[-10]; + k11 = z[ -3] - z[-11]; + z[ -2] = z[ -2] + z[-10]; + z[ -3] = z[ -3] + z[-11]; + z[-10] = (k00+k11) * A2; + z[-11] = (k11-k00) * A2; + + k00 = z[-12] - z[ -4]; // reverse to avoid a unary negation + k11 = z[ -5] - z[-13]; + z[ -4] = z[ -4] + z[-12]; + z[ -5] = z[ -5] + z[-13]; + z[-12] = k11; + z[-13] = k00; + + k00 = z[-14] - z[ -6]; // reverse to avoid a unary negation + k11 = z[ -7] - z[-15]; + z[ -6] = z[ -6] + z[-14]; + z[ -7] = z[ -7] + z[-15]; + z[-14] = (k00+k11) * A2; + z[-15] = (k00-k11) * A2; + + iter_54(z); + iter_54(z-8); + z -= 16; + } +} + +static void inverse_mdct(float *buffer, int n, vorb *f, int blocktype) +{ + int n2 = n >> 1, n4 = n >> 2, n8 = n >> 3, l; + int ld; + // @OPTIMIZE: reduce register pressure by using fewer variables? + int save_point = temp_alloc_save(f); + float *buf2 = (float *) temp_alloc(f, n2 * sizeof(*buf2)); + float *u=NULL,*v=NULL; + // twiddle factors + float *A = f->A[blocktype]; + + // IMDCT algorithm from "The use of multirate filter banks for coding of high quality digital audio" + // See notes about bugs in that paper in less-optimal implementation 'inverse_mdct_old' after this function. + + // kernel from paper + + + // merged: + // copy and reflect spectral data + // step 0 + + // note that it turns out that the items added together during + // this step are, in fact, being added to themselves (as reflected + // by step 0). 
inexplicable inefficiency! this became obvious + // once I combined the passes. + + // so there's a missing 'times 2' here (for adding X to itself). + // this propagates through linearly to the end, where the numbers + // are 1/2 too small, and need to be compensated for. + + { + float *d,*e, *AA, *e_stop; + d = &buf2[n2-2]; + AA = A; + e = &buffer[0]; + e_stop = &buffer[n2]; + while (e != e_stop) { + d[1] = (e[0] * AA[0] - e[2]*AA[1]); + d[0] = (e[0] * AA[1] + e[2]*AA[0]); + d -= 2; + AA += 2; + e += 4; + } + + e = &buffer[n2-3]; + while (d >= buf2) { + d[1] = (-e[2] * AA[0] - -e[0]*AA[1]); + d[0] = (-e[2] * AA[1] + -e[0]*AA[0]); + d -= 2; + AA += 2; + e -= 4; + } + } + + // now we use symbolic names for these, so that we can + // possibly swap their meaning as we change which operations + // are in place + + u = buffer; + v = buf2; + + // step 2 (paper output is w, now u) + // this could be in place, but the data ends up in the wrong + // place... _somebody_'s got to swap it, so this is nominated + { + float *AA = &A[n2-8]; + float *d0,*d1, *e0, *e1; + + e0 = &v[n4]; + e1 = &v[0]; + + d0 = &u[n4]; + d1 = &u[0]; + + while (AA >= A) { + float v40_20, v41_21; + + v41_21 = e0[1] - e1[1]; + v40_20 = e0[0] - e1[0]; + d0[1] = e0[1] + e1[1]; + d0[0] = e0[0] + e1[0]; + d1[1] = v41_21*AA[4] - v40_20*AA[5]; + d1[0] = v40_20*AA[4] + v41_21*AA[5]; + + v41_21 = e0[3] - e1[3]; + v40_20 = e0[2] - e1[2]; + d0[3] = e0[3] + e1[3]; + d0[2] = e0[2] + e1[2]; + d1[3] = v41_21*AA[0] - v40_20*AA[1]; + d1[2] = v40_20*AA[0] + v41_21*AA[1]; + + AA -= 8; + + d0 += 4; + d1 += 4; + e0 += 4; + e1 += 4; + } + } + + // step 3 + ld = ilog(n) - 1; // ilog is off-by-one from normal definitions + + // optimized step 3: + + // the original step3 loop can be nested r inside s or s inside r; + // it's written originally as s inside r, but this is dumb when r + // iterates many times, and s few. So I have two copies of it and + // switch between them halfway. 
+ + // this is iteration 0 of step 3 + imdct_step3_iter0_loop(n >> 4, u, n2-1-n4*0, -(n >> 3), A); + imdct_step3_iter0_loop(n >> 4, u, n2-1-n4*1, -(n >> 3), A); + + // this is iteration 1 of step 3 + imdct_step3_inner_r_loop(n >> 5, u, n2-1 - n8*0, -(n >> 4), A, 16); + imdct_step3_inner_r_loop(n >> 5, u, n2-1 - n8*1, -(n >> 4), A, 16); + imdct_step3_inner_r_loop(n >> 5, u, n2-1 - n8*2, -(n >> 4), A, 16); + imdct_step3_inner_r_loop(n >> 5, u, n2-1 - n8*3, -(n >> 4), A, 16); + + l=2; + for (; l < (ld-3)>>1; ++l) { + int k0 = n >> (l+2), k0_2 = k0>>1; + int lim = 1 << (l+1); + int i; + for (i=0; i < lim; ++i) + imdct_step3_inner_r_loop(n >> (l+4), u, n2-1 - k0*i, -k0_2, A, 1 << (l+3)); + } + + for (; l < ld-6; ++l) { + int k0 = n >> (l+2), k1 = 1 << (l+3), k0_2 = k0>>1; + int rlim = n >> (l+6), r; + int lim = 1 << (l+1); + int i_off; + float *A0 = A; + i_off = n2-1; + for (r=rlim; r > 0; --r) { + imdct_step3_inner_s_loop(lim, u, i_off, -k0_2, A0, k1, k0); + A0 += k1*4; + i_off -= 8; + } + } + + // iterations with count: + // ld-6,-5,-4 all interleaved together + // the big win comes from getting rid of needless flops + // due to the constants on pass 5 & 4 being all 1 and 0; + // combining them to be simultaneous to improve cache made little difference + imdct_step3_inner_s_loop_ld654(n >> 5, u, n2-1, A, n); + + // output is u + + // step 4, 5, and 6 + // cannot be in-place because of step 5 + { + uint16 *bitrev = f->bit_reverse[blocktype]; + // weirdly, I'd have thought reading sequentially and writing + // erratically would have been better than vice-versa, but in + // fact that's not what my testing showed. (That is, with + // j = bitreverse(i), do you read i and write j, or read j and write i.) + + float *d0 = &v[n4-4]; + float *d1 = &v[n2-4]; + while (d0 >= v) { + int k4; + + k4 = bitrev[0]; + d1[3] = u[k4+0]; + d1[2] = u[k4+1]; + d0[3] = u[k4+2]; + d0[2] = u[k4+3]; + + k4 = bitrev[1]; + d1[1] = u[k4+0]; + d1[0] = u[k4+1]; + d0[1] = u[k4+2]; + d0[0] = u[k4+3]; + + d0 -= 4; + d1 -= 4; + bitrev += 2; + } + } + // (paper output is u, now v) + + + // data must be in buf2 + assert(v == buf2); + + // step 7 (paper output is v, now v) + // this is now in place + { + float *C = f->C[blocktype]; + float *d, *e; + + d = v; + e = v + n2 - 4; + + while (d < e) { + float a02,a11,b0,b1,b2,b3; + + a02 = d[0] - e[2]; + a11 = d[1] + e[3]; + + b0 = C[1]*a02 + C[0]*a11; + b1 = C[1]*a11 - C[0]*a02; + + b2 = d[0] + e[ 2]; + b3 = d[1] - e[ 3]; + + d[0] = b2 + b0; + d[1] = b3 + b1; + e[2] = b2 - b0; + e[3] = b1 - b3; + + a02 = d[2] - e[0]; + a11 = d[3] + e[1]; + + b0 = C[3]*a02 + C[2]*a11; + b1 = C[3]*a11 - C[2]*a02; + + b2 = d[2] + e[ 0]; + b3 = d[3] - e[ 1]; + + d[2] = b2 + b0; + d[3] = b3 + b1; + e[0] = b2 - b0; + e[1] = b1 - b3; + + C += 4; + d += 4; + e -= 4; + } + } + + // data must be in buf2 + + + // step 8+decode (paper output is X, now buffer) + // this generates pairs of data a la 8 and pushes them directly through + // the decode kernel (pushing rather than pulling) to avoid having + // to make another pass later + + // this cannot POSSIBLY be in place, so we refer to the buffers directly + + { + float *d0,*d1,*d2,*d3; + + float *B = f->B[blocktype] + n2 - 8; + float *e = buf2 + n2 - 8; + d0 = &buffer[0]; + d1 = &buffer[n2-4]; + d2 = &buffer[n2]; + d3 = &buffer[n-4]; + while (e >= v) { + float p0,p1,p2,p3; + + p3 = e[6]*B[7] - e[7]*B[6]; + p2 = -e[6]*B[6] - e[7]*B[7]; + + d0[0] = p3; + d1[3] = - p3; + d2[0] = p2; + d3[3] = p2; + + p1 = e[4]*B[5] - e[5]*B[4]; + p0 = -e[4]*B[4] - e[5]*B[5]; + + d0[1] 
= p1; + d1[2] = - p1; + d2[1] = p0; + d3[2] = p0; + + p3 = e[2]*B[3] - e[3]*B[2]; + p2 = -e[2]*B[2] - e[3]*B[3]; + + d0[2] = p3; + d1[1] = - p3; + d2[2] = p2; + d3[1] = p2; + + p1 = e[0]*B[1] - e[1]*B[0]; + p0 = -e[0]*B[0] - e[1]*B[1]; + + d0[3] = p1; + d1[0] = - p1; + d2[3] = p0; + d3[0] = p0; + + B -= 8; + e -= 8; + d0 += 4; + d2 += 4; + d1 -= 4; + d3 -= 4; + } + } + + temp_free(f,buf2); + temp_alloc_restore(f,save_point); +} + +#if 0 +// this is the original version of the above code, if you want to optimize it from scratch +void inverse_mdct_naive(float *buffer, int n) +{ + float s; + float A[1 << 12], B[1 << 12], C[1 << 11]; + int i,k,k2,k4, n2 = n >> 1, n4 = n >> 2, n8 = n >> 3, l; + int n3_4 = n - n4, ld; + // how can they claim this only uses N words?! + // oh, because they're only used sparsely, whoops + float u[1 << 13], X[1 << 13], v[1 << 13], w[1 << 13]; + // set up twiddle factors + + for (k=k2=0; k < n4; ++k,k2+=2) { + A[k2 ] = (float) cos(4*k*M_PI/n); + A[k2+1] = (float) -sin(4*k*M_PI/n); + B[k2 ] = (float) cos((k2+1)*M_PI/n/2); + B[k2+1] = (float) sin((k2+1)*M_PI/n/2); + } + for (k=k2=0; k < n8; ++k,k2+=2) { + C[k2 ] = (float) cos(2*(k2+1)*M_PI/n); + C[k2+1] = (float) -sin(2*(k2+1)*M_PI/n); + } + + // IMDCT algorithm from "The use of multirate filter banks for coding of high quality digital audio" + // Note there are bugs in that pseudocode, presumably due to them attempting + // to rename the arrays nicely rather than representing the way their actual + // implementation bounces buffers back and forth. As a result, even in the + // "some formulars corrected" version, a direct implementation fails. These + // are noted below as "paper bug". + + // copy and reflect spectral data + for (k=0; k < n2; ++k) u[k] = buffer[k]; + for ( ; k < n ; ++k) u[k] = -buffer[n - k - 1]; + // kernel from paper + // step 1 + for (k=k2=k4=0; k < n4; k+=1, k2+=2, k4+=4) { + v[n-k4-1] = (u[k4] - u[n-k4-1]) * A[k2] - (u[k4+2] - u[n-k4-3])*A[k2+1]; + v[n-k4-3] = (u[k4] - u[n-k4-1]) * A[k2+1] + (u[k4+2] - u[n-k4-3])*A[k2]; + } + // step 2 + for (k=k4=0; k < n8; k+=1, k4+=4) { + w[n2+3+k4] = v[n2+3+k4] + v[k4+3]; + w[n2+1+k4] = v[n2+1+k4] + v[k4+1]; + w[k4+3] = (v[n2+3+k4] - v[k4+3])*A[n2-4-k4] - (v[n2+1+k4]-v[k4+1])*A[n2-3-k4]; + w[k4+1] = (v[n2+1+k4] - v[k4+1])*A[n2-4-k4] + (v[n2+3+k4]-v[k4+3])*A[n2-3-k4]; + } + // step 3 + ld = ilog(n) - 1; // ilog is off-by-one from normal definitions + for (l=0; l < ld-3; ++l) { + int k0 = n >> (l+2), k1 = 1 << (l+3); + int rlim = n >> (l+4), r4, r; + int s2lim = 1 << (l+2), s2; + for (r=r4=0; r < rlim; r4+=4,++r) { + for (s2=0; s2 < s2lim; s2+=2) { + u[n-1-k0*s2-r4] = w[n-1-k0*s2-r4] + w[n-1-k0*(s2+1)-r4]; + u[n-3-k0*s2-r4] = w[n-3-k0*s2-r4] + w[n-3-k0*(s2+1)-r4]; + u[n-1-k0*(s2+1)-r4] = (w[n-1-k0*s2-r4] - w[n-1-k0*(s2+1)-r4]) * A[r*k1] + - (w[n-3-k0*s2-r4] - w[n-3-k0*(s2+1)-r4]) * A[r*k1+1]; + u[n-3-k0*(s2+1)-r4] = (w[n-3-k0*s2-r4] - w[n-3-k0*(s2+1)-r4]) * A[r*k1] + + (w[n-1-k0*s2-r4] - w[n-1-k0*(s2+1)-r4]) * A[r*k1+1]; + } + } + if (l+1 < ld-3) { + // paper bug: ping-ponging of u&w here is omitted + memcpy(w, u, sizeof(u)); + } + } + + // step 4 + for (i=0; i < n8; ++i) { + int j = bit_reverse(i) >> (32-ld+3); + assert(j < n8); + if (i == j) { + // paper bug: original code probably swapped in place; if copying, + // need to directly copy in this case + int i8 = i << 3; + v[i8+1] = u[i8+1]; + v[i8+3] = u[i8+3]; + v[i8+5] = u[i8+5]; + v[i8+7] = u[i8+7]; + } else if (i < j) { + int i8 = i << 3, j8 = j << 3; + v[j8+1] = u[i8+1], v[i8+1] = u[j8 + 1]; + v[j8+3] = 
u[i8+3], v[i8+3] = u[j8 + 3]; + v[j8+5] = u[i8+5], v[i8+5] = u[j8 + 5]; + v[j8+7] = u[i8+7], v[i8+7] = u[j8 + 7]; + } + } + // step 5 + for (k=0; k < n2; ++k) { + w[k] = v[k*2+1]; + } + // step 6 + for (k=k2=k4=0; k < n8; ++k, k2 += 2, k4 += 4) { + u[n-1-k2] = w[k4]; + u[n-2-k2] = w[k4+1]; + u[n3_4 - 1 - k2] = w[k4+2]; + u[n3_4 - 2 - k2] = w[k4+3]; + } + // step 7 + for (k=k2=0; k < n8; ++k, k2 += 2) { + v[n2 + k2 ] = ( u[n2 + k2] + u[n-2-k2] + C[k2+1]*(u[n2+k2]-u[n-2-k2]) + C[k2]*(u[n2+k2+1]+u[n-2-k2+1]))/2; + v[n-2 - k2] = ( u[n2 + k2] + u[n-2-k2] - C[k2+1]*(u[n2+k2]-u[n-2-k2]) - C[k2]*(u[n2+k2+1]+u[n-2-k2+1]))/2; + v[n2+1+ k2] = ( u[n2+1+k2] - u[n-1-k2] + C[k2+1]*(u[n2+1+k2]+u[n-1-k2]) - C[k2]*(u[n2+k2]-u[n-2-k2]))/2; + v[n-1 - k2] = (-u[n2+1+k2] + u[n-1-k2] + C[k2+1]*(u[n2+1+k2]+u[n-1-k2]) - C[k2]*(u[n2+k2]-u[n-2-k2]))/2; + } + // step 8 + for (k=k2=0; k < n4; ++k,k2 += 2) { + X[k] = v[k2+n2]*B[k2 ] + v[k2+1+n2]*B[k2+1]; + X[n2-1-k] = v[k2+n2]*B[k2+1] - v[k2+1+n2]*B[k2 ]; + } + + // decode kernel to output + // determined the following value experimentally + // (by first figuring out what made inverse_mdct_slow work); then matching that here + // (probably vorbis encoder premultiplies by n or n/2, to save it on the decoder?) + s = 0.5; // theoretically would be n4 + + // [[[ note! the s value of 0.5 is compensated for by the B[] in the current code, + // so it needs to use the "old" B values to behave correctly, or else + // set s to 1.0 ]]] + for (i=0; i < n4 ; ++i) buffer[i] = s * X[i+n4]; + for ( ; i < n3_4; ++i) buffer[i] = -s * X[n3_4 - i - 1]; + for ( ; i < n ; ++i) buffer[i] = -s * X[i - n3_4]; +} +#endif + +static float *get_window(vorb *f, int len) +{ + len <<= 1; + if (len == f->blocksize_0) return f->window[0]; + if (len == f->blocksize_1) return f->window[1]; + return NULL; +} + +#ifndef STB_VORBIS_NO_DEFER_FLOOR +typedef int16 YTYPE; +#else +typedef int YTYPE; +#endif +static int do_floor(vorb *f, Mapping *map, int i, int n, float *target, YTYPE *finalY, uint8 *step2_flag) +{ + int n2 = n >> 1; + int s = map->chan[i].mux, floor; + floor = map->submap_floor[s]; + if (f->floor_types[floor] == 0) { + return error(f, VORBIS_invalid_stream); + } else { + Floor1 *g = &f->floor_config[floor].floor1; + int j,q; + int lx = 0, ly = finalY[0] * g->floor1_multiplier; + for (q=1; q < g->values; ++q) { + j = g->sorted_order[q]; + #ifndef STB_VORBIS_NO_DEFER_FLOOR + if (finalY[j] >= 0) + #else + if (step2_flag[j]) + #endif + { + int hy = finalY[j] * g->floor1_multiplier; + int hx = g->Xlist[j]; + if (lx != hx) + draw_line(target, lx,ly, hx,hy, n2); + CHECK(f); + lx = hx, ly = hy; + } + } + if (lx < n2) { + // optimization of: draw_line(target, lx,ly, n,ly, n2); + for (j=lx; j < n2; ++j) + LINE_OP(target[j], inverse_db_table[ly]); + CHECK(f); + } + } + return TRUE; +} + +// The meaning of "left" and "right" +// +// For a given frame: +// we compute samples from 0..n +// window_center is n/2 +// we'll window and mix the samples from left_start to left_end with data from the previous frame +// all of the samples from left_end to right_start can be output without mixing; however, +// this interval is 0-length except when transitioning between short and long frames +// all of the samples from right_start to right_end need to be mixed with the next frame, +// which we don't have, so those get saved in a buffer +// frame N's right_end-right_start, the number of samples to mix with the next frame, +// has to be the same as frame N+1's left_end-left_start (which they are by +// construction) + 
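+// a worked example of the regions computed by vorbis_decode_initial() below,
+// using hypothetical blocksizes of 256 (short) and 2048 (long):
+//
+//    long frame, short neighbours on both sides (blockflag=1, prev=next=0):
+//       left_start=(2048-256)/4=448      left_end=(2048+256)/4=576
+//       right_start=(3*2048-256)/4=1472  right_end=(3*2048+256)/4=1600
+//    long frame, long neighbours (prev=next=1):
+//       left_start=0    left_end=1024    right_start=1024   right_end=2048
+//    short frame (blockflag=0):
+//       left_start=0    left_end=128     right_start=128    right_end=256
+//
+// note that right_end-right_start of the first case (128) equals the
+// left_end-left_start of a following short frame (128), which is the
+// "by construction" property described above.
+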
+static int vorbis_decode_initial(vorb *f, int *p_left_start, int *p_left_end, int *p_right_start, int *p_right_end, int *mode) +{ + Mode *m; + int i, n, prev, next, window_center; + f->channel_buffer_start = f->channel_buffer_end = 0; + + retry: + if (f->eof) return FALSE; + if (!maybe_start_packet(f)) + return FALSE; + // check packet type + if (get_bits(f,1) != 0) { + if (IS_PUSH_MODE(f)) + return error(f,VORBIS_bad_packet_type); + while (EOP != get8_packet(f)); + goto retry; + } + + if (f->alloc.alloc_buffer) + assert(f->alloc.alloc_buffer_length_in_bytes == f->temp_offset); + + i = get_bits(f, ilog(f->mode_count-1)); + if (i == EOP) return FALSE; + if (i >= f->mode_count) return FALSE; + *mode = i; + m = f->mode_config + i; + if (m->blockflag) { + n = f->blocksize_1; + prev = get_bits(f,1); + next = get_bits(f,1); + } else { + prev = next = 0; + n = f->blocksize_0; + } + +// WINDOWING + + window_center = n >> 1; + if (m->blockflag && !prev) { + *p_left_start = (n - f->blocksize_0) >> 2; + *p_left_end = (n + f->blocksize_0) >> 2; + } else { + *p_left_start = 0; + *p_left_end = window_center; + } + if (m->blockflag && !next) { + *p_right_start = (n*3 - f->blocksize_0) >> 2; + *p_right_end = (n*3 + f->blocksize_0) >> 2; + } else { + *p_right_start = window_center; + *p_right_end = n; + } + + return TRUE; +} + +static int vorbis_decode_packet_rest(vorb *f, int *len, Mode *m, int left_start, int left_end, int right_start, int right_end, int *p_left) +{ + Mapping *map; + int i,j,k,n,n2; + int zero_channel[256]; + int really_zero_channel[256]; + +// WINDOWING + + n = f->blocksize[m->blockflag]; + map = &f->mapping[m->mapping]; + +// FLOORS + n2 = n >> 1; + + CHECK(f); + + for (i=0; i < f->channels; ++i) { + int s = map->chan[i].mux, floor; + zero_channel[i] = FALSE; + floor = map->submap_floor[s]; + if (f->floor_types[floor] == 0) { + return error(f, VORBIS_invalid_stream); + } else { + Floor1 *g = &f->floor_config[floor].floor1; + if (get_bits(f, 1)) { + short *finalY; + uint8 step2_flag[256]; + static int range_list[4] = { 256, 128, 86, 64 }; + int range = range_list[g->floor1_multiplier-1]; + int offset = 2; + finalY = f->finalY[i]; + finalY[0] = get_bits(f, ilog(range)-1); + finalY[1] = get_bits(f, ilog(range)-1); + for (j=0; j < g->partitions; ++j) { + int pclass = g->partition_class_list[j]; + int cdim = g->class_dimensions[pclass]; + int cbits = g->class_subclasses[pclass]; + int csub = (1 << cbits)-1; + int cval = 0; + if (cbits) { + Codebook *c = f->codebooks + g->class_masterbooks[pclass]; + DECODE(cval,f,c); + } + for (k=0; k < cdim; ++k) { + int book = g->subclass_books[pclass][cval & csub]; + cval = cval >> cbits; + if (book >= 0) { + int temp; + Codebook *c = f->codebooks + book; + DECODE(temp,f,c); + finalY[offset++] = temp; + } else + finalY[offset++] = 0; + } + } + if (f->valid_bits == INVALID_BITS) goto error; // behavior according to spec + step2_flag[0] = step2_flag[1] = 1; + for (j=2; j < g->values; ++j) { + int low, high, pred, highroom, lowroom, room, val; + low = g->neighbors[j][0]; + high = g->neighbors[j][1]; + //neighbors(g->Xlist, j, &low, &high); + pred = predict_point(g->Xlist[j], g->Xlist[low], g->Xlist[high], finalY[low], finalY[high]); + val = finalY[j]; + highroom = range - pred; + lowroom = pred; + if (highroom < lowroom) + room = highroom * 2; + else + room = lowroom * 2; + if (val) { + step2_flag[low] = step2_flag[high] = 1; + step2_flag[j] = 1; + if (val >= room) + if (highroom > lowroom) + finalY[j] = val - lowroom + pred; + else + finalY[j] = pred - 
val + highroom - 1; + else + if (val & 1) + finalY[j] = pred - ((val+1)>>1); + else + finalY[j] = pred + (val>>1); + } else { + step2_flag[j] = 0; + finalY[j] = pred; + } + } + +#ifdef STB_VORBIS_NO_DEFER_FLOOR + do_floor(f, map, i, n, f->floor_buffers[i], finalY, step2_flag); +#else + // defer final floor computation until _after_ residue + for (j=0; j < g->values; ++j) { + if (!step2_flag[j]) + finalY[j] = -1; + } +#endif + } else { + error: + zero_channel[i] = TRUE; + } + // So we just defer everything else to later + + // at this point we've decoded the floor into buffer + } + } + CHECK(f); + // at this point we've decoded all floors + + if (f->alloc.alloc_buffer) + assert(f->alloc.alloc_buffer_length_in_bytes == f->temp_offset); + + // re-enable coupled channels if necessary + memcpy(really_zero_channel, zero_channel, sizeof(really_zero_channel[0]) * f->channels); + for (i=0; i < map->coupling_steps; ++i) + if (!zero_channel[map->chan[i].magnitude] || !zero_channel[map->chan[i].angle]) { + zero_channel[map->chan[i].magnitude] = zero_channel[map->chan[i].angle] = FALSE; + } + + CHECK(f); +// RESIDUE DECODE + for (i=0; i < map->submaps; ++i) { + float *residue_buffers[STB_VORBIS_MAX_CHANNELS]; + int r; + uint8 do_not_decode[256]; + int ch = 0; + for (j=0; j < f->channels; ++j) { + if (map->chan[j].mux == i) { + if (zero_channel[j]) { + do_not_decode[ch] = TRUE; + residue_buffers[ch] = NULL; + } else { + do_not_decode[ch] = FALSE; + residue_buffers[ch] = f->channel_buffers[j]; + } + ++ch; + } + } + r = map->submap_residue[i]; + decode_residue(f, residue_buffers, ch, n2, r, do_not_decode); + } + + if (f->alloc.alloc_buffer) + assert(f->alloc.alloc_buffer_length_in_bytes == f->temp_offset); + CHECK(f); + +// INVERSE COUPLING + for (i = map->coupling_steps-1; i >= 0; --i) { + int n2 = n >> 1; + float *m = f->channel_buffers[map->chan[i].magnitude]; + float *a = f->channel_buffers[map->chan[i].angle ]; + for (j=0; j < n2; ++j) { + float a2,m2; + if (m[j] > 0) + if (a[j] > 0) + m2 = m[j], a2 = m[j] - a[j]; + else + a2 = m[j], m2 = m[j] + a[j]; + else + if (a[j] > 0) + m2 = m[j], a2 = m[j] + a[j]; + else + a2 = m[j], m2 = m[j] - a[j]; + m[j] = m2; + a[j] = a2; + } + } + CHECK(f); + + // finish decoding the floors +#ifndef STB_VORBIS_NO_DEFER_FLOOR + for (i=0; i < f->channels; ++i) { + if (really_zero_channel[i]) { + memset(f->channel_buffers[i], 0, sizeof(*f->channel_buffers[i]) * n2); + } else { + do_floor(f, map, i, n, f->channel_buffers[i], f->finalY[i], NULL); + } + } +#else + for (i=0; i < f->channels; ++i) { + if (really_zero_channel[i]) { + memset(f->channel_buffers[i], 0, sizeof(*f->channel_buffers[i]) * n2); + } else { + for (j=0; j < n2; ++j) + f->channel_buffers[i][j] *= f->floor_buffers[i][j]; + } + } +#endif + +// INVERSE MDCT + CHECK(f); + for (i=0; i < f->channels; ++i) + inverse_mdct(f->channel_buffers[i], n, f, m->blockflag); + CHECK(f); + + // this shouldn't be necessary, unless we exited on an error + // and want to flush to get to the next packet + flush_packet(f); + + if (f->first_decode) { + // assume we start so first non-discarded sample is sample 0 + // this isn't to spec, but spec would require us to read ahead + // and decode the size of all current frames--could be done, + // but presumably it's not a commonly used feature + f->current_loc = -n2; // start of first frame is positioned for discard + // we might have to discard samples "from" the next frame too, + // if we're lapping a large block then a small at the start? 
+ f->discard_samples_deferred = n - right_end; + f->current_loc_valid = TRUE; + f->first_decode = FALSE; + } else if (f->discard_samples_deferred) { + if (f->discard_samples_deferred >= right_start - left_start) { + f->discard_samples_deferred -= (right_start - left_start); + left_start = right_start; + *p_left = left_start; + } else { + left_start += f->discard_samples_deferred; + *p_left = left_start; + f->discard_samples_deferred = 0; + } + } else if (f->previous_length == 0 && f->current_loc_valid) { + // we're recovering from a seek... that means we're going to discard + // the samples from this packet even though we know our position from + // the last page header, so we need to update the position based on + // the discarded samples here + // but wait, the code below is going to add this in itself even + // on a discard, so we don't need to do it here... + } + + // check if we have ogg information about the sample # for this packet + if (f->last_seg_which == f->end_seg_with_known_loc) { + // if we have a valid current loc, and this is final: + if (f->current_loc_valid && (f->page_flag & PAGEFLAG_last_page)) { + uint32 current_end = f->known_loc_for_packet; + // then let's infer the size of the (probably) short final frame + if (current_end < f->current_loc + (right_end-left_start)) { + if (current_end < f->current_loc) { + // negative truncation, that's impossible! + *len = 0; + } else { + *len = current_end - f->current_loc; + } + *len += left_start; // this doesn't seem right, but has no ill effect on my test files + if (*len > right_end) *len = right_end; // this should never happen + f->current_loc += *len; + return TRUE; + } + } + // otherwise, just set our sample loc + // guess that the ogg granule pos refers to the _middle_ of the + // last frame? + // set f->current_loc to the position of left_start + f->current_loc = f->known_loc_for_packet - (n2-left_start); + f->current_loc_valid = TRUE; + } + if (f->current_loc_valid) + f->current_loc += (right_start - left_start); + + if (f->alloc.alloc_buffer) + assert(f->alloc.alloc_buffer_length_in_bytes == f->temp_offset); + *len = right_end; // ignore samples after the window goes to 0 + CHECK(f); + + return TRUE; +} + +static int vorbis_decode_packet(vorb *f, int *len, int *p_left, int *p_right) +{ + int mode, left_end, right_end; + if (!vorbis_decode_initial(f, p_left, &left_end, p_right, &right_end, &mode)) return 0; + return vorbis_decode_packet_rest(f, len, f->mode_config + mode, *p_left, left_end, *p_right, right_end, p_left); +} + +static int vorbis_finish_frame(stb_vorbis *f, int len, int left, int right) +{ + int prev,i,j; + // we use right&left (the start of the right- and left-window sin()-regions) + // to determine how much to return, rather than inferring from the rules + // (same result, clearer code); 'left' indicates where our sin() window + // starts, therefore where the previous window's right edge starts, and + // therefore where to start mixing from the previous buffer. 'right' + // indicates where our sin() ending-window starts, therefore that's where + // we start saving, and where our returned-data ends. 
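+   //
+   // (concretely, with the hypothetical 256/2048 blocksizes used in the example
+   //  comment before vorbis_decode_initial(): a long frame with long neighbours
+   //  arrives here as left=0, right=1024, len=2048, so once there is a previous
+   //  frame to blend with we hand back 1024 samples and stash the other 1024 in
+   //  previous_window for the next overlap-add; a long frame followed by a short
+   //  one arrives as right=1472, len=1600, so only 128 samples are saved -- the
+   //  half-length of the short window that will consume them.)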
+ + // mixin from previous window + if (f->previous_length) { + int i,j, n = f->previous_length; + float *w = get_window(f, n); + if (w == NULL) return 0; + for (i=0; i < f->channels; ++i) { + for (j=0; j < n; ++j) + f->channel_buffers[i][left+j] = + f->channel_buffers[i][left+j]*w[ j] + + f->previous_window[i][ j]*w[n-1-j]; + } + } + + prev = f->previous_length; + + // last half of this data becomes previous window + f->previous_length = len - right; + + // @OPTIMIZE: could avoid this copy by double-buffering the + // output (flipping previous_window with channel_buffers), but + // then previous_window would have to be 2x as large, and + // channel_buffers couldn't be temp mem (although they're NOT + // currently temp mem, they could be (unless we want to level + // performance by spreading out the computation)) + for (i=0; i < f->channels; ++i) + for (j=0; right+j < len; ++j) + f->previous_window[i][j] = f->channel_buffers[i][right+j]; + + if (!prev) + // there was no previous packet, so this data isn't valid... + // this isn't entirely true, only the would-have-overlapped data + // isn't valid, but this seems to be what the spec requires + return 0; + + // truncate a short frame + if (len < right) right = len; + + f->samples_output += right-left; + + return right - left; +} + +static int vorbis_pump_first_frame(stb_vorbis *f) +{ + int len, right, left, res; + res = vorbis_decode_packet(f, &len, &left, &right); + if (res) + vorbis_finish_frame(f, len, left, right); + return res; +} + +#ifndef STB_VORBIS_NO_PUSHDATA_API +static int is_whole_packet_present(stb_vorbis *f) +{ + // make sure that we have the packet available before continuing... + // this requires a full ogg parse, but we know we can fetch from f->stream + + // instead of coding this out explicitly, we could save the current read state, + // read the next packet with get8() until end-of-packet, check f->eof, then + // reset the state? but that would be slower, esp. since we'd have over 256 bytes + // of state to restore (primarily the page segment table) + + int s = f->next_seg, first = TRUE; + uint8 *p = f->stream; + + if (s != -1) { // if we're not starting the packet with a 'continue on next page' flag + for (; s < f->segment_count; ++s) { + p += f->segments[s]; + if (f->segments[s] < 255) // stop at first short segment + break; + } + // either this continues, or it ends it... 
+ if (s == f->segment_count) + s = -1; // set 'crosses page' flag + if (p > f->stream_end) return error(f, VORBIS_need_more_data); + first = FALSE; + } + for (; s == -1;) { + uint8 *q; + int n; + + // check that we have the page header ready + if (p + 26 >= f->stream_end) return error(f, VORBIS_need_more_data); + // validate the page + if (memcmp(p, ogg_page_header, 4)) return error(f, VORBIS_invalid_stream); + if (p[4] != 0) return error(f, VORBIS_invalid_stream); + if (first) { // the first segment must NOT have 'continued_packet', later ones MUST + if (f->previous_length) + if ((p[5] & PAGEFLAG_continued_packet)) return error(f, VORBIS_invalid_stream); + // if no previous length, we're resynching, so we can come in on a continued-packet, + // which we'll just drop + } else { + if (!(p[5] & PAGEFLAG_continued_packet)) return error(f, VORBIS_invalid_stream); + } + n = p[26]; // segment counts + q = p+27; // q points to segment table + p = q + n; // advance past header + // make sure we've read the segment table + if (p > f->stream_end) return error(f, VORBIS_need_more_data); + for (s=0; s < n; ++s) { + p += q[s]; + if (q[s] < 255) + break; + } + if (s == n) + s = -1; // set 'crosses page' flag + if (p > f->stream_end) return error(f, VORBIS_need_more_data); + first = FALSE; + } + return TRUE; +} +#endif // !STB_VORBIS_NO_PUSHDATA_API + +static int start_decoder(vorb *f) +{ + uint8 header[6], x,y; + int len,i,j,k, max_submaps = 0; + int longest_floorlist=0; + + // first page, first packet + f->first_decode = TRUE; + + if (!start_page(f)) return FALSE; + // validate page flag + if (!(f->page_flag & PAGEFLAG_first_page)) return error(f, VORBIS_invalid_first_page); + if (f->page_flag & PAGEFLAG_last_page) return error(f, VORBIS_invalid_first_page); + if (f->page_flag & PAGEFLAG_continued_packet) return error(f, VORBIS_invalid_first_page); + // check for expected packet length + if (f->segment_count != 1) return error(f, VORBIS_invalid_first_page); + if (f->segments[0] != 30) { + // check for the Ogg skeleton fishead identifying header to refine our error + if (f->segments[0] == 64 && + getn(f, header, 6) && + header[0] == 'f' && + header[1] == 'i' && + header[2] == 's' && + header[3] == 'h' && + header[4] == 'e' && + header[5] == 'a' && + get8(f) == 'd' && + get8(f) == '\0') return error(f, VORBIS_ogg_skeleton_not_supported); + else + return error(f, VORBIS_invalid_first_page); + } + + // read packet + // check packet header + if (get8(f) != VORBIS_packet_id) return error(f, VORBIS_invalid_first_page); + if (!getn(f, header, 6)) return error(f, VORBIS_unexpected_eof); + if (!vorbis_validate(header)) return error(f, VORBIS_invalid_first_page); + // vorbis_version + if (get32(f) != 0) return error(f, VORBIS_invalid_first_page); + f->channels = get8(f); if (!f->channels) return error(f, VORBIS_invalid_first_page); + if (f->channels > STB_VORBIS_MAX_CHANNELS) return error(f, VORBIS_too_many_channels); + f->sample_rate = get32(f); if (!f->sample_rate) return error(f, VORBIS_invalid_first_page); + get32(f); // bitrate_maximum + get32(f); // bitrate_nominal + get32(f); // bitrate_minimum + x = get8(f); + { + int log0,log1; + log0 = x & 15; + log1 = x >> 4; + f->blocksize_0 = 1 << log0; + f->blocksize_1 = 1 << log1; + if (log0 < 6 || log0 > 13) return error(f, VORBIS_invalid_setup); + if (log1 < 6 || log1 > 13) return error(f, VORBIS_invalid_setup); + if (log0 > log1) return error(f, VORBIS_invalid_setup); + } + + // framing_flag + x = get8(f); + if (!(x & 1)) return error(f, 
VORBIS_invalid_first_page); + + // second packet! + if (!start_page(f)) return FALSE; + + if (!start_packet(f)) return FALSE; + + if (!next_segment(f)) return FALSE; + + if (get8_packet(f) != VORBIS_packet_comment) return error(f, VORBIS_invalid_setup); + for (i=0; i < 6; ++i) header[i] = get8_packet(f); + if (!vorbis_validate(header)) return error(f, VORBIS_invalid_setup); + //file vendor + len = get32_packet(f); + f->vendor = (char*)setup_malloc(f, sizeof(char) * (len+1)); + for(i=0; i < len; ++i) { + f->vendor[i] = get8_packet(f); + } + f->vendor[len] = (char)'\0'; + //user comments + f->comment_list_length = get32_packet(f); + f->comment_list = (char**)setup_malloc(f, sizeof(char*) * (f->comment_list_length)); + + for(i=0; i < f->comment_list_length; ++i) { + len = get32_packet(f); + f->comment_list[i] = (char*)setup_malloc(f, sizeof(char) * (len+1)); + + for(j=0; j < len; ++j) { + f->comment_list[i][j] = get8_packet(f); + } + f->comment_list[i][len] = (char)'\0'; + } + + // framing_flag + x = get8_packet(f); + if (!(x & 1)) return error(f, VORBIS_invalid_setup); + + + skip(f, f->bytes_in_seg); + f->bytes_in_seg = 0; + + do { + len = next_segment(f); + skip(f, len); + f->bytes_in_seg = 0; + } while (len); + + // third packet! + if (!start_packet(f)) return FALSE; + + #ifndef STB_VORBIS_NO_PUSHDATA_API + if (IS_PUSH_MODE(f)) { + if (!is_whole_packet_present(f)) { + // convert error in ogg header to write type + if (f->error == VORBIS_invalid_stream) + f->error = VORBIS_invalid_setup; + return FALSE; + } + } + #endif + + crc32_init(); // always init it, to avoid multithread race conditions + + if (get8_packet(f) != VORBIS_packet_setup) return error(f, VORBIS_invalid_setup); + for (i=0; i < 6; ++i) header[i] = get8_packet(f); + if (!vorbis_validate(header)) return error(f, VORBIS_invalid_setup); + + // codebooks + + f->codebook_count = get_bits(f,8) + 1; + f->codebooks = (Codebook *) setup_malloc(f, sizeof(*f->codebooks) * f->codebook_count); + if (f->codebooks == NULL) return error(f, VORBIS_outofmem); + memset(f->codebooks, 0, sizeof(*f->codebooks) * f->codebook_count); + for (i=0; i < f->codebook_count; ++i) { + uint32 *values; + int ordered, sorted_count; + int total=0; + uint8 *lengths; + Codebook *c = f->codebooks+i; + CHECK(f); + x = get_bits(f, 8); if (x != 0x42) return error(f, VORBIS_invalid_setup); + x = get_bits(f, 8); if (x != 0x43) return error(f, VORBIS_invalid_setup); + x = get_bits(f, 8); if (x != 0x56) return error(f, VORBIS_invalid_setup); + x = get_bits(f, 8); + c->dimensions = (get_bits(f, 8)<<8) + x; + x = get_bits(f, 8); + y = get_bits(f, 8); + c->entries = (get_bits(f, 8)<<16) + (y<<8) + x; + ordered = get_bits(f,1); + c->sparse = ordered ? 
0 : get_bits(f,1); + + if (c->dimensions == 0 && c->entries != 0) return error(f, VORBIS_invalid_setup); + + if (c->sparse) + lengths = (uint8 *) setup_temp_malloc(f, c->entries); + else + lengths = c->codeword_lengths = (uint8 *) setup_malloc(f, c->entries); + + if (!lengths) return error(f, VORBIS_outofmem); + + if (ordered) { + int current_entry = 0; + int current_length = get_bits(f,5) + 1; + while (current_entry < c->entries) { + int limit = c->entries - current_entry; + int n = get_bits(f, ilog(limit)); + if (current_length >= 32) return error(f, VORBIS_invalid_setup); + if (current_entry + n > (int) c->entries) { return error(f, VORBIS_invalid_setup); } + memset(lengths + current_entry, current_length, n); + current_entry += n; + ++current_length; + } + } else { + for (j=0; j < c->entries; ++j) { + int present = c->sparse ? get_bits(f,1) : 1; + if (present) { + lengths[j] = get_bits(f, 5) + 1; + ++total; + if (lengths[j] == 32) + return error(f, VORBIS_invalid_setup); + } else { + lengths[j] = NO_CODE; + } + } + } + + if (c->sparse && total >= c->entries >> 2) { + // convert sparse items to non-sparse! + if (c->entries > (int) f->setup_temp_memory_required) + f->setup_temp_memory_required = c->entries; + + c->codeword_lengths = (uint8 *) setup_malloc(f, c->entries); + if (c->codeword_lengths == NULL) return error(f, VORBIS_outofmem); + memcpy(c->codeword_lengths, lengths, c->entries); + setup_temp_free(f, lengths, c->entries); // note this is only safe if there have been no intervening temp mallocs! + lengths = c->codeword_lengths; + c->sparse = 0; + } + + // compute the size of the sorted tables + if (c->sparse) { + sorted_count = total; + } else { + sorted_count = 0; + #ifndef STB_VORBIS_NO_HUFFMAN_BINARY_SEARCH + for (j=0; j < c->entries; ++j) + if (lengths[j] > STB_VORBIS_FAST_HUFFMAN_LENGTH && lengths[j] != NO_CODE) + ++sorted_count; + #endif + } + + c->sorted_entries = sorted_count; + values = NULL; + + CHECK(f); + if (!c->sparse) { + c->codewords = (uint32 *) setup_malloc(f, sizeof(c->codewords[0]) * c->entries); + if (!c->codewords) return error(f, VORBIS_outofmem); + } else { + unsigned int size; + if (c->sorted_entries) { + c->codeword_lengths = (uint8 *) setup_malloc(f, c->sorted_entries); + if (!c->codeword_lengths) return error(f, VORBIS_outofmem); + c->codewords = (uint32 *) setup_temp_malloc(f, sizeof(*c->codewords) * c->sorted_entries); + if (!c->codewords) return error(f, VORBIS_outofmem); + values = (uint32 *) setup_temp_malloc(f, sizeof(*values) * c->sorted_entries); + if (!values) return error(f, VORBIS_outofmem); + } + size = c->entries + (sizeof(*c->codewords) + sizeof(*values)) * c->sorted_entries; + if (size > f->setup_temp_memory_required) + f->setup_temp_memory_required = size; + } + + if (!compute_codewords(c, lengths, c->entries, values)) { + if (c->sparse) setup_temp_free(f, values, 0); + return error(f, VORBIS_invalid_setup); + } + + if (c->sorted_entries) { + // allocate an extra slot for sentinels + c->sorted_codewords = (uint32 *) setup_malloc(f, sizeof(*c->sorted_codewords) * (c->sorted_entries+1)); + if (c->sorted_codewords == NULL) return error(f, VORBIS_outofmem); + // allocate an extra slot at the front so that c->sorted_values[-1] is defined + // so that we can catch that case without an extra if + c->sorted_values = ( int *) setup_malloc(f, sizeof(*c->sorted_values ) * (c->sorted_entries+1)); + if (c->sorted_values == NULL) return error(f, VORBIS_outofmem); + ++c->sorted_values; + c->sorted_values[-1] = -1; + compute_sorted_huffman(c, lengths, 
values); + } + + if (c->sparse) { + setup_temp_free(f, values, sizeof(*values)*c->sorted_entries); + setup_temp_free(f, c->codewords, sizeof(*c->codewords)*c->sorted_entries); + setup_temp_free(f, lengths, c->entries); + c->codewords = NULL; + } + + compute_accelerated_huffman(c); + + CHECK(f); + c->lookup_type = get_bits(f, 4); + if (c->lookup_type > 2) return error(f, VORBIS_invalid_setup); + if (c->lookup_type > 0) { + uint16 *mults; + c->minimum_value = float32_unpack(get_bits(f, 32)); + c->delta_value = float32_unpack(get_bits(f, 32)); + c->value_bits = get_bits(f, 4)+1; + c->sequence_p = get_bits(f,1); + if (c->lookup_type == 1) { + int values = lookup1_values(c->entries, c->dimensions); + if (values < 0) return error(f, VORBIS_invalid_setup); + c->lookup_values = (uint32) values; + } else { + c->lookup_values = c->entries * c->dimensions; + } + if (c->lookup_values == 0) return error(f, VORBIS_invalid_setup); + mults = (uint16 *) setup_temp_malloc(f, sizeof(mults[0]) * c->lookup_values); + if (mults == NULL) return error(f, VORBIS_outofmem); + for (j=0; j < (int) c->lookup_values; ++j) { + int q = get_bits(f, c->value_bits); + if (q == EOP) { setup_temp_free(f,mults,sizeof(mults[0])*c->lookup_values); return error(f, VORBIS_invalid_setup); } + mults[j] = q; + } + +#ifndef STB_VORBIS_DIVIDES_IN_CODEBOOK + if (c->lookup_type == 1) { + int len, sparse = c->sparse; + float last=0; + // pre-expand the lookup1-style multiplicands, to avoid a divide in the inner loop + if (sparse) { + if (c->sorted_entries == 0) goto skip; + c->multiplicands = (codetype *) setup_malloc(f, sizeof(c->multiplicands[0]) * c->sorted_entries * c->dimensions); + } else + c->multiplicands = (codetype *) setup_malloc(f, sizeof(c->multiplicands[0]) * c->entries * c->dimensions); + if (c->multiplicands == NULL) { setup_temp_free(f,mults,sizeof(mults[0])*c->lookup_values); return error(f, VORBIS_outofmem); } + len = sparse ? c->sorted_entries : c->entries; + for (j=0; j < len; ++j) { + unsigned int z = sparse ? 
c->sorted_values[j] : j; + unsigned int div=1; + for (k=0; k < c->dimensions; ++k) { + int off = (z / div) % c->lookup_values; + float val = mults[off]; + val = mults[off]*c->delta_value + c->minimum_value + last; + c->multiplicands[j*c->dimensions + k] = val; + if (c->sequence_p) + last = val; + if (k+1 < c->dimensions) { + if (div > UINT_MAX / (unsigned int) c->lookup_values) { + setup_temp_free(f, mults,sizeof(mults[0])*c->lookup_values); + return error(f, VORBIS_invalid_setup); + } + div *= c->lookup_values; + } + } + } + c->lookup_type = 2; + } + else +#endif + { + float last=0; + CHECK(f); + c->multiplicands = (codetype *) setup_malloc(f, sizeof(c->multiplicands[0]) * c->lookup_values); + if (c->multiplicands == NULL) { setup_temp_free(f, mults,sizeof(mults[0])*c->lookup_values); return error(f, VORBIS_outofmem); } + for (j=0; j < (int) c->lookup_values; ++j) { + float val = mults[j] * c->delta_value + c->minimum_value + last; + c->multiplicands[j] = val; + if (c->sequence_p) + last = val; + } + } +#ifndef STB_VORBIS_DIVIDES_IN_CODEBOOK + skip:; +#endif + setup_temp_free(f, mults, sizeof(mults[0])*c->lookup_values); + + CHECK(f); + } + CHECK(f); + } + + // time domain transfers (notused) + + x = get_bits(f, 6) + 1; + for (i=0; i < x; ++i) { + uint32 z = get_bits(f, 16); + if (z != 0) return error(f, VORBIS_invalid_setup); + } + + // Floors + f->floor_count = get_bits(f, 6)+1; + f->floor_config = (Floor *) setup_malloc(f, f->floor_count * sizeof(*f->floor_config)); + if (f->floor_config == NULL) return error(f, VORBIS_outofmem); + for (i=0; i < f->floor_count; ++i) { + f->floor_types[i] = get_bits(f, 16); + if (f->floor_types[i] > 1) return error(f, VORBIS_invalid_setup); + if (f->floor_types[i] == 0) { + Floor0 *g = &f->floor_config[i].floor0; + g->order = get_bits(f,8); + g->rate = get_bits(f,16); + g->bark_map_size = get_bits(f,16); + g->amplitude_bits = get_bits(f,6); + g->amplitude_offset = get_bits(f,8); + g->number_of_books = get_bits(f,4) + 1; + for (j=0; j < g->number_of_books; ++j) + g->book_list[j] = get_bits(f,8); + return error(f, VORBIS_feature_not_supported); + } else { + stbv__floor_ordering p[31*8+2]; + Floor1 *g = &f->floor_config[i].floor1; + int max_class = -1; + g->partitions = get_bits(f, 5); + for (j=0; j < g->partitions; ++j) { + g->partition_class_list[j] = get_bits(f, 4); + if (g->partition_class_list[j] > max_class) + max_class = g->partition_class_list[j]; + } + for (j=0; j <= max_class; ++j) { + g->class_dimensions[j] = get_bits(f, 3)+1; + g->class_subclasses[j] = get_bits(f, 2); + if (g->class_subclasses[j]) { + g->class_masterbooks[j] = get_bits(f, 8); + if (g->class_masterbooks[j] >= f->codebook_count) return error(f, VORBIS_invalid_setup); + } + for (k=0; k < 1 << g->class_subclasses[j]; ++k) { + g->subclass_books[j][k] = get_bits(f,8)-1; + if (g->subclass_books[j][k] >= f->codebook_count) return error(f, VORBIS_invalid_setup); + } + } + g->floor1_multiplier = get_bits(f,2)+1; + g->rangebits = get_bits(f,4); + g->Xlist[0] = 0; + g->Xlist[1] = 1 << g->rangebits; + g->values = 2; + for (j=0; j < g->partitions; ++j) { + int c = g->partition_class_list[j]; + for (k=0; k < g->class_dimensions[c]; ++k) { + g->Xlist[g->values] = get_bits(f, g->rangebits); + ++g->values; + } + } + // precompute the sorting + for (j=0; j < g->values; ++j) { + p[j].x = g->Xlist[j]; + p[j].id = j; + } + qsort(p, g->values, sizeof(p[0]), point_compare); + for (j=0; j < g->values-1; ++j) + if (p[j].x == p[j+1].x) + return error(f, VORBIS_invalid_setup); + for (j=0; j < g->values; 
++j) + g->sorted_order[j] = (uint8) p[j].id; + // precompute the neighbors + for (j=2; j < g->values; ++j) { + int low = 0,hi = 0; + neighbors(g->Xlist, j, &low,&hi); + g->neighbors[j][0] = low; + g->neighbors[j][1] = hi; + } + + if (g->values > longest_floorlist) + longest_floorlist = g->values; + } + } + + // Residue + f->residue_count = get_bits(f, 6)+1; + f->residue_config = (Residue *) setup_malloc(f, f->residue_count * sizeof(f->residue_config[0])); + if (f->residue_config == NULL) return error(f, VORBIS_outofmem); + memset(f->residue_config, 0, f->residue_count * sizeof(f->residue_config[0])); + for (i=0; i < f->residue_count; ++i) { + uint8 residue_cascade[64]; + Residue *r = f->residue_config+i; + f->residue_types[i] = get_bits(f, 16); + if (f->residue_types[i] > 2) return error(f, VORBIS_invalid_setup); + r->begin = get_bits(f, 24); + r->end = get_bits(f, 24); + if (r->end < r->begin) return error(f, VORBIS_invalid_setup); + r->part_size = get_bits(f,24)+1; + r->classifications = get_bits(f,6)+1; + r->classbook = get_bits(f,8); + if (r->classbook >= f->codebook_count) return error(f, VORBIS_invalid_setup); + for (j=0; j < r->classifications; ++j) { + uint8 high_bits=0; + uint8 low_bits=get_bits(f,3); + if (get_bits(f,1)) + high_bits = get_bits(f,5); + residue_cascade[j] = high_bits*8 + low_bits; + } + r->residue_books = (short (*)[8]) setup_malloc(f, sizeof(r->residue_books[0]) * r->classifications); + if (r->residue_books == NULL) return error(f, VORBIS_outofmem); + for (j=0; j < r->classifications; ++j) { + for (k=0; k < 8; ++k) { + if (residue_cascade[j] & (1 << k)) { + r->residue_books[j][k] = get_bits(f, 8); + if (r->residue_books[j][k] >= f->codebook_count) return error(f, VORBIS_invalid_setup); + } else { + r->residue_books[j][k] = -1; + } + } + } + // precompute the classifications[] array to avoid inner-loop mod/divide + // call it 'classdata' since we already have r->classifications + r->classdata = (uint8 **) setup_malloc(f, sizeof(*r->classdata) * f->codebooks[r->classbook].entries); + if (!r->classdata) return error(f, VORBIS_outofmem); + memset(r->classdata, 0, sizeof(*r->classdata) * f->codebooks[r->classbook].entries); + for (j=0; j < f->codebooks[r->classbook].entries; ++j) { + int classwords = f->codebooks[r->classbook].dimensions; + int temp = j; + r->classdata[j] = (uint8 *) setup_malloc(f, sizeof(r->classdata[j][0]) * classwords); + if (r->classdata[j] == NULL) return error(f, VORBIS_outofmem); + for (k=classwords-1; k >= 0; --k) { + r->classdata[j][k] = temp % r->classifications; + temp /= r->classifications; + } + } + } + + f->mapping_count = get_bits(f,6)+1; + f->mapping = (Mapping *) setup_malloc(f, f->mapping_count * sizeof(*f->mapping)); + if (f->mapping == NULL) return error(f, VORBIS_outofmem); + memset(f->mapping, 0, f->mapping_count * sizeof(*f->mapping)); + for (i=0; i < f->mapping_count; ++i) { + Mapping *m = f->mapping + i; + int mapping_type = get_bits(f,16); + if (mapping_type != 0) return error(f, VORBIS_invalid_setup); + m->chan = (MappingChannel *) setup_malloc(f, f->channels * sizeof(*m->chan)); + if (m->chan == NULL) return error(f, VORBIS_outofmem); + if (get_bits(f,1)) + m->submaps = get_bits(f,4)+1; + else + m->submaps = 1; + if (m->submaps > max_submaps) + max_submaps = m->submaps; + if (get_bits(f,1)) { + m->coupling_steps = get_bits(f,8)+1; + if (m->coupling_steps > f->channels) return error(f, VORBIS_invalid_setup); + for (k=0; k < m->coupling_steps; ++k) { + m->chan[k].magnitude = get_bits(f, ilog(f->channels-1)); + 
m->chan[k].angle = get_bits(f, ilog(f->channels-1)); + if (m->chan[k].magnitude >= f->channels) return error(f, VORBIS_invalid_setup); + if (m->chan[k].angle >= f->channels) return error(f, VORBIS_invalid_setup); + if (m->chan[k].magnitude == m->chan[k].angle) return error(f, VORBIS_invalid_setup); + } + } else + m->coupling_steps = 0; + + // reserved field + if (get_bits(f,2)) return error(f, VORBIS_invalid_setup); + if (m->submaps > 1) { + for (j=0; j < f->channels; ++j) { + m->chan[j].mux = get_bits(f, 4); + if (m->chan[j].mux >= m->submaps) return error(f, VORBIS_invalid_setup); + } + } else + // @SPECIFICATION: this case is missing from the spec + for (j=0; j < f->channels; ++j) + m->chan[j].mux = 0; + + for (j=0; j < m->submaps; ++j) { + get_bits(f,8); // discard + m->submap_floor[j] = get_bits(f,8); + m->submap_residue[j] = get_bits(f,8); + if (m->submap_floor[j] >= f->floor_count) return error(f, VORBIS_invalid_setup); + if (m->submap_residue[j] >= f->residue_count) return error(f, VORBIS_invalid_setup); + } + } + + // Modes + f->mode_count = get_bits(f, 6)+1; + for (i=0; i < f->mode_count; ++i) { + Mode *m = f->mode_config+i; + m->blockflag = get_bits(f,1); + m->windowtype = get_bits(f,16); + m->transformtype = get_bits(f,16); + m->mapping = get_bits(f,8); + if (m->windowtype != 0) return error(f, VORBIS_invalid_setup); + if (m->transformtype != 0) return error(f, VORBIS_invalid_setup); + if (m->mapping >= f->mapping_count) return error(f, VORBIS_invalid_setup); + } + + flush_packet(f); + + f->previous_length = 0; + + for (i=0; i < f->channels; ++i) { + f->channel_buffers[i] = (float *) setup_malloc(f, sizeof(float) * f->blocksize_1); + f->previous_window[i] = (float *) setup_malloc(f, sizeof(float) * f->blocksize_1/2); + f->finalY[i] = (int16 *) setup_malloc(f, sizeof(int16) * longest_floorlist); + if (f->channel_buffers[i] == NULL || f->previous_window[i] == NULL || f->finalY[i] == NULL) return error(f, VORBIS_outofmem); + memset(f->channel_buffers[i], 0, sizeof(float) * f->blocksize_1); + #ifdef STB_VORBIS_NO_DEFER_FLOOR + f->floor_buffers[i] = (float *) setup_malloc(f, sizeof(float) * f->blocksize_1/2); + if (f->floor_buffers[i] == NULL) return error(f, VORBIS_outofmem); + #endif + } + + if (!init_blocksize(f, 0, f->blocksize_0)) return FALSE; + if (!init_blocksize(f, 1, f->blocksize_1)) return FALSE; + f->blocksize[0] = f->blocksize_0; + f->blocksize[1] = f->blocksize_1; + +#ifdef STB_VORBIS_DIVIDE_TABLE + if (integer_divide_table[1][1]==0) + for (i=0; i < DIVTAB_NUMER; ++i) + for (j=1; j < DIVTAB_DENOM; ++j) + integer_divide_table[i][j] = i / j; +#endif + + // compute how much temporary memory is needed + + // 1. + { + uint32 imdct_mem = (f->blocksize_1 * sizeof(float) >> 1); + uint32 classify_mem; + int i,max_part_read=0; + for (i=0; i < f->residue_count; ++i) { + Residue *r = f->residue_config + i; + unsigned int actual_size = f->blocksize_1 / 2; + unsigned int limit_r_begin = r->begin < actual_size ? r->begin : actual_size; + unsigned int limit_r_end = r->end < actual_size ? 
r->end : actual_size; + int n_read = limit_r_end - limit_r_begin; + int part_read = n_read / r->part_size; + if (part_read > max_part_read) + max_part_read = part_read; + } + #ifndef STB_VORBIS_DIVIDES_IN_RESIDUE + classify_mem = f->channels * (sizeof(void*) + max_part_read * sizeof(uint8 *)); + #else + classify_mem = f->channels * (sizeof(void*) + max_part_read * sizeof(int *)); + #endif + + // maximum reasonable partition size is f->blocksize_1 + + f->temp_memory_required = classify_mem; + if (imdct_mem > f->temp_memory_required) + f->temp_memory_required = imdct_mem; + } + + + if (f->alloc.alloc_buffer) { + assert(f->temp_offset == f->alloc.alloc_buffer_length_in_bytes); + // check if there's enough temp memory so we don't error later + if (f->setup_offset + sizeof(*f) + f->temp_memory_required > (unsigned) f->temp_offset) + return error(f, VORBIS_outofmem); + } + + // @TODO: stb_vorbis_seek_start expects first_audio_page_offset to point to a page + // without PAGEFLAG_continued_packet, so this either points to the first page, or + // the page after the end of the headers. It might be cleaner to point to a page + // in the middle of the headers, when that's the page where the first audio packet + // starts, but we'd have to also correctly skip the end of any continued packet in + // stb_vorbis_seek_start. + if (f->next_seg == -1) { + f->first_audio_page_offset = stb_vorbis_get_file_offset(f); + } else { + f->first_audio_page_offset = 0; + } + + return TRUE; +} + +static void vorbis_deinit(stb_vorbis *p) +{ + int i,j; + + setup_free(p, p->vendor); + for (i=0; i < p->comment_list_length; ++i) { + setup_free(p, p->comment_list[i]); + } + setup_free(p, p->comment_list); + + if (p->residue_config) { + for (i=0; i < p->residue_count; ++i) { + Residue *r = p->residue_config+i; + if (r->classdata) { + for (j=0; j < p->codebooks[r->classbook].entries; ++j) + setup_free(p, r->classdata[j]); + setup_free(p, r->classdata); + } + setup_free(p, r->residue_books); + } + } + + if (p->codebooks) { + CHECK(p); + for (i=0; i < p->codebook_count; ++i) { + Codebook *c = p->codebooks + i; + setup_free(p, c->codeword_lengths); + setup_free(p, c->multiplicands); + setup_free(p, c->codewords); + setup_free(p, c->sorted_codewords); + // c->sorted_values[-1] is the first entry in the array + setup_free(p, c->sorted_values ? 
c->sorted_values-1 : NULL); + } + setup_free(p, p->codebooks); + } + setup_free(p, p->floor_config); + setup_free(p, p->residue_config); + if (p->mapping) { + for (i=0; i < p->mapping_count; ++i) + setup_free(p, p->mapping[i].chan); + setup_free(p, p->mapping); + } + CHECK(p); + for (i=0; i < p->channels && i < STB_VORBIS_MAX_CHANNELS; ++i) { + setup_free(p, p->channel_buffers[i]); + setup_free(p, p->previous_window[i]); + #ifdef STB_VORBIS_NO_DEFER_FLOOR + setup_free(p, p->floor_buffers[i]); + #endif + setup_free(p, p->finalY[i]); + } + for (i=0; i < 2; ++i) { + setup_free(p, p->A[i]); + setup_free(p, p->B[i]); + setup_free(p, p->C[i]); + setup_free(p, p->window[i]); + setup_free(p, p->bit_reverse[i]); + } + #ifndef STB_VORBIS_NO_STDIO + if (p->close_on_free) fclose(p->f); + #endif +} + +void stb_vorbis_close(stb_vorbis *p) +{ + if (p == NULL) return; + vorbis_deinit(p); + setup_free(p,p); +} + +static void vorbis_init(stb_vorbis *p, const stb_vorbis_alloc *z) +{ + memset(p, 0, sizeof(*p)); // NULL out all malloc'd pointers to start + if (z) { + p->alloc = *z; + p->alloc.alloc_buffer_length_in_bytes = (p->alloc.alloc_buffer_length_in_bytes+3) & ~3; + p->temp_offset = p->alloc.alloc_buffer_length_in_bytes; + } + p->eof = 0; + p->error = VORBIS__no_error; + p->stream = NULL; + p->codebooks = NULL; + p->page_crc_tests = -1; + #ifndef STB_VORBIS_NO_STDIO + p->close_on_free = FALSE; + p->f = NULL; + #endif +} + +int stb_vorbis_get_sample_offset(stb_vorbis *f) +{ + if (f->current_loc_valid) + return f->current_loc; + else + return -1; +} + +stb_vorbis_info stb_vorbis_get_info(stb_vorbis *f) +{ + stb_vorbis_info d; + d.channels = f->channels; + d.sample_rate = f->sample_rate; + d.setup_memory_required = f->setup_memory_required; + d.setup_temp_memory_required = f->setup_temp_memory_required; + d.temp_memory_required = f->temp_memory_required; + d.max_frame_size = f->blocksize_1 >> 1; + return d; +} + +stb_vorbis_comment stb_vorbis_get_comment(stb_vorbis *f) +{ + stb_vorbis_comment d; + d.vendor = f->vendor; + d.comment_list_length = f->comment_list_length; + d.comment_list = f->comment_list; + return d; +} + +int stb_vorbis_get_error(stb_vorbis *f) +{ + int e = f->error; + f->error = VORBIS__no_error; + return e; +} + +static stb_vorbis * vorbis_alloc(stb_vorbis *f) +{ + stb_vorbis *p = (stb_vorbis *) setup_malloc(f, sizeof(*p)); + return p; +} + +#ifndef STB_VORBIS_NO_PUSHDATA_API + +void stb_vorbis_flush_pushdata(stb_vorbis *f) +{ + f->previous_length = 0; + f->page_crc_tests = 0; + f->discard_samples_deferred = 0; + f->current_loc_valid = FALSE; + f->first_decode = FALSE; + f->samples_output = 0; + f->channel_buffer_start = 0; + f->channel_buffer_end = 0; +} + +static int vorbis_search_for_page_pushdata(vorb *f, uint8 *data, int data_len) +{ + int i,n; + for (i=0; i < f->page_crc_tests; ++i) + f->scan[i].bytes_done = 0; + + // if we have room for more scans, search for them first, because + // they may cause us to stop early if their header is incomplete + if (f->page_crc_tests < STB_VORBIS_PUSHDATA_CRC_COUNT) { + if (data_len < 4) return 0; + data_len -= 3; // need to look for 4-byte sequence, so don't miss + // one that straddles a boundary + for (i=0; i < data_len; ++i) { + if (data[i] == 0x4f) { + if (0==memcmp(data+i, ogg_page_header, 4)) { + int j,len; + uint32 crc; + // make sure we have the whole page header + if (i+26 >= data_len || i+27+data[i+26] >= data_len) { + // only read up to this page start, so hopefully we'll + // have the whole page header start next time + data_len = i; + 
break; + } + // ok, we have it all; compute the length of the page + len = 27 + data[i+26]; + for (j=0; j < data[i+26]; ++j) + len += data[i+27+j]; + // scan everything up to the embedded crc (which we must 0) + crc = 0; + for (j=0; j < 22; ++j) + crc = crc32_update(crc, data[i+j]); + // now process 4 0-bytes + for ( ; j < 26; ++j) + crc = crc32_update(crc, 0); + // len is the total number of bytes we need to scan + n = f->page_crc_tests++; + f->scan[n].bytes_left = len-j; + f->scan[n].crc_so_far = crc; + f->scan[n].goal_crc = data[i+22] + (data[i+23] << 8) + (data[i+24]<<16) + (data[i+25]<<24); + // if the last frame on a page is continued to the next, then + // we can't recover the sample_loc immediately + if (data[i+27+data[i+26]-1] == 255) + f->scan[n].sample_loc = ~0; + else + f->scan[n].sample_loc = data[i+6] + (data[i+7] << 8) + (data[i+ 8]<<16) + (data[i+ 9]<<24); + f->scan[n].bytes_done = i+j; + if (f->page_crc_tests == STB_VORBIS_PUSHDATA_CRC_COUNT) + break; + // keep going if we still have room for more + } + } + } + } + + for (i=0; i < f->page_crc_tests;) { + uint32 crc; + int j; + int n = f->scan[i].bytes_done; + int m = f->scan[i].bytes_left; + if (m > data_len - n) m = data_len - n; + // m is the bytes to scan in the current chunk + crc = f->scan[i].crc_so_far; + for (j=0; j < m; ++j) + crc = crc32_update(crc, data[n+j]); + f->scan[i].bytes_left -= m; + f->scan[i].crc_so_far = crc; + if (f->scan[i].bytes_left == 0) { + // does it match? + if (f->scan[i].crc_so_far == f->scan[i].goal_crc) { + // Houston, we have page + data_len = n+m; // consumption amount is wherever that scan ended + f->page_crc_tests = -1; // drop out of page scan mode + f->previous_length = 0; // decode-but-don't-output one frame + f->next_seg = -1; // start a new page + f->current_loc = f->scan[i].sample_loc; // set the current sample location + // to the amount we'd have decoded had we decoded this page + f->current_loc_valid = f->current_loc != ~0U; + return data_len; + } + // delete entry + f->scan[i] = f->scan[--f->page_crc_tests]; + } else { + ++i; + } + } + + return data_len; +} + +// return value: number of bytes we used +int stb_vorbis_decode_frame_pushdata( + stb_vorbis *f, // the file we're decoding + const uint8 *data, int data_len, // the memory available for decoding + int *channels, // place to write number of float * buffers + float ***output, // place to write float ** array of float * buffers + int *samples // place to write number of output samples + ) +{ + int i; + int len,right,left; + + if (!IS_PUSH_MODE(f)) return error(f, VORBIS_invalid_api_mixing); + + if (f->page_crc_tests >= 0) { + *samples = 0; + return vorbis_search_for_page_pushdata(f, (uint8 *) data, data_len); + } + + f->stream = (uint8 *) data; + f->stream_end = (uint8 *) data + data_len; + f->error = VORBIS__no_error; + + // check that we have the entire packet in memory + if (!is_whole_packet_present(f)) { + *samples = 0; + return 0; + } + + if (!vorbis_decode_packet(f, &len, &left, &right)) { + // save the actual error we encountered + enum STBVorbisError error = f->error; + if (error == VORBIS_bad_packet_type) { + // flush and resynch + f->error = VORBIS__no_error; + while (get8_packet(f) != EOP) + if (f->eof) break; + *samples = 0; + return (int) (f->stream - data); + } + if (error == VORBIS_continued_packet_flag_invalid) { + if (f->previous_length == 0) { + // we may be resynching, in which case it's ok to hit one + // of these; just discard the packet + f->error = VORBIS__no_error; + while (get8_packet(f) != EOP) + 
if (f->eof) break; + *samples = 0; + return (int) (f->stream - data); + } + } + // if we get an error while parsing, what to do? + // well, it DEFINITELY won't work to continue from where we are! + stb_vorbis_flush_pushdata(f); + // restore the error that actually made us bail + f->error = error; + *samples = 0; + return 1; + } + + // success! + len = vorbis_finish_frame(f, len, left, right); + for (i=0; i < f->channels; ++i) + f->outputs[i] = f->channel_buffers[i] + left; + + if (channels) *channels = f->channels; + *samples = len; + *output = f->outputs; + return (int) (f->stream - data); +} + +stb_vorbis *stb_vorbis_open_pushdata( + const unsigned char *data, int data_len, // the memory available for decoding + int *data_used, // only defined if result is not NULL + int *error, const stb_vorbis_alloc *alloc) +{ + stb_vorbis *f, p; + vorbis_init(&p, alloc); + p.stream = (uint8 *) data; + p.stream_end = (uint8 *) data + data_len; + p.push_mode = TRUE; + if (!start_decoder(&p)) { + if (p.eof) + *error = VORBIS_need_more_data; + else + *error = p.error; + return NULL; + } + f = vorbis_alloc(&p); + if (f) { + *f = p; + *data_used = (int) (f->stream - data); + *error = 0; + return f; + } else { + vorbis_deinit(&p); + return NULL; + } +} +#endif // STB_VORBIS_NO_PUSHDATA_API + +unsigned int stb_vorbis_get_file_offset(stb_vorbis *f) +{ + #ifndef STB_VORBIS_NO_PUSHDATA_API + if (f->push_mode) return 0; + #endif + if (USE_MEMORY(f)) return (unsigned int) (f->stream - f->stream_start); + #ifndef STB_VORBIS_NO_STDIO + return (unsigned int) (ftell(f->f) - f->f_start); + #endif +} + +#ifndef STB_VORBIS_NO_PULLDATA_API +// +// DATA-PULLING API +// + +static uint32 vorbis_find_page(stb_vorbis *f, uint32 *end, uint32 *last) +{ + for(;;) { + int n; + if (f->eof) return 0; + n = get8(f); + if (n == 0x4f) { // page header candidate + unsigned int retry_loc = stb_vorbis_get_file_offset(f); + int i; + // check if we're off the end of a file_section stream + if (retry_loc - 25 > f->stream_len) + return 0; + // check the rest of the header + for (i=1; i < 4; ++i) + if (get8(f) != ogg_page_header[i]) + break; + if (f->eof) return 0; + if (i == 4) { + uint8 header[27]; + uint32 i, crc, goal, len; + for (i=0; i < 4; ++i) + header[i] = ogg_page_header[i]; + for (; i < 27; ++i) + header[i] = get8(f); + if (f->eof) return 0; + if (header[4] != 0) goto invalid; + goal = header[22] + (header[23] << 8) + (header[24]<<16) + (header[25]<<24); + for (i=22; i < 26; ++i) + header[i] = 0; + crc = 0; + for (i=0; i < 27; ++i) + crc = crc32_update(crc, header[i]); + len = 0; + for (i=0; i < header[26]; ++i) { + int s = get8(f); + crc = crc32_update(crc, s); + len += s; + } + if (len && f->eof) return 0; + for (i=0; i < len; ++i) + crc = crc32_update(crc, get8(f)); + // finished parsing probable page + if (crc == goal) { + // we could now check that it's either got the last + // page flag set, OR it's followed by the capture + // pattern, but I guess TECHNICALLY you could have + // a file with garbage between each ogg page and recover + // from it automatically? So even though that paranoia + // might decrease the chance of an invalid decode by + // another 2^32, not worth it since it would hose those + // invalid-but-useful files? 
+ if (end) + *end = stb_vorbis_get_file_offset(f); + if (last) { + if (header[5] & 0x04) + *last = 1; + else + *last = 0; + } + set_file_offset(f, retry_loc-1); + return 1; + } + } + invalid: + // not a valid page, so rewind and look for next one + set_file_offset(f, retry_loc); + } + } +} + + +#define SAMPLE_unknown 0xffffffff + +// seeking is implemented with a binary search, which narrows down the range to +// 64K, before using a linear search (because finding the synchronization +// pattern can be expensive, and the chance we'd find the end page again is +// relatively high for small ranges) +// +// two initial interpolation-style probes are used at the start of the search +// to try to bound either side of the binary search sensibly, while still +// working in O(log n) time if they fail. + +static int get_seek_page_info(stb_vorbis *f, ProbedPage *z) +{ + uint8 header[27], lacing[255]; + int i,len; + + // record where the page starts + z->page_start = stb_vorbis_get_file_offset(f); + + // parse the header + getn(f, header, 27); + if (header[0] != 'O' || header[1] != 'g' || header[2] != 'g' || header[3] != 'S') + return 0; + getn(f, lacing, header[26]); + + // determine the length of the payload + len = 0; + for (i=0; i < header[26]; ++i) + len += lacing[i]; + + // this implies where the page ends + z->page_end = z->page_start + 27 + header[26] + len; + + // read the last-decoded sample out of the data + z->last_decoded_sample = header[6] + (header[7] << 8) + (header[8] << 16) + (header[9] << 24); + + // restore file state to where we were + set_file_offset(f, z->page_start); + return 1; +} + +// rarely used function to seek back to the preceding page while finding the +// start of a packet +static int go_to_page_before(stb_vorbis *f, unsigned int limit_offset) +{ + unsigned int previous_safe, end; + + // now we want to seek back 64K from the limit + if (limit_offset >= 65536 && limit_offset-65536 >= f->first_audio_page_offset) + previous_safe = limit_offset - 65536; + else + previous_safe = f->first_audio_page_offset; + + set_file_offset(f, previous_safe); + + while (vorbis_find_page(f, &end, NULL)) { + if (end >= limit_offset && stb_vorbis_get_file_offset(f) < limit_offset) + return 1; + set_file_offset(f, end); + } + + return 0; +} + +// implements the search logic for finding a page and starting decoding. if +// the function succeeds, current_loc_valid will be true and current_loc will +// be less than or equal to the provided sample number (the closer the +// better). +static int seek_to_sample_coarse(stb_vorbis *f, uint32 sample_number) +{ + ProbedPage left, right, mid; + int i, start_seg_with_known_loc, end_pos, page_start; + uint32 delta, stream_length, padding, last_sample_limit; + double offset = 0.0, bytes_per_sample = 0.0; + int probe = 0; + + // find the last page and validate the target sample + stream_length = stb_vorbis_stream_length_in_samples(f); + if (stream_length == 0) return error(f, VORBIS_seek_without_length); + if (sample_number > stream_length) return error(f, VORBIS_seek_invalid); + + // this is the maximum difference between the window-center (which is the + // actual granule position value), and the right-start (which the spec + // indicates should be the granule position (give or take one)). 
+ padding = ((f->blocksize_1 - f->blocksize_0) >> 2); + if (sample_number < padding) + last_sample_limit = 0; + else + last_sample_limit = sample_number - padding; + + left = f->p_first; + while (left.last_decoded_sample == ~0U) { + // (untested) the first page does not have a 'last_decoded_sample' + set_file_offset(f, left.page_end); + if (!get_seek_page_info(f, &left)) goto error; + } + + right = f->p_last; + assert(right.last_decoded_sample != ~0U); + + // starting from the start is handled differently + if (last_sample_limit <= left.last_decoded_sample) { + if (stb_vorbis_seek_start(f)) { + if (f->current_loc > sample_number) + return error(f, VORBIS_seek_failed); + return 1; + } + return 0; + } + + while (left.page_end != right.page_start) { + assert(left.page_end < right.page_start); + // search range in bytes + delta = right.page_start - left.page_end; + if (delta <= 65536) { + // there's only 64K left to search - handle it linearly + set_file_offset(f, left.page_end); + } else { + if (probe < 2) { + if (probe == 0) { + // first probe (interpolate) + double data_bytes = right.page_end - left.page_start; + bytes_per_sample = data_bytes / right.last_decoded_sample; + offset = left.page_start + bytes_per_sample * (last_sample_limit - left.last_decoded_sample); + } else { + // second probe (try to bound the other side) + double error = ((double) last_sample_limit - mid.last_decoded_sample) * bytes_per_sample; + if (error >= 0 && error < 8000) error = 8000; + if (error < 0 && error > -8000) error = -8000; + offset += error * 2; + } + + // ensure the offset is valid + if (offset < left.page_end) + offset = left.page_end; + if (offset > right.page_start - 65536) + offset = right.page_start - 65536; + + set_file_offset(f, (unsigned int) offset); + } else { + // binary search for large ranges (offset by 32K to ensure + // we don't hit the right page) + set_file_offset(f, left.page_end + (delta / 2) - 32768); + } + + if (!vorbis_find_page(f, NULL, NULL)) goto error; + } + + for (;;) { + if (!get_seek_page_info(f, &mid)) goto error; + if (mid.last_decoded_sample != ~0U) break; + // (untested) no frames end on this page + set_file_offset(f, mid.page_end); + assert(mid.page_start < right.page_start); + } + + // if we've just found the last page again then we're in a tricky file, + // and we're close enough (if it wasn't an interpolation probe). 
+ if (mid.page_start == right.page_start) { + if (probe >= 2 || delta <= 65536) + break; + } else { + if (last_sample_limit < mid.last_decoded_sample) + right = mid; + else + left = mid; + } + + ++probe; + } + + // seek back to start of the last packet + page_start = left.page_start; + set_file_offset(f, page_start); + if (!start_page(f)) return error(f, VORBIS_seek_failed); + end_pos = f->end_seg_with_known_loc; + assert(end_pos >= 0); + + for (;;) { + for (i = end_pos; i > 0; --i) + if (f->segments[i-1] != 255) + break; + + start_seg_with_known_loc = i; + + if (start_seg_with_known_loc > 0 || !(f->page_flag & PAGEFLAG_continued_packet)) + break; + + // (untested) the final packet begins on an earlier page + if (!go_to_page_before(f, page_start)) + goto error; + + page_start = stb_vorbis_get_file_offset(f); + if (!start_page(f)) goto error; + end_pos = f->segment_count - 1; + } + + // prepare to start decoding + f->current_loc_valid = FALSE; + f->last_seg = FALSE; + f->valid_bits = 0; + f->packet_bytes = 0; + f->bytes_in_seg = 0; + f->previous_length = 0; + f->next_seg = start_seg_with_known_loc; + + for (i = 0; i < start_seg_with_known_loc; i++) + skip(f, f->segments[i]); + + // start decoding (optimizable - this frame is generally discarded) + if (!vorbis_pump_first_frame(f)) + return 0; + if (f->current_loc > sample_number) + return error(f, VORBIS_seek_failed); + return 1; + +error: + // try to restore the file to a valid state + stb_vorbis_seek_start(f); + return error(f, VORBIS_seek_failed); +} + +// the same as vorbis_decode_initial, but without advancing +static int peek_decode_initial(vorb *f, int *p_left_start, int *p_left_end, int *p_right_start, int *p_right_end, int *mode) +{ + int bits_read, bytes_read; + + if (!vorbis_decode_initial(f, p_left_start, p_left_end, p_right_start, p_right_end, mode)) + return 0; + + // either 1 or 2 bytes were read, figure out which so we can rewind + bits_read = 1 + ilog(f->mode_count-1); + if (f->mode_config[*mode].blockflag) + bits_read += 2; + bytes_read = (bits_read + 7) / 8; + + f->bytes_in_seg += bytes_read; + f->packet_bytes -= bytes_read; + skip(f, -bytes_read); + if (f->next_seg == -1) + f->next_seg = f->segment_count - 1; + else + f->next_seg--; + f->valid_bits = 0; + + return 1; +} + +int stb_vorbis_seek_frame(stb_vorbis *f, unsigned int sample_number) +{ + uint32 max_frame_samples; + + if (IS_PUSH_MODE(f)) return error(f, VORBIS_invalid_api_mixing); + + // fast page-level search + if (!seek_to_sample_coarse(f, sample_number)) + return 0; + + assert(f->current_loc_valid); + assert(f->current_loc <= sample_number); + + // linear search for the relevant packet + max_frame_samples = (f->blocksize_1*3 - f->blocksize_0) >> 2; + while (f->current_loc < sample_number) { + int left_start, left_end, right_start, right_end, mode, frame_samples; + if (!peek_decode_initial(f, &left_start, &left_end, &right_start, &right_end, &mode)) + return error(f, VORBIS_seek_failed); + // calculate the number of samples returned by the next frame + frame_samples = right_start - left_start; + if (f->current_loc + frame_samples > sample_number) { + return 1; // the next frame will contain the sample + } else if (f->current_loc + frame_samples + max_frame_samples > sample_number) { + // there's a chance the frame after this could contain the sample + vorbis_pump_first_frame(f); + } else { + // this frame is too early to be relevant + f->current_loc += frame_samples; + f->previous_length = 0; + maybe_start_packet(f); + flush_packet(f); + } + } + // the next 
frame should start with the sample + if (f->current_loc != sample_number) return error(f, VORBIS_seek_failed); + return 1; +} + +int stb_vorbis_seek(stb_vorbis *f, unsigned int sample_number) +{ + if (!stb_vorbis_seek_frame(f, sample_number)) + return 0; + + if (sample_number != f->current_loc) { + int n; + uint32 frame_start = f->current_loc; + stb_vorbis_get_frame_float(f, &n, NULL); + assert(sample_number > frame_start); + assert(f->channel_buffer_start + (int) (sample_number-frame_start) <= f->channel_buffer_end); + f->channel_buffer_start += (sample_number - frame_start); + } + + return 1; +} + +int stb_vorbis_seek_start(stb_vorbis *f) +{ + if (IS_PUSH_MODE(f)) { return error(f, VORBIS_invalid_api_mixing); } + set_file_offset(f, f->first_audio_page_offset); + f->previous_length = 0; + f->first_decode = TRUE; + f->next_seg = -1; + return vorbis_pump_first_frame(f); +} + +unsigned int stb_vorbis_stream_length_in_samples(stb_vorbis *f) +{ + unsigned int restore_offset, previous_safe; + unsigned int end, last_page_loc; + + if (IS_PUSH_MODE(f)) return error(f, VORBIS_invalid_api_mixing); + if (!f->total_samples) { + unsigned int last; + uint32 lo,hi; + char header[6]; + + // first, store the current decode position so we can restore it + restore_offset = stb_vorbis_get_file_offset(f); + + // now we want to seek back 64K from the end (the last page must + // be at most a little less than 64K, but let's allow a little slop) + if (f->stream_len >= 65536 && f->stream_len-65536 >= f->first_audio_page_offset) + previous_safe = f->stream_len - 65536; + else + previous_safe = f->first_audio_page_offset; + + set_file_offset(f, previous_safe); + // previous_safe is now our candidate 'earliest known place that seeking + // to will lead to the final page' + + if (!vorbis_find_page(f, &end, &last)) { + // if we can't find a page, we're hosed! + f->error = VORBIS_cant_find_last_page; + f->total_samples = 0xffffffff; + goto done; + } + + // check if there are more pages + last_page_loc = stb_vorbis_get_file_offset(f); + + // stop when the last_page flag is set, not when we reach eof; + // this allows us to stop short of a 'file_section' end without + // explicitly checking the length of the section + while (!last) { + set_file_offset(f, end); + if (!vorbis_find_page(f, &end, &last)) { + // the last page we found didn't have the 'last page' flag + // set. whoops! + break; + } + previous_safe = last_page_loc+1; + last_page_loc = stb_vorbis_get_file_offset(f); + } + + set_file_offset(f, last_page_loc); + + // parse the header + getn(f, (unsigned char *)header, 6); + // extract the absolute granule position + lo = get32(f); + hi = get32(f); + if (lo == 0xffffffff && hi == 0xffffffff) { + f->error = VORBIS_cant_find_last_page; + f->total_samples = SAMPLE_unknown; + goto done; + } + if (hi) + lo = 0xfffffffe; // saturate + f->total_samples = lo; + + f->p_last.page_start = last_page_loc; + f->p_last.page_end = end; + f->p_last.last_decoded_sample = lo; + + done: + set_file_offset(f, restore_offset); + } + return f->total_samples == SAMPLE_unknown ? 
0 : f->total_samples; +} + +float stb_vorbis_stream_length_in_seconds(stb_vorbis *f) +{ + return stb_vorbis_stream_length_in_samples(f) / (float) f->sample_rate; +} + + + +int stb_vorbis_get_frame_float(stb_vorbis *f, int *channels, float ***output) +{ + int len, right,left,i; + if (IS_PUSH_MODE(f)) return error(f, VORBIS_invalid_api_mixing); + + if (!vorbis_decode_packet(f, &len, &left, &right)) { + f->channel_buffer_start = f->channel_buffer_end = 0; + return 0; + } + + len = vorbis_finish_frame(f, len, left, right); + for (i=0; i < f->channels; ++i) + f->outputs[i] = f->channel_buffers[i] + left; + + f->channel_buffer_start = left; + f->channel_buffer_end = left+len; + + if (channels) *channels = f->channels; + if (output) *output = f->outputs; + return len; +} + +#ifndef STB_VORBIS_NO_STDIO + +stb_vorbis * stb_vorbis_open_file_section(FILE *file, int close_on_free, int *error, const stb_vorbis_alloc *alloc, unsigned int length) +{ + stb_vorbis *f, p; + vorbis_init(&p, alloc); + p.f = file; + p.f_start = (uint32) ftell(file); + p.stream_len = length; + p.close_on_free = close_on_free; + if (start_decoder(&p)) { + f = vorbis_alloc(&p); + if (f) { + *f = p; + vorbis_pump_first_frame(f); + return f; + } + } + if (error) *error = p.error; + vorbis_deinit(&p); + return NULL; +} + +stb_vorbis * stb_vorbis_open_file(FILE *file, int close_on_free, int *error, const stb_vorbis_alloc *alloc) +{ + unsigned int len, start; + start = (unsigned int) ftell(file); + fseek(file, 0, SEEK_END); + len = (unsigned int) (ftell(file) - start); + fseek(file, start, SEEK_SET); + return stb_vorbis_open_file_section(file, close_on_free, error, alloc, len); +} + +stb_vorbis * stb_vorbis_open_filename(const char *filename, int *error, const stb_vorbis_alloc *alloc) +{ + FILE *f; +#if defined(_WIN32) && defined(__STDC_WANT_SECURE_LIB__) + if (0 != fopen_s(&f, filename, "rb")) + f = NULL; +#else + f = fopen(filename, "rb"); +#endif + if (f) + return stb_vorbis_open_file(f, TRUE, error, alloc); + if (error) *error = VORBIS_file_open_failure; + return NULL; +} +#endif // STB_VORBIS_NO_STDIO + +stb_vorbis * stb_vorbis_open_memory(const unsigned char *data, int len, int *error, const stb_vorbis_alloc *alloc) +{ + stb_vorbis *f, p; + if (data == NULL) return NULL; + vorbis_init(&p, alloc); + p.stream = (uint8 *) data; + p.stream_end = (uint8 *) data + len; + p.stream_start = (uint8 *) p.stream; + p.stream_len = len; + p.push_mode = FALSE; + if (start_decoder(&p)) { + f = vorbis_alloc(&p); + if (f) { + *f = p; + vorbis_pump_first_frame(f); + if (error) *error = VORBIS__no_error; + return f; + } + } + if (error) *error = p.error; + vorbis_deinit(&p); + return NULL; +} + +#ifndef STB_VORBIS_NO_INTEGER_CONVERSION +#define PLAYBACK_MONO 1 +#define PLAYBACK_LEFT 2 +#define PLAYBACK_RIGHT 4 + +#define L (PLAYBACK_LEFT | PLAYBACK_MONO) +#define C (PLAYBACK_LEFT | PLAYBACK_RIGHT | PLAYBACK_MONO) +#define R (PLAYBACK_RIGHT | PLAYBACK_MONO) + +static int8 channel_position[7][6] = +{ + { 0 }, + { C }, + { L, R }, + { L, C, R }, + { L, R, L, R }, + { L, C, R, L, R }, + { L, C, R, L, R, C }, +}; + + +#ifndef STB_VORBIS_NO_FAST_SCALED_FLOAT + typedef union { + float f; + int i; + } float_conv; + typedef char stb_vorbis_float_size_test[sizeof(float)==4 && sizeof(int) == 4]; + #define FASTDEF(x) float_conv x + // add (1<<23) to convert to int, then divide by 2^SHIFT, then add 0.5/2^SHIFT to round + #define MAGIC(SHIFT) (1.5f * (1 << (23-SHIFT)) + 0.5f/(1 << SHIFT)) + #define ADDEND(SHIFT) (((150-SHIFT) << 23) + (1 << 22)) + #define 
FAST_SCALED_FLOAT_TO_INT(temp,x,s) (temp.f = (x) + MAGIC(s), temp.i - ADDEND(s)) + #define check_endianness() +#else + #define FAST_SCALED_FLOAT_TO_INT(temp,x,s) ((int) ((x) * (1 << (s)))) + #define check_endianness() + #define FASTDEF(x) +#endif + +static void copy_samples(short *dest, float *src, int len) +{ + int i; + check_endianness(); + for (i=0; i < len; ++i) { + FASTDEF(temp); + int v = FAST_SCALED_FLOAT_TO_INT(temp, src[i],15); + if ((unsigned int) (v + 32768) > 65535) + v = v < 0 ? -32768 : 32767; + dest[i] = v; + } +} + +static void compute_samples(int mask, short *output, int num_c, float **data, int d_offset, int len) +{ + #define BUFFER_SIZE 32 + float buffer[BUFFER_SIZE]; + int i,j,o,n = BUFFER_SIZE; + check_endianness(); + for (o = 0; o < len; o += BUFFER_SIZE) { + memset(buffer, 0, sizeof(buffer)); + if (o + n > len) n = len - o; + for (j=0; j < num_c; ++j) { + if (channel_position[num_c][j] & mask) { + for (i=0; i < n; ++i) + buffer[i] += data[j][d_offset+o+i]; + } + } + for (i=0; i < n; ++i) { + FASTDEF(temp); + int v = FAST_SCALED_FLOAT_TO_INT(temp,buffer[i],15); + if ((unsigned int) (v + 32768) > 65535) + v = v < 0 ? -32768 : 32767; + output[o+i] = v; + } + } +} + +static void compute_stereo_samples(short *output, int num_c, float **data, int d_offset, int len) +{ + #define BUFFER_SIZE 32 + float buffer[BUFFER_SIZE]; + int i,j,o,n = BUFFER_SIZE >> 1; + // o is the offset in the source data + check_endianness(); + for (o = 0; o < len; o += BUFFER_SIZE >> 1) { + // o2 is the offset in the output data + int o2 = o << 1; + memset(buffer, 0, sizeof(buffer)); + if (o + n > len) n = len - o; + for (j=0; j < num_c; ++j) { + int m = channel_position[num_c][j] & (PLAYBACK_LEFT | PLAYBACK_RIGHT); + if (m == (PLAYBACK_LEFT | PLAYBACK_RIGHT)) { + for (i=0; i < n; ++i) { + buffer[i*2+0] += data[j][d_offset+o+i]; + buffer[i*2+1] += data[j][d_offset+o+i]; + } + } else if (m == PLAYBACK_LEFT) { + for (i=0; i < n; ++i) { + buffer[i*2+0] += data[j][d_offset+o+i]; + } + } else if (m == PLAYBACK_RIGHT) { + for (i=0; i < n; ++i) { + buffer[i*2+1] += data[j][d_offset+o+i]; + } + } + } + for (i=0; i < (n<<1); ++i) { + FASTDEF(temp); + int v = FAST_SCALED_FLOAT_TO_INT(temp,buffer[i],15); + if ((unsigned int) (v + 32768) > 65535) + v = v < 0 ? -32768 : 32767; + output[o2+i] = v; + } + } +} + +static void convert_samples_short(int buf_c, short **buffer, int b_offset, int data_c, float **data, int d_offset, int samples) +{ + int i; + if (buf_c != data_c && buf_c <= 2 && data_c <= 6) { + static int channel_selector[3][2] = { {0}, {PLAYBACK_MONO}, {PLAYBACK_LEFT, PLAYBACK_RIGHT} }; + for (i=0; i < buf_c; ++i) + compute_samples(channel_selector[buf_c][i], buffer[i]+b_offset, data_c, data, d_offset, samples); + } else { + int limit = buf_c < data_c ? 
buf_c : data_c; + for (i=0; i < limit; ++i) + copy_samples(buffer[i]+b_offset, data[i]+d_offset, samples); + for ( ; i < buf_c; ++i) + memset(buffer[i]+b_offset, 0, sizeof(short) * samples); + } +} + +int stb_vorbis_get_frame_short(stb_vorbis *f, int num_c, short **buffer, int num_samples) +{ + float **output = NULL; + int len = stb_vorbis_get_frame_float(f, NULL, &output); + if (len > num_samples) len = num_samples; + if (len) + convert_samples_short(num_c, buffer, 0, f->channels, output, 0, len); + return len; +} + +static void convert_channels_short_interleaved(int buf_c, short *buffer, int data_c, float **data, int d_offset, int len) +{ + int i; + check_endianness(); + if (buf_c != data_c && buf_c <= 2 && data_c <= 6) { + assert(buf_c == 2); + for (i=0; i < buf_c; ++i) + compute_stereo_samples(buffer, data_c, data, d_offset, len); + } else { + int limit = buf_c < data_c ? buf_c : data_c; + int j; + for (j=0; j < len; ++j) { + for (i=0; i < limit; ++i) { + FASTDEF(temp); + float f = data[i][d_offset+j]; + int v = FAST_SCALED_FLOAT_TO_INT(temp, f,15);//data[i][d_offset+j],15); + if ((unsigned int) (v + 32768) > 65535) + v = v < 0 ? -32768 : 32767; + *buffer++ = v; + } + for ( ; i < buf_c; ++i) + *buffer++ = 0; + } + } +} + +int stb_vorbis_get_frame_short_interleaved(stb_vorbis *f, int num_c, short *buffer, int num_shorts) +{ + float **output; + int len; + if (num_c == 1) return stb_vorbis_get_frame_short(f,num_c,&buffer, num_shorts); + len = stb_vorbis_get_frame_float(f, NULL, &output); + if (len) { + if (len*num_c > num_shorts) len = num_shorts / num_c; + convert_channels_short_interleaved(num_c, buffer, f->channels, output, 0, len); + } + return len; +} + +int stb_vorbis_get_samples_short_interleaved(stb_vorbis *f, int channels, short *buffer, int num_shorts) +{ + float **outputs; + int len = num_shorts / channels; + int n=0; + int z = f->channels; + if (z > channels) z = channels; + while (n < len) { + int k = f->channel_buffer_end - f->channel_buffer_start; + if (n+k >= len) k = len - n; + if (k) + convert_channels_short_interleaved(channels, buffer, f->channels, f->channel_buffers, f->channel_buffer_start, k); + buffer += k*channels; + n += k; + f->channel_buffer_start += k; + if (n == len) break; + if (!stb_vorbis_get_frame_float(f, NULL, &outputs)) break; + } + return n; +} + +int stb_vorbis_get_samples_short(stb_vorbis *f, int channels, short **buffer, int len) +{ + float **outputs; + int n=0; + int z = f->channels; + if (z > channels) z = channels; + while (n < len) { + int k = f->channel_buffer_end - f->channel_buffer_start; + if (n+k >= len) k = len - n; + if (k) + convert_samples_short(channels, buffer, n, f->channels, f->channel_buffers, f->channel_buffer_start, k); + n += k; + f->channel_buffer_start += k; + if (n == len) break; + if (!stb_vorbis_get_frame_float(f, NULL, &outputs)) break; + } + return n; +} + +#ifndef STB_VORBIS_NO_STDIO +int stb_vorbis_decode_filename(const char *filename, int *channels, int *sample_rate, short **output) +{ + int data_len, offset, total, limit, error; + short *data; + stb_vorbis *v = stb_vorbis_open_filename(filename, &error, NULL); + if (v == NULL) return -1; + limit = v->channels * 4096; + *channels = v->channels; + if (sample_rate) + *sample_rate = v->sample_rate; + offset = data_len = 0; + total = limit; + data = (short *) malloc(total * sizeof(*data)); + if (data == NULL) { + stb_vorbis_close(v); + return -2; + } + for (;;) { + int n = stb_vorbis_get_frame_short_interleaved(v, v->channels, data+offset, total-offset); + if (n == 0) 
break; + data_len += n; + offset += n * v->channels; + if (offset + limit > total) { + short *data2; + total *= 2; + data2 = (short *) realloc(data, total * sizeof(*data)); + if (data2 == NULL) { + free(data); + stb_vorbis_close(v); + return -2; + } + data = data2; + } + } + *output = data; + stb_vorbis_close(v); + return data_len; +} +#endif // NO_STDIO + +int stb_vorbis_decode_memory(const uint8 *mem, int len, int *channels, int *sample_rate, short **output) +{ + int data_len, offset, total, limit, error; + short *data; + stb_vorbis *v = stb_vorbis_open_memory(mem, len, &error, NULL); + if (v == NULL) return -1; + limit = v->channels * 4096; + *channels = v->channels; + if (sample_rate) + *sample_rate = v->sample_rate; + offset = data_len = 0; + total = limit; + data = (short *) malloc(total * sizeof(*data)); + if (data == NULL) { + stb_vorbis_close(v); + return -2; + } + for (;;) { + int n = stb_vorbis_get_frame_short_interleaved(v, v->channels, data+offset, total-offset); + if (n == 0) break; + data_len += n; + offset += n * v->channels; + if (offset + limit > total) { + short *data2; + total *= 2; + data2 = (short *) realloc(data, total * sizeof(*data)); + if (data2 == NULL) { + free(data); + stb_vorbis_close(v); + return -2; + } + data = data2; + } + } + *output = data; + stb_vorbis_close(v); + return data_len; +} +#endif // STB_VORBIS_NO_INTEGER_CONVERSION + +int stb_vorbis_get_samples_float_interleaved(stb_vorbis *f, int channels, float *buffer, int num_floats) +{ + float **outputs; + int len = num_floats / channels; + int n=0; + int z = f->channels; + if (z > channels) z = channels; + while (n < len) { + int i,j; + int k = f->channel_buffer_end - f->channel_buffer_start; + if (n+k >= len) k = len - n; + for (j=0; j < k; ++j) { + for (i=0; i < z; ++i) + *buffer++ = f->channel_buffers[i][f->channel_buffer_start+j]; + for ( ; i < channels; ++i) + *buffer++ = 0; + } + n += k; + f->channel_buffer_start += k; + if (n == len) + break; + if (!stb_vorbis_get_frame_float(f, NULL, &outputs)) + break; + } + return n; +} + +int stb_vorbis_get_samples_float(stb_vorbis *f, int channels, float **buffer, int num_samples) +{ + float **outputs; + int n=0; + int z = f->channels; + if (z > channels) z = channels; + while (n < num_samples) { + int i; + int k = f->channel_buffer_end - f->channel_buffer_start; + if (n+k >= num_samples) k = num_samples - n; + if (k) { + for (i=0; i < z; ++i) + memcpy(buffer[i]+n, f->channel_buffers[i]+f->channel_buffer_start, sizeof(float)*k); + for ( ; i < channels; ++i) + memset(buffer[i]+n, 0, sizeof(float) * k); + } + n += k; + f->channel_buffer_start += k; + if (n == num_samples) + break; + if (!stb_vorbis_get_frame_float(f, NULL, &outputs)) + break; + } + return n; +} +#endif // STB_VORBIS_NO_PULLDATA_API + +/* Version history + 1.17 - 2019-07-08 - fix CVE-2019-13217, -13218, -13219, -13220, -13221, -13222, -13223 + found with Mayhem by ForAllSecure + 1.16 - 2019-03-04 - fix warnings + 1.15 - 2019-02-07 - explicit failure if Ogg Skeleton data is found + 1.14 - 2018-02-11 - delete bogus dealloca usage + 1.13 - 2018-01-29 - fix truncation of last frame (hopefully) + 1.12 - 2017-11-21 - limit residue begin/end to blocksize/2 to avoid large temp allocs in bad/corrupt files + 1.11 - 2017-07-23 - fix MinGW compilation + 1.10 - 2017-03-03 - more robust seeking; fix negative ilog(); clear error in open_memory + 1.09 - 2016-04-04 - back out 'avoid discarding last frame' fix from previous version + 1.08 - 2016-04-02 - fixed multiple warnings; fix setup memory leaks; + avoid 
discarding last frame of audio data + 1.07 - 2015-01-16 - fixed some warnings, fix mingw, const-correct API + some more crash fixes when out of memory or with corrupt files + 1.06 - 2015-08-31 - full, correct support for seeking API (Dougall Johnson) + some crash fixes when out of memory or with corrupt files + 1.05 - 2015-04-19 - don't define __forceinline if it's redundant + 1.04 - 2014-08-27 - fix missing const-correct case in API + 1.03 - 2014-08-07 - Warning fixes + 1.02 - 2014-07-09 - Declare qsort compare function _cdecl on windows + 1.01 - 2014-06-18 - fix stb_vorbis_get_samples_float + 1.0 - 2014-05-26 - fix memory leaks; fix warnings; fix bugs in multichannel + (API change) report sample rate for decode-full-file funcs + 0.99996 - bracket #include for macintosh compilation by Laurent Gomila + 0.99995 - use union instead of pointer-cast for fast-float-to-int to avoid alias-optimization problem + 0.99994 - change fast-float-to-int to work in single-precision FPU mode, remove endian-dependence + 0.99993 - remove assert that fired on legal files with empty tables + 0.99992 - rewind-to-start + 0.99991 - bugfix to stb_vorbis_get_samples_short by Bernhard Wodo + 0.9999 - (should have been 0.99990) fix no-CRT support, compiling as C++ + 0.9998 - add a full-decode function with a memory source + 0.9997 - fix a bug in the read-from-FILE case in 0.9996 addition + 0.9996 - query length of vorbis stream in samples/seconds + 0.9995 - bugfix to another optimization that only happened in certain files + 0.9994 - bugfix to one of the optimizations that caused significant (but inaudible?) errors + 0.9993 - performance improvements; runs in 99% to 104% of time of reference implementation + 0.9992 - performance improvement of IMDCT; now performs close to reference implementation + 0.9991 - performance improvement of IMDCT + 0.999 - (should have been 0.9990) performance improvement of IMDCT + 0.998 - no-CRT support from Casey Muratori + 0.997 - bugfixes for bugs found by Terje Mathisen + 0.996 - bugfix: fast-huffman decode initialized incorrectly for sparse codebooks; fixing gives 10% speedup - found by Terje Mathisen + 0.995 - bugfix: fix to 'effective' overrun detection - found by Terje Mathisen + 0.994 - bugfix: garbage decode on final VQ symbol of a non-multiple - found by Terje Mathisen + 0.993 - bugfix: pushdata API required 1 extra byte for empty page (failed to consume final page if empty) - found by Terje Mathisen + 0.992 - fixes for MinGW warning + 0.991 - turn fast-float-conversion on by default + 0.990 - fix push-mode seek recovery if you seek into the headers + 0.98b - fix to bad release of 0.98 + 0.98 - fix push-mode seek recovery; robustify float-to-int and support non-fast mode + 0.97 - builds under c++ (typecasting, don't use 'class' keyword) + 0.96 - somehow MY 0.95 was right, but the web one was wrong, so here's my 0.95 rereleased as 0.96, fixes a typo in the clamping code + 0.95 - clamping code for 16-bit functions + 0.94 - not publically released + 0.93 - fixed all-zero-floor case (was decoding garbage) + 0.92 - fixed a memory leak + 0.91 - conditional compiles to omit parts of the API and the infrastructure to support them: STB_VORBIS_NO_PULLDATA_API, STB_VORBIS_NO_PUSHDATA_API, STB_VORBIS_NO_STDIO, STB_VORBIS_NO_INTEGER_CONVERSION + 0.90 - first public release +*/ + +#endif // STB_VORBIS_HEADER_ONLY + + +/* +------------------------------------------------------------------------------ +This software is available under 2 licenses -- choose whichever you prefer. 
+------------------------------------------------------------------------------ +ALTERNATIVE A - MIT License +Copyright (c) 2017 Sean Barrett +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +------------------------------------------------------------------------------ +ALTERNATIVE B - Public Domain (www.unlicense.org) +This is free and unencumbered software released into the public domain. +Anyone is free to copy, modify, publish, use, compile, sell, or distribute this +software, either in source code form or as a compiled binary, for any purpose, +commercial or non-commercial, and by any means. +In jurisdictions that recognize copyright laws, the author or authors of this +software dedicate any and all copyright interest in the software to the public +domain. We make this dedication for the benefit of the public at large and to +the detriment of our heirs and successors. We intend this dedication to be an +overt act of relinquishment in perpetuity of all present and future rights to +this software under copyright law. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+------------------------------------------------------------------------------ +*/ diff --git a/steam/steamworks_thin.h b/steam/steamworks_thin.h new file mode 100644 index 0000000..5c6397b --- /dev/null +++ b/steam/steamworks_thin.h @@ -0,0 +1,848 @@ +/* + steamworks_thin.h - simple wrapper / macro set for building steamworks apps in pure C + Usage: + Put the latest steam_api.dll in your build folder + link against -lsteam_api + + all functions are prefixed with sw_ +*/ + +#ifndef STEAMWORKS_H +#define STEAMWORKS_H + +#include +#include + +GLuint _localplayer_image; +char _localplayer_name[128]; + +#ifndef NO_STEAM + +#ifndef VOYAGER_RELEASE_MODE + #define STRUCTURE_PACK_CHECK +#endif + +/*______________________________________________________________________________________________________________________________ + + ENUMS +______________________________________________________________________________________________________________________________*/ + +typedef enum{ + + k_ESteamIPTypeIPv4 = 0, + k_ESteamIPTypeIPv6 = 1, + +} ESteamIPType_t; + +typedef enum{ + + k_ESNetSocketConnectionTypeNotConnected = 0, + k_ESNetSocketConnectionTypeUDP = 1, + k_ESNetSocketConnectionTypeUDPRelay = 2, + +} ESNetSocketConnectionType_t; + +//----------------------------------------------------------------------------- +// Purpose: Base values for callback identifiers, each callback must +// have a unique ID. +//----------------------------------------------------------------------------- + +enum { k_iSteamUserCallbacks = 100 }; +enum { k_iSteamGameServerCallbacks = 200 }; +enum { k_iSteamFriendsCallbacks = 300 }; +enum { k_iSteamBillingCallbacks = 400 }; +enum { k_iSteamMatchmakingCallbacks = 500 }; +enum { k_iSteamContentServerCallbacks = 600 }; +enum { k_iSteamUtilsCallbacks = 700 }; +enum { k_iClientFriendsCallbacks = 800 }; +enum { k_iClientUserCallbacks = 900 }; +enum { k_iSteamAppsCallbacks = 1000 }; +enum { k_iSteamUserStatsCallbacks = 1100 }; +enum { k_iSteamNetworkingCallbacks = 1200 }; +enum { k_iSteamNetworkingSocketsCallbacks = 1220 }; +enum { k_iSteamNetworkingMessagesCallbacks = 1250 }; +enum { k_iSteamNetworkingUtilsCallbacks = 1280 }; +enum { k_iClientRemoteStorageCallbacks = 1300 }; +enum { k_iClientDepotBuilderCallbacks = 1400 }; +enum { k_iSteamGameServerItemsCallbacks = 1500 }; +enum { k_iClientUtilsCallbacks = 1600 }; +enum { k_iSteamGameCoordinatorCallbacks = 1700 }; +enum { k_iSteamGameServerStatsCallbacks = 1800 }; +enum { k_iSteam2AsyncCallbacks = 1900 }; +enum { k_iSteamGameStatsCallbacks = 2000 }; +enum { k_iClientHTTPCallbacks = 2100 }; +enum { k_iClientScreenshotsCallbacks = 2200 }; +enum { k_iSteamScreenshotsCallbacks = 2300 }; +enum { k_iClientAudioCallbacks = 2400 }; +enum { k_iClientUnifiedMessagesCallbacks = 2500 }; +enum { k_iSteamStreamLauncherCallbacks = 2600 }; +enum { k_iClientControllerCallbacks = 2700 }; +enum { k_iSteamControllerCallbacks = 2800 }; +enum { k_iClientParentalSettingsCallbacks = 2900 }; +enum { k_iClientDeviceAuthCallbacks = 3000 }; +enum { k_iClientNetworkDeviceManagerCallbacks = 3100 }; +enum { k_iClientMusicCallbacks = 3200 }; +enum { k_iClientRemoteClientManagerCallbacks = 3300 }; +enum { k_iClientUGCCallbacks = 3400 }; +enum { k_iSteamStreamClientCallbacks = 3500 }; +enum { k_IClientProductBuilderCallbacks = 3600 }; +enum { k_iClientShortcutsCallbacks = 3700 }; +enum { k_iClientRemoteControlManagerCallbacks = 3800 }; +enum { k_iSteamAppListCallbacks = 3900 }; +enum { k_iSteamMusicCallbacks = 4000 }; +enum { k_iSteamMusicRemoteCallbacks = 4100 }; 
+enum { k_iClientVRCallbacks = 4200 }; +enum { k_iClientGameNotificationCallbacks = 4300 }; +enum { k_iSteamGameNotificationCallbacks = 4400 }; +enum { k_iSteamHTMLSurfaceCallbacks = 4500 }; +enum { k_iClientVideoCallbacks = 4600 }; +enum { k_iClientInventoryCallbacks = 4700 }; +enum { k_iClientBluetoothManagerCallbacks = 4800 }; +enum { k_iClientSharedConnectionCallbacks = 4900 }; +enum { k_ISteamParentalSettingsCallbacks = 5000 }; +enum { k_iClientShaderCallbacks = 5100 }; +enum { k_iSteamGameSearchCallbacks = 5200 }; +enum { k_iSteamPartiesCallbacks = 5300 }; +enum { k_iClientPartiesCallbacks = 5400 }; +enum { k_iSteamSTARCallbacks = 5500 }; +enum { k_iClientSTARCallbacks = 5600 }; +enum { k_iSteamRemotePlayCallbacks = 5700 }; +enum { k_iClientCompatCallbacks = 5800 }; +enum { k_iSteamChatCallbacks = 5900 }; + +// Steam universes. Each universe is a self-contained Steam instance. +typedef enum { + k_EUniverseInvalid = 0, + k_EUniversePublic = 1, + k_EUniverseBeta = 2, + k_EUniverseInternal = 3, + k_EUniverseDev = 4, + // k_EUniverseRC = 5, // no such universe anymore + k_EUniverseMax +} EUniverse_t; + +typedef enum{ + // Basic UDP send. Packets can't be bigger than 1200 bytes (your typical MTU size). Can be lost, or arrive out of order (rare). + // The sending API does have some knowledge of the underlying connection, so if there is no NAT-traversal accomplished or + // there is a recognized adjustment happening on the connection, the packet will be batched until the connection is open again. + k_EP2PSendUnreliable = 0, + + // As above, but if the underlying p2p connection isn't yet established the packet will just be thrown away. Using this on the first + // packet sent to a remote host almost guarantees the packet will be dropped. + // This is only really useful for kinds of data that should never buffer up, i.e. voice payload packets + k_EP2PSendUnreliableNoDelay = 1, + + // Reliable message send. Can send up to 1MB of data in a single message. + // Does fragmentation/re-assembly of messages under the hood, as well as a sliding window for efficient sends of large chunks of data. + k_EP2PSendReliable = 2, + + // As above, but applies the Nagle algorithm to the send - sends will accumulate + // until the current MTU size (typically ~1200 bytes, but can change) or ~200ms has passed (Nagle algorithm). + // Useful if you want to send a set of smaller messages but have the coalesced into a single packet + // Since the reliable stream is all ordered, you can do several small message sends with k_EP2PSendReliableWithBuffering and then + // do a normal k_EP2PSendReliable to force all the buffered data to be sent. 
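+    /* Sketch of the batching pattern described above, using the sw_ wrappers
+       defined later in this header (peer and the message structs are
+       placeholders):
+
+           sw_SendP2PPacket( peer, &head, sizeof(head), k_EP2PSendReliableWithBuffering, 0 );
+           sw_SendP2PPacket( peer, &body, sizeof(body), k_EP2PSendReliableWithBuffering, 0 );
+           sw_SendP2PPacket( peer, &tail, sizeof(tail), k_EP2PSendReliable, 0 );   // flush
+    */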
+ k_EP2PSendReliableWithBuffering = 3, + +} EP2PSend_t; + +// list of possible errors returned by SendP2PPacket() API +// these will be posted in the P2PSessionConnectFail_t callback +typedef enum { + k_EP2PSessionErrorNone = 0, + k_EP2PSessionErrorNotRunningApp = 1, // target is not running the same game + k_EP2PSessionErrorNoRightsToApp = 2, // local user doesn't own the app that is running + k_EP2PSessionErrorDestinationNotLoggedIn = 3, // target user isn't connected to Steam + k_EP2PSessionErrorTimeout = 4, // target isn't responding, perhaps not calling AcceptP2PSessionWithUser() + // corporate firewalls can also block this (NAT traversal is not firewall traversal) + // make sure that UDP ports 3478, 4379, and 4380 are open in an outbound direction + k_EP2PSessionErrorMax = 5 +} EP2PSessionError_t; + +typedef enum { + + k_EFriendFlagNone = 0x00, + k_EFriendFlagBlocked = 0x01, + k_EFriendFlagFriendshipRequested = 0x02, + k_EFriendFlagImmediate = 0x04, // "regular" friend + k_EFriendFlagClanMember = 0x08, + k_EFriendFlagOnGameServer = 0x10, + // k_EFriendFlagHasPlayedWith = 0x20, // not currently used + // k_EFriendFlagFriendOfFriend = 0x40, // not currently used + k_EFriendFlagRequestingFriendship = 0x80, + k_EFriendFlagRequestingInfo = 0x100, + k_EFriendFlagIgnored = 0x200, + k_EFriendFlagIgnoredFriend = 0x400, + // k_EFriendFlagSuggested = 0x800, // not used + k_EFriendFlagChatMember = 0x1000, + k_EFriendFlagAll = 0xFFFF, + +} EFriendFlags_t; + +// size limits on Rich Presence data +enum { k_cchMaxRichPresenceKeys = 30 }; +enum { k_cchMaxRichPresenceKeyLength = 64 }; +enum { k_cchMaxRichPresenceValueLength = 256 }; + +/*______________________________________________________________________________________________________________________________ + + STEAM TYPES +______________________________________________________________________________________________________________________________*/ + +// Redefube class pointers to void +typedef void ISteamFriends; +typedef void ISteamUserStats; +typedef void ISteamUtils; +typedef void ISteamUser; +typedef void ISteamNetworking; + +typedef int32_t HSteamPipe; +typedef int32_t HSteamUser; + +typedef int E_iCallBack_t; + +typedef uint32_t SNetSocket_t; // CreateP2PConnectionSocket() +typedef uint32_t SNetListenSocket_t; // CreateListenSocket() + +typedef uint64_t uint64_steamid; +typedef uint64_t SteamAPICall_t; + +/*______________________________________________________________________________________________________________________________ + + PACKING: 1 Byte +______________________________________________________________________________________________________________________________*/ + +#pragma pack( push, 1 ) + +typedef struct +{ + // 64 bits total + union { + struct SteamIDComponent_t + { +#ifdef VALVE_BIG_ENDIAN + EUniverse_t m_EUniverse : 8; // universe this account belongs to + unsigned int m_EAccountType : 4; // type of account - can't show as EAccountType, due to signed / unsigned difference + unsigned int m_unAccountInstance : 20; // dynamic instance ID + uint32_t m_unAccountID : 32; // unique account identifier +#else + uint32_t m_unAccountID : 32; // unique account identifier + unsigned int m_unAccountInstance : 20; // dynamic instance ID + unsigned int m_EAccountType : 4; // type of account - can't show as EAccountType, due to signed / unsigned difference + EUniverse_t m_EUniverse : 8; // universe this account belongs to +#endif + } m_comp; + + uint64_t m_unAll64Bits; + }; +} CSteamID; + +typedef struct{ + union { + + uint32_t 
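+    /* The CSteamID union above lets the flat API's packed 64-bit ids be taken
+       apart without helper functions (sketch; relies on C11 anonymous unions):
+
+           CSteamID id;
+           id.m_unAll64Bits = sw_GetSteamID();          // local player, for example
+           uint32_t account = id.m_comp.m_unAccountID;
+           EUniverse_t uni  = id.m_comp.m_EUniverse;
+    */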
m_unIPv4; // Host order + uint8_t m_rgubIPv6[16]; // Network order! Same as inaddr_in6. (0011:2233:4455:6677:8899:aabb:ccdd:eeff) + + // Internal use only + uint64_t m_ipv6Qword[2]; // big endian + + }; + + ESteamIPType_t m_eType; + +} SteamIPAddress_t; + +#pragma pack(pop) + +/*______________________________________________________________________________________________________________________________ + + PACKING: Some kind of strange alignment thing +______________________________________________________________________________________________________________________________*/ + +#if defined(__linux__) || defined(__APPLE__) +// The 32-bit version of gcc has the alignment requirement for uint64 and double set to +// 4 meaning that even with #pragma pack(8) these types will only be four-byte aligned. +// The 64-bit version of gcc has the alignment requirement for these types set to +// 8 meaning that unless we use #pragma pack(4) our structures will get bigger. +// The 64-bit structure packing has to match the 32-bit structure packing for each platform. + #define VALVE_CALLBACK_PACK_SMALL +#else + #define VALVE_CALLBACK_PACK_LARGE +#endif + +#if defined( VALVE_CALLBACK_PACK_SMALL ) + #pragma pack( push, 4 ) +#elif defined( VALVE_CALLBACK_PACK_LARGE ) + #pragma pack( push, 8 ) +#else + #error steam_api_common.h should define VALVE_CALLBACK_PACK_xxx +#endif + +typedef struct{ + + uint8_t m_bConnectionActive; // true if we've got an active open connection + uint8_t m_bConnecting; // true if we're currently trying to establish a connection + uint8_t m_eP2PSessionError; // last error recorded (see enum above) + uint8_t m_bUsingRelay; // true if it's going through a relay server (TURN) + int32_t m_nBytesQueuedForSend; + int32_t m_nPacketsQueuedForSend; + uint32_t m_nRemoteIP; // potential IP:Port of remote host. Could be TURN server. + uint16_t m_nRemotePort; // Only exists for compatibility with older authentication api's + +} P2PSessionState_t; + +typedef struct { + + HSteamUser m_hSteamUser; // Specific user to whom this callback applies. + int m_iCallback; // Callback identifier. (Corresponds to the k_iCallback enum in the callback structure.) 
+ uint8_t *m_pubParam; // Points to the callback structure + int m_cubParam; // Size of the data pointed to by m_pubParam + +} CallbackMsg_t; + +typedef struct { + + SteamAPICall_t m_hAsyncCall; + int m_iCallback; + uint32_t m_cubParam; + +} SteamAPICallCompleted_t; +#define SW_CBID_SteamAPICallCompleted (k_iSteamUtilsCallbacks + 3) + + +// callback notification - a user wants to talk to us over the P2P channel via the SendP2PPacket() API +// in response, a call to AcceptP2PPacketsFromUser() needs to be made, if you want to talk with them +typedef struct { + + CSteamID m_steamIDRemote; // user who wants to talk to us + +} P2PSessionRequest_t; +#define SW_CBID_P2PSessionRequest (k_iSteamNetworkingCallbacks + 2) + + +// callback notification - packets can't get through to the specified user via the SendP2PPacket() API +// all packets queued packets unsent at this point will be dropped +// further attempts to send will retry making the connection (but will be dropped if we fail again) +typedef struct { + + CSteamID m_steamIDRemote; // user we were sending packets to + uint8_t m_eP2PSessionError; // EP2PSessionError indicating why we're having trouble + +} P2PSessionConnectFail_t; +#define SW_CBID_P2PSessionConnectFail (k_iSteamNetworkingCallbacks + 3) + +// callback notification - status of a socket has changed +// used as part of the CreateListenSocket() / CreateP2PConnectionSocket() +typedef struct { + + SNetSocket_t m_hSocket; // the socket used to send/receive data to the remote host + SNetListenSocket_t m_hListenSocket; // this is the server socket that we were listening on; NULL if this was an outgoing connection + CSteamID m_steamIDRemote; // remote steamID we have connected to, if it has one + int m_eSNetSocketState; // socket state, ESNetSocketState + +} SocketStatusCallback_t; +#define SW_CBID_SocketStatusCallback (k_iSteamNetworkingCallbacks + 1) + +//----------------------------------------------------------------------------- +// Purpose: called when the user tries to join a game from their friends list +// rich presence will have been set with the "connect" key which is set here +//----------------------------------------------------------------------------- +typedef struct { + CSteamID m_steamIDFriend; // the friend they did the join via (will be invalid if not directly via a friend) + char m_rgchConnect[k_cchMaxRichPresenceValueLength]; +} GameRichPresenceJoinRequested_t; +#define SW_CBID_GameRichPresenceJoinRequested (k_iSteamFriendsCallbacks + 37) + +// Making SURE we have alignment +#ifdef STRUCTURE_PACK_CHECK +typedef struct { + uint32_t m_u32; + uint64_t m_u64; + uint16_t m_u16; + double m_d; +} ValvePackingSentinel_t; +#endif + +#pragma pack( pop ) + +#ifndef NO_STEAM + +/*______________________________________________________________________________________________________________________________ + + Forward linker declerations. 
Type: cdecl (obviously) +______________________________________________________________________________________________________________________________*/ + +void SteamAPI_Shutdown(); +int SteamAPI_Init(); +int SteamAPI_RestartAppIfNecessary( uint32_t unOwnAppID ); + +void SteamAPI_ManualDispatch_Init(); +void SteamAPI_ManualDispatch_RunFrame( HSteamPipe hSteamPipe ); +int SteamAPI_ManualDispatch_GetNextCallback( HSteamPipe hSteamPipe, CallbackMsg_t *pCallbackMsg ); +void SteamAPI_ManualDispatch_FreeLastCallback( HSteamPipe hSteamPipe ); +int SteamAPI_ManualDispatch_GetAPICallResult( HSteamPipe hSteamPipe, SteamAPICall_t hSteamAPICall, void *pCallback, int cubCallback, int iCallbackExpected, int *pbFailed ); + + +char *SteamAPI_ISteamFriends_GetPersonaName( ISteamFriends *self ); +const char *SteamAPI_ISteamFriends_GetFriendPersonaName( ISteamFriends *self, uint64_steamid steamIDFriend ); +uint64_steamid SteamAPI_ISteamUser_GetSteamID( ISteamUser *self ); +int SteamAPI_ISteamFriends_SetRichPresence( ISteamFriends* self, const char * pchKey, const char * pchValue ); +int SteamAPI_ISteamFriends_HasFriend( ISteamFriends* self, uint64_steamid steamIDFriend, int iFriendFlags ); +int SteamAPI_ISteamFriends_GetSmallFriendAvatar ( ISteamFriends *self, uint64_steamid steamIDFriend ); // 32x32 +int SteamAPI_ISteamFriends_GetMediumFriendAvatar ( ISteamFriends *self, uint64_steamid steamIDFriend ); +int SteamAPI_ISteamFriends_GetLargeFriendAvatar ( ISteamFriends *self, uint64_steamid steamIDFriend ); +int SteamAPI_ISteamUtils_GetImageSize( ISteamUtils *self, int iImage, uint32_t *pnWidth, uint32_t *pnHeight ); +int SteamAPI_ISteamUtils_GetImageRGBA( ISteamUtils *self, int iImage, uint8_t *pubDest, int nDestBufferSize ); +int SteamAPI_ISteamUserStats_SetAchievement( ISteamUserStats *self, const char *pchName ); + +HSteamPipe SteamAPI_GetHSteamPipe(); +HSteamUser SteamAPI_GetHSteamUser(); + +/* NETWORKING INTERFACES +-------------------------*/ + +int /* SendP2PPacket */ SteamAPI_ISteamNetworking_SendP2PPacket( ISteamNetworking *self, + uint64_steamid steamIDRemote, + const void *pubData, + uint32_t cubData, + EP2PSend_t eP2PSendType, + int nChannel +); + +int /* IsP2PPacketAvailable */ SteamAPI_ISteamNetworking_IsP2PPacketAvailable( ISteamNetworking *self, + uint32_t *pcubMsgSize, + int nChannel +); + +int /* ReadP2PPacket */ SteamAPI_ISteamNetworking_ReadP2PPacket( ISteamNetworking *self, + void *pubDest, + uint32_t cubDest, + uint32_t *pcubMsgSize, + CSteamID *psteamIDRemote, + int nChannel +); + +int /* AcceptP2PSessionWithUser */ SteamAPI_ISteamNetworking_AcceptP2PSessionWithUser( ISteamNetworking *self, + uint64_steamid steamIDRemote +); + +int /* CloseP2PSessionWithUser */ SteamAPI_ISteamNetworking_CloseP2PSessionWithUser( ISteamNetworking *self, + uint64_steamid steamIDRemote +); + +int /* CloseP2PChannelWithUser */ SteamAPI_ISteamNetworking_CloseP2PChannelWithUser( ISteamNetworking *self, + uint64_steamid steamIDRemote, + int nChannel +); + +int /* GetP2PSessionState */ SteamAPI_ISteamNetworking_GetP2PSessionState( ISteamNetworking *self, + uint64_steamid steamIDRemote, + P2PSessionState_t *pConnectionState +); + +int /* AllowP2PPacketRelay */ SteamAPI_ISteamNetworking_AllowP2PPacketRelay( ISteamNetworking *self, + int bAllow +); + +SNetListenSocket_t /* CreateListenSocket */ SteamAPI_ISteamNetworking_CreateListenSocket( ISteamNetworking *self, + int nVirtualP2PPort, + SteamIPAddress_t nIP, + uint16_t nPort, + int bAllowUseOfPacketRelay +); + +SNetSocket_t /* CreateP2PConnectionSocket */ 
SteamAPI_ISteamNetworking_CreateP2PConnectionSocket( ISteamNetworking *self, + uint64_steamid steamIDTarget, + int nVirtualPort, + int nTimeoutSec, + int bAllowUseOfPacketRelay +); + +SNetSocket_t /* CreateConnectionSocket */ SteamAPI_ISteamNetworking_CreateConnectionSocket( ISteamNetworking *self, + SteamIPAddress_t nIP, + uint16_t nPort, + int nTimeoutSec +); + +int /* DestroySocket */ SteamAPI_ISteamNetworking_DestroySocket( ISteamNetworking *self, + SNetSocket_t hSocket, + int bNotifyRemoteEnd +); + +int /* DestroyListenSocket */ SteamAPI_ISteamNetworking_DestroyListenSocket( ISteamNetworking *self, + SNetListenSocket_t hSocket, + int bNotifyRemoteEnd +); + +int /* SendDataOnSocket */ SteamAPI_ISteamNetworking_SendDataOnSocket( ISteamNetworking *self, + SNetSocket_t hSocket, + void *pubData, + uint32_t cubData, + int bReliable +); + +int /* IsDataAvailableOnSocket */ SteamAPI_ISteamNetworking_IsDataAvailableOnSocket( ISteamNetworking *self, + SNetSocket_t hSocket, + uint32_t *pcubMsgSize +); + +int /* RetrieveDataFromSocket */ SteamAPI_ISteamNetworking_RetrieveDataFromSocket( ISteamNetworking *self, + SNetSocket_t hSocket, + void *pubDest, + uint32_t cubDest, + uint32_t *pcubMsgSize +); + +int /* IsDataAvailable */ SteamAPI_ISteamNetworking_IsDataAvailable( ISteamNetworking *self, + SNetListenSocket_t hListenSocket, + uint32_t *pcubMsgSize, + SNetSocket_t *phSocket +); + +int /* RetrieveData */ SteamAPI_ISteamNetworking_RetrieveData( ISteamNetworking *self, + SNetListenSocket_t hListenSocket, + void *pubDest, + uint32_t cubDest, + uint32_t *pcubMsgSize, + SNetSocket_t *phSocket +); + +int /* GetSocketInfo */ SteamAPI_ISteamNetworking_GetSocketInfo( ISteamNetworking *self, + SNetSocket_t hSocket, + CSteamID *pSteamIDRemote, + int *peSocketStatus, + SteamIPAddress_t *punIPRemote, + uint16_t *punPortRemote +); + +int /* GetListenSocketInfo */ SteamAPI_ISteamNetworking_GetListenSocketInfo( ISteamNetworking *self, + SNetListenSocket_t hListenSocket, + SteamIPAddress_t *pnIP, + uint16_t *pnPort +); + +ESNetSocketConnectionType_t /* GetSocketConnectionType */ SteamAPI_ISteamNetworking_GetSocketConnectionType( ISteamNetworking *self, + SNetSocket_t hSocket +); + +int /* GetMaxPacketSize */ SteamAPI_ISteamNetworking_GetMaxPacketSize( ISteamNetworking *self, + SNetSocket_t hSocket +); + +#define sw_SteamAPI_Shutdown SteamAPI_Shutdown +#define sw_SteamAPI_Init() SteamAPI_Init() +#define sw_SteamAPI_RestartAppIfNecessary(...) SteamAPI_RestartAppIfNecessary( __VA_ARGS__ ) + +// This method can only be cleared correctly in first party c++ mode +// See: sw_SteamAPI_ManualDispatch_Init +// #define sw_SteamAPI_RunCallbacks() + +// Steam pipe stuff +#define sw_SteamAPI_GetHSteamPipe() SteamAPI_GetHSteamPipe() +#define sw_SteamAPI_GetHSteamUser() SteamAPI_GetHSteamUser() + +#define sw_SteamAPI_ManualDispatch_Init() SteamAPI_ManualDispatch_Init() +#define sw_SteamAPI_ManualDispatch_GetNextCallback(...) SteamAPI_ManualDispatch_GetNextCallback( __VA_ARGS__ ) +#define sw_SteamAPI_ManualDispatch_FreeLastCallback(...) SteamAPI_ManualDispatch_FreeLastCallback( __VA_ARGS__ ) +#define sw_SteamAPI_ManualDispatch_GetAPICallResult(...) SteamAPI_ManualDispatch_GetAPICallResult( __VA_ARGS__ ) +#define sw_SteamAPI_ManualDispatch_RunFrame(...) SteamAPI_ManualDispatch_RunFrame( __VA_ARGS__ ) + +#define sw_GetPersonaName(...) SteamAPI_ISteamFriends_GetPersonaName( __thinsteam_friends, ##__VA_ARGS__ ) +#define sw_GetFriendPersonaName(...) 
SteamAPI_ISteamFriends_GetFriendPersonaName( __thinsteam_friends, __VA_ARGS__ ) +#define sw_GetSteamID(...) SteamAPI_ISteamUser_GetSteamID( __thinsteam_user, ##__VA_ARGS__ ) +#define sw_HasFriend(...) SteamAPI_ISteamFriends_HasFriend( __thinsteam_friends, __VA_ARGS__ ) +#define sw_SetRichPresence(...) SteamAPI_ISteamFriends_SetRichPresence( __thinsteam_friends, __VA_ARGS__ ) + +#define sw_GetSmallFriendAvatar(...) SteamAPI_ISteamFriends_GetSmallFriendAvatar( __thinsteam_friends, ##__VA_ARGS__ ) +#define sw_GetMediumFriendAvatar(...) SteamAPI_ISteamFriends_GetMediumFriendAvatar( __thinsteam_friends, ##__VA_ARGS__ ) +#define sw_GetLargeFriendAvatar(...) SteamAPI_ISteamFriends_GetLargeFriendAvatar( __thinsteam_friends, ##__VA_ARGS__ ) +#define sw_GetImageSize(...) SteamAPI_ISteamUtils_GetImageSize( __thinsteam_utils, ##__VA_ARGS__ ) +#define sw_GetImageRGBA(...) SteamAPI_ISteamUtils_GetImageRGBA( __thinsteam_utils, ##__VA_ARGS__ ) + +#define sw_SetAchievement(...) SteamAPI_ISteamUserStats_SetAchievement( __thinsteam_stats, ##__VA_ARGS__ ) + +// https://partner.steamgames.com/doc/api/ISteamNetworkingUtils +#define sw_SendP2PPacket(...) SteamAPI_ISteamNetworking_SendP2PPacket( __thinsteam_net, ##__VA_ARGS__ ) +#define sw_IsP2PPacketAvailable(...) SteamAPI_ISteamNetworking_IsP2PPacketAvailable( __thinsteam_net, ##__VA_ARGS__ ) +#define sw_ReadP2PPacket(...) SteamAPI_ISteamNetworking_ReadP2PPacket( __thinsteam_net, ##__VA_ARGS__ ) +#define sw_AcceptP2PSessionWithUser(...) SteamAPI_ISteamNetworking_AcceptP2PSessionWithUser( __thinsteam_net, ##__VA_ARGS__ ) +#define sw_CloseP2PSessionWithUser(...) SteamAPI_ISteamNetworking_CloseP2PSessionWithUser( __thinsteam_net, ##__VA_ARGS__ ) +#define sw_CloseP2PChannelWithUser(...) SteamAPI_ISteamNetworking_CloseP2PChannelWithUser( __thinsteam_net, ##__VA_ARGS__ ) +#define sw_GetP2PSessionState(...) SteamAPI_ISteamNetworking_GetP2PSessionState( __thinsteam_net, ##__VA_ARGS__ ) +#define sw_AllowP2PPacketRelay(...) SteamAPI_ISteamNetworking_AllowP2PPacketRelay( __thinsteam_net, ##__VA_ARGS__ ) +#define sw_CreateListenSocket(...) SteamAPI_ISteamNetworking_CreateListenSocket( __thinsteam_net, ##__VA_ARGS__ ) +#define sw_CreateP2PConnectionSocket(...) SteamAPI_ISteamNetworking_CreateP2PConnectionSocket( __thinsteam_net, ##__VA_ARGS__ ) +#define sw_CreateConnectionSocket(...) SteamAPI_ISteamNetworking_CreateConnectionSocket( __thinsteam_net, ##__VA_ARGS__ ) +#define sw_DestroySocket(...) SteamAPI_ISteamNetworking_DestroySocket( __thinsteam_net, ##__VA_ARGS__ ) +#define sw_DestroyListenSocket(...) SteamAPI_ISteamNetworking_DestroyListenSocket( __thinsteam_net, ##__VA_ARGS__ ) +#define sw_SendDataOnSocket(...) SteamAPI_ISteamNetworking_SendDataOnSocket( __thinsteam_net, ##__VA_ARGS__ ) +#define sw_IsDataAvailableOnSocket(...) SteamAPI_ISteamNetworking_IsDataAvailableOnSocket( __thinsteam_net, ##__VA_ARGS__ ) +#define sw_RetrieveDataFromSocket(...) SteamAPI_ISteamNetworking_RetrieveDataFromSocket( __thinsteam_net, ##__VA_ARGS__ ) +#define sw_IsDataAvailable(...) SteamAPI_ISteamNetworking_IsDataAvailable( __thinsteam_net, ##__VA_ARGS__ ) +#define sw_RetrieveData(...) SteamAPI_ISteamNetworking_RetrieveData( __thinsteam_net, ##__VA_ARGS__ ) +#define sw_GetSocketInfo(...) SteamAPI_ISteamNetworking_GetSocketInfo( __thinsteam_net, ##__VA_ARGS__ ) +#define sw_GetListenSocketInfo(...) SteamAPI_ISteamNetworking_GetListenSocketInfo( __thinsteam_net, ##__VA_ARGS__ ) +#define sw_GetSocketConnectionType(...) 
SteamAPI_ISteamNetworking_GetSocketConnectionType( __thinsteam_net,##__VA_ARGS__ ) +#define sw_GetMaxPacketSize(...) SteamAPI_ISteamNetworking_GetMaxPacketSize( __thinsteam_net, ##__VA_ARGS__ ) + +#define _sw__fill( self, func, ... ) func( self, ##__VA_ARGS__ ) +#endif + +/*______________________________________________________________________________________________________________________________ + + WRAPPER LAYER +______________________________________________________________________________________________________________________________*/ + +ISteamFriends *__thinsteam_friends = NULL; +ISteamUser *__thinsteam_user = NULL; +ISteamUtils *__thinsteam_utils = NULL; +ISteamUserStats *__thinsteam_stats = NULL; +ISteamNetworking *__thinsteam_net = NULL; + +ISteamFriends *SteamAPI_SteamFriends_v017(); +ISteamUser *SteamAPI_SteamUser_v021(); +ISteamUtils *SteamAPI_SteamUtils_v009(); +ISteamUserStats *SteamAPI_SteamUserStats_v012(); +ISteamNetworking *SteamAPI_SteamNetworking_v006(); + +HSteamPipe g_hSteamPipe; + +GLuint get_player_image( uint64_steamid usr ) +{ + GLuint gl_img; + + uint32_t x = 64, y = 64; + int steam_image; + + steam_image = sw_GetMediumFriendAvatar( usr ); + sw_GetImageSize( steam_image, &x, &y ); + + unsigned char * img_buf = (unsigned char *)calloc( x * y * 4, 1 ); + + sw_GetImageRGBA( steam_image, img_buf, x * y * 4 ); + + glGenTextures( 1, &gl_img ); + glBindTexture( GL_TEXTURE_2D, gl_img ); + + glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, x, y, 0, GL_RGBA, GL_UNSIGNED_BYTE, img_buf ); + glGenerateMipmap( GL_TEXTURE_2D ); + + glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE ); + glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE ); + + glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR ); + glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR ); + + free( img_buf ); + + return gl_img; +} + +int sw_vipid( uint64_steamid usr ) +{ + static uint64_steamid massiveLEGENDS[] = + { + 76561198134179460, // Spud + 76561198020563704, // Assasssin + 76561198062961277, // lopen + 76561198084693332 // ernie + }; + + for( int i = 0; i < sizeof(massiveLEGENDS)/sizeof(massiveLEGENDS[0]); i ++ ) + { + if( massiveLEGENDS[i] == usr ) + return 1; + } + + return 0; +} + +// Initialize steamworks through this function and your appid +int sw_init( uint32_t appid ) { + +#ifdef STRUCTURE_PACK_CHECK + +// This code should never fail and if it does something went wrong with the build +// Remember to undefine STRUCTURE_PACK_CHECK for publishing!!! + +#if defined(VALVE_CALLBACK_PACK_SMALL) + if( sizeof(ValvePackingSentinel_t) != 24 ){ + printf( "Struct packing error: ValvePackingSentinel_t expected 24 got %i\nThe application is built incorrectly\n", (int)sizeof(ValvePackingSentinel_t)); + return 0; + } +#elif defined(VALVE_CALLBACK_PACK_LARGE) + if( sizeof(ValvePackingSentinel_t) != 32 ){ + printf( "Struct packing error: ValvePackingSentinel_t expected 32 got %i\nThe application is built incorrectly\n", (int)sizeof(ValvePackingSentinel_t)); + return 0; + } +#else + #error ??? 
+#endif + +#endif + + #ifdef STEAMWORKS_VERBOSE + printf( "Initializing steamworks..\n" ); + #endif + + if( sw_SteamAPI_RestartAppIfNecessary( appid ) == 1 ){ + #ifdef STEAMWORKS_VERBOSE + printf( "Restarting via steam\n" ); + #endif + return 0; + } + + if( !sw_SteamAPI_Init() ){ + #ifdef STEAMWORKS_VERBOSE + printf( "Steamworks connection failed\n" ); + #endif + return 0; + } + + // We are using C so we have to deal with callbacks microsoft event loop style + sw_SteamAPI_ManualDispatch_Init(); + + __thinsteam_friends = SteamAPI_SteamFriends_v017(); + __thinsteam_user = SteamAPI_SteamUser_v021(); + __thinsteam_utils = SteamAPI_SteamUtils_v009(); + __thinsteam_stats = SteamAPI_SteamUserStats_v012(); + __thinsteam_net = SteamAPI_SteamNetworking_v006(); + + // This might happen if the DLL is too old + if( !__thinsteam_friends | !__thinsteam_user | !__thinsteam_utils | !__thinsteam_stats | !__thinsteam_net ){ + #ifdef STEAMWORKS_VERBOSE + printf( "Interface hooks failed\n" ); + #endif + return 0; + } + + g_hSteamPipe = sw_SteamAPI_GetHSteamPipe(); + + #ifdef STEAMWORKS_VERBOSE + printf( "Steamworks ready\n" ); + #endif + + return 1; +} + +void sw_init_postgl() +{ + _localplayer_image = get_player_image( sw_GetSteamID() ); + strcpy( _localplayer_name, sw_GetPersonaName() ); +} + +/*______________________________________________________________________________________________________________________________ + + USER CODE +______________________________________________________________________________________________________________________________*/ + +// Macro Creates (args): +// Function(args) pointer definition +// Callback global pointer +// sw_SetCallback (*ptr) function + +#define _swCallbackPair( CNAME, ... ) \ +typedef void(*sw_##CNAME##Fun)( __VA_ARGS__ ); \ +sw_##CNAME##Fun CNAME = NULL; \ +void sw_Set##CNAME##Callback(sw_##CNAME##Fun d) { CNAME = d; } + +_swCallbackPair( OnSocketStatus, SNetSocket_t, SNetListenSocket_t, CSteamID, int ) +_swCallbackPair( OnP2PSessionConnectFail, CSteamID, EP2PSessionError_t ) +_swCallbackPair( OnP2PSessionRequest, CSteamID ) +_swCallbackPair( OnSteamJoinRequest, CSteamID, char * ) + +void sw_RunSteamEventLoop(void) +{ + sw_SteamAPI_ManualDispatch_RunFrame( g_hSteamPipe ); + CallbackMsg_t callback; + while( sw_SteamAPI_ManualDispatch_GetNextCallback( g_hSteamPipe, &callback ) ){ + + // Check for dispatching API call results + if( callback.m_iCallback == SW_CBID_SteamAPICallCompleted ){ + + SteamAPICallCompleted_t *pCallCompleted = (SteamAPICallCompleted_t *)&callback; + void *pTmpCallResult = malloc( pCallCompleted->m_cubParam ); + int bFailed; + + if( sw_SteamAPI_ManualDispatch_GetAPICallResult( g_hSteamPipe, pCallCompleted->m_hAsyncCall, pTmpCallResult, \ + pCallCompleted->m_cubParam, pCallCompleted->m_iCallback, &bFailed ) ){ + + // Dispatch the call result to the registered handler(s) for the + // call identified by pCallCompleted->m_hAsyncCall + + } + + free( pTmpCallResult ); + + } else { + + // Look at callback.m_iCallback to see what kind of callback it is, + // and dispatch to appropriate handler(s) + + vg_info( "steamworks_event::callback( %i )\n", callback.m_iCallback ); + + void *data = callback.m_pubParam; + + switch( callback.m_iCallback ){ + + case SW_CBID_P2PSessionRequest: if( OnP2PSessionRequest ) OnP2PSessionRequest( + ((P2PSessionRequest_t *)data)->m_steamIDRemote + ); break; + + case SW_CBID_SocketStatusCallback: if( OnSocketStatus ) OnSocketStatus( + ((SocketStatusCallback_t *)data)->m_hSocket, + ((SocketStatusCallback_t 
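+                /* Game code hooks these events through the setters generated by
+                   _swCallbackPair above, e.g. (handler body is a placeholder):
+
+                       void on_p2p_request( CSteamID remote )
+                       {
+                           sw_AcceptP2PSessionWithUser( remote.m_unAll64Bits );
+                       }
+
+                       // during init:
+                       sw_SetOnP2PSessionRequestCallback( on_p2p_request );
+
+                   and then drives delivery by calling sw_RunSteamEventLoop()
+                   once per frame, as vg.h does in its main loop. */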
*)data)->m_hListenSocket, + ((SocketStatusCallback_t *)data)->m_steamIDRemote, + ((SocketStatusCallback_t *)data)->m_eSNetSocketState + ); break; + + case SW_CBID_P2PSessionConnectFail: if( OnP2PSessionConnectFail ) OnP2PSessionConnectFail( + ((P2PSessionConnectFail_t *)data)->m_steamIDRemote, + ((P2PSessionConnectFail_t *)data)->m_eP2PSessionError + ); break; + + case SW_CBID_GameRichPresenceJoinRequested: if( OnSteamJoinRequest ) OnSteamJoinRequest( + ((GameRichPresenceJoinRequested_t *)data)->m_steamIDFriend, + ((GameRichPresenceJoinRequested_t *)data)->m_rgchConnect + ); break; + + default: break; + + } + + } + + SteamAPI_ManualDispatch_FreeLastCallback( g_hSteamPipe ); + + } + +} + +/*______________________________________________________________________________________________________________________________ + + NO_STEAM +______________________________________________________________________________________________________________________________*/ + +#else // #define NO_STEAM +// In case we dont want to spam the steam servers + +char *__str_sw_unlinked = "__NO_STEAMWORKS__"; + +#define SW_BLANK_RETURN 0 + +#define sw_init( x ) 1 +#define sw_exit() +#define sw_runcallbacks() +#define sw_get_persona_name() __str_sw_unlinked +#define sw_get_steamid() SW_BLANK_RETURN +#define sw_get_friend_smallavatar( x ) SW_BLANK_RETURN +#define sw_get_friend_mediumavatar( x ) SW_BLANK_RETURN +#define sw_get_friend_largeavatar( x ) SW_BLANK_RETURN + +#define sw_get_image_size( x, y, z ) +#define sw_get_image_rgba( x, y, z ) +#define sw_set_achievement( x ) + +#endif + +#endif + diff --git a/vg/config.h b/vg/config.h new file mode 100644 index 0000000..01c9b0a --- /dev/null +++ b/vg/config.h @@ -0,0 +1,17 @@ +// Copyright (C) 2021 Harry Godden (hgn) - All Rights Reserved + +static struct button_binding vg_button_binds[] = +{ + { .name = "fire0", .bind = GLFW_MOUSE_BUTTON_LEFT }, + { .name = "fire1", .bind = GLFW_MOUSE_BUTTON_RIGHT }, + { .name = "noclip", .bind = GLFW_KEY_V, }, + { .name = "jump", .bind = GLFW_KEY_SPACE } +}; + +static struct axis_binding vg_axis_binds[] = +{ + { .name = "fire0", .positive = GLFW_MOUSE_BUTTON_LEFT, .negative = -1 }, + { .name = "fire1", .positive = GLFW_MOUSE_BUTTON_RIGHT, .negative = -1 }, + { .name = "horizontal", .positive = GLFW_KEY_D, .negative = GLFW_KEY_A }, + { .name = "vertical", .positive = GLFW_KEY_W, .negative = GLFW_KEY_S } +}; diff --git a/vg/vg.c b/vg/vg.c new file mode 100644 index 0000000..e69de29 diff --git a/vg/vg.h b/vg/vg.h new file mode 100644 index 0000000..5220ae7 --- /dev/null +++ b/vg/vg.h @@ -0,0 +1,346 @@ +// Copyright (C) 2021 Harry Godden (hgn) - All Rights Reserved + +#include +#include +#include +#include +#include +#include + +#include "gl/glad/glad.h" +#include "gl/glfw3.h" + +void vg_register_exit( void( *funcptr )(void), const char *name ); +void vg_exiterr( const char *strErr ); + +#include "vg/vg_platform.h" +#include "vg/vg_io.h" +#include "vg/vg_audio.h" + +#include "steam/steamworks_thin.h" + +static inline float vg_get_axis( const char *axis ) __attribute__((unused)); +static inline int vg_get_button( const char *button ) __attribute__((unused)); +static inline int vg_get_button_down( const char *button ) __attribute__((unused)); +static inline int vg_get_button_up( const char *button ) __attribute__((unused)); + +// Globals +GLFWwindow* vg_window; +int vg_window_x = 1280; +int vg_window_y = 720; + +float vg_mouse_x; +float vg_mouse_y; + +float vg_time; +float vg_time_last; +float vg_time_delta; + +// Input +// 
=========================================================================================================== +GLFWgamepadstate vg_gamepad; +int vg_gamepad_ready = 0; +const char *vg_gamepad_name = NULL; +int vg_gamepad_id; + +enum EInputMode +{ + k_EInputMode_pc, + k_EInputMode_gamepad +} +vg_input_mode; + +static struct axis_binding +{ + const char *name; + union + { + int positive; + int bind; + }; + int negative; + + float value; +} +vg_axis_binds[]; + +static struct button_binding +{ + const char *name; + int bind; + + int value; int prev; +} +vg_button_binds[]; + +#include "vg/config.h" + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wreturn-type" + +static inline float vg_get_axis( const char *axis ) +{ + for( int i = 0; i < vg_list_size( vg_axis_binds ); i ++ ) + { + if( !strcmp( axis, vg_axis_binds[i].name ) ) + { + return vg_axis_binds[i].value; + } + } +} + +static inline struct button_binding *vg_get_button_ptr( const char *button ) +{ + for( int i = 0; i < vg_list_size( vg_button_binds ); i ++ ) + { + if( !strcmp( button, vg_button_binds[i].name ) ) + { + return vg_button_binds + i; + } + } +} +#pragma GCC diagnostic pop + +static inline int vg_get_button( const char *button ) +{ + return vg_get_button_ptr( button )->value; +} + +static inline int vg_get_button_down( const char *button ) +{ + struct button_binding *bind = vg_get_button_ptr( button ); + return bind->value & (bind->value ^ bind->prev); +} + +static inline int vg_get_button_up( const char *button ) +{ + struct button_binding *bind = vg_get_button_ptr( button ); + return bind->prev & (bind->value ^ bind->prev); +} + +static inline int key_is_keyboard( int const id ) +{ + static_assert( GLFW_MOUSE_BUTTON_LAST < GLFW_KEY_SPACE, "GLFW: Mouse has too many buttons" ); + return id > GLFW_MOUSE_BUTTON_LAST; +} + +// Mouse AND Keyboard get button press +int get_button_cross_device( int const id ) +{ + if( key_is_keyboard( id ) ) + { + return glfwGetKey( vg_window, id ); + } + else + { + return glfwGetMouseButton( vg_window, id ) == GLFW_PRESS; + } +} + +void vg_update_inputs(void) +{ + // Update button inputs + for( int i = 0; i < vg_list_size( vg_button_binds ); i ++ ) + { + struct button_binding *binding = vg_button_binds + i; + binding->prev = binding->value; + + if( vg_input_mode == k_EInputMode_pc ) + { + binding->value = get_button_cross_device( binding->bind ); + } + else + { + binding->value = vg_gamepad.buttons[ binding->bind ]; + } + } + + // Update axis inputs + for( int i = 0; i < vg_list_size( vg_axis_binds ); i ++ ) + { + struct axis_binding *binding = vg_axis_binds + i; + + if( vg_input_mode == k_EInputMode_pc ) + { + binding->value = get_button_cross_device( binding->positive ); + binding->value -= get_button_cross_device( binding->negative ); + } + else + { + binding->value = vg_gamepad.axes[ binding->bind ]; + } + } +} + +// Engine main +// =========================================================================================================== + +#define VG_GAMELOOP __attribute__((weak)) + +void( *vg_on_exit[16] )(void); +u32 vg_exit_count = 0; + +// Add a shutdown step +void vg_register_exit( void( *funcptr )(void), const char *name ) +{ + vg_info( "exit registered: (%u)'%s'\n", vg_exit_count, name ); + vg_on_exit[ vg_exit_count ++ ] = funcptr; +} + +void vg_exit(void) +{ + for( int i = vg_exit_count-1; i >= 0; i -- ) + { + vg_info( "engine_exit[%d]()\n", i ); + vg_on_exit[i](); + } + + vg_info( "done\n" ); +} + +// Forcefully exit program after error +void vg_exiterr( const char 
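+/* Example of the binding API from game code (sketch; player_jump() is a
+   placeholder, the "jump" and "horizontal" binds come from vg/config.h):
+
+       if( vg_get_button_down( "jump" ) )
+           player_jump();
+
+       float move = vg_get_axis( "horizontal" );   // -1..1 from D/A in pc mode
+*/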
*strErr ) +{ + vg_error( "Engine Fatal: %s\n", strErr ); + vg_exit(); + exit(0); +} + +// Callbacks +// --------- + +void vg_mouse_callback( GLFWwindow* ptrW, double xpos, double ypos ) +{ + vg_mouse_x = xpos; + vg_mouse_y = ypos; +} + +void vg_scroll_callback( GLFWwindow* ptrW, double xoffset, double yoffset ) +{ + +} + +void vg_framebuffer_resize_callback( GLFWwindow *ptrW, int w, int h ) +{ + vg_window_x = w; + vg_window_y = h; +} + +void vg_start(void) VG_GAMELOOP; +void vg_update(void) VG_GAMELOOP; +void vg_render(void) VG_GAMELOOP; +void vg_ui(void) VG_GAMELOOP; +void vg_free(void) VG_GAMELOOP; + +void vg_init( int argc, char *argv[], const char *window_name ) +{ +#ifdef VG_STEAM + // Initialize steamworks + if( !sw_init( 1218140U ) ) + { + vg_exiterr( "Steamworks failed to initialize" ); + } + else + { + vg_register_exit( &sw_SteamAPI_Shutdown, "SteamAPI" ); + } +#endif + + // Context creation + // ========================================================================================================================== + glfwInit(); + glfwWindowHint( GLFW_CONTEXT_VERSION_MAJOR, 3 ); + glfwWindowHint( GLFW_CONTEXT_VERSION_MINOR, 3 ); + glfwWindowHint( GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE ); + glfwWindowHint( GLFW_OPENGL_DEBUG_CONTEXT, GL_TRUE ); + + glfwWindowHint( GLFW_SAMPLES, 4 ); + + GLFWmonitor *monitor_primary = glfwGetPrimaryMonitor(); + + const GLFWvidmode *mode = glfwGetVideoMode( monitor_primary ); + glfwWindowHint( GLFW_RED_BITS, mode->redBits ); + glfwWindowHint( GLFW_GREEN_BITS, mode->greenBits ); + glfwWindowHint( GLFW_BLUE_BITS, mode->blueBits ); + glfwWindowHint( GLFW_REFRESH_RATE, mode->refreshRate ); + + if( !(vg_window = glfwCreateWindow( vg_window_x, vg_window_y, window_name, NULL, NULL)) ) + { + vg_exiterr( "GLFW Failed to initialize" ); + } + else + { + vg_register_exit( &glfwTerminate, "glfwTerminate" ); + } + + glfwMakeContextCurrent( vg_window ); + glfwSwapInterval( 1 ); + + // Set callbacks + glfwSetFramebufferSizeCallback( vg_window, vg_framebuffer_resize_callback ); + + glfwSetCursorPosCallback( vg_window, vg_mouse_callback ); + glfwSetScrollCallback( vg_window, vg_scroll_callback ); + + //glfwSetCharCallback( vg_window, console_proc_wchar ); + //glfwSetKeyCallback( vg_window, console_proc_key ); + //glfwSetInputMode(vg_window, GLFW_CURSOR, GLFW_CURSOR_HIDDEN); + + if( !gladLoadGLLoader((GLADloadproc)glfwGetProcAddress) ) + { + vg_exiterr( "Glad failed to initialize" ); + } + + const unsigned char* glver = glGetString( GL_VERSION ); + vg_success( "Load setup complete, OpenGL version: %s\n", glver ); + + for( int id = 0; id <= GLFW_JOYSTICK_LAST; id ++ ) + { + if( glfwJoystickIsGamepad( id ) ) + { + vg_gamepad_name = glfwGetGamepadName( id ); + vg_success( "Gamepad with mapping registered: %s\n", vg_gamepad_name ); + + vg_gamepad_ready = 1; + vg_gamepad_id = id; + + return; + } + } + + vg_audio_init(); + vg_register_exit( &vg_audio_free, "vg_audio_free" ); + + if( vg_start ) vg_start(); + + // Main gameloop + while( !glfwWindowShouldClose( vg_window ) ) + { + glfwPollEvents(); + + #ifdef VG_STEAM + sw_RunSteamEventLoop(); + #endif + + vg_time_last = vg_time; + vg_time = glfwGetTime(); + vg_time_delta = vg_min( vg_time - vg_time_last, 0.1f ); + + vg_update_inputs(); + + if( vg_update ) vg_update(); + + // Update mashed projections etc + + if( vg_render ) vg_render(); + if( vg_ui ) vg_ui(); + + glfwSwapBuffers( vg_window ); + } + + if( vg_free ) vg_free(); + vg_exit(); +} + +u32 NvOptimusEnablement = 0x00000001; +int 
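+/* vg_start/vg_update/vg_render/vg_ui/vg_free are declared weak above, so a
+   game only defines the hooks it needs and hands control to vg_init(). A
+   minimal consumer might look like this (sketch; the window title is a
+   placeholder):
+
+       void vg_render(void)
+       {
+           glClear( GL_COLOR_BUFFER_BIT );
+       }
+
+       int main( int argc, char *argv[] )
+       {
+           vg_init( argc, argv, "vg example" );
+           return 0;
+       }
+*/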
AmdPowerXpressRequestHighPerformance = 1; diff --git a/vg/vg_audio.h b/vg/vg_audio.h new file mode 100644 index 0000000..f154983 --- /dev/null +++ b/vg/vg_audio.h @@ -0,0 +1,693 @@ +// Copyright (C) 2021 Harry Godden (hgn) - All Rights Reserved + +#define MINIAUDIO_IMPLEMENTATION +#include "dr_soft/miniaudio.h" + +#define STB_VORBIS_MAX_CHANNELS 2 +#include "stb/stb_vorbis.h" + +#define SFX_MAX_SYSTEMS 16 +#define SFX_FLAG_ONESHOT 0x1 +#define SFX_FLAG_STEREO 0x2 +#define FADEOUT_LENGTH 441 +#define FADEOUT_DIVISOR (1.f/(float)FADEOUT_LENGTH) + +typedef struct sfx_vol sfx_vol_t; +typedef struct sfx_system sfx_system_t; + +struct sfx_vol +{ + float val; + float cmp; +}; + +struct sfx_system +{ + // Source buffer start + float *source; + float *replacement; + + // Modifiers + sfx_vol_t *vol_src; + float vol; + + float spd; + + // Info + int ch; // channels + u32 end; // buffer end + u32 cur; // cursor position + u32 flags; + + // Effects + u32 snh; + u32 fadeout; + + // The 'Opposite' pointer + sfx_system_t *optr; + + // Diagnostic + float cvol; // Current signal volume + const char *name; +}; + +// 0 +int sfx_save( sfx_system_t *sys ); // Mark change to be uploaded to queue system +void sfx_sys_init(void); // Miniaudio.h init +void sfx_sys_free(void); // Shutdown audio device +sfx_system_t *sfx_alloc(void); // Create and return slot for a sound + +// 1 +void sfx_localize(void); // Copy in data from all queued +void sfx_redist(void); // Send out updates to sources +void audio_mixer_callback( ma_device *pDevice, void *pOutBuf, const void *pInput, ma_uint32 frameCount ); // miniaudio.h interface + +ma_device g_aud_device; +ma_device_config g_aud_dconfig; + +// Thread 2 - background loader ( to be moved ) +// ====================================================== + +// Thread 1 - audio engine ( spawned from miniaudio.h ) +// ====================================================== +sfx_system_t sfx_sys[SFX_MAX_SYSTEMS]; +int sfx_sys_len = 0; + +// Thread 0 - Critical transfer section +// ====================================================== +MUTEX_TYPE sfx_mux_t01; // Resources share: 0 & 1 + +sfx_system_t *sfx_q[SFX_MAX_SYSTEMS]; // Stuff changed +int sfx_q_len = 0; // How much + +// x / 2 +// ====================================================== + +// g_vol_master is never directly acessed by users +float g_master_volume = 1.f; + +sfx_vol_t g_vol_music; +sfx_vol_t g_vol_sfx; + +#define SFX_NUM_VOLUMES 2 +sfx_vol_t *g_volumes[] = { &g_vol_music, &g_vol_sfx }; + +// Decompress entire vorbis stream into buffer +float *sfx_vorbis_stream( const unsigned char *data, int len, int channels, uint32_t *samples ) +{ + int err; + stb_vorbis *pv = stb_vorbis_open_memory( data, len, &err, NULL ); + + if( !pv ) + { + vg_error( "stb_vorbis_open_memory() failed with error code: %i\n", err ); + return NULL; + } + + u32 length_samples = stb_vorbis_stream_length_in_samples( pv ); + float *buffer = (float *)malloc( length_samples * channels * sizeof( float )); + + if( !buffer ) + { + vg_error( "out of memory while allocating sound resource\n" ); + return NULL; + } + + int read_samples = stb_vorbis_get_samples_float_interleaved( pv, channels, buffer, length_samples * channels ); + if( read_samples != length_samples ) + { + vg_warn( "| warning: sample count mismatch. 
Expected %u got %i\n", length_samples, read_samples ); + length_samples = read_samples; + } + + *samples = length_samples; + return buffer; +} + +float *sfx_vorbis( const char *strFileName, int channels, u32 *samples ) +{ + i64 len; + void *filedata = vg_asset_read_s( strFileName, &len ); + + if( filedata ) + { + float *wav = sfx_vorbis_stream( filedata, len, channels, samples ); + free( filedata ); + return wav; + } + else + { + vg_error( "OGG load failed\n" ); + return NULL; + } +} + +typedef struct sfx_bgload sfx_bgload_t; +struct sfx_bgload +{ + char *path; + u32 channels; + + float *buffer; + u32 samples; + + void *user; + + void(*OnComplete)(sfx_bgload_t *inf); +}; + +// Thread worker for background load job +void *sfx_vorbis_a_t( void *_inf ) +{ + sfx_bgload_t *info = _inf; + + // Load the ogg clip + info->buffer = sfx_vorbis( info->path, info->channels, &info->samples ); + info->OnComplete( info ); + + return NULL; +} + +// Asynchronous resource load +int sfx_vorbis_a( const char *path, int channels, void(*OnComplete)(sfx_bgload_t *inf), void *user ) +{ + vg_info( "background job started for: %s\n", path ); + + sfx_bgload_t *params = malloc( sizeof( sfx_bgload_t ) ); + params->path = malloc( strlen( path ) + 1 ); + strcpy( params->path, path ); + params->OnComplete = OnComplete; + params->user = user; + params->channels = channels; + + return vg_thread_run( sfx_vorbis_a_t, params ); +} + +// Asynchronous load-to-system callback +struct sfx_vorbis_a_to_inf +{ + sfx_system_t *sys; + u32 flags; +}; + +#define SFX_A_FLAG_AUTOSTART 0x1 +#define SFX_A_FLAG_AUTOFREE 0x2 + +// Asynchronous load-to-system callback +void sfx_vorbis_a_to_c( sfx_bgload_t *loadinf ) +{ + struct sfx_vorbis_a_to_inf *inf = loadinf->user; + + // Mark buffer for deallocation if autofree is set + if( inf->flags & SFX_A_FLAG_AUTOFREE ) + { + inf->sys->replacement = loadinf->buffer; + } + else + { + inf->sys->source = loadinf->buffer; + } + + inf->sys->end = loadinf->samples; + + if( inf->flags & SFX_A_FLAG_AUTOSTART ) + { + sfx_save( inf->sys ); + } + + free( loadinf->path ); + free( loadinf ); + free( inf ); +} + +// Asynchronous vorbis load into audio system +void sfx_vorbis_a_to( sfx_system_t *sys, const char *strFileName, int channels, uint32_t flags ) +{ + struct sfx_vorbis_a_to_inf *inf = malloc( sizeof( struct sfx_vorbis_a_to_inf ) ); + inf->flags = flags; + inf->sys = sys; + + sys->ch = channels; + + if( !sfx_vorbis_a( strFileName, channels, sfx_vorbis_a_to_c, inf ) ) + { + free( inf ); + } +} + +// 0 +// ====================================================== + +// Mark change to be uploaded to queue system +int sfx_save( sfx_system_t *sys ) +{ + MUTEX_LOCK( sfx_mux_t01 ); + + if( sfx_q_len >= SFX_MAX_SYSTEMS ) + { + vg_error( "Warning: No free space in sound queue\n" ); + + MUTEX_UNLOCK( sfx_mux_t01 ); + return 0; + } + + // Mark change in queue + sfx_q[ sfx_q_len ++ ] = sys; + + MUTEX_UNLOCK( sfx_mux_t01 ); + + return 1; +} + +// Edit a volume float, has to be function round-tripped +// because of mutex +void sfx_vol_fset( sfx_vol_t *src, float to ) +{ + MUTEX_LOCK( sfx_mux_t01 ); + + src->val = to; + src->cmp = g_master_volume * to; + + MUTEX_UNLOCK( sfx_mux_t01 ); +} + +// thread-safe get volume value +float sfx_vol_fget( sfx_vol_t *src ) +{ + float val; + + MUTEX_LOCK( sfx_mux_t01 ); + + val = src->val; + + MUTEX_UNLOCK( sfx_mux_t01 ); + + return val; +} + +// thread-safe set master volume +void sfx_set_master( float to ) +{ + MUTEX_LOCK( sfx_mux_t01 ); + + g_master_volume = to; + + for( int i = 0; i < 
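+        /* Volume changes from game code go through these setters so that the
+           pre-multiplied .cmp values are rebuilt under the mutex, e.g.:
+
+               sfx_vol_fset( &g_vol_music, 0.5f );   // music at half volume
+               sfx_set_master( 0.8f );               // global attenuation
+
+           (0.5f and 0.8f are placeholder values.) */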
SFX_NUM_VOLUMES; i ++ ) + { + g_volumes[ i ]->cmp = g_volumes[ i ]->val * g_master_volume; + } + + MUTEX_UNLOCK( sfx_mux_t01 ); +} + +// thread-safe get master volume +float sfx_get_master(void) +{ + float val; + + MUTEX_LOCK( sfx_mux_t01 ); + + val = g_master_volume; + + MUTEX_UNLOCK( sfx_mux_t01 ); + + return val; +} + +// Miniaudio.h init +void vg_audio_init(void) +{ + // Setup volume values + // Todo: load these from config + g_vol_sfx.val = 1.f; + g_vol_music.val = 1.f; + sfx_set_master( 1.f ); + + g_aud_dconfig = ma_device_config_init( ma_device_type_playback ); + g_aud_dconfig.playback.format = ma_format_f32; + g_aud_dconfig.playback.channels = 2; + g_aud_dconfig.sampleRate = 44100; + g_aud_dconfig.dataCallback = audio_mixer_callback; + + g_aud_dconfig.pUserData = NULL; + + vg_info( "Starting audio engine\n" ); + + if( ma_device_init( NULL, &g_aud_dconfig, &g_aud_device ) != MA_SUCCESS ) + { + vg_exiterr( "ma_device failed to initialize" ); + } + else + { + if( ma_device_start( &g_aud_device ) != MA_SUCCESS ) + { + ma_device_uninit( &g_aud_device ); + vg_exiterr( "ma_device failed to start" ); + } + } +} + +#ifndef VYGER_RELEASE +uint32_t num_sfx_sets = 0; +#endif + +// Shutdown audio device +void vg_audio_free(void) +{ + ma_device_uninit( &g_aud_device ); +} + +// (debug) make sure we are shutting down safely +void sfx_sys_chkerr(void) +{ +#ifndef VYGER_RELEASE + if( num_sfx_sets ) + { + vg_error( "Leaked %u sfx sets\n", num_sfx_sets ); + } +#endif +} + +// 1 +// ====================================================== + +// Create and return slot for a sound +sfx_system_t *sfx_alloc(void) +{ + if( sfx_sys_len >= SFX_MAX_SYSTEMS ) + { + vg_error( "Warning: No free space in sound system\n" ); + + return NULL; + } + + // A conditional is done against this in localization step, + // Needs to be initialized. + sfx_sys[ sfx_sys_len ].source = NULL; + + return sfx_sys + (sfx_sys_len++); +} + +// Copy in data from all queued +void sfx_localize(void) +{ + MUTEX_LOCK( sfx_mux_t01 ); + + while( sfx_q_len --> 0 ) + { + sfx_system_t *src = sfx_q[sfx_q_len]; + + // This is a 'new' sound if optr not set. 
+ if( !src->optr || src->flags & SFX_FLAG_ONESHOT ) + { + src->optr = sfx_alloc(); + } + + if( !src->optr ) + { + // No free slot in the audio-side pool; drop this update instead of dereferencing NULL below + continue; + } + + // Run the replacement routine if a new buffer is waiting + if( src->replacement ) + { + if( src->source ) + { + vg_info( "Deallocating previous source buffer\n" ); + } + + free( src->source ); + + src->source = src->replacement; + src->replacement = NULL; + } + + src->optr->source = src->source; + + // Localize data to thread 1's memory pool + // memcpy( src->optr, src, sizeof( sfx_system_t ) ); + + src->optr->spd = src->spd; + src->optr->ch = src->ch; + src->optr->end = src->end; + src->optr->cur = src->cur; + src->optr->flags = src->flags; + // src->optr->sng = src->snh; + src->optr->fadeout = src->fadeout; + // src->optr->optr = src->optr; + // src->optr->cvol = src->cvol; + src->optr->vol_src = src->vol_src; + src->optr->name = src->name; + + // Loopback pointer to the source system + src->optr->optr = src; + } + sfx_q_len = 0; + + // Pull in volume sliders + for( int i = 0; i < sfx_sys_len; i ++ ) + { + sfx_system_t *sys = sfx_sys + i; + sys->vol = sys->optr->vol; + if( sys->vol_src ) { sys->vol *= sys->vol_src->cmp; } + } + + MUTEX_UNLOCK( sfx_mux_t01 ); +} + +// Send out updates to sources +void sfx_redist(void) +{ + MUTEX_LOCK( sfx_mux_t01 ); + + unsigned int idx = 0, wr = 0; + while( idx != sfx_sys_len ) + { + sfx_system_t *src = sfx_sys + idx; + + // Keep only if cursor is before end + if( src->cur < src->end ) + { + if( !(src->flags & SFX_FLAG_ONESHOT) ) + { + // Correct source pointer + src->optr->optr = sfx_sys + wr; + } + + sfx_sys[ wr ++ ] = sfx_sys[ idx ]; + } + else + { + if( !(src->flags & SFX_FLAG_ONESHOT) ) + { + // Clear link on source + src->optr->optr = NULL; + } + } + + idx ++ ; + } + sfx_sys_len = wr; + + MUTEX_UNLOCK( sfx_mux_t01 ); +} + +// Fetch one frame into pcf, duplicating mono sources onto both output channels +void audio_mixer_getsamples( float *pcf, float *source, uint32_t cur, uint32_t ch ) +{ + if( ch == 2 ) + { + pcf[0] = source[ cur*2+0 ]; + pcf[1] = source[ cur*2+1 ]; + } + else + { + pcf[0] = source[ cur ]; + pcf[1] = source[ cur ]; + } +} + +// miniaudio.h interface +void audio_mixer_callback( ma_device *pDevice, void *pOutBuf, const void *pInput, ma_uint32 frameCount ) +{ + sfx_localize(); + + // Zero the output buffer before mixing ( miniaudio normally pre-zeroes it, but this keeps the mixer self-contained ) + float *pOut32F = (float *)pOutBuf; + for( int i = 0; i < frameCount * 2; i ++ ){ + pOut32F[i] = 0.f; + } + + // Mix every localized system into the output buffer
+ for( int i = 0; i < sfx_sys_len; i ++ ) + { + sfx_system_t *sys = sfx_sys + i; + + uint32_t cursor = sys->cur; + uint32_t bpos = 0; + + float avgvol = 0.f; + + float pcf[2] = { 0.f }; + + while( cursor < vg_min( sys->cur + frameCount, sys->end ) ) + { + audio_mixer_getsamples( pcf, sys->source, cursor, sys->ch ); + + avgvol += fabs( pcf[0] * sys->vol ); + avgvol += fabs( pcf[1] * sys->vol ); + + pOut32F[ bpos*2+0 ] += pcf[0] * sys->vol; + pOut32F[ bpos*2+1 ] += pcf[1] * sys->vol; + + // Blend the fadeout cursor in to prevent popping. + // This lasts FADEOUT_LENGTH (441) samples, roughly 10ms at 44.1kHz + if( sys->fadeout ) + { + if( sys->snh < sys->end ) + { + audio_mixer_getsamples( pcf, sys->source, sys->snh, sys->ch ); + + float mul = (float)sys->fadeout * FADEOUT_DIVISOR; + + pOut32F[ bpos*2+0 ] += pcf[0] * sys->vol * mul; + pOut32F[ bpos*2+1 ] += pcf[1] * sys->vol * mul; + + sys->snh ++; + sys->fadeout --; + } + else + { + sys->fadeout = 0; + } + } + + cursor ++; + bpos ++; + } + + if( !sys->fadeout ) + { + sys->snh = cursor; + } + + // Guard against a division by zero when no samples were mixed + sys->cvol = bpos? avgvol / (float)(bpos*2): 0.f; + sys->cur += frameCount; + } + + sfx_redist(); + + (void)pInput; +} + +// Set of up to 8 sound effects packed into one buffer +typedef struct sfx_set sfx_set_t; +struct sfx_set +{ + float *main; + char *sources; + + uint32_t segments[16]; // start/end sample pairs; 16 entries = up to 8 segments + uint32_t numsegments; + uint32_t ch; + uint32_t flags; +}; + +// Load sounds listed in strSources into sfx_set's memory ( see the usage sketch at the end of this file ). +// String layout: "sounda.ogg\0soundb.ogg\0soundc.ogg\0\0" +// bAsync is reserved; loading is currently synchronous. +void sfx_set_strings( sfx_set_t *dest, char *strSources, uint32_t flags, int bAsync ) +{ + printf( "Init sfx set\n| start | end | length | name \n" ); + + dest->ch = (flags & SFX_FLAG_STEREO)? 2: 1; + + dest->main = NULL; + dest->numsegments = 0; + char *source = strSources; + + uint32_t total = 0; + int len; + while( (len = strlen( source )) ) + { + if( dest->numsegments >= vg_list_size( dest->segments )/2 ) + { + vg_warn( "sfx set segment limit reached; ignoring remaining sources\n" ); + break; + } + + uint32_t samples; + float *sound = sfx_vorbis( source, dest->ch, &samples ); + + if( !sound ) + { + free( dest->main ); + dest->main = NULL; + dest->numsegments = 0; + return; + } + + total += samples; + + float *nbuf = realloc( dest->main, total * dest->ch * sizeof(float) ); + + if( nbuf ) + { + dest->main = nbuf; + memcpy( dest->main + (total-samples)*dest->ch, sound, samples*dest->ch*sizeof(float) ); + free( sound ); + + dest->segments[ dest->numsegments*2+0 ] = total-samples; + dest->segments[ dest->numsegments*2+1 ] = total; + + printf( "| %09u | %09u | %09u | %s\n", total-samples, total, samples, source ); + } + else + { + vg_error( "realloc() failed\n" ); + free( sound ); + return; + } + + source += len +1; + dest->numsegments ++; + } + + vg_info( "finished, numsegments: %u\n", dest->numsegments ); +} + + +// Initialize a set. If sources is NULL, the set's own dest->sources string is used; +// that string should be a literal ( or otherwise outlive the set ), since it is +// neither copied nor freed here. +void sfx_set_init( sfx_set_t *dest, char *sources ) +{ +#ifndef VYGER_RELEASE + num_sfx_sets ++; +#endif + + if( !sources ) + { + sfx_set_strings( dest, dest->sources, dest->flags, 0 ); + } + else + { + sfx_set_strings( dest, sources, dest->flags, 0 ); + } +} + +// Pick a random sound from the buffer and play it into system +void sfx_set_playrnd( sfx_set_t *source, sfx_system_t *sys ) +{ + if( !source->numsegments ) + { + return; + } + + int pick = rand() % source->numsegments; + + sys->source = source->main; + sys->cur = source->segments[ pick*2 + 0 ]; + sys->end = source->segments[ pick*2 + 1 ]; + sys->ch = source->ch; + + sfx_save( sys ); +} + +// Free set resources +void sfx_set_free( sfx_set_t *set ) +{ +#ifndef VYGER_RELEASE + num_sfx_sets --; +#endif + free( set->main ); +}
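// ------------------------------------------------------------------------
// Illustrative sketch (hypothetical, not part of the original patch):
// background-loading a clip straight into a zero-initialised system. With
// SFX_A_FLAG_AUTOSTART the completion callback queues the sound itself;
// with SFX_A_FLAG_AUTOFREE any previous buffer on the system is released
// once the new one is localized. "sound/music.ogg" is a placeholder path.
static inline void sfx_example_stream_music( sfx_system_t *sys )
{
   sys->vol     = 1.f;
   sys->vol_src = &g_vol_music;
   sys->name    = "music";

   sfx_vorbis_a_to( sys, "sound/music.ogg", 2,
                    SFX_A_FLAG_AUTOSTART | SFX_A_FLAG_AUTOFREE );
}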
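// ------------------------------------------------------------------------
// Illustrative sketch (hypothetical names and paths, not part of the
// original patch): building a set from a double-NUL terminated string
// literal and firing a random segment through a zero-initialised system.
// The string literal's implicit terminator supplies the second NUL.
static sfx_set_t sfx_example_steps =
{
   .sources = "sound/step0.ogg\0sound/step1.ogg\0sound/step2.ogg\0",
   .flags   = 0   // mono; use SFX_FLAG_STEREO for stereo sets
};

static inline void sfx_example_play_footstep( sfx_system_t *sys )
{
   if( !sfx_example_steps.main )
      sfx_set_init( &sfx_example_steps, NULL );    // lazy-load on first use

   sys->vol     = 1.f;
   sys->vol_src = &g_vol_sfx;
   sys->flags   = SFX_FLAG_ONESHOT;
   sys->name    = "footstep";

   sfx_set_playrnd( &sfx_example_steps, sys );
}
// ------------------------------------------------------------------------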
diff --git a/vg/vg_io.h b/vg/vg_io.h new file mode 100644 index 0000000..8925412 --- /dev/null +++ b/vg/vg_io.h @@ -0,0 +1,105 @@ +// Copyright (C) 2021 Harry Godden (hgn) - All Rights Reserved + +// Coloured logging +// =========================================================================================================== + +#define KNRM "\x1B[0m" +#define KRED "\x1B[31m" +#define KGRN "\x1B[32m" +#define KYEL "\x1B[33m" +#define KBLU "\x1B[34m" +#define KMAG "\x1B[35m" +#define KCYN "\x1B[36m" +#define KWHT "\x1B[37m" + +#define VG_LOG_WRITE( FILE, PREFIX ) \ + fprintf( FILE, PREFIX ); \ + va_list args; \ + va_start( args, fmt ); \ + vfprintf( FILE, fmt, args ); \ + va_end( args ); \ + fprintf( FILE, KNRM ); + +void vg_success( const char *fmt, ... ) { VG_LOG_WRITE( stdout, (KGRN "success" KWHT "| " KGRN) ); } +void vg_info( const char *fmt, ... ) { VG_LOG_WRITE( stdout, (KNRM " info" KWHT "| " KNRM) ); } +void vg_warn( const char *fmt, ... ) { VG_LOG_WRITE( stdout, (KYEL " warn" KWHT "| " KYEL) ); } +void vg_error( const char *fmt, ... ) { VG_LOG_WRITE( stderr, (KRED " error" KWHT "| " KRED) ); } + +// FILE IO +// =========================================================================================================== + +i64 vg_file_size( FILE *fileptr ) +{ + fseek( fileptr, 0, SEEK_END ); + i64 fsize = ftell( fileptr ); + fseek( fileptr, 0, SEEK_SET ); + + return fsize; +} + +void *vg_disk_open_read( const char *path, int const reserve_end, i64 *size ) +{ + FILE *f = fopen( path, "rb" ); + if( f ) + { + i64 fsize = vg_file_size( f ); + void *buf = malloc( fsize + reserve_end ); + + if( buf ) + { + // Short or failed read: treat the file as invalid / corrupt + if( fread( buf, 1, fsize, f ) != fsize ) + { + free( buf ); + buf = NULL; + } + } + + *size = fsize; + + fclose( f ); + return buf; + } + else + { + return NULL; + } +} + +char *vg_disk_load_text( const char *path, i64 *size ) +{ + char *buf; + i64 fsize; + + if( (buf = vg_disk_open_read( path, 1, &fsize )) ) + { + buf[ fsize ] = 0x00; + *size = fsize +1; + + return buf; + } + + return NULL; +} + +void *vg_asset_read_s( const char *path, i64 *size ) +{ + return vg_disk_open_read( path, 0, size ); +} + +void *vg_asset_read( const char *path ) +{ + i64 size; + return vg_disk_open_read( path, 0, &size ); +} + +char *vg_textasset_read_s( const char *path, i64 *size ) +{ + return vg_disk_load_text( path, size ); +} + +char *vg_textasset_read( const char *name ) +{ + i64 size; + return vg_disk_load_text( name, &size ); +}
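// ------------------------------------------------------------------------
// Illustrative sketch (hypothetical, not part of the original patch): the
// *_s variants also report the size, the plain variants just hand back a
// heap buffer the caller must free(). The path here is a placeholder.
static inline void vg_example_dump_text( const char *path )
{
   i64 size;
   char *text = vg_textasset_read_s( path, &size );

   if( text )
   {
      vg_info( "%s ( %ld bytes including terminator )\n", path, (long)size );
      free( text );
   }
   else
   {
      vg_error( "could not read '%s'\n", path );
   }
}
// ------------------------------------------------------------------------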
diff --git a/vg/vg_platform.h b/vg/vg_platform.h new file mode 100644 index 0000000..bc610f6 --- /dev/null +++ b/vg/vg_platform.h @@ -0,0 +1,91 @@ +// Copyright (C) 2021 Harry Godden (hgn) - All Rights Reserved + +typedef uint8_t u8; +typedef uint16_t u16; +typedef uint32_t u32; +typedef uint64_t u64; +typedef int8_t i8; +typedef int16_t i16; +typedef int32_t i32; +typedef int64_t i64; + +typedef unsigned int uint; + +#define vg_min( A, B ) ((A)<(B)?(A):(B)) +#define vg_max( A, B ) ((A)>(B)?(A):(B)) +#define vg_list_size( A ) (sizeof(A)/sizeof(A[0])) + +// THREADING +// ================================================================================================================== + +// Pthread emulation for Windows +#ifdef _WIN32 + #include <windows.h> + #define MUTEX_TYPE HANDLE + #define MUTEX_INITIALIZER NULL + #define MUTEX_SETUP(x) (x) = CreateMutex(NULL, FALSE, NULL) + #define MUTEX_CLEANUP(x) (CloseHandle(x)) + #define MUTEX_LOCK(x) emulate_pthread_mutex_lock(&(x)) + #define MUTEX_UNLOCK(x) (ReleaseMutex(x)) + + int emulate_pthread_mutex_lock( volatile MUTEX_TYPE *mx ) + { + if( *mx == NULL ) /* static initializer? */ + { + HANDLE p = CreateMutex( NULL, FALSE, NULL ); + if( InterlockedCompareExchangePointer( (PVOID*)mx, (PVOID)p, NULL ) != NULL ) + CloseHandle(p); + } + + return WaitForSingleObject( *mx, INFINITE ) == WAIT_FAILED; + } +#else + #include <pthread.h> + #define MUTEX_LOCK(x) pthread_mutex_lock(&(x)) + #define MUTEX_UNLOCK(x) pthread_mutex_unlock(&(x)) + #define MUTEX_TYPE pthread_mutex_t + #define MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER + // Defined for parity with the Windows path above + #define MUTEX_SETUP(x) pthread_mutex_init(&(x), NULL) + #define MUTEX_CLEANUP(x) pthread_mutex_destroy(&(x)) +#endif + + +// Spawn a detached worker thread. Returns 0 on success, 1 on failure +int vg_thread_run( void *(*pfunc)(void *), void *data ) +{ +#ifdef _WIN32 + + HANDLE hThread = CreateThread + ( + NULL, // Thread attributes + 0, // Stack size (0 = use default) + (LPTHREAD_START_ROUTINE)pfunc, // Thread start address + data, // Parameter to pass to the thread + 0, // Creation flags + NULL // Thread id + ); + + if ( hThread == NULL ) + { + // Thread creation failed. + // More details can be retrieved by calling GetLastError() + return 1; + } + else + { + CloseHandle( hThread ); + return 0; + } + +#else + + pthread_t hThread; + if( pthread_create( &hThread, NULL, pfunc, data ) ) + { + return 1; + } + else + { + pthread_detach( hThread ); + return 0; + } + +#endif +} -- 2.25.1
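// ------------------------------------------------------------------------
// Illustrative appendix (hypothetical, not part of the patch): how the
// vg_platform.h primitives above are meant to compose. A worker started
// with vg_thread_run() shares state with the caller through MUTEX_LOCK /
// MUTEX_UNLOCK; the mutex relies on the static MUTEX_INITIALIZER, which
// the Windows path lazily upgrades inside emulate_pthread_mutex_lock().
static MUTEX_TYPE example_lock = MUTEX_INITIALIZER;
static int example_counter = 0;

static void *example_worker( void *arg )
{
   (void)arg;

   MUTEX_LOCK( example_lock );
   example_counter ++;
   MUTEX_UNLOCK( example_lock );

   return NULL;
}

static inline int example_spawn_worker(void)
{
   // 0 on success, 1 on failure, same convention as vg_thread_run()
   return vg_thread_run( example_worker, NULL );
}
// ------------------------------------------------------------------------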