--- /dev/null
+#include "vg_async.h"
+
+/* Global singleton holding the async queue state: the linear buffer, the
+ * start/end list pointers, the sl_index spinlock and the flush semaphore. */
+struct vg_async vg_async;
+
+/* Engine-side queries, declared locally instead of via a header.
+ * NOTE(review): `_vg_engine_status` uses a leading underscore, which is a
+ * reserved-identifier style at file scope — confirm it matches the engine's
+ * naming convention. */
+enum vg_thread_purpose vg_thread_purpose(void);
+enum engine_status _vg_engine_status(void);
+
+/*
+ * Allocate an asynchronous call with a bit of memory
+ *
+ * Returns a queue entry with `size` bytes of payload space attached
+ * (size 0 is allowed, in which case `payload` is NULL).  The entry is
+ * linked onto the queue immediately but is not runnable until
+ * vg_async_dispatch() sets fn_runner.  Runs on the loader thread (the
+ * crash path longjmps to vg.env_loader_exit) and may block waiting for
+ * the main thread to flush the buffer when there is not enough room.
+ */
+vg_async_item *vg_async_alloc( u32 size )
+{
+ /* ditch out here if engine crashed. this serves as the 'quit checking' */
+ if( _vg_engine_status() == k_engine_status_crashed ){
+ longjmp( vg.env_loader_exit, 1 );
+ }
+
+ SDL_AtomicLock( &vg_async.sl_index );
+
+ /* both the entry header and the payload are 8-byte aligned in the buffer */
+ u32 total_allocation = vg_align8(size) + vg_align8(sizeof(vg_async_item)),
+ remaining = vg_linear_remaining( vg_async.buffer ),
+ capacity = vg_linear_get_capacity( vg_async.buffer );
+
+ /* a request larger than the entire buffer can never succeed; fatal */
+ if( total_allocation > capacity ){
+ SDL_AtomicUnlock( &vg_async.sl_index );
+ vg_error( "Requested: %umb. Buffer size: %umb\n",
+ (total_allocation/1024)/1024,
+ (capacity/1024)/1024 );
+
+ vg_fatal_error( "async alloc invalid size\n" );
+ }
+
+ if( total_allocation > remaining ){
+ /* not enough room right now: drop the lock and sleep until the main
+ * thread drains the queue and clears the buffer.
+ * NOTE(review): remaining/capacity are re-read below but never
+ * re-checked against total_allocation — this relies on the flush
+ * having emptied the buffer completely; confirm a stale semaphore
+ * post cannot get us here with the buffer still partially full. */
+ SDL_AtomicUnlock( &vg_async.sl_index );
+ SDL_SemWait( vg_async.sem_wait_for_flush );
+ SDL_AtomicLock( &vg_async.sl_index );
+
+ remaining = vg_linear_remaining( vg_async.buffer );
+ capacity = vg_linear_get_capacity( vg_async.buffer );
+ }
+
+ void *block = vg_linear_alloc( vg_async.buffer, total_allocation );
+
+ /* header sits at the start of the allocation, payload directly after it */
+ vg_async_item *entry = block;
+ entry->next = NULL;
+
+ if( size ) entry->payload = ((u8*)block) + vg_align8(sizeof(vg_async_item));
+ else entry->payload = NULL;
+
+ entry->size = size;
+ entry->fn_runner = NULL; /* not runnable until vg_async_dispatch() */
+
+ /* append to the singly linked FIFO queue */
+ if( vg_async.end ){
+ vg_async.end->next = entry;
+ vg_async.end = entry;
+ }else{
+ vg_async.start = entry;
+ vg_async.end = entry;
+ }
+
+ SDL_AtomicUnlock( &vg_async.sl_index );
+
+ return entry;
+}
+
+/*
+ * Block the loader thread until the async queue has been fully consumed by
+ * the main thread, i.e. until vg_run_async_checked() posts the flush
+ * semaphore after draining every dispatched entry.
+ */
+void vg_async_stall(void)
+{
+ vg_assert_thread(k_thread_purpose_loader);
+ SDL_SemWait( vg_async.sem_wait_for_flush );
+}
+
+/*
+ * Mark the call as being filled and ready to go
+ *
+ * Sets fn_runner under the spinlock; that field being non-NULL is what
+ * vg_run_async_checked() looks for before executing an entry.  Must run on
+ * the loader thread.
+ */
+void vg_async_dispatch( vg_async_item *item,
+ void (*runner)( void *payload, u32 size ) )
+{
+ vg_assert_thread(k_thread_purpose_loader);
+ /* drain a stale 'flushed' token so a later vg_async_stall() waits for
+ * THIS entry to be consumed rather than returning immediately.
+ * NOTE(review): the SemValue check + SemWait pair is not atomic — confirm
+ * only the loader thread ever waits on this semaphore so the token cannot
+ * be stolen between the two calls. */
+ if( SDL_SemValue(vg_async.sem_wait_for_flush) )
+ SDL_SemWait(vg_async.sem_wait_for_flush);
+
+ SDL_AtomicLock( &vg_async.sl_index );
+ item->fn_runner = runner;
+ SDL_AtomicUnlock( &vg_async.sl_index );
+}
+
+/*
+ * Queue a one-shot async call whose payload memory is owned by the caller;
+ * nothing extra is reserved from the async buffer.
+ */
+void vg_async_call( void (*runner)( void *payload, u32 size ),
+ void *payload, u32 size )
+{
+ vg_assert_thread(k_thread_purpose_loader);
+ /* zero-size entry, then point it at the caller-supplied memory instead */
+ vg_async_item *item = vg_async_alloc(0);
+ item->payload = payload;
+ item->size = size;
+ vg_async_dispatch( item, runner );
+}
+
+/*
+ * Run as much of the async buffer as possible
+ *
+ * Main-thread side: walks the queue in order, executing every entry whose
+ * fn_runner was set by vg_async_dispatch(), and stops at the first
+ * undispatched entry.  Once the whole queue is drained the linear buffer is
+ * reset and sem_wait_for_flush is posted (at most once) to release
+ * vg_async_stall()/vg_async_alloc() waiters.
+ *
+ * NOTE(review): runner callbacks execute while sl_index is still held, so a
+ * runner must never call back into this API or it will deadlock on the
+ * spinlock — confirm all runners respect this.
+ */
+void vg_run_async_checked(void)
+{
+ SDL_AtomicLock( &vg_async.sl_index );
+
+ while( vg_async.start ){
+ vg_async_item *entry = vg_async.start;
+
+ if( entry->fn_runner ){
+ entry->fn_runner( entry->payload, entry->size );
+ vg_async.start = entry->next;
+
+ /* queue fully drained: reclaim all memory and signal the loader */
+ if( vg_async.start == NULL ){
+ vg_async.end = NULL;
+
+ vg_linear_clear( vg_async.buffer );
+
+ if( !SDL_SemValue( vg_async.sem_wait_for_flush ) ){
+ SDL_SemPost( vg_async.sem_wait_for_flush );
+ }
+ }
+ }
+ else{
+ /* head of the queue is not dispatched yet; nothing more we can do */
+ SDL_AtomicUnlock( &vg_async.sl_index );
+ return;
+ }
+
+ /* TODO: if exceed max frametime.... */
+ }
+
+ /* queue was empty on entry (or just drained above): make sure the flush
+ * semaphore is raised so waiters do not hang */
+ if( !SDL_SemValue( vg_async.sem_wait_for_flush ) ){
+ SDL_SemPost( vg_async.sem_wait_for_flush );
+ }
+
+ SDL_AtomicUnlock( &vg_async.sl_index );
+}
+
+/*
+ * One-time setup: the flush semaphore starts at 0 (nothing flushed yet) and
+ * the queue gets its own dedicated linear allocator.
+ */
+void vg_async_init(void)
+{
+ const u32 k_async_buffer_size = 50*1024*1024; /* 50mb scratch for payloads */
+
+ vg_async.sem_wait_for_flush = SDL_CreateSemaphore(0);
+ vg_async.buffer = vg_create_linear_allocator( NULL, k_async_buffer_size,
+ VG_MEMORY_SYSTEM );
+}