1 /* Copyright (C) 2021-2023 Harry Godden (hgn) - All Rights Reserved
3 * primateves that you use when you need to run something from another thread
4 * back in the main loop of vg, at the start of each frame
13 typedef struct vg_async_item vg_async_item
;
21 void (*fn_runner
)( void *payload
, u32 size
);
27 vg_async_item
*start
, *end
;
29 SDL_sem
*sem_wait_for_flush
;
30 SDL_SpinLock sl_index
;
34 VG_STATIC
enum vg_thread_purpose
vg_thread_purpose(void);
35 VG_STATIC
enum engine_status
_vg_engine_status(void);
38 * Allocate an asynchronous call with a bit of memory
40 VG_STATIC vg_async_item
*vg_async_alloc( u32 size
)
42 /* ditch out here if engine crashed. this serves as the 'quit checking' */
44 if( _vg_engine_status() == k_engine_status_crashed
){
45 assert( vg_thread_purpose() == k_thread_purpose_loader
);
46 longjmp( vg
.env_loader_exit
, 1 );
49 SDL_AtomicLock( &vg_async
.sl_index
);
51 u32 total_allocation
= vg_align8(size
) + vg_align8(sizeof(vg_async_item
)),
52 remaining
= vg_linear_remaining( vg_async
.buffer
),
53 capacity
= vg_linear_get_capacity( vg_async
.buffer
);
55 if( total_allocation
> capacity
){
56 SDL_AtomicUnlock( &vg_async
.sl_index
);
57 vg_error( "Requested: %umb. Buffer size: %umb\n",
58 (total_allocation
/1024)/1024,
59 (capacity
/1024)/1024 );
61 vg_fatal_error( "async alloc invalid size\n" );
64 if( total_allocation
> remaining
){
65 SDL_AtomicUnlock( &vg_async
.sl_index
);
66 SDL_SemWait( vg_async
.sem_wait_for_flush
);
67 SDL_AtomicLock( &vg_async
.sl_index
);
69 remaining
= vg_linear_remaining( vg_async
.buffer
);
70 capacity
= vg_linear_get_capacity( vg_async
.buffer
);
72 assert( remaining
== capacity
);
73 assert( vg_async
.start
== NULL
);
74 assert( vg_async
.end
== NULL
);
77 void *block
= vg_linear_alloc( vg_async
.buffer
, total_allocation
);
79 vg_async_item
*entry
= block
;
82 if( size
) entry
->payload
= ((u8
*)block
) + vg_align8(sizeof(vg_async_item
));
83 else entry
->payload
= NULL
;
86 entry
->fn_runner
= NULL
;
89 vg_async
.end
->next
= entry
;
92 vg_async
.start
= entry
;
96 SDL_AtomicUnlock( &vg_async
.sl_index
);
102 * Wait until the current stack of async calls is completely flushed out
104 VG_STATIC
void vg_async_stall(void)
106 vg_info( "async_stall: %d\n", SDL_SemValue( vg_async
.sem_wait_for_flush
) );
107 SDL_SemWait( vg_async
.sem_wait_for_flush
);
111 * Mark the call as being filled and ready to go
113 VG_STATIC
void vg_async_dispatch( vg_async_item
*item
,
114 void (*runner
)( void *payload
, u32 size
) )
116 if( SDL_SemValue(vg_async
.sem_wait_for_flush
) )
117 SDL_SemWait(vg_async
.sem_wait_for_flush
);
119 SDL_AtomicLock( &vg_async
.sl_index
);
120 item
->fn_runner
= runner
;
121 SDL_AtomicUnlock( &vg_async
.sl_index
);
125 * Make a simple async call without allocating extra.
127 VG_STATIC
void vg_async_call( void (*runner
)( void *payload
, u32 size
),
128 void *payload
, u32 size
)
130 vg_async_item
*call
= vg_async_alloc(0);
131 call
->payload
= payload
;
133 vg_async_dispatch( call
, runner
);
137 * Run as much of the async buffer as possible
139 VG_STATIC
void vg_run_async_checked(void)
141 SDL_AtomicLock( &vg_async
.sl_index
);
143 while( vg_async
.start
){
144 vg_async_item
*entry
= vg_async
.start
;
146 if( entry
->fn_runner
){
147 entry
->fn_runner( entry
->payload
, entry
->size
);
148 vg_async
.start
= entry
->next
;
150 if( vg_async
.start
== NULL
){
153 vg_linear_clear( vg_async
.buffer
);
155 if( !SDL_SemValue( vg_async
.sem_wait_for_flush
) ){
156 SDL_SemPost( vg_async
.sem_wait_for_flush
);
161 SDL_AtomicUnlock( &vg_async
.sl_index
);
165 /* TODO: if exceed max frametime.... */
168 if( !SDL_SemValue( vg_async
.sem_wait_for_flush
) ){
169 SDL_SemPost( vg_async
.sem_wait_for_flush
);
172 SDL_AtomicUnlock( &vg_async
.sl_index
);
175 VG_STATIC
void vg_async_init(void)
177 vg_async
.sem_wait_for_flush
= SDL_CreateSemaphore(0);
178 vg_async
.buffer
= vg_create_linear_allocator( NULL
, 50*1024*1024,
182 #endif /* VG_ASYNC_H */