/* Copyright (C) 2021-2023 Harry Godden (hgn) - All Rights Reserved
 *
 * primitives that you use when you need to run something from another thread
 * back in the main loop of vg, at the start of each frame
13 typedef struct vg_async_item vg_async_item
;
21 void (*fn_runner
)( void *payload
, u32 size
);
27 vg_async_item
*start
, *end
;
29 SDL_sem
*sem_wait_for_flush
;
30 SDL_SpinLock sl_index
;
34 VG_STATIC
enum vg_thread_purpose
vg_thread_purpose(void);
35 VG_STATIC
enum engine_status
_vg_engine_status(void);
38 * Allocate an asynchronous call with a bit of memory
40 VG_STATIC vg_async_item
*vg_async_alloc( u32 size
)
42 /* ditch out here if engine crashed. this serves as the 'quit checking' */
43 if( _vg_engine_status() == k_engine_status_crashed
){
44 assert( vg_thread_purpose() == k_thread_purpose_loader
);
45 longjmp( vg
.env_loader_exit
, 1 );
48 SDL_AtomicLock( &vg_async
.sl_index
);
50 u32 total_allocation
= vg_align8(size
) + vg_align8(sizeof(vg_async_item
)),
51 remaining
= vg_linear_remaining( vg_async
.buffer
),
52 capacity
= vg_linear_get_capacity( vg_async
.buffer
);
54 if( total_allocation
> capacity
){
55 SDL_AtomicUnlock( &vg_async
.sl_index
);
56 vg_error( "Requested: %umb. Buffer size: %umb\n",
57 (total_allocation
/1024)/1024,
58 (capacity
/1024)/1024 );
60 vg_fatal_error( "async alloc invalid size\n" );
63 if( total_allocation
> remaining
){
64 SDL_AtomicUnlock( &vg_async
.sl_index
);
65 SDL_SemWait( vg_async
.sem_wait_for_flush
);
66 SDL_AtomicLock( &vg_async
.sl_index
);
68 remaining
= vg_linear_remaining( vg_async
.buffer
);
69 capacity
= vg_linear_get_capacity( vg_async
.buffer
);
71 assert( remaining
== capacity
);
72 assert( vg_async
.start
== NULL
);
73 assert( vg_async
.end
== NULL
);
76 void *block
= vg_linear_alloc( vg_async
.buffer
, total_allocation
);
78 vg_async_item
*entry
= block
;
81 if( size
) entry
->payload
= ((u8
*)block
) + vg_align8(sizeof(vg_async_item
));
82 else entry
->payload
= NULL
;
85 entry
->fn_runner
= NULL
;
88 vg_async
.end
->next
= entry
;
91 vg_async
.start
= entry
;
95 SDL_AtomicUnlock( &vg_async
.sl_index
);
101 * Wait until the current stack of async calls is completely flushed out
103 VG_STATIC
void vg_async_stall(void)
105 vg_info( "async_stall: %d\n", SDL_SemValue( vg_async
.sem_wait_for_flush
) );
106 SDL_SemWait( vg_async
.sem_wait_for_flush
);
110 * Mark the call as being filled and ready to go
112 VG_STATIC
void vg_async_dispatch( vg_async_item
*item
,
113 void (*runner
)( void *payload
, u32 size
) )
115 if( SDL_SemValue(vg_async
.sem_wait_for_flush
) )
116 SDL_SemWait(vg_async
.sem_wait_for_flush
);
118 SDL_AtomicLock( &vg_async
.sl_index
);
119 item
->fn_runner
= runner
;
120 SDL_AtomicUnlock( &vg_async
.sl_index
);
124 * Make a simple async call without allocating extra.
126 VG_STATIC
void vg_async_call( void (*runner
)( void *payload
, u32 size
),
127 void *payload
, u32 size
)
129 vg_async_item
*call
= vg_async_alloc(0);
130 call
->payload
= payload
;
132 vg_async_dispatch( call
, runner
);
136 * Run as much of the async buffer as possible
138 VG_STATIC
void vg_run_async_checked(void)
140 SDL_AtomicLock( &vg_async
.sl_index
);
142 while( vg_async
.start
){
143 vg_async_item
*entry
= vg_async
.start
;
145 if( entry
->fn_runner
){
146 entry
->fn_runner( entry
->payload
, entry
->size
);
147 vg_async
.start
= entry
->next
;
149 if( vg_async
.start
== NULL
){
152 vg_linear_clear( vg_async
.buffer
);
154 if( !SDL_SemValue( vg_async
.sem_wait_for_flush
) ){
155 SDL_SemPost( vg_async
.sem_wait_for_flush
);
160 SDL_AtomicUnlock( &vg_async
.sl_index
);
164 /* TODO: if exceed max frametime.... */
167 if( !SDL_SemValue( vg_async
.sem_wait_for_flush
) ){
168 SDL_SemPost( vg_async
.sem_wait_for_flush
);
171 SDL_AtomicUnlock( &vg_async
.sl_index
);
/* One-time setup for the async system: semaphore starts at 0 (posted by the
 * main thread when the queue drains) and a 50 MiB linear buffer backs all
 * queued items.
 * NOTE(review): this extraction is garbled — the stray leading integers are
 * original file line numbers, and the trailing argument(s) of the
 * vg_create_linear_allocator() call are missing from this view; confirm
 * against the full file. */
174 VG_STATIC
void vg_async_init(void)
176 vg_async
.sem_wait_for_flush
= SDL_CreateSemaphore(0);
177 vg_async
.buffer
= vg_create_linear_allocator( NULL
, 50*1024*1024,
181 #endif /* VG_ASYNC_H */