#include "vg/vg_log.h"
#include "vg/vg_opt.h"
#include "vg/vg_build.h"
+#include "vg/vg_mem_pool.h"
#include "vg/vg_build_utils_shader.h"
#include "vg/vg_msg.h"
#include "src/addon_types.h"
vg_compiler_run( &test_proj, &vg_test_env, &conf, sources.buffer, "dbtest", k_obj_type_exe );
}
+#include "vg/vg_mem_pool.c"
+
int main( int argc, const char *argv[] )
{
vg_log_init();
-
-#if 0
- vg_stack_allocator s0;
- vg_stack_init( &s0, NULL, VG_MB(2), NULL );
- u32 size0;
- void *buf0 = vg_file_read( &s0, "content_skaterift/textures/prem.qoi", &size0, 0 );
- vg_mem_dumphex( stdout, buf0, 0, 1024 );
-
- vg_stack_allocator s1;
- vg_stack_init( &s1, NULL, VG_MB(2), NULL );
- u32 size1;
- void *buf1 = vg_file_read( &s1, "content_skaterift/textures/guide_pump.qoi", &size1, 0 );
-
- vg_mem_dumphex( stdout, buf1, 0, 1024 );
-
- return 0;
-#endif
-
_vg_opt_init( argc, argv );
const char *arg;
{
-	/* create the allocations pool */
+	/* create the cache entry array and id pool */
u32 alloc_size = sizeof(struct addon_cache_entry)*inf->cache_count;
- cache->allocs = vg_stack_allocate( &vg.rtmem, alloc_size, 8, "Cache entries" );
- memset( cache->allocs, 0, alloc_size );
-
- cache->pool.buffer = cache->allocs;
- cache->pool.count = inf->cache_count;
- cache->pool.stride = sizeof( struct addon_cache_entry );
- cache->pool.offset = offsetof( struct addon_cache_entry, poolnode );
- vg_pool_init( &cache->pool );
+ cache->entries = vg_stack_allocate( &vg.rtmem, alloc_size, 8, "Cache entries" );
+ memset( cache->entries, 0, alloc_size );
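+	/* ids start in pool_unreferenced (reusable); addon_cache_watch moves them to pool_referenced */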
+ vg_pool_init( &cache->pool, &cache->pool_unreferenced, inf->cache_count, &vg.rtmem );
/* create the real memory */
u32 cache_size = inf->cache_stride*inf->cache_count;
for( i32 j=0; j<inf->cache_count; j++ )
{
- struct addon_cache_entry *alloc = &cache->allocs[j];
- alloc->addon_id = 0;
+ struct addon_cache_entry *entry = &cache->entries[j];
+ entry->addon_id = 0;
if( inf->item_arena_size )
- alloc->item_arena = vg_stack_make_substack( &vg.rtmem, inf->item_arena_size, "Addon item arena" );
+ entry->item_arena = vg_stack_make_substack( &vg.rtmem, inf->item_arena_size, "Addon item arena" );
else
- alloc->item_arena = NULL;
+ entry->item_arena = NULL;
}
}
}
{
THREAD_0;
struct cache_complete_info *info = (void *)task->data;
-
struct addon_cache *cache = &_addon.cache[info->type];
- addon_cache_entry *cache_entry = vg_pool_item( &cache->pool, info->cache_id );
- cache_entry->state = info->result_state;
+ cache->entries[ vg_pool_index( &cache->pool, info->cache_id ) ].state = info->result_state;
}
struct cache_load_info
{
THREAD_1;
struct cache_load_info *info = (void *)task->data;
- struct addon_cache *cache = &_addon.cache[info->type];
- addon_cache_entry *cache_entry = vg_pool_item( &cache->pool, info->cache_id );
+ addon_cache_entry *cache_entry = get_addon_cache_entry( info->type, info->cache_id );
vg_info( "process cache load request (%u#%u): %s\n", info->type, info->cache_id, info->path );
/* load content files
{
struct addon_cache *cache = &_addon.cache[type];
- for( u32 id=1; id<=cache->pool.count; id++ )
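+	/* walk only the referenced chain rather than scanning every slot */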
+ u16 id = cache->pool_referenced.tail;
+ while( id )
{
- addon_cache_entry *entry = vg_pool_item( &cache->pool, id );
+ addon_cache_entry *entry = &cache->entries[ vg_pool_index( &cache->pool, id ) ];
if( entry->state == k_addon_cache_state_load_request )
{
char path_buf[4096];
entry->state = k_addon_cache_state_loading;
vg_async_task_dispatch( task, cache_load_task );
}
+ id = vg_pool_next( &cache->pool, id, 0 );
}
}
}
struct addon_cache *cache = &_addon.cache[type];
+ u32 index = vg_pool_index( &cache->pool, cache_id );
if( only_if_loaded )
{
- struct addon_cache_entry *entry = vg_pool_item( &cache->pool, cache_id );
+ struct addon_cache_entry *entry = &cache->entries[ index ];
if( entry->state != k_addon_cache_state_loaded )
return NULL;
}
- return cache->items + ((size_t)(cache_id-1) * cache->stride);
+ return cache->items + (index * cache->stride);
}
addon_cache_id addon_cache_create_viewer( enum addon_type type, addon_id addon_id )
addon_cache_id cache_id = reg->cache_id;
if( !cache_id )
{
- struct addon_cache *cache = &_addon.cache[ type ];
- cache_id = vg_pool_lru( &cache->pool );
-
+		cache_id = _addon.cache[ type ].pool_unreferenced.tail;
if( !cache_id )
{
vg_error( "cache full (type: %u)!\n", type );
return 0;
}
- struct addon_cache_entry *new_entry = vg_pool_item( &cache->pool, cache_id );
+ addon_cache_entry *new_entry = get_addon_cache_entry( type, cache_id );
if( new_entry->state == k_addon_cache_state_loaded )
{
if( type == k_addon_type_board )
reg->cache_id = cache_id;
}
- if( cache_id )
- vg_pool_watch( &cache->pool, cache_id );
-
+ addon_cache_watch( type, cache_id );
return cache_id;
}
return;
struct addon_cache *cache = &_addon.cache[type];
- vg_pool_watch( &cache->pool, cache_id );
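+	/* first reference takes the entry off the unreferenced (reusable) chain */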
+ if( vg_pool_reference( &cache->pool, cache_id, VG_POOL_INCREMENT ) == 1 )
+ vg_pool_switch( &cache->pool, &cache->pool_unreferenced, &cache->pool_referenced, cache_id );
}
void addon_cache_unwatch( enum addon_type type, addon_cache_id cache_id )
return;
struct addon_cache *cache = &_addon.cache[type];
- vg_pool_unwatch( &cache->pool, cache_id );
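+	/* dropping the last reference makes the entry reusable again */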
+ if( vg_pool_reference( &cache->pool, cache_id, VG_POOL_DECREMENT ) == 0 )
+ vg_pool_switch( &cache->pool, &cache->pool_referenced, &cache->pool_unreferenced, cache_id );
}
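+/* resolve a cache id to its entry; a cache_id of 0 yields NULL */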
+addon_cache_entry *get_addon_cache_entry( enum addon_type type, addon_cache_id cache_id )
+{
+ if( cache_id )
+ {
+ struct addon_cache *cache = &_addon.cache[type];
+ return &cache->entries[ vg_pool_index( &cache->pool, cache_id ) ];
+ }
+ else return NULL;
+}
struct addon_cache_entry
{
addon_id addon_id;
- vg_pool_node poolnode;
-
enum addon_cache_state
{
k_addon_cache_state_none,
char local_cpart[ ADDON_CPART_MAX ];
vg_stack_allocator *item_arena;
}
- *allocs;
+ *entries;
vg_pool pool;
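+	/* watched entries sit in pool_referenced; pool_unreferenced holds ids that may be reused */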
+ vg_pool_chain pool_unreferenced, pool_referenced;
void *items; /* Small header struct */
size_t stride;
void addon_cache_watch( enum addon_type type, addon_cache_id cache_id );
void addon_cache_unwatch( enum addon_type type, addon_cache_id cache_id );
void *addon_cache_item_data( enum addon_type type, addon_cache_id cache_id, bool only_if_loaded );
+addon_cache_entry *get_addon_cache_entry( enum addon_type type, addon_cache_id cache_id );
skateshop_playermod( 0 );
srinput.state = k_input_state_resume;
- struct addon_cache *cache = &_addon.cache[ k_addon_type_player ];
- struct player_model *model = addon_cache_item_data( k_addon_type_player, localplayer.playermodel.cache_slot, 1 );
- addon_cache_entry *cache_entry = vg_pool_item( &cache->pool, localplayer.playermodel.cache_slot );
- addon_id id = cache_entry->addon_id;
-
+ u16 slot = localplayer.playermodel.cache_slot;
+ struct player_model *model = addon_cache_item_data( k_addon_type_player, slot, 1 );
if( model )
{
if( model->flags & PLAYER_MODEL_FLAG_CUSTOMIZABLE )
{
+ addon_cache_entry *cache_entry = get_addon_cache_entry( k_addon_type_player, slot );
memcpy( cache_entry->local_cpart, localplayer.playermodel.cpart, ADDON_CPART_MAX );
savedata_file file;
init_savefile( &file, "" );
- addon_make_savedata_path( id, file.path );
+ addon_make_savedata_path( cache_entry->addon_id, file.path );
vg_msg_wkvstr( &file.msg, "cpart", localplayer.playermodel.cpart );
write_savefile( &file, 1 );
}
ent_marker *mark_rack = af_arritm( &world->ent_marker, mdl_entity_id_id(shop->boards.id_rack)),
*mark_display = af_arritm( &world->ent_marker, mdl_entity_id_id(shop->boards.id_display));
- struct addon_cache *cache = &_addon.cache[k_addon_type_board];
u32 page = _skateshop.selected_board_index/SKATESHOP_VIEW_SLOT_MAX;
/* Render loaded boards in the view slots */
if( !slot->cache_id )
goto fade_out;
- addon_cache_entry *entry = vg_pool_item( &cache->pool, slot->cache_id );
+ addon_cache_entry *entry = get_addon_cache_entry( k_addon_type_board, slot->cache_id );
struct player_board *board = addon_cache_item_data( k_addon_type_board, slot->cache_id, 1 );
if( !board )
goto fade_out;
font3d_simple_draw( 0, "Nothing installed", cam, mmdl );
u16 cache_id = skateshop_selected_board_cache_id();
- struct addon_cache_entry *entry = vg_pool_item( &cache->pool, cache_id );
+ addon_cache_entry *entry = get_addon_cache_entry( k_addon_type_board, cache_id );
addon_reg *reg = NULL;
if( entry )
reg = addon_details( entry->addon_id );
if( shop->type == k_skateshop_type_charshop )
{
u32 index = 0;
-
- if( localplayer.playermodel.cache_slot )
+ u16 slot = localplayer.playermodel.cache_slot;
+ if( slot )
{
- struct addon_cache *cache = &_addon.cache[ k_addon_type_player ];
- addon_cache_entry *cache_entry = vg_pool_item( &cache->pool, localplayer.playermodel.cache_slot );
- if( _addon_get_filtered_index( k_addon_type_player, cache_entry->addon_id, 0, ADDON_REG_HIDDEN, &index ) )
- {
- }
+ addon_cache_entry *cache_entry = get_addon_cache_entry( k_addon_type_player, slot );
+ (void)_addon_get_filtered_index( k_addon_type_player, cache_entry->addon_id, 0, ADDON_REG_HIDDEN, &index );
}
_skateshop.selected_player_index = index;
}
u8 decrypted[1024];
u32 ticket_len = 1024;
- int success = SteamEncryptedAppTicket_BDecryptTicket( auth->ticket, auth->ticket_length, decrypted,
- &ticket_len, _gameserver.app_symmetric_key,
- k_nSteamEncryptedAppTicketSymmetricKeyLen );
+ bool success = SteamEncryptedAppTicket_BDecryptTicket(
+ auth->ticket, auth->ticket_length,
+ decrypted, &ticket_len,
+ _steam_api.server_symmetric_key, k_nSteamEncryptedAppTicketSymmetricKeyLen );
if( !success )
{
clients[ NETWORK_MAX_PLAYERS ];
u8 client_knowledge_mask[ (NETWORK_MAX_PLAYERS*(NETWORK_MAX_PLAYERS-1))/2 ];
- u8 app_symmetric_key[ k_nSteamEncryptedAppTicketSymmetricKeyLen ];
u64 ticks;
u64 global_uid;
struct _gs_requests _gs_requests;
-static void log_request_status( gs_request *req )
+static gs_request *_get_request( u16 request_id )
+{
+ if( request_id )
+ return &_gs_requests.requests[ vg_pool_index( &_gs_requests.pool, request_id ) ];
+ else
+ return NULL;
+}
+
+static void log_request_status( u16 request_id )
{
THREAD_0;
+ gs_request *req = _get_request( request_id );
const char *associated_username = "none";
struct gameserver_client *client = &_gameserver.clients[ req->client_id ];
if( (client->active) && (client->session_uid == req->user_uid) )
associated_username = client->username;
- u16 pool_id = vg_pool_id( &_gs_requests.request_pool, req );
-
const char *request_state_str = (const char *[])
{
[k_request_state_none] = "None",
[k_request_state_transfer_start] = "Start transfer",
[k_request_state_transfer] = "Transferring",
[k_request_state_finished] = "Finished",
+ [k_request_state_invalidated] = "Invalidated",
[k_request_state_max ] = NULL
}
[ req->state ];
{
KRED, KGRN, KYEL, KBLU,
KMAG, KCYN
- }[ pool_id % 6 ];
+ }[ request_id % 6 ];
vg_low( "req[%s%s##%hu" KWHT "] State: %s, Status: %u\n",
- colour, associated_username, pool_id, request_state_str, (u32)req->status );
+ colour, associated_username, request_id, request_state_str, (u32)req->status );
}
void _gs_requests_init(void)
u32 total_requests = GS_MAX_REQUESTS*NETWORK_MAX_PLAYERS;
u32 alloc_size = sizeof(gs_request)*total_requests;
- _gs_requests.request_buffer = malloc( alloc_size );
- memset( _gs_requests.request_buffer, 0, alloc_size );
-
- vg_pool *pool = &_gs_requests.request_pool;
- pool->buffer = _gs_requests.request_buffer;
- pool->count = total_requests;
- pool->stride = sizeof( gs_request );
- pool->offset = offsetof( gs_request, poolnode );
- vg_pool_init( pool );
-
+ _gs_requests.requests = vg_malloc( alloc_size );
+ memset( _gs_requests.requests, 0, alloc_size );
+	vg_pool_init( &_gs_requests.pool, &_gs_requests.inactive_chain, total_requests, NULL );
_gs_requests.transfer_stream_buffer = malloc( GS_TRANSFER_MAX_SIZE*NETWORK_MAX_PLAYERS );
}
-static void gs_request_release( gs_request *req )
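+/* release the held network message and return the request to the inactive chain */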
+static void _gs_release_request( u16 client_id, u16 request_id )
{
THREAD_0;
+ VG_ASSERT( request_id );
- if( req->message )
- {
- SteamAPI_SteamNetworkingMessage_t_Release( req->message );
- req->message = NULL;
- }
+ gs_request *req = _get_request( request_id );
+ req->state = k_request_state_invalidated;
+
+ VG_ASSERT( req->message );
+ SteamAPI_SteamNetworkingMessage_t_Release( req->message );
+ req->message = NULL;
+
+ gs_request_client *rc = &_gs_requests.clients[ client_id ];
+ vg_pool_switch( &_gs_requests.pool, &rc->active_chain, &_gs_requests.inactive_chain, request_id );
}
void _gs_requests_client_disconnect( u32 client_id )
{
THREAD_0;
-
gs_request_client *rc = &_gs_requests.clients[ client_id ];
-
- while( rc->current_request )
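+	/* flag this client's active requests; the update loop releases them once it is safe to do so */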
+ u16 request_id = rc->active_chain.tail;
+ while( request_id )
{
- gs_request *req = vg_pool_item( &_gs_requests.request_pool, rc->current_request );
-
- if( vg_pool_unwatch( &_gs_requests.request_pool, rc->current_request ) )
- gs_request_release( req );
-
- rc->current_request = req->waiting_request;
- rc->active_request_count --;
+ _get_request( request_id )->deleted = 1;
+ request_id = vg_pool_next( &_gs_requests.pool, request_id, 0 );
}
}
for( u32 i=0; i<NETWORK_MAX_PLAYERS; i ++ )
{
struct gameserver_client *client = &_gameserver.clients[i];
-
- if( !client->active )
- continue;
-
gs_request_client *rc = &_gs_requests.clients[i];
- if( rc->current_request == 0 )
+ u16 request_id = rc->active_chain.tail;
+ if( request_id == 0 )
continue;
- gs_request *req = vg_pool_item( &_gs_requests.request_pool, rc->current_request );
+ gs_request *req = _get_request( request_id );
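+		/* deleted requests are released here, unless they are still being processed on the task thread */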
+ if( req->deleted )
+ {
+ if( req->state == k_request_state_server_processing )
+ continue;
+ else
+ {
+ _gs_release_request( i, request_id );
+ continue;
+ }
+ }
if( req->state == k_request_state_none )
{
req->data_buffer = _gs_requests.transfer_stream_buffer + (i*GS_TRANSFER_MAX_SIZE);
req->data_buffer_send_size = 0;
req->send_offset = 0;
- vg_pool_watch( &_gs_requests.request_pool, rc->current_request );
vg_async_task *run_task = vg_allocate_async_task( &_gs_db.tasks, sizeof(struct task_request_run_info), 1 );
struct task_request_run_info *info = (void *)run_task->data;
- info->pool_id = rc->current_request;
+ info->pool_id = request_id;
vg_async_task_dispatch( run_task, task_request_run );
req->state = k_request_state_server_processing;
- log_request_status( req );
+ log_request_status( request_id );
}
else if( req->state == k_request_state_server_processing )
{
msg->m_idxLane = 1;
netmsg_request *res = msg->m_pData;
res->inetmsg_id = k_inetmsg_response;
- res->id = req->client_request_id;
+ res->uid = req->client_request_uid;
res->status = req->status;
SteamAPI_ISteamNetworkingSockets_SendMessages( _steam_api.pSteamNetworkingSockets, 1, &msg, NULL );
- req->state = k_request_state_finished;
- log_request_status( req );
_gameserver.bytes_send1 += size;
+ _gs_release_request( i, request_id );
}
else if( req->state == k_request_state_transfer_start )
{
struct netmsg_request *res = msg->m_pData;
res->inetmsg_id = k_inetmsg_response;
- res->id = req->client_request_id;
+ res->uid = req->client_request_uid;
res->status = k_request_status_transfer_header;
struct netmsg_transfer_header *header = (void *)res->buffer;
SteamAPI_ISteamNetworkingSockets_SendMessages( _steam_api.pSteamNetworkingSockets, 1, &msg, NULL );
req->state = k_request_state_transfer;
- log_request_status( req );
+ log_request_status( request_id );
_gameserver.bytes_send1 += size;
}
else if( req->state == k_request_state_transfer )
{
+ bool done = 0;
u32 size = GS_TRANSFER_BYTES_PER_TICK;
if( req->send_offset + size >= req->data_buffer_send_size )
{
size = req->data_buffer_send_size - req->send_offset;
req->state = k_request_state_finished;
- log_request_status( req );
+ log_request_status( request_id );
+ done = 1;
}
u32 message_size = sizeof(netmsg_request) + size;
- SteamNetworkingMessage_t *msg = SteamAPI_ISteamNetworkingUtils_AllocateMessage( _steam_api.pSteamNetworkingUtils, message_size );
+ SteamNetworkingMessage_t *msg = SteamAPI_ISteamNetworkingUtils_AllocateMessage(
+ _steam_api.pSteamNetworkingUtils, message_size );
+
msg->m_conn = client->connection;
msg->m_idxLane = 1;
-
struct netmsg_request *res = msg->m_pData;
res->inetmsg_id = k_inetmsg_response;
- res->id = req->client_request_id;
+ res->uid = req->client_request_uid;
res->status = k_request_status_transfer_continue;
memcpy( res->buffer, req->data_buffer + req->send_offset, size );
SteamAPI_ISteamNetworkingSockets_SendMessages( _steam_api.pSteamNetworkingSockets, 1, &msg, NULL );
req->send_offset += size;
_gameserver.bytes_send1 += size;
+
+ if( done )
+ _gs_release_request( i, request_id );
}
else if( req->state == k_request_state_finished )
- {
- if( vg_pool_unwatch( &_gs_requests.request_pool, rc->current_request ) )
- gs_request_release( req );
-
- rc->current_request = req->waiting_request;
- rc->active_request_count --;
-
- req->state = k_request_state_none;
- log_request_status( req );
- }
+ _gs_release_request( i, request_id );
}
}
{
THREAD_0;
struct task_request_run_info *info = (void *)task->data;
- gs_request *req = vg_pool_item( &_gs_requests.request_pool, info->pool_id );
-
- SteamAPI_SteamNetworkingMessage_t_Release( req->message );
- req->message = NULL;
+ gs_request *req = _get_request( info->pool_id );
- /* check if we're still pointing at the same user of the same session */
- struct gameserver_client *client = &_gameserver.clients[ req->client_id ];
- if( (client->active == 0) || (client->session_uid != req->user_uid) )
+ if( req->deleted )
{
- /* should be going from 1 -> 0 */
- VG_ASSERT( vg_pool_unwatch( &_gs_requests.request_pool, info->pool_id ) );
- gs_request_release( req );
-
- vg_low( "Ignoring response because session uid%u != user uid%u. (ok)\n", client->session_uid, req->user_uid );
+ req->state = k_request_state_invalidated;
return;
}
/* OK or client error */
- if( req->client_request_id )
+ if( req->client_request_uid )
{
if( req->status == k_request_status_ok )
req->state = k_request_state_transfer_start;
/* request ID of 0 means the client doesn't care about getting the response back */
req->state = k_request_state_finished;
}
- log_request_status( req );
-
- /* should be going from 2 -> 1 */
- VG_ASSERT( vg_pool_unwatch( &_gs_requests.request_pool, info->pool_id ) == 0 );
+ log_request_status( info->pool_id );
}
static void task_request_run( vg_async_task *task )
THREAD_1;
struct task_request_run_info *info = (void *)task->data;
- gs_request *req = vg_pool_item( &_gs_requests.request_pool, info->pool_id );
+ gs_request *req = _get_request( info->pool_id );
netmsg_request *client_packet = (netmsg_request *)req->message->m_pData;
vg_msg client_msg;
enum request_status error_status = 0;
- if( rc->active_request_count < GS_MAX_REQUESTS )
+ if( rc->active_chain.count < GS_MAX_REQUESTS )
{
- u16 new_id = vg_pool_lru( &_gs_requests.request_pool );
+ u16 new_id = _gs_requests.inactive_chain.tail;
if( new_id )
{
- rc->active_request_count ++;
-
- vg_pool_watch( &_gs_requests.request_pool, new_id );
- gs_request *req = vg_pool_item( &_gs_requests.request_pool, new_id );
-
+ vg_pool_switch( &_gs_requests.pool, &_gs_requests.inactive_chain, &rc->active_chain, new_id );
+ gs_request *req = _get_request( new_id );
req->state = k_request_state_none;
req->message = msg;
req->user_uid = client->session_uid;
req->user_steamid = client->steamid;
req->client_id = client_id;
- req->client_request_id = client_packet->id;
+ req->client_request_uid = client_packet->uid;
req->status = k_request_status_ok;
req->data_buffer = NULL;
req->data_buffer_send_size = 0;
req->send_offset = 0;
- req->waiting_request = 0;
-
- gs_request *last_request = vg_pool_item( &_gs_requests.request_pool, rc->current_request ),
- *next = last_request;
-
- while( next )
- {
- last_request = next;
- next = vg_pool_item( &_gs_requests.request_pool, last_request->waiting_request );
- }
-
- if( last_request )
- last_request->waiting_request = new_id;
- else
- rc->current_request = new_id;
-
- log_request_status( req );
+ req->deleted = 0;
+ log_request_status( new_id );
return;
}
else
reply_msg->m_idxLane = 1;
netmsg_request *res = reply_msg->m_pData;
res->inetmsg_id = k_inetmsg_response;
- res->id = client_packet->id;
+ res->uid = client_packet->uid;
res->status = error_status;
SteamAPI_ISteamNetworkingSockets_SendMessages( _steam_api.pSteamNetworkingSockets, 1, &reply_msg, NULL );
SteamAPI_SteamNetworkingMessage_t_Release( msg );
struct gs_request
{
- vg_pool_node poolnode;
- u16 waiting_request;
-
enum request_state
{
k_request_state_none,
k_request_state_transfer_start,
k_request_state_transfer,
k_request_state_finished,
+ k_request_state_invalidated,
k_request_state_max
}
state;
- SteamNetworkingMessage_t *message;
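+	/* set when the owning client disconnects; the update loop releases the request */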
+ bool deleted;
+ SteamNetworkingMessage_t *message;
u64 user_steamid;
u64 user_uid;
u32 client_id;
- u8 client_request_id;
+ u32 client_request_uid;
enum request_status status;
void *data_buffer;
struct _gs_requests
{
- gs_request *request_buffer;
- vg_pool request_pool;
+ gs_request *requests;
+ vg_pool pool;
+ vg_pool_chain inactive_chain;
void *transfer_stream_buffer;
struct gs_request_client
{
- u16 current_request;
- u32 active_request_count;
+ vg_pool_chain active_chain;
}
clients[ NETWORK_MAX_PLAYERS ];
}
if( view_id )
{
- struct addon_cache *cache = &_addon.cache[addon_type];
- addon_cache_entry *entry = vg_pool_item( &cache->pool, view_id );
+ addon_cache_entry *entry = get_addon_cache_entry( addon_type, view_id );
addon_make_uid_cpart( entry->addon_id, item->uid, entry->local_cpart );
}
else
netplayers.list[i].active = 0;
}
- _net_requests_reset();
+ _net_requests_deleteall();
}
void network_status_string( vg_str *str, u32 *colour )
return;
}
+ /* Wait for app key to arrive */
+ if( _steam_api.app_key_length == 0 )
+ return;
+
if( state == k_ESteamNetworkingConnectionState_Connected )
{
poll_remote_connection();
network_client.last_attempt = vg.time_real;
}
}
+
+ _net_requests_update();
}
void chat_send_message( const char *message, bool rcon )
#include "network_msg.h"
#include "addon_types.h"
-#define NETWORK_MAX_REQUESTS 8
-
/*
* Interface
*/
#define NETWORK_BUFFERFRAMES 6
#define NETWORK_MAX_CHAT 128
#define NETWORK_REGION_MAX 32
-#define NETWORK_SKATERIFT_VERSION 10
+#define NETWORK_SKATERIFT_VERSION 11
#define NETWORK_REQUEST_MAX 2048
#define NETWORK_LEADERBOARD_ALLTIME 0
/* requests 300 */
typedef struct netmsg_request netmsg_request;
-#if defined( REQUEST_V2 )
enum{ k_inetmsg_request = 302, k_inetmsg_response = 303 };
-#else
-enum{ k_inetmsg_request = 300, k_inetmsg_response = 301 };
-#endif
-struct netmsg_request {
+
+struct netmsg_request
+{
u16 inetmsg_id;
- u8 id, status;
-#if defined( REQUEST_V2 )
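+	/* UNUSED0 occupies the old u8 id slot; responses are now matched by uid */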
+ u8 UNUSED0, status;
u32 uid;
-#endif
u8 buffer[];
};
struct netmsg_transfer_header
{
u32 data_size, chunks;
-#if defined( REQUEST_V2 )
- u32 uid;
-#endif
};
enum request_status {
else return "Status code out of range";
}
-static void log_request_status( net_request *req, const char *comment )
+static net_request *_get_request( u16 request_id )
{
- const char *associated_username = "local";
- u16 pool_id = vg_pool_id( &_net_requests.request_pool, req );
+ return &_net_requests.requests[ vg_pool_index( &_net_requests.pool, request_id ) ];
+}
+static void log_request_status( u16 request_id, const char *comment )
+{
+ net_request *req = _get_request( request_id );
+ const char *associated_username = "local";
const char *request_state_str = (const char *[])
{
[k_request_state_none] = "None",
[k_request_state_sent] = "Sent",
[k_request_state_receiving] = "Recieving",
- [k_request_state_finished] = "Finished",
- [k_request_state_error] = "Error",
+ [k_request_state_finished] = KGRN "Finished",
+ [k_request_state_error] = KRED "Error",
[k_request_state_max ] = NULL
}
[ req->state ];
{
KRED, KGRN, KYEL, KBLU,
KMAG, KCYN
- }[ pool_id % 6 ];
+ }[ request_id % 6 ];
if( comment )
- vg_low( "req[%s%s##%hu" KWHT "] State: %s (%s)\n", colour, associated_username, pool_id, request_state_str, comment );
+ vg_low( "req[%s%s##%hu" KWHT "] State: %s (%s)\n", colour, associated_username, request_id, request_state_str, comment );
else
- vg_low( "req[%s%s##%hu" KWHT "] State: %s\n", colour, associated_username, pool_id, request_state_str );
+ vg_low( "req[%s%s##%hu" KWHT "] State: %s\n", colour, associated_username, request_id, request_state_str );
}
void network_send_request( netmsg_request *packet, vg_msg *body,
if( callback )
{
- packet->id = vg_pool_lru( &_net_requests.request_pool );
-#if defined( REQUEST_V2 )
+ u16 request_id = _net_requests.inactive.tail;
+
packet->uid = _net_requests.global_uid;
- if( _net_requests.global_uid == 0xffffffff )
- _net_requests.global_uid = 4;
- else
- _net_requests.global_uid ++;
-#endif
+ if( _net_requests.global_uid == 0xffffffff ) _net_requests.global_uid = 4;
+ else _net_requests.global_uid ++;
- if( packet->id )
+ if( request_id )
{
- vg_pool_watch( &_net_requests.request_pool, packet->id );
- net_request *req = vg_pool_item( &_net_requests.request_pool, packet->id );
+ vg_pool_switch( &_net_requests.pool, &_net_requests.inactive, &_net_requests.active, request_id );
+ net_request *req = _get_request( request_id );
req->callback = callback;
req->sendtime = vg.time_real;
req->userdata = userdata;
req->state = k_request_state_sent;
- log_request_status( req, NULL );
+ req->uid = packet->uid;
+ log_request_status( request_id, NULL );
}
else
{
}
}
else
- {
- packet->id = 0;
-#if defined( REQUEST_V2 )
packet->uid = 0;
-#endif
- }
SteamAPI_ISteamNetworkingSockets_SendMessageToConnection( _steam_api.pSteamNetworkingSockets, network_client.remote,
packet, sizeof(netmsg_request)+len,
network_send_request( packet, &data, NULL, 0 );
}
-static void _delete_request( net_request *request )
+static void _delete_request( u16 request_id )
{
- vg_pool_unwatch( &_net_requests.request_pool, vg_pool_id( &_net_requests.request_pool, request ) );
- request->state = k_request_state_none;
+ net_request *req = _get_request( request_id );
+ req->state = k_request_state_none;
+ vg_pool_switch( &_net_requests.pool, &_net_requests.active, &_net_requests.inactive, request_id );
+ if( _net_requests.transfer_active_id == request_id )
+ _net_requests.transfer_active_id = 0;
}
-static void _net_requests_degenerate( u8 request_id, const c8 *reason )
+static void _net_requests_reset( const c8 *reason )
{
- if( request_id != 0 )
+ if( _net_requests.transfer_active_id )
{
- net_request *request = vg_pool_item( &_net_requests.request_pool, request_id );
+ net_request *request = _get_request( _net_requests.transfer_active_id );
if( request->callback )
request->callback( NULL, 0, request->userdata, k_request_status_server_error );
request->state = k_request_state_error;
- log_request_status( request, reason );
- _delete_request( request );
-
- if( request_id == _net_requests.transfer_request_id )
- _net_requests.transfer_request_id = 0;
+ log_request_status( _net_requests.transfer_active_id, reason );
+ _delete_request( _net_requests.transfer_active_id );
}
+
+ _net_requests.transfer_expected_size = 0;
+ _net_requests.transfer_recieved = 0;
+ _net_requests.transfer_active_id = 0;
+ _net_requests.transfer_timeout = 0;
}
void _net_handle_response_message( SteamNetworkingMessage_t *msg )
{
netmsg_request *response = (netmsg_request *)msg->m_pData;
- if( (response->id == 0) || (response->id > NETWORK_MAX_REQUESTS) )
+ net_request *request = NULL;
+ u16 id = _net_requests.transfer_active_id;
+
+	/* Check whether this response matches the active transfer request */
+ if( id != 0 )
{
- vg_error( "Response with invalid ID: %u.\n", response->id );
- _net_requests_degenerate( _net_requests.transfer_request_id,
- "Attempting to recover from stream error (Invalid request ID)\n" );
- return;
+ request = _get_request( id );
+ if( response->uid != request->uid )
+ {
+ _net_requests_reset( "Attempting to recover from stream error (Invalid response UID / Interrupted)\n" );
+ id = 0;
+ }
+ }
+
+	/* Otherwise, search the active chain for a request matching this uid */
+ if( id == 0 )
+ {
+ id = _net_requests.active.tail;
+ while( id )
+ {
+ request = _get_request( id );
+ if( request->uid == response->uid )
+ break;
+ else
+ id = vg_pool_next( &_net_requests.pool, id, 0 );
+ }
}
- net_request *request = vg_pool_item( &_net_requests.request_pool, response->id );
- if( request->state == k_request_state_none )
+ _net_requests.transfer_active_id = id;
+ if( id == 0 )
{
- vg_error( "Bad request state\n" );
- _net_requests_degenerate( _net_requests.transfer_request_id,
- "Attempting to recover from stream error (Bad request ID).\n" );
+		vg_error( "Received response packet for UID %u, which we are not tracking.\n", response->uid );
+ _net_requests_reset( "Stream error\n" );
return;
}
u32 byte_count = msg->m_cbSize - sizeof(netmsg_request);
-
if( response->status == k_request_status_transfer_header )
{
- _net_requests_degenerate( _net_requests.transfer_request_id, "Interrupted\n" );
+ if( _net_requests.transfer_expected_size || _net_requests.transfer_recieved )
+ {
+			_net_requests_reset( "Internal State Error (header message, with data already received)\n" );
+ return;
+ }
struct netmsg_transfer_header *header = (void *)response->buffer;
if( header->data_size > VG_MB(4) )
- _net_requests_degenerate( response->id, "Header specified size too large (>4mb)" );
+ {
+ _net_requests_reset( "Header specified size too large (>4mb)\n" );
+ return;
+ }
else
{
- _net_requests.transfer_request_id = response->id;
-#if defined( REQUEST_V2 )
- _net_requests.transfer_uid = header->uid;
-#endif
_net_requests.transfer_expected_size = header->data_size;
_net_requests.transfer_recieved = 0;
_net_requests.transfer_timeout = TRANSFER_TIMEOUT_SECONDS;
-
request->state = k_request_state_receiving;
- log_request_status( request, "New Transfer Header\n" );
+ log_request_status( id, "New Transfer Header\n" );
}
}
else if( response->status == k_request_status_transfer_continue )
{
- bool ids_match = 0;
- if( _net_requests.transfer_request_id == response->id )
+ if( _net_requests.transfer_expected_size == 0 )
{
-#if defined( REQUEST_V2 )
- if( response->uid == _net_requests.transfer_uid )
- ids_match = 1;
-#else
- ids_match = 1;
-#endif
+ _net_requests_reset( "Internal State Error (continue message, with no expected size)\n" );
+ return;
}
- if( ids_match )
+ u32 new_size = _net_requests.transfer_recieved + byte_count;
+ if( new_size > _net_requests.transfer_expected_size )
{
- u32 new_size = _net_requests.transfer_recieved + byte_count;
- if( new_size > _net_requests.transfer_expected_size )
- {
- _net_requests_degenerate( response->id, "Transfer exceeded declared size. Will not proceed using truncated data.\n" );
- return;
- }
+ _net_requests_reset( "Transfer exceeded declared size. Will not proceed using truncated data.\n" );
+ return;
+ }
- memcpy( _net_requests.transfer_buffer + _net_requests.transfer_recieved, response->buffer, byte_count );
- _net_requests.transfer_recieved += byte_count;
- _net_requests.transfer_timeout = TRANSFER_TIMEOUT_SECONDS;
+ memcpy( _net_requests.transfer_buffer + _net_requests.transfer_recieved, response->buffer, byte_count );
+ _net_requests.transfer_recieved += byte_count;
+ _net_requests.transfer_timeout = TRANSFER_TIMEOUT_SECONDS;
+
+ if( new_size == _net_requests.transfer_expected_size )
+ {
+ request->state = k_request_state_finished;
+ log_request_status( id, "Transfer completed\n" );
- if( new_size == _net_requests.transfer_expected_size )
+ if( request->callback )
{
- _net_requests.transfer_request_id = 0;
- request->state = k_request_state_finished;
- log_request_status( request, "Transfer completed\n" );
-
- if( request->callback )
- {
- request->callback( _net_requests.transfer_buffer, _net_requests.transfer_expected_size,
- request->userdata, k_request_status_ok );
- }
- else
- vg_warn( "Why are you requesting transfers, and then doing nothing with it?\n" );
-
- _delete_request( request );
+ request->callback( _net_requests.transfer_buffer, _net_requests.transfer_expected_size,
+ request->userdata, k_request_status_ok );
}
- }
- else
- {
- /* Current transfer- we have to assume it was interrupted,
- * New transfer- we have to assume broken because if it sent a header, it would have changed the transfer ID
- * correctly.
- *
- * Both get discarded.
- */
- _net_requests_degenerate( _net_requests.transfer_request_id, "Very broken stream\n" );
- _net_requests_degenerate( response->id, "Very broken stream\n" );
+ else
+ vg_warn( "Why are you requesting transfers, and then doing nothing with it?\n" );
+
+ _delete_request( id );
+ _net_requests_reset( "OK" );
}
}
else
- _net_requests_degenerate( response->id, "Defined server Error\n" );
+ _net_requests_reset( "Defined server Error (Our end is ok)\n" );
}
void _net_requests_init(void)
{
-	u32 alloc_size = sizeof(net_request)*NETWORK_MAX_REQUESTS;
- _net_requests.request_buffer = vg_stack_allocate( &vg.rtmem, alloc_size, 8, "Request buffer" );
- memset( _net_requests.request_buffer, 0, alloc_size );
_net_requests.transfer_buffer = vg_stack_allocate( &vg.rtmem, VG_MB(4), 8, "Request transfer data buffer" );
_net_requests.global_uid = time(NULL) ^ 0x35aa3203;
-
- vg_pool *pool = &_net_requests.request_pool;
- pool->buffer = _net_requests.request_buffer;
- pool->count = NETWORK_MAX_REQUESTS;
- pool->stride = sizeof( net_request );
- pool->offset = offsetof( net_request, poolnode );
- vg_pool_init( pool );
+ vg_pool_init( &_net_requests.pool, &_net_requests.inactive, NETWORK_MAX_REQUESTS, &vg.rtmem );
}
-void _net_requests_reset(void)
+void _net_requests_update(void)
{
- /* return the infinity stones */
- _net_requests.transfer_request_id = 0;
- _net_requests.transfer_expected_size = 0;
- _net_requests.transfer_recieved = 0;
- _net_requests.transfer_timeout = 0.0f;
- for( u32 i=0; i<NETWORK_MAX_REQUESTS; i ++ )
+ if( _net_requests.transfer_active_id )
{
- net_request *request = &_net_requests.request_buffer[ i ];
-
- if( request->state != k_request_state_none )
- {
- vg_warn( "Clipping request #%u. Timeout of some kind\n", i );
- request->state = k_request_state_none;
- vg_pool_unwatch( &_net_requests.request_pool, i+1 );
- }
+ _net_requests.transfer_timeout -= vg.time_frame_delta;
+ if( _net_requests.transfer_timeout < 0.0f )
+ _net_requests_reset( "Timed out.\n" );
}
}
-void _net_requests_update(void)
+void _net_requests_deleteall(void)
{
- if( _net_requests.transfer_request_id )
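+	/* fetch the next id before deleting, since _delete_request unlinks from the active chain */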
+ u16 id = _net_requests.active.head;
+ while( id )
{
- _net_requests.transfer_timeout -= vg.time_frame_delta;
- if( _net_requests.transfer_timeout < 0.0f )
- _net_requests_degenerate( _net_requests.transfer_request_id, "Timed out.\n" );
+ u16 next = vg_pool_next( &_net_requests.pool, id, 1 );
+ _delete_request( id );
+ id = next;
}
}
#pragma once
#include "network.h"
+
#define TRANSFER_TIMEOUT_SECONDS 5.0f
+#define NETWORK_MAX_REQUESTS 8
typedef struct net_request net_request;
struct net_request
{
- vg_pool_node poolnode;
- void (*callback)( void *data, u32 data_size, u64 userdata, enum request_status status );
- f64 sendtime;
- u64 userdata;
-
enum net_request_state
{
k_request_state_none,
k_request_state_max
}
state;
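+	/* copied from the outgoing packet uid; used to match server responses */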
+ u32 uid;
+
+ void (*callback)( void *data, u32 data_size, u64 userdata, enum request_status status );
+ f64 sendtime;
+ u64 userdata;
};
struct _net_requests
{
- net_request *request_buffer;
- vg_pool request_pool;
+ net_request requests[ NETWORK_MAX_REQUESTS ];
+ vg_pool pool;
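+	/* requests move between these chains as they are issued and completed */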
+ vg_pool_chain active, inactive;
void *transfer_buffer;
- u8 transfer_request_id;
u32 transfer_expected_size;
u32 transfer_recieved;
- u32 transfer_uid;
+ u16 transfer_active_id;
f32 transfer_timeout;
-
u32 global_uid;
}
extern _net_requests;
u64 userdata );
void _net_handle_response_message( SteamNetworkingMessage_t *msg );
void _net_requests_init(void);
-void _net_requests_reset(void);
+void _net_requests_deleteall(void);
+void _net_requests_update(void);
if( !cache_id )
return;
- struct addon_cache *cache = &_addon.cache[type];
- addon_cache_entry *entry = vg_pool_item( &cache->pool, cache_id );
+ addon_cache_entry *entry = get_addon_cache_entry( type, cache_id );
char uid[ ADDON_UID_MAX ];
addon_make_uid_cpart( entry->addon_id, uid, entry->local_cpart );
/* kinda jank.. */
if( info->player_id )
{
- struct addon_cache *cache = &_addon.cache[ k_addon_type_player ];
- addon_cache_entry *cache_entry = vg_pool_item( &cache->pool, localplayer.playermodel.cache_slot );
+ addon_cache_entry *cache_entry = get_addon_cache_entry( k_addon_type_player, localplayer.playermodel.cache_slot );
memcpy( cache_entry->local_cpart, info->player_cpart, ADDON_CPART_MAX );
}
}