-typedef struct csr_frag csr_frag;
+// Copyright (C) 2021 Harry Godden (hgn)
-struct csr_frag
+// Extremely simple software renderer. Only supports orthographic projection
+//=======================================================================================================================
+
+typedef struct csr_target csr_target;
+typedef struct csr_filter csr_filter;
+typedef struct csr_shader csr_shader;
+typedef enum EMSAA EMSAA;
+
+typedef void (* csr_frag_program)( void *, vmf_vert[3], float, float, float );
+typedef void (* csr_frag_clear)( void * );
+
+// API
+//=======================================================================================================================
+
+// Create a render target. Resolution, msaa, and shader must be known at this point!
+void csr_create_target( csr_target *rt, u32 x, u32 y, EMSAA aa, csr_shader *shader );
+void csr_rt_clear( csr_target *rt );
+void csr_rt_free( csr_target *rt );
+
+// Refit bounds so that it is square, and crops to center with padding
+void csr_auto_fit( csr_target *rt, float padding );
+
+// Run this after bounds have been adjusted on the RT to update the size of the msaa
+// Auto fit will call this.
+void csr_update_subsamples( csr_target *rt );
+
+// Write CS:GO radar txt
+void csr_write_txt( char const *path, const char *name, csr_target *rt );
+
+// Render calls
+// ------------
+
+// Render a finalized triangle into target. Coordinates are world space
+void simple_raster( csr_target *rt, vmf_vert tri[3] );
+
+// Draw a batch of triangles with an affine world transformation
+void csr_draw( csr_target *rt, vmf_vert *triangles, u32 triangle_count, m4x3f transform );
+
+// Draw VMF with filtering options. Will automatically branch into instances
+// You should call this with the last two recursive arguments (prev,inst), set to NULL
+//
+// Filter is optional, it can be set to NULL to just render everything.
+void csr_vmf_render( csr_target *rt, vmf_map *map, vdf_node *root, csr_filter *filter, m4x3f prev, m4x3f inst );
+
+void csr_rt_save_tga( csr_target *rt, const char *path, u32 offset, u32 nc );
+void csr_rt_save_c32f( csr_target *rt, const char *path, u32 offset );
+
+
+// Implementation
+//=======================================================================================================================
+
+// Shader interface: per-sample output size plus the two callbacks the
+// rasterizer invokes (see simple_raster and csr_rt_clear)
+struct csr_shader
+{
+	u32 stride;             // Bytes of colour output per subsample
+	csr_frag_program frag;  // Called for each covered, depth-passing sample
+	csr_frag_clear clear;   // Called once per sample by csr_rt_clear
+};
+
+// Render target: colour/depth storage plus the world-space region that is
+// mapped onto the image
+struct csr_target
+{
+	void *colour;           // x*y*num_samples slots of shader->stride bytes
+	float *depth;           // One float per subsample; cleared to 0 (= far)
+
+	u32 x, y;               // Resolution in pixels
+	boxf bounds;            // World-space bounds rendered into the image
+	float scale;            // Half-extent incl. padding (set by csr_auto_fit)
+
+	v2f subsamples[ 8 ];    // World-space sample offsets (csr_update_subsamples)
+	int num_samples;
+	v2f *sample_src;        // Selected normalized MSAA pattern
+
+	csr_shader *shader;
+};
+
+// Optional render filter; any field may be left zero/NULL
+struct csr_filter
 {
-	u32 id; 	// Triangle index
-	float qa, qb;	// Quantities
+	const char *visgroup;		// Limit to this visgroup only
+	const char *classname;		// Limit to this exact classname. will not draw world
-	float depth;	// 'depth testing'
+	int compute_bounds_only;	// Expand rt->bounds instead of rasterizing
+};
+
+// MSAA sample-pattern selection (see the pattern tables in the implementation)
+enum EMSAA
+{
+	k_EMSAA_none,	// 1 sample (pixel centre)
+	k_EMSAA_2x2,	// 4 samples, ordered grid
+	k_EMSAA_RGSS,	// 4 samples, rotated grid
+	k_EMSAA_8R		// 8 samples, 8-rook pattern
+};
+
+#ifdef CSR_EXECUTABLE
+
+// MSAA patterns. Offsets are fractions of a pixel, centred on 0; the hex
+// floats read as e.g. 0x0.4p0f = 0.25, 0x0.6p0f = 0.375
+v2f csr_msaa_1[] =
+{
+	{0.f, 0.f}
+};
+
+// XX
+// XX
+v2f csr_msaa_2x2[] =
+{
+	{  0x0.4p0f,  0x0.4p0f },
+	{  0x0.4p0f, -0x0.4p0f },
+	{ -0x0.4p0f, -0x0.4p0f },
+	{ -0x0.4p0f,  0x0.4p0f }
};
-void clear_depth( csr_frag fragments[], u32 x, u32 y )
+//   X
+// X
+//       X
+//     X
+v2f csr_msaa_2x2rgss[] =
{
-	for( u32 i = 0; i < x*y; i ++ )
+	{  0x0.2p0f,  0x0.6p0f },
+	{ -0x0.6p0f,  0x0.2p0f },
+	{ -0x0.2p0f, -0x0.6p0f },
+	{  0x0.6p0f, -0x0.2p0f }
+};
+
+//     X
+//   X
+//        X
+//       X
+//  X
+// X
+//      X
+//    X
+v2f csr_msaa_8rook[] =
+{
+	{  0x0.1p0f,  0x0.7p0f },
+	{  0x0.5p0f,  0x0.1p0f },
+	{  0x0.7p0f, -0x0.3p0f },
+	{  0x0.3p0f, -0x0.5p0f },
+	{ -0x0.1p0f, -0x0.7p0f },
+	{ -0x0.5p0f, -0x0.1p0f },
+	{ -0x0.7p0f,  0x0.3p0f },
+	{ -0x0.3p0f,  0x0.5p0f }
+};
+
+
+void csr_create_target( csr_target *rt, u32 x, u32 y, EMSAA aa, csr_shader *shader )
+{
+	rt->x = x;
+	rt->y = y;
+
+	// Select the subsample pattern; unknown values fall back to no MSAA
+	switch( aa )
	{
-		fragments[ i ].depth = INFINITY;
+		default:
+		case k_EMSAA_none:
+			rt->num_samples = 1;
+			rt->sample_src = csr_msaa_1;
+		break;
+
+		case k_EMSAA_2x2:
+			rt->num_samples = 4;
+			rt->sample_src = csr_msaa_2x2;
+		break;
+
+		case k_EMSAA_RGSS:
+			rt->num_samples = 4;
+			rt->sample_src = csr_msaa_2x2rgss;
+		break;
+
+		case k_EMSAA_8R:
+			rt->num_samples = 8;
+			rt->sample_src = csr_msaa_8rook;
+		break;
	}
+
+	// One depth float and one shader-stride colour slot per subsample
+	rt->shader = shader;
+	rt->depth = (float *)csr_malloc( x*y*rt->num_samples * sizeof(float) );
+	rt->colour = csr_malloc( x * y * rt->shader->stride * rt->num_samples );
+
+	// Bounds start inverted so the first expansion (csr_vmf_render with
+	// compute_bounds_only) sets them
+	v3_fill( rt->bounds[0], INFINITY );
+	v3_fill( rt->bounds[1], -INFINITY );
+}
+
+// Scale the normalized MSAA offsets by the current world-units-per-pixel so
+// coverage tests can work in world space. Must re-run whenever rt->bounds
+// changes (csr_auto_fit does this automatically)
+void csr_update_subsamples( csr_target *rt )
+{
+	float range_x = (rt->bounds[1][0]-rt->bounds[0][0]);
+	float range_y = (rt->bounds[1][1]-rt->bounds[0][1]);
+
+	v2f pixel_size = { range_x/(float)rt->x, range_y/(float)rt->y };
+
+	for( int i = 0; i < rt->num_samples; i ++ )
+	{
+		v2_mul( rt->sample_src[i], pixel_size, rt->subsamples[i] );
+	}
+}
+
+// Release the buffers owned by the target (the struct itself is the caller's)
+void csr_rt_free( csr_target *rt )
+{
+	free( rt->depth );
+	free( rt->colour );
}
-void simple_raster( csr_frag fragments[], u32 x, u32 y, v4f cam_bounds, vmf_vert tri[3], int id )
+// Run the shader's clear on every subsample and reset depth to 0
+// (depth is stored as hit + 16385 in simple_raster, so 0 means "far")
+void csr_rt_clear( csr_target *rt )
{
-	// Very simplified tracing algorithm
+	for( u32 i = 0; i < rt->x*rt->y*rt->num_samples; i ++ )
+	{
+		rt->shader->clear( rt->colour + i * rt->shader->stride );
+		rt->depth[i] = 0.f;
+	}
+}
+
+void csr_auto_fit( csr_target *rt, float padding )
+{
+	// Correct aspect ratio to be square
+	float dx, dy, l, cx, cy;
+
+	dx = rt->bounds[1][0] - rt->bounds[0][0];
+	dy = rt->bounds[1][1] - rt->bounds[0][1];
+
+	// Half of the largest extent
+	l = fmaxf( dx, dy ) * .5f;
+
+	// Current centre
+	cx = (rt->bounds[1][0] + rt->bounds[0][0]) * .5f;
+	cy = (rt->bounds[1][1] + rt->bounds[0][1]) * .5f;
+
+	rt->bounds[0][0] = cx - l - padding;
+	rt->bounds[1][0] = cx + l + padding;
+	rt->bounds[0][1] = cy - l - padding;
+	rt->bounds[1][1] = cy + l + padding;
+
+	// Half-extent including padding; csr_write_txt derives the radar scale
+	// from this
+	rt->scale = l + padding;
+
+	csr_update_subsamples( rt );
+}
+// Rasterize one world-space triangle into the target using top-down
+// orthographic projection (x,y -> image, z -> depth)
+void simple_raster( csr_target *rt, vmf_vert tri[3] )
+{
+	// Very very simplified rasterizing algorithm
	v2f bmin = { 0.f, 0.f };
-	v2f bmax = { x, y };
+	v2f bmax = { rt->x, rt->y };
	
	v2_minv( tri[0].co, tri[1].co, bmin );
	v2_minv( tri[2].co, bmin, bmin );

	v2_maxv( tri[0].co, tri[1].co, bmax );
	v2_maxv( tri[2].co, bmax, bmax );
	
-	float range_x = (cam_bounds[2]-cam_bounds[0])/(float)x;
-	float range_y = (cam_bounds[3]-cam_bounds[1])/(float)y;
-
-	int start_x = csr_max( 0, floorf( (bmin[0]-cam_bounds[0])/range_x));
-	int end_x = csr_min( x, floorf( (bmax[0]-cam_bounds[0])/range_x ));
-	int start_y = csr_max( 0, ceilf( (bmin[1]-cam_bounds[1])/range_y ));
-	int end_y = csr_min( y, ceilf( (bmax[1]-cam_bounds[1])/range_y ));
+	// Clip the triangle's screen-space AABB to the image (inclusive ranges)
+	float range_x = (rt->bounds[1][0]-rt->bounds[0][0])/(float)rt->x;
+	float range_y = (rt->bounds[1][1]-rt->bounds[0][1])/(float)rt->y;
+
+	int start_x = csr_min( rt->x-1, csr_max( 0, floorf( (bmin[0]-rt->bounds[0][0])/range_x)));
+	int end_x = csr_max( 0, csr_min( rt->x-1, ceilf( (bmax[0]-rt->bounds[0][0])/range_x)));
+	int start_y = csr_min( rt->y-1, csr_max( 0, floorf( (bmin[1]-rt->bounds[0][1])/range_y)));
+	int end_y = csr_max( 0, csr_min( rt->y-1, ceilf( (bmax[1]-rt->bounds[0][1])/range_y)));
	
-	v3f trace_dir = { 0.f, 0.f, -1.f };
-	v3f trace_origin = { 0.f, 0.f, 16385.f };
+	v2f v0, v1, v2, vp;
+	float d, bca = 0.f, bcb = 0.f, bcc = 0.f;
+
+	// Edge vectors and reciprocal of twice the signed area (barycentric
+	// denominator). NOTE(review): computed before the degeneracy/backface
+	// check, so a zero-area triangle yields +/-inf here — confirm the edge
+	// tests below always reject those samples
+	v2_sub( tri[1].co, tri[0].co, v0 );
+	v2_sub( tri[2].co, tri[0].co, v1 );
+	v2_sub( tri[1].co, tri[2].co, v2 );
+	d = 1.f / (v0[0]*v1[1] - v1[0]*v0[1]);
+
+	// Backface culling
+	if( v2_cross( v0, v1 ) > 0.f )
+		return;
+
+	v2f trace_origin;
	
-	for( u32 py = start_y; py < end_y; py ++ )
+	for( u32 py = start_y; py <= end_y; py ++ )
	{
-		trace_origin[1] = csr_lerpf( cam_bounds[1], cam_bounds[3], (float)py/(float)y );
+		trace_origin[1] = csr_lerpf( rt->bounds[0][1], rt->bounds[1][1], (float)py/(float)rt->y );
		
-		for( u32 px = start_x; px < end_x; px ++ )
+		for( u32 px = start_x; px <= end_x; px ++ )
		{
-			trace_origin[0] = csr_lerpf( cam_bounds[0], cam_bounds[2], (float)px/(float)x );
-			
-			csr_frag *frag = &fragments[ py*y + px ];
+			// Row is flipped (rt->y-py-1): world +y is up, image rows go down
+			u32 sample_index = ((rt->y-py-1)*rt->x+px) * rt->num_samples;
+			
+			void *frag = rt->colour + sample_index*rt->shader->stride;
+			float *depth = &rt->depth[ sample_index ];
			
-			float tqa = 0.f, tqb = 0.f;
-			float tdepth = csr_ray_tri( trace_origin, trace_dir, tri[0].co, tri[1].co, tri[2].co, &tqa, &tqb );
+			trace_origin[0] = csr_lerpf( rt->bounds[0][0], rt->bounds[1][0], (float)px/(float)rt->x );
			
-			if( tdepth < frag->depth )
+			// Determine coverage
+			for( int i = 0; i < rt->num_samples; i ++ )
			{
-				frag->depth = tdepth;
-				frag->id = id;
-				frag->qa = tqa;
-				frag->qb = tqb;
+				v3f sample_origin;
+
+				// Half-plane tests against all three edges
+				v2_add( rt->subsamples[ i ], trace_origin, sample_origin );
+				v2_sub( sample_origin, tri[0].co, vp );
+				
+				if( v2_cross( v0, vp ) > 0.f )
+					continue;
+				if( v2_cross( vp, v1 ) > 0.f )
+					continue;
+				
+				v2f vp2;
+				v2_sub( sample_origin, tri[2].co, vp2 );
+				
+				if( v2_cross( vp2, v2 ) > 0.f )
+					continue;
+
+				// Barycentric interpolation of z
+				bcb = (vp[0]*v1[1] - v1[0]*vp[1]) * d;
+				bcc = (v0[0]*vp[1] - vp[0]*v0[1]) * d;
+				bca = 1.f - bcb - bcc;
+				
+				float hit = tri[0].co[2] * bca + tri[1].co[2] * bcb + tri[2].co[2] * bcc;
+				// Bias keeps stored depth positive against the 0-cleared
+				// buffer (matches the old 16385 camera height)
+				float hit_depth = hit + 16385.f;
+				
+				// Greater-than test: higher z wins; z is also clipped to the
+				// target's world bounds
+				if( hit_depth > depth[i] && hit >= rt->bounds[0][2] && hit <= rt->bounds[1][2] )
+				{
+					depth[i] = hit_depth;
+					rt->shader->frag( frag+i*rt->shader->stride, tri, bca, bcb, bcc );
+				}
			}
		}
	}
}
-// First pass 'fragmentize'
-void draw_buffers( csr_frag fragments[], u32 x, u32 y, v4f cam_bounds, vmf_vert *triangles, u32 triangle_count )
+// Transform a triangle batch (positions, normals, origins) by 'transform'
+// and rasterize each triangle into the target
+void csr_draw( csr_target *rt, vmf_vert *triangles, u32 triangle_count, m4x3f transform )
 {
+	m3x3f normal;
+	vmf_vert new_tri[3];
+
+	// Derive normal matrix
+	m4x3_to_3x3( transform, normal );
+	m3x3_inv_transpose( normal, normal );
+
 	for( u32 i = 0; i < triangle_count; i ++ )
 	{
 		vmf_vert *triangle = triangles + i*3;
-		simple_raster( fragments, x, y, cam_bounds, triangle, i );
+
+		m4x3_mulv( transform, triangle[0].co, new_tri[0].co );
+		m4x3_mulv( transform, triangle[1].co, new_tri[1].co );
+		m4x3_mulv( transform, triangle[2].co, new_tri[2].co );
+
+		m3x3_mulv( normal, triangle[0].nrm, new_tri[0].nrm );
+		m3x3_mulv( normal, triangle[1].nrm, new_tri[1].nrm );
+		m3x3_mulv( normal, triangle[2].nrm, new_tri[2].nrm );
+
+		v3_normalize( new_tri[0].nrm );
+		v3_normalize( new_tri[1].nrm );
+		v3_normalize( new_tri[2].nrm );
+
+		// Fix: was 'triangles[0].origin', which reused the first vertex of
+		// the whole batch for every triangle; also fill all three vertices so
+		// the fragment program never reads an uninitialized origin
+		m4x3_mulv( transform, triangle[0].origin, new_tri[0].origin );
+		m4x3_mulv( transform, triangle[1].origin, new_tri[1].origin );
+		m4x3_mulv( transform, triangle[2].origin, new_tri[2].origin );
+
+		simple_raster( rt, new_tri );
+	}
+}
+
+// Recursively render a VMF document. Top-level callers pass prev=inst=NULL;
+// func_instance entities re-enter this function with the accumulated basis
+void csr_vmf_render( csr_target *rt, vmf_map *map, vdf_node *root, csr_filter *filter, m4x3f prev, m4x3f inst )
+{
+	m4x3f transform = M4X3_IDENTITY;
+	vmf_solid solid;
+	vmf_vert tri[3];
+	boxf trf_bounds;
+
+	u32 group_id = 0;
+	int filter_visgroups = 0, filter_classname = 0, compute_bounds_only = 0;
+
+	// Decode filter options (filter itself may be NULL)
+	if( filter )
+	{
+		if( filter->visgroup )
+		{
+			filter_visgroups = 1;
+			group_id = vmf_visgroup_id( root, filter->visgroup );
+		}
+
+		if( filter->classname )
+		{
+			filter_classname = 1;
+		}
+
+		compute_bounds_only = filter->compute_bounds_only;
+	}
+
+	// Multiply previous transform with instance transform to create basis
+	if( prev )
+	{
+		m4x3_mul( prev, inst, transform );
+	}
+
+	// Gather world brushes
+	solidgen_ctx_init( &solid );
+
+	if( !filter_classname )
+	{
+		vdf_node *world = vdf_next( root, "world", NULL );
+
+		vdf_foreach( world, "solid", brush )
+		{
+			if( filter_visgroups && !vmf_visgroup_match( brush, group_id ) )
+				continue;
+
+			// TODO: heap-use-after-free
+			solidgen_push( &solid, brush );
+		}
+	}
+
+	// Actual entity loop
+	m4x3f model;
+
+	vdf_foreach( root, "entity", ent )
+	{
+		if( filter_visgroups && !vmf_visgroup_match( ent, group_id ) )
+			continue;
+
+		if( filter_classname )
+			if( strcmp( kv_get( ent, "classname", "" ), filter->classname ) )
+				continue;
+
+		if( ent->user & VMF_FLAG_IS_PROP )
+		{
+			// Create model transform
+			m4x3_identity( model );
+
+			vmf_entity_transform( ent, model );
+			m4x3_mul( transform, model, model );
+
+			// Draw model
+			mdl_mesh_t *mdl = &map->models[ ent->user1 ].mdl;
+
+			if( compute_bounds_only )
+			{
+				// Defer the mesh load; only its placement expands the bounds
+				map->models[ ent->user1 ].need_load = 1;
+				m4x3_expand_aabb_point( model, rt->bounds, (v3f){0.f,0.f,0.f} );
+			}
+			else
+			{
+				// Vertex stride is 8 floats: position(3), normal(3), rest
+				// presumably uvs — TODO confirm against the mdl loader
+				for( int i = 0; i < mdl->num_indices/3; i ++ )
+				{
+					for( int j = 0; j < 3; j ++ )
+					{
+						v3_copy( &mdl->vertices[ mdl->indices[ i*3+j ] *8 ], tri[j].co );
+						v3_copy( &mdl->vertices[ mdl->indices[ i*3+j ] *8+3 ], tri[j].nrm );
+						v3_zero( tri[j].origin );
+					}
+
+					csr_draw( rt, tri, 1, model );
+				}
+			}
+		}
+		else if( ent->user & VMF_FLAG_IS_INSTANCE )
+		{
+			m4x3_identity( model );
+			vmf_entity_transform( ent, model );
+
+			// Recurse into the cached instance VMF with the combined basis
+			csr_vmf_render( rt, map, map->cache[ ent->user1 ].root, filter, transform, model );
+		}
+		else
+		{
+			// Brush entity
+			vdf_foreach( ent, "solid", ent_solid )
+			{
+				solidgen_push( &solid, ent_solid );
+			}
+		}
	}
+
+	if( compute_bounds_only )
+	{
+		// Bounds pass: expand the target bounds by the transformed solids
+		solidgen_bounds( &solid, trf_bounds );
+		m4x3_transform_aabb( transform, trf_bounds );
+		box_concat( rt->bounds, trf_bounds );
+	}
+	else
+	{
+		// Draw brushes
+		for( int i = 0; i < csr_sb_count( solid.indices )/3; i ++ )
+		{
+			u32 * base = solid.indices + i*3;
+
+			tri[0] = solid.verts[ base[0] ];
+			tri[1] = solid.verts[ base[1] ];
+			tri[2] = solid.verts[ base[2] ];
+
+			csr_draw( rt, tri, 1, transform );
+		}
+	}
+
+	solidgen_ctx_reset( &solid );
+	solidgen_ctx_free( &solid );
+}
+
+// Report a failed file write. NOTE(review): the message hard-codes
+// "No such file or directory" regardless of the actual cause — consider
+// strerror(errno)
+void csr_write_filerr( const char *path )
+{
+	log_error( "File write error (No such file or directory): '%s'\n", path );
}
+
+// Write the CS:GO radar description keyvalues file for this target
+void csr_write_txt( char const *path, const char *name, csr_target *rt )
+{
+	FILE *write_ptr;
+
+	write_ptr = fopen( path, "w" );
+
+	if( write_ptr )
+	{
+		// Fix: "\{" is an undefined escape sequence in C (C11 6.4.4.4);
+		// a literal brace needs no escaping
+		fprintf( write_ptr, "\"%s\"\n{\n", name );
+		fprintf( write_ptr, "\t\"material\" \"overviews/%s\"\n", name );
+		fprintf( write_ptr, "\t\"pos_x\" \"%.8f\"\n", rt->bounds[0][0] );
+		fprintf( write_ptr, "\t\"pos_y\" \"%.8f\"\n", rt->bounds[0][1] );
+		fprintf( write_ptr, "\t\"scale\" \"%.8f\"\n", rt->scale / (float)rt->x );
+		fprintf( write_ptr, "}\n" );
+
+		fclose( write_ptr );
+	}
+	else
+	{
+		csr_write_filerr( path );
+	}
+}
+
+// ALWAYS RGB32
+// Resolve MSAA by averaging each pixel's subsamples into a 3-channel float
+// image and write it with csr_32f_write. 'offset' is a byte offset into each
+// sample's shader output
+void csr_rt_save_c32f( csr_target *rt, const char *path, u32 offset )
+{
+	float *image = (float *)csr_malloc( rt->x*rt->y*3*sizeof(float) );
+
+	// Equal weight per subsample
+	float contrib = 1.f/(float)rt->num_samples;
+
+	for( int i = 0; i < rt->x*rt->y; i ++ )
+	{
+		// NOTE(review): arithmetic on void* is a GNU extension — confirm the
+		// project never builds under strict ISO C
+		void *src = rt->colour + offset + i * rt->num_samples * rt->shader->stride;
+		float *dst = image + i*3;
+
+		v3_zero( dst );
+		for( int k = 0; k < rt->num_samples; k ++ )
+		{
+			v3_muladds( dst, (float *)(src + k*rt->shader->stride), contrib, dst );
+		}
+	}
+
+	if( !csr_32f_write( path, rt->x, rt->y, image ) )
+		csr_write_filerr( path );
+
+	free( image );
+}
+
+// Save floating point buffer to tga. Must be in range (0-1)
+// Offset and stride are in bytes
+void csr_rt_save_tga( csr_target *rt, const char *path, u32 offset, u32 nc )
+{
+	u8 *image = (u8 *)csr_malloc( rt->x*rt->y * 4 );
+
+	// Pre-scaled averaging weight: sum of samples lands directly in 0..255
+	float contrib = 255.f/(float)rt->num_samples;
+
+	for( int i = 0; i < rt->x*rt->y; i ++ )
+	{
+		void *src = rt->colour + offset + i * rt->num_samples * rt->shader->stride;
+		u8 *dst = image + i*4;
+
+		v4f accum = { 0.f, 0.f, 0.f, 0.f };
+
+		for( int k = 0; k < rt->num_samples; k ++ )
+		{
+			float *src_sample = (float *)(src + k*rt->shader->stride);
+
+			for( int j = 0; j < nc; j ++ )
+			{
+				accum[ j ] += src_sample[ j ] * contrib;
+			}
+		}
+
+		// Clamp before narrowing: converting a float outside the u8 range
+		// is undefined behaviour (resolves the old 'TODO: Clamp this value')
+		dst[0] = fminf( fmaxf( accum[0], 0.f ), 255.f );
+		dst[1] = fminf( fmaxf( accum[1], 0.f ), 255.f );
+		dst[2] = fminf( fmaxf( accum[2], 0.f ), 255.f );
+		dst[3] = fminf( fmaxf( accum[3], 0.f ), 255.f );
+	}
+
+	if( !csr_tga_write( path, rt->x, rt->y, nc, image ) )
+		csr_write_filerr( path );
+
+	free( image );
+}
+
+#endif