// Copyright (C) 2021 Harry Godden (hgn)

// Extremely simple software renderer. Only supports orthographic projection.
//=======================================================================================================================

typedef struct csr_target csr_target;
typedef struct csr_filter csr_filter;
typedef struct csr_shader csr_shader;
typedef enum EMSAA EMSAA;

typedef void (* csr_frag_program)( void *, vmf_vert[3], float, float, float );
typedef void (* csr_frag_clear)( void * );

// API
//=======================================================================================================================

// Create a render target. Resolution, msaa, and shader must be known at this point!
void csr_create_target( csr_target *rt, u32 x, u32 y, EMSAA aa, csr_shader *shader );
void csr_rt_clear( csr_target *rt );
void csr_rt_free( csr_target *rt );

// Refit the bounds so that they are square, and crop to the center with padding
void csr_auto_fit( csr_target *rt, float padding );

// Run this after bounds have been adjusted on the RT to update the size of the msaa
// Auto fit will call this.
void csr_update_subsamples( csr_target *rt );

// Write CS:GO radar txt
void csr_write_txt( char const *path, const char *name, csr_target *rt );

// Render calls
// ------------

// Render a finalized triangle into the target. Coordinates are world space
void simple_raster( csr_target *rt, vmf_vert tri[3] );

// Draw a batch of triangles with an affine world transformation
void csr_draw( csr_target *rt, vmf_vert *triangles, u32 triangle_count, m4x3f transform );

// Draw a VMF with filtering options. Will automatically recurse into instances.
// Call this with the last two recursive arguments (prev, inst) set to NULL.
//
// Filter is optional; it can be set to NULL to just render everything.
void csr_vmf_render( csr_target *rt, vmf_map *map, vdf_node *root, csr_filter *filter, m4x3f prev, m4x3f inst );


void csr_rt_save_tga( csr_target *rt, const char *path, u32 offset, u32 nc );
void csr_rt_save_c32f( csr_target *rt, const char *path, u32 offset );

// Obsolete
__attribute__ ((deprecated))
void csr_rt_save_buffers( csr_target *rt, const char *basename, const char *subname );

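// Example usage (illustrative sketch only; vmf/vdf loading and the shader are
// assumed to be set up elsewhere in csRadar, and the identifiers map, root and
// my_shader below are placeholders):
//
//    csr_target rt;
//    csr_create_target( &rt, 1024, 1024, k_EMSAA_RGSS, &my_shader );
//
//    // First pass: accumulate bounds only, then square them up
//    csr_filter filter = { .compute_bounds_only = 1 };
//    csr_vmf_render( &rt, map, root, &filter, NULL, NULL );
//    csr_auto_fit( &rt, 128.f );
//
//    // Second pass: actually rasterize, then write the outputs
//    csr_rt_clear( &rt );
//    csr_vmf_render( &rt, map, root, NULL, NULL, NULL );
//    csr_rt_save_tga( &rt, "radar.tga", 0, 3 );
//    csr_write_txt( "radar.txt", "de_example", &rt );
//
//    csr_rt_free( &rt );
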
// Implementation
//=======================================================================================================================

struct csr_shader
{
    u32 stride;
    csr_frag_program frag;
    csr_frag_clear clear;
};
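
// The shader gives the target its meaning: 'stride' is the size in bytes of
// one sample's fragment in the colour buffer, 'clear' initializes a fragment,
// and 'frag' is called for every covered sample with the triangle and its
// three barycentric weights. A minimal world-position shader might look like
// this (illustrative sketch only, not part of csRadar):
//
//    static void pos_frag( void *dst, vmf_vert tri[3], float a, float b, float c )
//    {
//        float *out = dst;
//        for( int k = 0; k < 3; k ++ )
//            out[k] = tri[0].co[k]*a + tri[1].co[k]*b + tri[2].co[k]*c;
//    }
//
//    static void pos_clear( void *dst )
//    {
//        v3_zero( (float *)dst );
//    }
//
//    static csr_shader pos_shader = { .stride = sizeof(float)*3, .frag = pos_frag, .clear = pos_clear };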

struct csr_target
{
    void *colour;
    float *depth;

    u32 x, y;
    boxf bounds;
    float scale;

    v2f subsamples[ 8 ];
    int num_samples;
    v2f *sample_src;

    csr_shader *shader;
};
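
// Buffer layout note: 'colour' holds x*y*num_samples fragments of
// shader->stride bytes each, and 'depth' one float per fragment; samples for a
// pixel are stored contiguously (see the indexing in simple_raster).
// 'bounds' is the world-space box being rendered and 'scale' is half the
// fitted extent plus padding, as set by csr_auto_fit.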

struct csr_filter
{
    const char *visgroup;     // Limit to this visgroup only
    const char *classname;    // Limit to this exact classname. Will not draw world

    int compute_bounds_only;
};

enum EMSAA
{
    k_EMSAA_none,
    k_EMSAA_2x2,
    k_EMSAA_RGSS,
    k_EMSAA_8R
};

#ifdef CSR_EXECUTABLE

// MSAA patterns
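// (offsets are fractions of a pixel, written as C99 hex floats:
//  0x0.4p0f == 0.25f, 0x0.2p0f == 0.125f, 0x0.1p0f == 0.0625f, ...)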
v2f csr_msaa_1[] =
{
    { 0.f, 0.f }
};

// XX
// XX
v2f csr_msaa_2x2[] =
{
    {  0x0.4p0f,  0x0.4p0f },
    {  0x0.4p0f, -0x0.4p0f },
    { -0x0.4p0f, -0x0.4p0f },
    { -0x0.4p0f,  0x0.4p0f }
};

//   X
// X
//    X
//  X
v2f csr_msaa_2x2rgss[] =
{
    {  0x0.2p0f,  0x0.6p0f },
    { -0x0.6p0f,  0x0.2p0f },
    { -0x0.2p0f, -0x0.6p0f },
    {  0x0.6p0f, -0x0.2p0f }
};

//     X
//   X
// X
//       X
//  X
//        X
//      X
//    X
v2f csr_msaa_8rook[] =
{
    {  0x0.1p0f,  0x0.7p0f },
    {  0x0.5p0f,  0x0.1p0f },
    {  0x0.7p0f, -0x0.3p0f },
    {  0x0.3p0f, -0x0.5p0f },
    { -0x0.1p0f, -0x0.7p0f },
    { -0x0.5p0f, -0x0.1p0f },
    { -0x0.7p0f,  0x0.3p0f },
    { -0x0.3p0f,  0x0.5p0f }
};


void csr_create_target( csr_target *rt, u32 x, u32 y, EMSAA aa, csr_shader *shader )
{
    rt->x = x;
    rt->y = y;

    switch( aa )
    {
        default:
        case k_EMSAA_none:
            rt->num_samples = 1;
            rt->sample_src = csr_msaa_1;
            break;

        case k_EMSAA_2x2:
            rt->num_samples = 4;
            rt->sample_src = csr_msaa_2x2;
            break;

        case k_EMSAA_RGSS:
            rt->num_samples = 4;
            rt->sample_src = csr_msaa_2x2rgss;
            break;

        case k_EMSAA_8R:
            rt->num_samples = 8;
            rt->sample_src = csr_msaa_8rook;
            break;
    }

    rt->shader = shader;
    rt->depth = (float *)csr_malloc( x*y*rt->num_samples * sizeof(float) );
    rt->colour = csr_malloc( x * y * rt->shader->stride * rt->num_samples );

    v3_fill( rt->bounds[0], INFINITY );
    v3_fill( rt->bounds[1], -INFINITY );
}

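// Scale the unit sample offsets into world-space offsets for the current
// bounds, so coverage tests in simple_raster can work directly in world units.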
void csr_update_subsamples( csr_target *rt )
{
    float range_x = (rt->bounds[1][0]-rt->bounds[0][0]);
    float range_y = (rt->bounds[1][1]-rt->bounds[0][1]);

    v2f pixel_size = { range_x/(float)rt->x, range_y/(float)rt->y };

    for( int i = 0; i < rt->num_samples; i ++ )
    {
        v2_mul( rt->sample_src[i], pixel_size, rt->subsamples[i] );
    }
}

void csr_rt_free( csr_target *rt )
{
    free( rt->depth );
    free( rt->colour );
}

void csr_rt_clear( csr_target *rt )
{
    for( u32 i = 0; i < rt->x*rt->y*rt->num_samples; i ++ )
    {
        rt->shader->clear( rt->colour + i * rt->shader->stride );
        rt->depth[i] = 0.f;
    }
}

void csr_auto_fit( csr_target *rt, float padding )
{
    // Correct aspect ratio to be square
    float dx, dy, l, cx, cy;

    dx = rt->bounds[1][0] - rt->bounds[0][0];
    dy = rt->bounds[1][1] - rt->bounds[0][1];

    l = fmaxf( dx, dy ) * .5f;

    cx = (rt->bounds[1][0] + rt->bounds[0][0]) * .5f;
    cy = (rt->bounds[1][1] + rt->bounds[0][1]) * .5f;

    rt->bounds[0][0] = cx - l - padding;
    rt->bounds[1][0] = cx + l + padding;
    rt->bounds[0][1] = cy - l - padding;
    rt->bounds[1][1] = cy + l + padding;

    rt->scale = l + padding;

    csr_update_subsamples( rt );
}

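// The emitted file is a CS:GO overview keyvalues block, e.g. (values purely
// illustrative):
//
//    "de_example"
//    {
//        "material" "overviews/de_example"
//        "pos_x" "-2304.00000000"
//        "pos_y" "3072.00000000"
//        "scale" "4.50000000"
//    }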
void csr_write_txt( char const *path, const char *name, csr_target *rt )
{
    FILE *write_ptr;

    write_ptr = fopen( path, "w" );

    // Bail if the file could not be opened
    if( !write_ptr )
        return;

    fprintf( write_ptr, "\"%s\"\n{\n", name );
    fprintf( write_ptr, "\t\"material\" \"overviews/%s\"\n", name );
    fprintf( write_ptr, "\t\"pos_x\" \"%.8f\"\n", rt->bounds[0][0] );
    fprintf( write_ptr, "\t\"pos_y\" \"%.8f\"\n", rt->bounds[0][1] );
    fprintf( write_ptr, "\t\"scale\" \"%.8f\"\n", rt->scale / (float)rt->x );
    fprintf( write_ptr, "}\n" );

    fclose( write_ptr );
}

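// Rasterize one world-space triangle: clamp its bounding box to the target,
// then for every pixel run a half-plane (edge function) test per msaa sample,
// interpolate world z with barycentric weights, and keep the sample if it is
// inside the target's z bounds and higher than the value already stored; the
// depth buffer effectively acts as a top-down height test.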
void simple_raster( csr_target *rt, vmf_vert tri[3] )
{
    // Very very simplified rasterizing algorithm
    v2f bmin = { 0.f, 0.f };
    v2f bmax = { rt->x, rt->y };

    v2_minv( tri[0].co, tri[1].co, bmin );
    v2_minv( tri[2].co, bmin, bmin );

    v2_maxv( tri[0].co, tri[1].co, bmax );
    v2_maxv( tri[2].co, bmax, bmax );

    float range_x = (rt->bounds[1][0]-rt->bounds[0][0])/(float)rt->x;
    float range_y = (rt->bounds[1][1]-rt->bounds[0][1])/(float)rt->y;

    int start_x = csr_min( rt->x-1, csr_max( 0, floorf( (bmin[0]-rt->bounds[0][0])/range_x)));
    int end_x   = csr_max( 0, csr_min( rt->x-1, ceilf( (bmax[0]-rt->bounds[0][0])/range_x)));
    int start_y = csr_min( rt->y-1, csr_max( 0, floorf( (bmin[1]-rt->bounds[0][1])/range_y)));
    int end_y   = csr_max( 0, csr_min( rt->y-1, ceilf( (bmax[1]-rt->bounds[0][1])/range_y)));

    v2f v0, v1, v2, vp;
    float d, bca = 0.f, bcb = 0.f, bcc = 0.f;

    v2_sub( tri[1].co, tri[0].co, v0 );
    v2_sub( tri[2].co, tri[0].co, v1 );
    v2_sub( tri[1].co, tri[2].co, v2 );
    d = 1.f / (v0[0]*v1[1] - v1[0]*v0[1]);

    // Backface culling
    if( v2_cross( v0, v1 ) > 0.f )
        return;

    v2f trace_origin;

    for( u32 py = start_y; py <= end_y; py ++ )
    {
        trace_origin[1] = csr_lerpf( rt->bounds[0][1], rt->bounds[1][1], (float)py/(float)rt->y );

        for( u32 px = start_x; px <= end_x; px ++ )
        {
            u32 sample_index = ((rt->y-py-1)*rt->x+px) * rt->num_samples;

            void *frag = rt->colour + sample_index*rt->shader->stride;
            float *depth = &rt->depth[ sample_index ];

            trace_origin[0] = csr_lerpf( rt->bounds[0][0], rt->bounds[1][0], (float)px/(float)rt->x );

            // Determine coverage
            for( int i = 0; i < rt->num_samples; i ++ )
            {
                v3f sample_origin;

                v2_add( rt->subsamples[ i ], trace_origin, sample_origin );
                v2_sub( sample_origin, tri[0].co, vp );

                if( v2_cross( v0, vp ) > 0.f )
                    continue;
                if( v2_cross( vp, v1 ) > 0.f )
                    continue;

                v2f vp2;
                v2_sub( sample_origin, tri[2].co, vp2 );

                if( v2_cross( vp2, v2 ) > 0.f )
                    continue;

                bcb = (vp[0]*v1[1] - v1[0]*vp[1]) * d;
                bcc = (v0[0]*vp[1] - vp[0]*v0[1]) * d;
                bca = 1.f - bcb - bcc;

                float hit = tri[0].co[2] * bca + tri[1].co[2] * bcb + tri[2].co[2] * bcc;
                float hit_depth = hit + 16385.f;

                if( hit_depth > depth[i] && hit >= rt->bounds[0][2] && hit <= rt->bounds[1][2] )
                {
                    depth[i] = hit_depth;
                    rt->shader->frag( frag+i*rt->shader->stride, tri, bca, bcb, bcc );
                }
            }
        }
    }
}

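// Transform a triangle batch into world space and rasterize it. Positions use
// the affine matrix directly; normals use its inverse-transpose so that
// non-uniform scaling does not skew them.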
void csr_draw( csr_target *rt, vmf_vert *triangles, u32 triangle_count, m4x3f transform )
{
    m3x3f normal;
    vmf_vert new_tri[3];

    // Derive normal matrix
    m4x3_to_3x3( transform, normal );
    m3x3_inv_transpose( normal, normal );

    for( u32 i = 0; i < triangle_count; i ++ )
    {
        vmf_vert *triangle = triangles + i*3;

        m4x3_mulv( transform, triangle[0].co, new_tri[0].co );
        m4x3_mulv( transform, triangle[1].co, new_tri[1].co );
        m4x3_mulv( transform, triangle[2].co, new_tri[2].co );

        m3x3_mulv( normal, triangle[0].nrm, new_tri[0].nrm );
        m3x3_mulv( normal, triangle[1].nrm, new_tri[1].nrm );
        m3x3_mulv( normal, triangle[2].nrm, new_tri[2].nrm );

        v3_normalize( new_tri[0].nrm );
        v3_normalize( new_tri[1].nrm );
        v3_normalize( new_tri[2].nrm );

        m4x3_mulv( transform, triangle[0].origin, new_tri[0].origin );

        simple_raster( rt, new_tri );
    }
}

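// Walk a VMF: world brushes, brush entities, props and instances (the latter
// recurse back into this function with the instance transform applied). With
// compute_bounds_only set, geometry is not rasterized and only rt->bounds is
// expanded, which is the pass typically followed by csr_auto_fit.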
void csr_vmf_render( csr_target *rt, vmf_map *map, vdf_node *root, csr_filter *filter, m4x3f prev, m4x3f inst )
{
    m4x3f transform = M4X3_IDENTITY;
    vmf_solid solid;
    vmf_vert tri[3];
    boxf trf_bounds;

    u32 group_id = 0;
    int filter_visgroups = 0, filter_classname = 0, compute_bounds_only = 0;

    if( filter )
    {
        if( filter->visgroup )
        {
            filter_visgroups = 1;
            group_id = vmf_visgroup_id( root, filter->visgroup );
        }

        if( filter->classname )
        {
            filter_classname = 1;
        }

        compute_bounds_only = filter->compute_bounds_only;
    }

    // Multiply previous transform with instance transform to create basis
    if( prev )
    {
        m4x3_mul( prev, inst, transform );
    }

    // Gather world brushes
    solidgen_ctx_init( &solid );

    if( !filter_classname )
    {
        vdf_node *world = vdf_next( root, "world", NULL );

        vdf_foreach( world, "solid", brush )
        {
            if( filter_visgroups && !vmf_visgroup_match( brush, group_id ) )
                continue;

            // TODO: heap-use-after-free
            solidgen_push( &solid, brush );
        }
    }

    // Actual entity loop
    m4x3f model;

    vdf_foreach( root, "entity", ent )
    {
        if( filter_visgroups && !vmf_visgroup_match( ent, group_id ) )
            continue;

        if( filter_classname )
            if( strcmp( kv_get( ent, "classname", "" ), filter->classname ) )
                continue;

        if( ent->user & VMF_FLAG_IS_PROP )
        {
            // Create model transform
            m4x3_identity( model );

            vmf_entity_transform( ent, model );
            m4x3_mul( transform, model, model );

            // Draw model
            mdl_mesh_t *mdl = &map->models[ ent->user1 ].mdl;

            if( compute_bounds_only )
            {
                map->models[ ent->user1 ].need_load = 1;
                m4x3_expand_aabb_point( model, rt->bounds, (v3f){0.f,0.f,0.f} );
            }
            else
            {
                for( int i = 0; i < mdl->num_indices/3; i ++ )
                {
                    for( int j = 0; j < 3; j ++ )
                    {
                        v3_copy( &mdl->vertices[ mdl->indices[ i*3+j ] *8 ], tri[j].co );
                        v3_copy( &mdl->vertices[ mdl->indices[ i*3+j ] *8+3 ], tri[j].nrm );
                        v3_zero( tri[j].origin );
                    }

                    csr_draw( rt, tri, 1, model );
                }
            }
        }
        else if( ent->user & VMF_FLAG_IS_INSTANCE )
        {
            m4x3_identity( model );
            vmf_entity_transform( ent, model );

            csr_vmf_render( rt, map, map->cache[ ent->user1 ].root, filter, transform, model );
        }
        else
        {
            // Brush entity
            vdf_foreach( ent, "solid", ent_solid )
            {
                solidgen_push( &solid, ent_solid );
            }
        }
    }

    if( compute_bounds_only )
    {
        solidgen_bounds( &solid, trf_bounds );
        m4x3_transform_aabb( transform, trf_bounds );
        box_concat( rt->bounds, trf_bounds );
    }
    else
    {
        // Draw brushes
        for( int i = 0; i < csr_sb_count( solid.indices )/3; i ++ )
        {
            u32 *base = solid.indices + i*3;

            tri[0] = solid.verts[ base[0] ];
            tri[1] = solid.verts[ base[1] ];
            tri[2] = solid.verts[ base[2] ];

            csr_draw( rt, tri, 1, transform );
        }
    }

    solidgen_ctx_reset( &solid );
    solidgen_ctx_free( &solid );
}

// Obsolete
void csr_rt_save_buffers( csr_target *rt, const char *basename, const char *subname )
{
    char output[ 512 ];

    float *image = (float *)csr_malloc( 1024*1024*sizeof(float)*3 );

    float contrib = 1.f/(float)rt->num_samples;

    for( int l = 0; l < rt->x; l ++ )
    {
        for( int x = 0; x < rt->y; x ++ )
        {
            float *dst = &image[ (l*1024+x)*3 ];
            void *src = rt->colour + ((1023-l)*1024+x) * rt->num_samples * rt->shader->stride;

            v3_muls( (float *)src, contrib, dst );

            for( int j = 1; j < rt->num_samples; j ++ )
            {
                v3_muladds( dst, (float *)(src + j*rt->shader->stride), contrib, dst );
            }
        }
    }

    // Save position buffer
    strcpy( output, basename );
    strcat( output, "." );
    strcat( output, subname );
    strcat( output, "_position.pfm" );
    csr_32f_write( output, rt->x, rt->y, image );

    free( image );
}

// ALWAYS RGB32
void csr_rt_save_c32f( csr_target *rt, const char *path, u32 offset )
{
    float *image = (float *)csr_malloc( rt->x*rt->y*3*sizeof(float) );

    float contrib = 255.f/(float)rt->num_samples;

    for( int i = 0; i < rt->x*rt->y; i ++ )
    {
        void *src = rt->colour + offset + i * rt->num_samples * rt->shader->stride;
        float *dst = image + i*3;

        v3_zero( dst );
        for( int k = 0; k < rt->num_samples; k ++ )
        {
            v3_muladds( dst, (float *)(src + k*rt->shader->stride), contrib, dst );
        }
    }

    csr_32f_write( path, rt->x, rt->y, image );
    free( image );
}

// Save floating point buffer to tga. Values must be in range (0-1)
// Offset is in bytes; nc is the number of channels to write (1-4)
void csr_rt_save_tga( csr_target *rt, const char *path, u32 offset, u32 nc )
{
    u8 *image = (u8 *)csr_malloc( rt->x*rt->y * 4 );

    float contrib = 255.f/(float)rt->num_samples;

    for( int i = 0; i < rt->x*rt->y; i ++ )
    {
        void *src = rt->colour + offset + i * rt->num_samples * rt->shader->stride;
        u8 *dst = image + i*4;

        v4f accum = { 0.f, 0.f, 0.f, 0.f };

        for( int k = 0; k < rt->num_samples; k ++ )
        {
            float *src_sample = (float *)(src + k*rt->shader->stride);

            for( u32 j = 0; j < nc; j ++ )
            {
                accum[ j ] += src_sample[ j ] * contrib;
            }
        }

        // TODO: Clamp this value
        dst[0] = accum[0];
        dst[1] = accum[1];
        dst[2] = accum[2];
        dst[3] = accum[3];
    }

    csr_tga_write( path, rt->x, rt->y, nc, image );
    free( image );
}

#endif