// csrDraw.h
// Copyright (C) 2021 Harry Godden (hgn)

// Extremely simple software renderer. Only supports orthographic projection.
//=======================================================================================================================

typedef struct csr_target csr_target;
typedef struct csr_filter csr_filter;
typedef struct csr_shader csr_shader;
typedef enum EMSAA EMSAA;

typedef void (* csr_frag_program)( void *, vmf_vert[3], float, float, float );
typedef void (* csr_frag_clear)( void * );

// API
//=======================================================================================================================

// Create a render target. Resolution, msaa, and shader must be known at this point!
void csr_create_target( csr_target *rt, u32 x, u32 y, EMSAA aa, csr_shader *shader );
void csr_rt_clear( csr_target *rt );
void csr_rt_free( csr_target *rt );

// Refit the bounds so that they are square, and crop to the center with padding
void csr_auto_fit( csr_target *rt, float padding );

// Run this after the bounds on the RT have been adjusted, to update the subsample spacing.
// csr_auto_fit will call this for you.
void csr_update_subsamples( csr_target *rt );

// Write CS:GO radar txt
void csr_write_txt( char const *path, const char *name, csr_target *rt );

// Render calls
// ------------

// Render a finalized triangle into the target. Coordinates are world space
void simple_raster( csr_target *rt, vmf_vert tri[3] );

// Draw a batch of triangles with an affine world transformation
void csr_draw( csr_target *rt, vmf_vert *triangles, u32 triangle_count, m4x3f transform );

// Draw a VMF with filtering options. Will automatically recurse into instances.
// You should call this with the last two recursive arguments (prev, inst) set to NULL.
//
// Filter is optional; it can be set to NULL to just render everything.
void csr_vmf_render( csr_target *rt, vmf_map *map, vdf_node *root, csr_filter *filter, m4x3f prev, m4x3f inst );


void csr_rt_save_tga( csr_target *rt, const char *path, u32 offset, u32 nc );

// Obsolete
void csr_rt_save_buffers( csr_target *rt, const char *basename, const char *subname );
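
// Example usage (a sketch, not part of the API). Renders a radar in two passes:
// a bounds-only pass, then the actual draw. 'my_shader' and 'map->root' are
// placeholder names for a user-supplied shader and the loaded VMF root node;
// any model loading done between the two passes is omitted here.
//
//    csr_target rt;
//    csr_create_target( &rt, 1024, 1024, k_EMSAA_RGSS, &my_shader );
//
//    csr_filter filter = { .compute_bounds_only = 1 };
//    csr_vmf_render( &rt, map, map->root, &filter, NULL, NULL );
//    csr_auto_fit( &rt, 128.f );
//
//    csr_rt_clear( &rt );
//    csr_vmf_render( &rt, map, map->root, NULL, NULL, NULL );
//
//    csr_rt_save_tga( &rt, "radar.tga", 0, 4 );
//    csr_write_txt( "de_example.txt", "de_example", &rt );
//    csr_rt_free( &rt );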

// Implementation
//=======================================================================================================================

struct csr_shader
{
	u32 stride;
	csr_frag_program frag;
	csr_frag_clear clear;
};
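
// A minimal example shader (an illustrative sketch, not defined in this file):
// each fragment is three floats holding the interpolated world-space normal.
// The three weights passed to the fragment program are barycentric coordinates.
//
//    void frag_normal( void *dst, vmf_vert tri[3], float a, float b, float c )
//    {
//       float *colour = dst;
//       for( int i = 0; i < 3; i ++ )
//          colour[i] = tri[0].nrm[i]*a + tri[1].nrm[i]*b + tri[2].nrm[i]*c;
//    }
//
//    void frag_normal_clear( void *dst )
//    {
//       v3_zero( (float *)dst );
//    }
//
//    csr_shader shader_normal = { .stride = 3*sizeof(float), .frag = frag_normal, .clear = frag_normal_clear };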

struct csr_target
{
	void *colour;
	float *depth;

	u32 x, y;
	boxf bounds;
	float scale;

	v2f subsamples[ 8 ];
	int num_samples;
	v2f *sample_src;

	csr_shader *shader;
};

struct csr_filter
{
	const char *visgroup;     // Limit to this visgroup only
	const char *classname;    // Limit to this exact classname. Will not draw the world

	int compute_bounds_only;
};

enum EMSAA
{
	k_EMSAA_none,
	k_EMSAA_2x2,
	k_EMSAA_RGSS,
	k_EMSAA_8R
};

#ifdef CSR_EXECUTABLE

// MSAA patterns
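// Sample offsets are fractions of a pixel, written as C99 hex floats: 0x0.4p0f = 4/16 = 0.25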
v2f csr_msaa_1[] =
{
	{ 0.f, 0.f }
};

// XX
// XX
v2f csr_msaa_2x2[] =
{
	{  0x0.4p0f,  0x0.4p0f },
	{  0x0.4p0f, -0x0.4p0f },
	{ -0x0.4p0f, -0x0.4p0f },
	{ -0x0.4p0f,  0x0.4p0f }
};

//   X
// X
//    X
//  X
v2f csr_msaa_2x2rgss[] =
{
	{  0x0.2p0f,  0x0.6p0f },
	{ -0x0.6p0f,  0x0.2p0f },
	{ -0x0.2p0f, -0x0.6p0f },
	{  0x0.6p0f, -0x0.2p0f }
};

//     X
//   X
// X
//       X
//  X
//        X
//      X
//    X
v2f csr_msaa_8rook[] =
{
	{  0x0.1p0f,  0x0.7p0f },
	{  0x0.5p0f,  0x0.1p0f },
	{  0x0.7p0f, -0x0.3p0f },
	{  0x0.3p0f, -0x0.5p0f },
	{ -0x0.1p0f, -0x0.7p0f },
	{ -0x0.5p0f, -0x0.1p0f },
	{ -0x0.7p0f,  0x0.3p0f },
	{ -0x0.3p0f,  0x0.5p0f }
};


void csr_create_target( csr_target *rt, u32 x, u32 y, EMSAA aa, csr_shader *shader )
{
	rt->x = x;
	rt->y = y;

	switch( aa )
	{
		default:
		case k_EMSAA_none:
			rt->num_samples = 1;
			rt->sample_src = csr_msaa_1;
			break;

		case k_EMSAA_2x2:
			rt->num_samples = 4;
			rt->sample_src = csr_msaa_2x2;
			break;

		case k_EMSAA_RGSS:
			rt->num_samples = 4;
			rt->sample_src = csr_msaa_2x2rgss;
			break;

		case k_EMSAA_8R:
			rt->num_samples = 8;
			rt->sample_src = csr_msaa_8rook;
			break;
	}

	rt->shader = shader;
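	// Buffer layout: row-major pixels, each holding num_samples consecutive fragments
	// of shader->stride bytes; the depth buffer stores one float per sample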
	rt->depth = (float *)csr_malloc( x*y*rt->num_samples * sizeof(float) );
	rt->colour = csr_malloc( x * y * rt->shader->stride * rt->num_samples );

	v3_fill( rt->bounds[0], INFINITY );
	v3_fill( rt->bounds[1], -INFINITY );
}

void csr_update_subsamples( csr_target *rt )
{
	float range_x = (rt->bounds[1][0]-rt->bounds[0][0]);
	float range_y = (rt->bounds[1][1]-rt->bounds[0][1]);

	v2f pixel_size = { range_x/(float)rt->x, range_y/(float)rt->y };

	for( int i = 0; i < rt->num_samples; i ++ )
	{
		v2_mul( rt->sample_src[i], pixel_size, rt->subsamples[i] );
	}
}

void csr_rt_free( csr_target *rt )
{
	free( rt->depth );
	free( rt->colour );
}

void csr_rt_clear( csr_target *rt )
{
	for( u32 i = 0; i < rt->x*rt->y*rt->num_samples; i ++ )
	{
		rt->shader->clear( rt->colour + i * rt->shader->stride );
		rt->depth[i] = 0.f;
	}
}

void csr_auto_fit( csr_target *rt, float padding )
{
	// Correct aspect ratio to be square
	float dx, dy, l, cx, cy;

	dx = rt->bounds[1][0] - rt->bounds[0][0];
	dy = rt->bounds[1][1] - rt->bounds[0][1];

	l = fmaxf( dx, dy ) * .5f;

	cx = (rt->bounds[1][0] + rt->bounds[0][0]) * .5f;
	cy = (rt->bounds[1][1] + rt->bounds[0][1]) * .5f;

	rt->bounds[0][0] = cx - l - padding;
	rt->bounds[1][0] = cx + l + padding;
	rt->bounds[0][1] = cy - l - padding;
	rt->bounds[1][1] = cy + l + padding;

	rt->scale = l + padding;

	csr_update_subsamples( rt );
}

void csr_write_txt( char const *path, const char *name, csr_target *rt )
{
	FILE *write_ptr;

	write_ptr = fopen( path, "w" );

	fprintf( write_ptr, "\"%s\"\n{\n", name );
	fprintf( write_ptr, "\t\"material\" \"overviews/%s\"\n", name );
	fprintf( write_ptr, "\t\"pos_x\" \"%.8f\"\n", rt->bounds[0][0] );
	fprintf( write_ptr, "\t\"pos_y\" \"%.8f\"\n", rt->bounds[0][1] );
	fprintf( write_ptr, "\t\"scale\" \"%.8f\"\n", rt->scale / (float)rt->x );
	fprintf( write_ptr, "}\n" );

	fclose( write_ptr );
}
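
// The generated file follows the CS:GO overview script format, e.g. for
// name = "de_example" (values illustrative):
//
//    "de_example"
//    {
//        "material" "overviews/de_example"
//        "pos_x" "-2048.00000000"
//        "pos_y" "-1024.00000000"
//        "scale" "4.00000000"
//    }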

void simple_raster( csr_target *rt, vmf_vert tri[3] )
{
	// Very very simplified rasterizing algorithm
	v2f bmin = { 0.f, 0.f };
	v2f bmax = { rt->x, rt->y };

	v2_minv( tri[0].co, tri[1].co, bmin );
	v2_minv( tri[2].co, bmin, bmin );

	v2_maxv( tri[0].co, tri[1].co, bmax );
	v2_maxv( tri[2].co, bmax, bmax );

	float range_x = (rt->bounds[1][0]-rt->bounds[0][0])/(float)rt->x;
	float range_y = (rt->bounds[1][1]-rt->bounds[0][1])/(float)rt->y;

	int start_x = csr_min( rt->x-1, csr_max( 0, floorf( (bmin[0]-rt->bounds[0][0])/range_x)));
	int end_x = csr_max( 0, csr_min( rt->x-1, ceilf( (bmax[0]-rt->bounds[0][0])/range_x)));
	int start_y = csr_min( rt->y-1, csr_max( 0, floorf( (bmin[1]-rt->bounds[0][1])/range_y)));
	int end_y = csr_max( 0, csr_min( rt->y-1, ceilf( (bmax[1]-rt->bounds[0][1])/range_y)));

	v2f v0, v1, v2, vp;
	float d, bca = 0.f, bcb = 0.f, bcc = 0.f;

	v2_sub( tri[1].co, tri[0].co, v0 );
	v2_sub( tri[2].co, tri[0].co, v1 );
	v2_sub( tri[1].co, tri[2].co, v2 );
	d = 1.f / (v0[0]*v1[1] - v1[0]*v0[1]);

	// Backface culling
	if( v2_cross( v0, v1 ) > 0.f )
		return;

	v2f trace_origin;

	for( u32 py = start_y; py <= end_y; py ++ )
	{
		trace_origin[1] = csr_lerpf( rt->bounds[0][1], rt->bounds[1][1], (float)py/(float)rt->y );

		for( u32 px = start_x; px <= end_x; px ++ )
		{
			u32 sample_index = ((rt->y-py-1)*rt->x+px) * rt->num_samples;

			void *frag = rt->colour + sample_index*rt->shader->stride;
			float *depth = &rt->depth[ sample_index ];

			trace_origin[0] = csr_lerpf( rt->bounds[0][0], rt->bounds[1][0], (float)px/(float)rt->x );

			// Determine coverage
			for( int i = 0; i < rt->num_samples; i ++ )
			{
				v3f sample_origin;

				v2_add( rt->subsamples[ i ], trace_origin, sample_origin );
				v2_sub( sample_origin, tri[0].co, vp );

				if( v2_cross( v0, vp ) > 0.f )
					continue;
				if( v2_cross( vp, v1 ) > 0.f )
					continue;

				v2f vp2;
				v2_sub( sample_origin, tri[2].co, vp2 );

				if( v2_cross( vp2, v2 ) > 0.f )
					continue;

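				// Barycentric weights via the signed-area (cross product) method:
				// bcb weights tri[1], bcc weights tri[2], and bca = 1-bcb-bcc weights tri[0]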
				bcb = (vp[0]*v1[1] - v1[0]*vp[1]) * d;
				bcc = (v0[0]*vp[1] - vp[0]*v0[1]) * d;
				bca = 1.f - bcb - bcc;

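				// Interpolate Z, biased so any value inside the Hammer grid (+/-16384)
				// stays positive, since the depth buffer is cleared to 0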
				float hit = (tri[0].co[2] * bca + tri[1].co[2] * bcb + tri[2].co[2] * bcc) +16385.f;

				if( hit > depth[i] )
				{
					depth[i] = hit;
					rt->shader->frag( frag+i*rt->shader->stride, tri, bca, bcb, bcc );
				}
			}
		}
	}
}

void csr_draw( csr_target *rt, vmf_vert *triangles, u32 triangle_count, m4x3f transform )
{
	m3x3f normal;
	vmf_vert new_tri[3];

	// Derive normal matrix
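	// (inverse transpose, so normals remain correct under non-uniform scale or shear)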
	m4x3_to_3x3( transform, normal );
	m3x3_inv_transpose( normal, normal );

	for( u32 i = 0; i < triangle_count; i ++ )
	{
		vmf_vert *triangle = triangles + i*3;

		m4x3_mulv( transform, triangle[0].co, new_tri[0].co );
		m4x3_mulv( transform, triangle[1].co, new_tri[1].co );
		m4x3_mulv( transform, triangle[2].co, new_tri[2].co );

		m3x3_mulv( normal, triangle[0].nrm, new_tri[0].nrm );
		m3x3_mulv( normal, triangle[1].nrm, new_tri[1].nrm );
		m3x3_mulv( normal, triangle[2].nrm, new_tri[2].nrm );

		v3_normalize( new_tri[0].nrm );
		v3_normalize( new_tri[1].nrm );
		v3_normalize( new_tri[2].nrm );

		m4x3_mulv( transform, triangle[0].origin, new_tri[0].origin );

		simple_raster( rt, new_tri );
	}
}

void csr_vmf_render( csr_target *rt, vmf_map *map, vdf_node *root, csr_filter *filter, m4x3f prev, m4x3f inst )
{
	m4x3f transform = M4X3_IDENTITY;
	vmf_solid solid;
	vmf_vert tri[3];
	boxf trf_bounds;

	u32 group_id = 0;
	int filter_visgroups = 0, filter_classname = 0, compute_bounds_only = 0;

	if( filter )
	{
		if( filter->visgroup )
		{
			filter_visgroups = 1;
			group_id = vmf_visgroup_id( root, filter->visgroup );
		}

		if( filter->classname )
		{
			filter_classname = 1;
		}

		compute_bounds_only = filter->compute_bounds_only;
	}

	// Multiply previous transform with instance transform to create basis
	if( prev )
	{
		m4x3_mul( prev, inst, transform );
	}

	// Gather world brushes
	solidgen_ctx_init( &solid );

	if( !filter_classname )
	{
		vdf_node *world = vdf_next( root, "world", NULL );

		vdf_foreach( world, "solid", brush )
		{
			if( filter_visgroups && !vmf_visgroup_match( brush, group_id ) )
				continue;

			// TODO: heap-use-after-free
			solidgen_push( &solid, brush );
		}
	}

	// Actual entity loop
	m4x3f model;

	vdf_foreach( root, "entity", ent )
	{
		if( filter_visgroups && !vmf_visgroup_match( ent, group_id ) )
			continue;

		if( filter_classname )
			if( strcmp( kv_get( ent, "classname", "" ), filter->classname ) )
				continue;

		if( ent->user & VMF_FLAG_IS_PROP )
		{
			// Create model transform
			m4x3_identity( model );

			vmf_entity_transform( ent, model );
			m4x3_mul( transform, model, model );

			// Draw model
			mdl_mesh_t *mdl = &map->models[ ent->user1 ].mdl;

			if( compute_bounds_only )
			{
				map->models[ ent->user1 ].need_load = 1;
				m4x3_expand_aabb_point( model, rt->bounds, (v3f){0.f,0.f,0.f} );
			}
			else
			{
				for( int i = 0; i < mdl->num_indices/3; i ++ )
				{
					for( int j = 0; j < 3; j ++ )
					{
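						// Model vertices are interleaved with a stride of 8 floats:
						// position at +0, normal at +3 (the rest are presumably UVs)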
						v3_copy( &mdl->vertices[ mdl->indices[ i*3+j ] *8 ], tri[j].co );
						v3_copy( &mdl->vertices[ mdl->indices[ i*3+j ] *8+3 ], tri[j].nrm );
						v3_zero( tri[j].origin );
					}

					csr_draw( rt, tri, 1, model );
				}
			}
		}
		else if( ent->user & VMF_FLAG_IS_INSTANCE )
		{
			m4x3_identity( model );
			vmf_entity_transform( ent, model );

			csr_vmf_render( rt, map, map->cache[ ent->user1 ].root, filter, transform, model );
		}
		else
		{
			// Brush entity
			vdf_foreach( ent, "solid", ent_solid )
			{
				solidgen_push( &solid, ent_solid );
			}
		}
	}

	if( compute_bounds_only )
	{
		solidgen_bounds( &solid, trf_bounds );
		m4x3_transform_aabb( transform, trf_bounds );
		box_concat( rt->bounds, trf_bounds );
	}
	else
	{
		// Draw brushes
		for( int i = 0; i < csr_sb_count( solid.indices )/3; i ++ )
		{
			u32 *base = solid.indices + i*3;

			tri[0] = solid.verts[ base[0] ];
			tri[1] = solid.verts[ base[1] ];
			tri[2] = solid.verts[ base[2] ];

			csr_draw( rt, tri, 1, transform );
		}
	}

	solidgen_ctx_reset( &solid );
	solidgen_ctx_free( &solid );
}

// Obsolete. Assumes a 1024x1024 render target.
void csr_rt_save_buffers( csr_target *rt, const char *basename, const char *subname )
{
	char output[ 512 ];

	float *image = (float *)csr_malloc( 1024*1024*sizeof(float)*3 );

	float contrib = 1.f/(float)rt->num_samples;

	for( int l = 0; l < rt->x; l ++ )
	{
		for( int x = 0; x < rt->y; x ++ )
		{
			float *dst = &image[ (l*1024+x)*3 ];
			void *src = rt->colour + ((1023-l)*1024+x) * rt->num_samples * rt->shader->stride;

			v3_muls( (float *)src, contrib, dst );

			for( int j = 1; j < rt->num_samples; j ++ )
			{
				v3_muladds( dst, (float *)(src + j*rt->shader->stride), contrib, dst );
			}
		}
	}

	// Save position buffer
	strcpy( output, basename );
	strcat( output, "." );
	strcat( output, subname );
	strcat( output, "_position.pfm" );
	csr_32f_write( output, rt->x, rt->y, image );

	free( image );
}

// Save floating point buffer to tga. Values must be in range (0-1)
// Offset is in bytes; nc is the number of channels to write
void csr_rt_save_tga( csr_target *rt, const char *path, u32 offset, u32 nc )
{
	u8 *image = (u8 *)csr_malloc( rt->x*rt->y * 4 );

	float contrib = 255.f/(float)rt->num_samples;

	for( int y = 0; y < rt->y; y ++ )
	{
		for( int x = 0; x < rt->x; x ++ )
		{
			u32 pixel_index = (y*rt->x + x);

			void *src = rt->colour + offset + pixel_index * rt->num_samples * rt->shader->stride;
			u8 *dst = image + pixel_index*4;

			v4f accum = { 0.f, 0.f, 0.f, 0.f };

			for( int k = 0; k < rt->num_samples; k ++ )
			{
				float *src_sample = (float *)(src + k*rt->shader->stride);

				for( int j = 0; j < nc; j ++ )
				{
					accum[ j ] += src_sample[ j ] * contrib;
				}
			}

			// TODO: Clamp this value
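			// (values above 255.f overflow the u8 conversion and should be clamped first)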
			dst[0] = accum[0];
			dst[1] = accum[1];
			dst[2] = accum[2];
			dst[3] = accum[3];
		}
	}

	csr_tga_write( path, rt->x, rt->y, nc, image );
	free( image );
}

#endif