1 typedef struct csr_frag csr_frag
;
2 typedef struct csr_target csr_target
;
3 typedef struct csr_filter csr_filter
;
4 typedef enum EMSAA EMSAA
;
6 typedef void (* csr_frag_shader
)( v4f
, vmf_vert
[3], float, float, float );
18 { 0x0.4p0f
, 0x0.4p0f
},
19 { 0x0.4p0f
, -0x0.4p0f
},
20 { -0x0.4p0f
, -0x0.4p0f
},
21 { -0x0.4p0f
, 0x0.4p0f
}
28 v2f csr_msaa_2x2rgss
[] =
30 { 0x0.2p0f
, 0x0.6p0f
},
31 { -0x0.6p0f
, 0x0.2p0f
},
32 { -0x0.2p0f
, -0x0.6p0f
},
33 { 0x0.6p0f
, -0x0.2p0f
}
44 v2f csr_msaa_8rook
[] =
46 { 0x0.1p0f
, 0x0.7p0f
},
47 { 0x0.5p0f
, 0x0.1p0f
},
48 { 0x0.7p0f
, -0x0.3p0f
},
49 { 0x0.3p0f
, -0x0.5p0f
},
50 { -0x0.1p0f
, -0x0.7p0f
},
51 { -0x0.5p0f
, -0x0.1p0f
},
52 { -0x0.7p0f
, 0x0.3p0f
},
53 { -0x0.3p0f
, 0x0.5p0f
}
// NOTE(review): fragment of struct csr_target — the surrounding struct
// definition was lost in extraction. This is the active fragment
// shader, set by csr_use_program() and invoked from simple_raster().
74 csr_frag_shader shader
;
77 void csr_use_program( csr_target
*rt
, csr_frag_shader shader
)
// NOTE(review): fragment of struct csr_filter — the surrounding struct
// definition was lost in extraction. All three fields are read by
// draw_vmf_group().
84 const char *visgroup
; // Limit to this visgroup only
85 const char *classname
; // Limit to this exact classname. will not draw world
// When non-zero, geometry only expands rt->bounds; nothing is rasterized
87 int compute_bounds_only
;
// Initialise a render target of x*y pixels using MSAA pattern `aa`.
// NOTE(review): extraction fragment — the original lines between the
// statements below (body braces, the switch over `aa`, the
// rt->num_samples / rt->x / rt->y assignments) are missing here; the
// surviving lines are kept byte-identical.
98 void csr_create_target( csr_target
*rt
, u32 x
, u32 y
, EMSAA aa
)
// One sample_src assignment per MSAA mode (selected by the lost switch):
// no AA — csr_msaa_1 is presumably a single centred offset (declared
// outside this chunk; TODO confirm)
108 rt
->sample_src
= csr_msaa_1
;
// 2x2 ordered grid
113 rt
->sample_src
= csr_msaa_2x2
;
// 2x2 rotated grid
118 rt
->sample_src
= csr_msaa_2x2rgss
;
// 8-rook
123 rt
->sample_src
= csr_msaa_8rook
;
// One csr_frag per subsample: x * y * num_samples entries.
// csr_malloc is a project allocator (behaviour on OOM not visible here).
127 rt
->fragments
= (csr_frag
*)csr_malloc( x
*y
*sizeof(csr_frag
)*rt
->num_samples
);
// Start with an inverted AABB so the first box_concat/expand shrinks in
129 v3_fill( rt
->bounds
[0], INFINITY
);
130 v3_fill( rt
->bounds
[1], -INFINITY
);
133 void csr_update_subsamples( csr_target
*rt
)
135 float range_x
= (rt
->bounds
[1][0]-rt
->bounds
[0][0]);
136 float range_y
= (rt
->bounds
[1][1]-rt
->bounds
[0][1]);
138 v2f pixel_size
= { range_x
/(float)rt
->x
, range_y
/(float)rt
->y
};
140 for( int i
= 0; i
< rt
->num_samples
; i
++ )
142 v2_mul( rt
->sample_src
[i
], pixel_size
, rt
->subsamples
[i
] );
146 void csr_rt_free( csr_target
*rt
)
148 free( rt
->fragments
);
151 void csr_rt_clear( csr_target
*rt
)
153 for( u32 i
= 0; i
< rt
->x
*rt
->y
*rt
->num_samples
; i
++ )
155 v4_zero( rt
->fragments
[ i
].colour
);
156 rt
->fragments
[i
].depth
= 0.f
;
160 void csr_auto_fit( csr_target
*rt
, float padding
)
162 // Correct aspect ratio to be square
163 float dx
, dy
, l
, cx
, cy
;
164 dx
= rt
->bounds
[1][0] - rt
->bounds
[0][0];
165 dy
= rt
->bounds
[1][1] - rt
->bounds
[0][1];
167 l
= fmaxf( dx
, dy
) * .5f
;
169 cx
= (rt
->bounds
[1][0] + rt
->bounds
[0][0]) * .5f
;
170 cy
= (rt
->bounds
[1][1] + rt
->bounds
[0][1]) * .5f
;
172 rt
->bounds
[0][0] = cx
- l
- padding
;
173 rt
->bounds
[1][0] = cx
+ l
+ padding
;
174 rt
->bounds
[0][1] = cy
- l
- padding
;
175 rt
->bounds
[1][1] = cy
+ l
+ padding
;
177 rt
->scale
= l
+ padding
;
179 csr_update_subsamples( rt
);
182 void csr_write_txt( char const *path
, const char *name
, csr_target
*rt
)
186 write_ptr
= fopen( path
, "w" );
188 fprintf( write_ptr
, "\"%s\"\n\{\n", name
);
189 fprintf( write_ptr
, "\t\"material\" \"overviews/%s\"\n", name
);
190 fprintf( write_ptr
, "\t\"pos_x\" \"%.8f\"\n", rt
->bounds
[0][0] );
191 fprintf( write_ptr
, "\t\"pos_y\" \"%.8f\"\n", rt
->bounds
[0][1] );
192 fprintf( write_ptr
, "\t\"scale\" \"%.8f\"\n", rt
->scale
/ (float)rt
->x
);
193 fprintf( write_ptr
, "}\n" );
198 void frag_gbuffer( v4f frag_colour
, vmf_vert tri
[3], float bca
, float bcb
, float bcc
)
200 v3_muls( tri
[0].co
, bca
, frag_colour
);
201 v3_muladds( frag_colour
, tri
[1].co
, bcb
, frag_colour
);
202 v3_muladds( frag_colour
, tri
[2].co
, bcc
, frag_colour
);
// Rasterize one triangle (already in world/XY space) into rt,
// testing each pixel's MSAA subsamples with edge functions and
// calling rt->shader for samples that pass the depth test.
// NOTE(review): extraction fragment — braces, the declarations of
// v0/v1/v2/vp/vp2/trace_origin/sample_origin, the body of the winding
// test at line 234, per-sample `continue`s and the depth write before
// the shader call are missing; surviving lines kept byte-identical.
205 void simple_raster( csr_target
*rt
, vmf_vert tri
[3] )
207 // Very very simplified rasterizing algorithm
// bmin/bmax start as the full screen but are immediately overwritten
// by the min/max of the triangle's vertices below
208 v2f bmin
= { 0.f
, 0.f
};
209 v2f bmax
= { rt
->x
, rt
->y
};
211 v2_minv( tri
[0].co
, tri
[1].co
, bmin
);
212 v2_minv( tri
[2].co
, bmin
, bmin
);
214 v2_maxv( tri
[0].co
, tri
[1].co
, bmax
);
215 v2_maxv( tri
[2].co
, bmax
, bmax
);
// World-space size of one pixel in each axis
217 float range_x
= (rt
->bounds
[1][0]-rt
->bounds
[0][0])/(float)rt
->x
;
218 float range_y
= (rt
->bounds
[1][1]-rt
->bounds
[0][1])/(float)rt
->y
;
// Triangle AABB converted to pixel indices, clamped to the target
220 int start_x
= csr_min( rt
->x
-1, csr_max( 0, floorf( (bmin
[0]-rt
->bounds
[0][0])/range_x
)));
221 int end_x
= csr_max( 0, csr_min( rt
->x
-1, ceilf( (bmax
[0]-rt
->bounds
[0][0])/range_x
)));
222 int start_y
= csr_min( rt
->y
-1, csr_max( 0, floorf( (bmin
[1]-rt
->bounds
[0][1])/range_y
)));
223 int end_y
= csr_max( 0, csr_min( rt
->y
-1, ceilf( (bmax
[1]-rt
->bounds
[0][1])/range_y
)));
226 float d
, bca
= 0.f
, bcb
= 0.f
, bcc
= 0.f
;
// Edge vectors: v0 = B-A, v1 = C-A, v2 = B-C (declarations lost in
// extraction; presumably v2f — TODO confirm)
228 v2_sub( tri
[1].co
, tri
[0].co
, v0
);
229 v2_sub( tri
[2].co
, tri
[0].co
, v1
);
230 v2_sub( tri
[1].co
, tri
[2].co
, v2
);
// Reciprocal of twice the signed area — shared denominator for the
// barycentric weights below. Degenerate triangles divide by zero here.
231 d
= 1.f
/ (v0
[0]*v1
[1] - v1
[0]*v0
[1]);
// Winding / backface test; its body (lines 235-238) is missing —
// presumably an early return culling one winding. TODO confirm.
234 if( v2_cross( v0
, v1
) > 0.f
)
// Scan the clamped pixel rectangle
239 for( u32 py
= start_y
; py
<= end_y
; py
++ )
241 trace_origin
[1] = csr_lerpf( rt
->bounds
[0][1], rt
->bounds
[1][1], (float)py
/(float)rt
->y
);
243 for( u32 px
= start_x
; px
<= end_x
; px
++ )
// NOTE(review): row stride uses rt->y — correct only for square
// targets; a non-square target would want (py * rt->x + px). Verify.
245 csr_frag
*frag
= &rt
->fragments
[ (py
* rt
->y
+ px
) * rt
->num_samples
];
247 trace_origin
[0] = csr_lerpf( rt
->bounds
[0][0], rt
->bounds
[1][0], (float)px
/(float)rt
->x
);
249 // Determine coverage
250 for( int i
= 0; i
< rt
->num_samples
; i
++ )
// World-space position of this subsample
254 v2_add( rt
->subsamples
[ i
], trace_origin
, sample_origin
);
255 v2_sub( sample_origin
, tri
[0].co
, vp
);
// Half-space (edge-function) inside tests against all three edges
257 if( v2_cross( v0
, vp
) > 0.f
)
259 if( v2_cross( vp
, v1
) > 0.f
)
263 v2_sub( sample_origin
, tri
[2].co
, vp2
);
265 if( v2_cross( vp2
, v2
) > 0.f
)
// Barycentric weights from the edge functions (bca by complement)
268 bcb
= (vp
[0]*v1
[1] - v1
[0]*vp
[1]) * d
;
269 bcc
= (v0
[0]*vp
[1] - vp
[0]*v0
[1]) * d
;
270 bca
= 1.f
- bcb
- bcc
;
// Interpolated Z, biased by +16385 so cleared depth 0 acts as "far"
272 float hit
= (tri
[0].co
[2] * bca
+ tri
[1].co
[2] * bcb
+ tri
[2].co
[2] * bcc
) +16385.f
;
// Greater-than depth test; the depth write (line ~276) is missing here
274 if( hit
> frag
[i
].depth
)
// Shade the winning sample
277 rt
->shader( frag
[i
].colour
, tri
, bca
, bcb
, bcc
);
284 void csr_draw( csr_target
*rt
, vmf_vert
*triangles
, u32 triangle_count
, m4x3f transform
)
289 // Derive normal matrix
290 m4x3_to_3x3( transform
, normal
);
292 // NOTE: This isn't strictly necessary since CS:GO only uses uniform scaling.
293 m3x3_inv_transpose( normal
, normal
);
295 for( u32 i
= 0; i
< triangle_count
; i
++ )
297 vmf_vert
*triangle
= triangles
+ i
*3;
299 m4x3_mulv( transform
, triangle
[0].co
, new_tri
[0].co
);
300 m4x3_mulv( transform
, triangle
[1].co
, new_tri
[1].co
);
301 m4x3_mulv( transform
, triangle
[2].co
, new_tri
[2].co
);
302 m3x3_mulv( normal
, triangle
[0].nrm
, new_tri
[0].nrm
);
303 m3x3_mulv( normal
, triangle
[1].nrm
, new_tri
[1].nrm
);
304 m3x3_mulv( normal
, triangle
[2].nrm
, new_tri
[2].nrm
);
306 simple_raster( rt
, new_tri
);
// Recursively draw a VMF document: world brushes, prop entities,
// nested instances (recursion with a combined transform) and entity
// brushes, honouring the visgroup/classname filter. In
// compute_bounds_only mode geometry only expands rt->bounds.
// NOTE(review): extraction fragment — braces, `continue` statements
// and the declarations of solid/group_id/model/tri/trf_bounds are
// missing; surviving lines kept byte-identical.
310 void draw_vmf_group( csr_target
*rt
, vmf_map
*map
, vdf_node
*root
, csr_filter
*filter
, m4x3f prev
, m4x3f inst
)
312 m4x3f transform
= M4X3_IDENTITY
;
318 int filter_visgroups
= 0, filter_classname
= 0, compute_bounds_only
= 0;
// Resolve the visgroup name to this document's numeric id
322 if( filter
->visgroup
)
324 filter_visgroups
= 1;
325 group_id
= vmf_visgroup_id( root
, filter
->visgroup
);
328 if( filter
->classname
)
330 filter_classname
= 1;
333 compute_bounds_only
= filter
->compute_bounds_only
;
336 // Multiply previous transform with instance transform to create basis
339 m4x3_mul( prev
, inst
, transform
);
342 // Gather world brushes
343 solidgen_ctx_init( &solid
);
// Classname filtering never draws world geometry (see csr_filter)
345 if( !filter_classname
)
347 vdf_node
*world
= vdf_next( root
, "world", NULL
);
349 vdf_foreach( world
, "solid", brush
)
// Skip brushes outside the requested visgroup (a `continue` at the
// missing line 352 presumably follows)
351 if( filter_visgroups
&& !vmf_visgroup_match( brush
, group_id
) )
354 // TODO: heap-use-after-free
355 solidgen_push( &solid
, brush
);
359 // Actual entity loop
362 vdf_foreach( root
, "entity", ent
)
364 if( filter_visgroups
&& !vmf_visgroup_match( ent
, group_id
) )
367 if( filter_classname
)
// strcmp != 0 -> classname mismatch (skip; `continue` line missing)
368 if( strcmp( kv_get( ent
, "classname", "" ), filter
->classname
) )
// Prop entity: draw its model
371 if( ent
->user
& VMF_FLAG_IS_PROP
)
373 // Create model transform
374 m4x3_identity( model
);
376 vmf_entity_transform( ent
, model
);
377 m4x3_mul( transform
, model
, model
);
// user1 indexes this entity's model slot in the map cache
380 mdl_mesh_t
*mdl
= &map
->models
[ ent
->user1
].mdl
;
// Bounds pass: mark the model for later loading and expand the AABB
// by the prop's origin only
382 if( compute_bounds_only
)
384 map
->models
[ ent
->user1
].need_load
= 1;
385 m4x3_expand_aabb_point( model
, rt
->bounds
, (v3f
){0.f
,0.f
,0.f
} );
// Draw pass: vertex stream is 8 floats per vertex — position at +0,
// normal at +3 (remaining floats not read here)
389 for( int i
= 0; i
< mdl
->num_indices
/3; i
++ )
391 for( int j
= 0; j
< 3; j
++ )
393 v3_copy( &mdl
->vertices
[ mdl
->indices
[ i
*3+j
] *8 ], tri
[j
].co
);
394 v3_copy( &mdl
->vertices
[ mdl
->indices
[ i
*3+j
] *8+3 ], tri
[j
].nrm
);
// One triangle per csr_draw call
399 csr_draw( rt
, tri
, 1, model
);
// Nested instance: recurse with the combined transform
403 else if( ent
->user
& VMF_FLAG_IS_INSTANCE
)
405 m4x3_identity( model
);
406 vmf_entity_transform( ent
, model
);
408 draw_vmf_group( rt
, map
, map
->cache
[ ent
->user1
].root
, filter
, transform
, model
);
// Plain entity: collect its brushes with the world solids
413 vdf_foreach( ent
, "solid", ent_solid
)
415 solidgen_push( &solid
, ent_solid
);
// Bounds pass for collected solids: transform their AABB and merge
420 if( compute_bounds_only
)
422 solidgen_bounds( &solid
, trf_bounds
);
423 m4x3_transform_aabb( transform
, trf_bounds
);
424 box_concat( rt
->bounds
, trf_bounds
);
// Draw pass: rasterize the generated solid triangles
429 for( int i
= 0; i
< csr_sb_count( solid
.indices
)/3; i
++ )
431 u32
* base
= solid
.indices
+ i
*3;
433 tri
[0] = solid
.verts
[ base
[0] ];
434 tri
[1] = solid
.verts
[ base
[1] ];
435 tri
[2] = solid
.verts
[ base
[2] ];
437 csr_draw( rt
, tri
, 1, transform
);
441 solidgen_ctx_reset( &solid
);
442 solidgen_ctx_free( &solid
);
445 void csr_rt_save_buffers( csr_target
*rt
, const char *basename
, const char *subname
)
449 float *image
= (float *)csr_malloc( 1024*1024*sizeof(float)*3 );
451 float contrib
= 1.f
/(float)rt
->num_samples
;
453 for( int l
= 0; l
< rt
->x
; l
++ )
455 for( int x
= 0; x
< rt
->y
; x
++ )
457 float *dst
= &image
[ (l
*1024+x
)*3 ];
458 csr_frag
*src
= &rt
->fragments
[ ((1023-l
)*1024+x
)*rt
->num_samples
];
461 v3_muls( src
[0].colour
, contrib
, dst
);
463 for( int j
= 1; j
< rt
->num_samples
; j
++ )
465 v3_muladds( dst
, src
[j
].colour
, contrib
, dst
);
470 // Save position buffer
471 strcpy( output
, basename
);
472 strcat( output
, "." );
473 strcat( output
, subname
);
474 strcat( output
, "_position.pfm" );
475 csr_32f_write( output
, rt
->x
, rt
->y
, image
);