- TOLERENCE = 4
- m = float(10**TOLERENCE)
-
- # Would be nice to know if this can be done faster than it currently runs,
- # its quite slow.
- #
- key = (int(co[0]*m+0.5),
- int(co[1]*m+0.5),
- int(co[2]*m+0.5),
- int(norm[0]*m+0.5),
- int(norm[1]*m+0.5),
- int(norm[2]*m+0.5),
- int(uv[0]*m+0.5),
- int(uv[1]*m+0.5),
- colour[0], # these guys are already quantized
- colour[1], # .
- colour[2], # .
- colour[3], # .
- weights[0], # v
- weights[1],
- weights[2],
- weights[3],
- groups[0],
- groups[1],
- groups[2],
- groups[3])
-
- if key in vertex_reference:
- return vertex_reference[key]
- else:
- #{
- index = c_uint32( len(vertex_reference) )
- vertex_reference[key] = index
-
- v = mdl_vert()
- v.co[0] = co[0]
- v.co[1] = co[2]
- v.co[2] = -co[1]
- v.norm[0] = norm[0]
- v.norm[1] = norm[2]
- v.norm[2] = -norm[1]
- v.uv[0] = uv[0]
- v.uv[1] = uv[1]
- v.colour[0] = colour[0]
- v.colour[1] = colour[1]
- v.colour[2] = colour[2]
- v.colour[3] = colour[3]
- v.weights[0] = weights[0]
- v.weights[1] = weights[1]
- v.weights[2] = weights[2]
- v.weights[3] = weights[3]
- v.groups[0] = groups[0]
- v.groups[1] = groups[1]
- v.groups[2] = groups[2]
- v.groups[3] = groups[3]
-
- buffer += [v]
- return index
- #}
-#}
-
-
-# Compile a mesh (or use one from the cache) onto node, based on node_def
-# No return value
-#
-def encoder_compile_mesh( node, node_def ):
+ dij = create_node_graph( route_curves, route_gates )
+
+ for obj in routes:#{
+ obj_data = obj.SR_data.ent_route[0]
+ route = ent_route()
+ route.pstr_name = sr_compile_string( obj_data.alias )
+ route.checkpoints_start = checkpoint_count
+ route.checkpoints_count = 0
+
+ for ci in range(3):
+ route.colour[ci] = obj_data.colour[ci]
+ route.colour[3] = 1.0
+
+ compile_obj_transform( obj, route.transform )
+ checkpoints = obj_data.gates
+
+ for i in range(len(checkpoints)):#{
+ gi = checkpoints[i].target
+ gj = checkpoints[(i+1)%len(checkpoints)].target
+ gate = gi
+
+ if gi:#{
+ dest = gi.SR_data.ent_gate[0].target
+ gi = dest
+ #}
+
+ if gi==gj: continue # error?
+ if not gi or not gj: continue
+
+ checkpoint = ent_checkpoint()
+ checkpoint.gate_index = sr_compile.entity_ids[gate.name]
+ checkpoint.path_start = pathindice_count
+ checkpoint.path_count = 0
+
+ path = solve_graph( dij, gi.name, gj.name )
+
+ if path:#{
+ for pi in range(len(path)):#{
+ pathindice = ent_path_index()
+ pathindice.index = routenode_count + path[pi]
+ sr_ent_push( pathindice )
+
+ checkpoint.path_count += 1
+ pathindice_count += 1
+ #}
+ #}
+
+ sr_ent_push( checkpoint )
+ route.checkpoints_count += 1
+ checkpoint_count += 1
+ #}
+
+ sr_ent_push( route )
+ #}
+
+ for point in dij.points:#{
+ rn = ent_route_node()
+ rn.co[0] = point[0]
+ rn.co[1] = point[2]
+ rn.co[2] = -point[1]
+ sr_ent_push( rn )
+ #}
+
+ routenode_count += len(dij.points)
+ #}
+
+
+ print( F"[SR] Writing file" )
+
+ file_array_instructions = {}
+ file_offset = 0
+
+ def _write_array( name, item_size, data ):#{
+ # Plan one named array of the output file: record its element count
+ # (len(data)//item_size), element size, raw bytes and the current byte
+ # offset in file_array_instructions, then advance the running
+ # file_offset past the payload, rounded up to 8 bytes (int_align_to —
+ # presumably matches the 8-byte bytearray_align_to padding used when
+ # the arrays are actually written out below).
+ nonlocal file_array_instructions, file_offset
+
+ count = len(data)//item_size
+ file_array_instructions[name] = {'count':count, 'size':item_size,\
+ 'data':data, 'offset': file_offset}
+ file_offset += len(data)
+ file_offset = int_align_to( file_offset, 8 )
+ #}
+
+ _write_array( 'strings', 1, sr_compile.string_data )
+ _write_array( 'mdl_mesh', sizeof(mdl_mesh), sr_compile.mesh_data )
+ _write_array( 'mdl_submesh', sizeof(mdl_submesh), sr_compile.submesh_data )
+ _write_array( 'mdl_material', sizeof(mdl_material), sr_compile.material_data)
+ _write_array( 'mdl_texture', sizeof(mdl_texture), sr_compile.texture_data)
+ _write_array( 'mdl_armature', sizeof(mdl_armature), sr_compile.armature_data)
+ _write_array( 'mdl_bone', sizeof(mdl_bone), sr_compile.bone_data )
+
+ for name, buffer in sr_compile.entity_data.items():#{
+ _write_array( name, sr_compile.entity_info[name]['size'], buffer )
+ #}
+
+ _write_array( 'mdl_animation', sizeof(mdl_animation), sr_compile.anim_data)
+ _write_array( 'mdl_keyframe', sizeof(mdl_transform),sr_compile.keyframe_data)
+ _write_array( 'mdl_vert', sizeof(mdl_vert), sr_compile.vertex_data )
+ _write_array( 'mdl_indice', sizeof(c_uint32), sr_compile.indice_data )
+ _write_array( 'pack', 1, sr_compile.pack_data )
+
+ header_size = int_align_to( sizeof(mdl_header), 8 )
+ index_size = int_align_to( sizeof(mdl_array)*len(file_array_instructions),8 )
+
+ folder = bpy.path.abspath(bpy.context.scene.SR_data.export_dir)
+ path = F"{folder}{collection.name}.mdl"
+ print( path )
+
+ fp = open( path, "wb" )
+ header = mdl_header()
+ header.version = 40
+ sr_array_title( header.arrays, \
+ 'index', len(file_array_instructions), \
+ sizeof(mdl_array), header_size )
+
+ fp.write( bytearray_align_to( bytearray(header), 8 ) )
+
+ print( F'[SR] {"name":>16}| count | offset' )
+ index = bytearray()
+ for name,info in file_array_instructions.items():#{
+ arr = mdl_array()
+ offset = info['offset'] + header_size + index_size
+ sr_array_title( arr, name, info['count'], info['size'], offset )
+ index.extend( bytearray(arr) )
+
+ print( F'[SR] {name:>16}| {info["count"]: 8} '+\
+ F' 0x{info["offset"]:02x}' )
+ #}
+ fp.write( bytearray_align_to( index, 8 ) )
+ #bytearray_print_hex( index )
+
+ for name,info in file_array_instructions.items():#{
+ fp.write( bytearray_align_to( info['data'], 8 ) )
+ #}
+
+ fp.close()
+
+ print( '[SR] done' )
+#}
+
+class SR_SCENE_SETTINGS(bpy.types.PropertyGroup):