# Depth-first iterator over a scene-graph node's descendants. Somewhat
# redundant, but it keeps the traversal logic in one place.
#
def encoder_graph_iterator( root ):
#{
   # Yield every descendant of root in depth-first (pre-order) order.
   # root itself is not yielded; each node is expected to carry a
   # 'children' list of further nodes.
   #
   for child in root['children']:
   #{
      yield child

      # Recurse into this child's own subtree before moving on to the
      # next sibling.
      #
      for descendant in encoder_graph_iterator( child ):
         yield descendant
   #}
#}
+
+
+# Push a vertex into the model file, or return a cached index (c_uint32)
+#
def encoder_vertex_push( vertex_reference, co, norm, uv, colour, groups, weights ):
#{
   # Push a vertex into the model file, or return a cached index (c_uint32).
   #
   # Vertices are deduplicated by quantizing each attribute to TOLERANCE
   # decimal places and using the resulting tuple as a dictionary key in
   # vertex_reference. On a cache miss a new mdl_vert is appended to the
   # encoder's vertex buffer and its index is cached and returned.
   #
   global g_encoder
   vertex_buffer = g_encoder['data']['vertex']

   TOLERANCE = 4
   m = float(10**TOLERANCE)

   # co/norm/uv are raw floats and get truncated into integer buckets;
   # colour, weights and groups arrive already quantized, so they only
   # get the same scale applied (no int()) to keep their exact values
   # in the key. Order and arithmetic must stay stable or the cache
   # would stop matching previously pushed vertices.
   #
   key = tuple( int(f*m+0.5) for f in (co[0],   co[1],   co[2],
                                       norm[0], norm[1], norm[2],
                                       uv[0],   uv[1]) ) \
       + tuple( q*m+0.5 for q in (colour[0],  colour[1],  colour[2],  colour[3],
                                  weights[0], weights[1], weights[2], weights[3],
                                  groups[0],  groups[1],  groups[2],  groups[3]) )

   if key in vertex_reference:
      return vertex_reference[key]

   index = c_uint32( len(vertex_reference) )
   vertex_reference[key] = index

   v = mdl_vert()

   # Positions/normals are swizzled (x, z, -y) out of Blender's Z-up
   # space into the engine's convention.
   #
   v.co[0]   =  co[0]
   v.co[1]   =  co[2]
   v.co[2]   = -co[1]
   v.norm[0] =  norm[0]
   v.norm[1] =  norm[2]
   v.norm[2] = -norm[1]
   v.uv[0]   =  uv[0]
   v.uv[1]   =  uv[1]

   for i in range(4):
   #{
      v.colour[i]  = colour[i]
      v.weights[i] = weights[i]
      v.groups[i]  = groups[i]
   #}

   vertex_buffer.append( v )
   return index
#}
+
+
+# Compile a mesh (or use one from the cache) onto node, based on node_def
+# No return value
+#
+def encoder_compile_mesh( node, node_def ):
+#{
+ global g_encoder
+
+ graph = g_encoder['scene_graph']
+ graph_lookup = g_encoder['graph_lookup']
+ mesh_cache = g_encoder['mesh_cache']
+ obj = node_def['obj']
+ armature_def = None
+ can_use_cache = True
+
+ # Check for modifiers that typically change the data per-instance
+ # there is no well defined rule for the choices here, its just what i've
+ # needed while producing the game.
+ #
+ # It may be possible to detect these cases automatically.
+ #
+ for mod in obj.modifiers:
+ #{
+ if mod.type == 'DATA_TRANSFER' or mod.type == 'SHRINKWRAP' or \
+ mod.type == 'BOOLEAN' or mod.type == 'CURVE' or \
+ mod.type == 'ARRAY':
+ #{
+ can_use_cache = False
+ #}
+
+ if mod.type == 'ARMATURE':
+ armature_def = graph_lookup[mod.object]
+
+ # Check the cache first
+ #
+ if can_use_cache and (obj.data.name in mesh_cache):
+ #{
+ ref = mesh_cache[obj.data.name]
+ node.submesh_start = ref.submesh_start
+ node.submesh_count = ref.submesh_count
+ return
+ #}
+
+ # Compile a whole new mesh
+ #
+ node.submesh_start = len( g_encoder['data']['submesh'] )
+ node.submesh_count = 0
+
+ default_mat = c_uint32(12345)
+ default_mat.name = ""
+
+ dgraph = bpy.context.evaluated_depsgraph_get()
+ data = obj.evaluated_get(dgraph).data
+ data.calc_loop_triangles()
+ data.calc_normals_split()
+
+ # Mesh is split into submeshes based on their material
+ #
+ mat_list = data.materials if len(data.materials) > 0 else [default_mat]
+ for material_id, mat in enumerate(mat_list):
+ #{
+ mref = {}
+
+ sm = mdl_submesh()
+ sm.indice_start = len( g_encoder['data']['indice'] )
+ sm.vertex_start = len( g_encoder['data']['vertex'] )
+ sm.vertex_count = 0
+ sm.indice_count = 0
+ sm.material_id = encoder_process_material( mat )
+
+ for i in range(3):
+ #{
+ sm.bbx[0][i] = 999999
+ sm.bbx[1][i] = -999999
+ #}
+
+ # Keep a reference to very very very similar vertices
+ #
+ vertex_reference = {}
+
+ # Write the vertex / indice data
+ #
+ for tri_index, tri in enumerate(data.loop_triangles):
+ #{
+ if tri.material_index != material_id:
+ continue
+
+ for j in range(3):
+ #{
+ vert = data.vertices[tri.vertices[j]]
+ li = tri.loops[j]
+ vi = data.loops[li].vertex_index
+
+ # Gather vertex information
+ #
+ co = vert.co
+ norm = data.loops[li].normal
+ uv = (0,0)
+ colour = (255,255,255,255)
+ groups = [0,0,0,0]
+ weights = [0,0,0,0]
+
+ # Uvs
+ #
+ if data.uv_layers:
+ uv = data.uv_layers.active.data[li].uv
+
+ # Vertex Colours
+ #
+ if data.vertex_colors:
+ #{
+ colour = data.vertex_colors.active.data[li].color
+ colour = (int(colour[0]*255.0),\
+ int(colour[1]*255.0),\
+ int(colour[2]*255.0),\
+ int(colour[3]*255.0))
+ #}
+
+ # Weight groups: truncates to the 3 with the most influence. The
+ # fourth bone ID is never used by the shader so it is
+ # always 0
+ #
+ if armature_def:
+ #{
+ src_groups = [_ for _ in data.vertices[vi].groups \
+ if obj.vertex_groups[_.group].name in \
+ armature_def['bones']]
+
+ weight_groups = sorted( src_groups, key = \
+ lambda a: a.weight, reverse=True )
+ tot = 0.0
+ for ml in range(3):
+ #{
+ if len(weight_groups) > ml:
+ #{
+ g = weight_groups[ml]
+ name = obj.vertex_groups[g.group].name
+ weight = g.weight
+
+ weights[ml] = weight
+ groups[ml] = armature_def['bones'].index(name)
+ tot += weight
+ #}
+ #}
+
+ if len(weight_groups) > 0:
+ #{
+ inv_norm = (1.0/tot) * 65535.0
+ for ml in range(3):
+ #{
+ weights[ml] = int( weights[ml] * inv_norm )
+ weights[ml] = min( weights[ml], 65535 )
+ weights[ml] = max( weights[ml], 0 )
+ #}
+ #}
+
+ # Add vertex and expand bound box
+ #
+ index = encoder_vertex_push( vertex_reference, co, \
+ norm, \
+ uv, \
+ colour, \
+ groups, \
+ weights )
+ g_encoder['data']['indice'] += [index]
+ #}
+ #}