+ # This also collects some information about constraints and IK, and
+ # counts colliders for the armature.
+ #
+ def _extendb( p, n, d ):
+ #{
+    nonlocal tree
+
+    # One dictionary per bone: 'p' is the parent entry, 'n' the bone,
+    # 'd' its depth. 'linked_armature' points back at the armature's own
+    # graph node so bone entries can reach it later.
+    btree = {}
+    btree["bone"] = n
+    btree["linked_armature"] = tree
+    btree["uid"] = _new_uid()
+    btree["children"] = []
+    btree["depth"] = d
+    btree["parent"] = p
+    tree["bones"] += [n.name]
+
+    # Depth-first recursion over the bone's children
+    for c in n.children:
+    #{
+       _extendb( btree, c, d+1 )
+    #}
+
+    # Record IK constraint targets from the matching pose bone. If a bone
+    # has several IK constraints only the last one's targets are kept,
+    # but each one still bumps tree['ik_count'].
+    for c in tree['obj'].pose.bones[n.name].constraints:
+    #{
+       if c.type == 'IK':
+       #{
+          btree["ik_target"] = c.subtarget
+          btree["ik_pole"] = c.pole_subtarget
+          tree["ik_count"] += 1
+       #}
+    #}
+
+    # Any non-'collider_none' bone counts towards the armature's colliders
+    if n.cv_data.collider != 'collider_none':
+       tree['collider_count'] += 1
+
+    btree['deform'] = n.use_deform
+    p['children'] += [btree]
+ #}
+
+ for b in n.data.bones:
+ if not b.parent:
+ _extendb( tree, b, d+1 )
+ #}
+
+ # Recurse into children of this object
+ #
+ for obj1 in n.children:
+ #{
+ nonlocal collection
+ for c1 in obj1.users_collection:
+ #{
+ if c1 == collection:
+ #{
+ _extend( tree, obj1, d+1 )
+ break
+ #}
+ #}
+ #}
+
+ p["children"] += [tree]
+ graph_lookup[n] = tree
+
+ #}
+
+ _extend( graph, obj, 1 )
+
+ #}
+#}
+
+
+ # Depth-first (pre-order) iterator over all descendants of a scene
+ # graph node, excluding the node itself.
+ #
+ def encoder_graph_iterator( root ):
+ #{
+    # Yield every node reachable from 'root' via the 'children' lists,
+    # parent before child, without yielding 'root' itself.
+    for c in root['children']:
+    #{
+       yield c
+       yield from encoder_graph_iterator(c)
+    #}
+ #}
+
+
+# Push a vertex into the model file, or return a cached index (c_uint32)
+#
+ def encoder_vertex_push( vertex_reference, co,norm,uv,colour,groups,weights ):
+ #{
+    global g_encoder
+    buffer = g_encoder['data']['vertex']
+
+    # Position/normal/uv are quantized to 4 decimal places so that
+    # near-identical vertices hash to the same key and get merged.
+    # (Identifier spelling 'TOLERENCE' kept as-is; it is referenced below.)
+    TOLERENCE = 4
+    m = float(10**TOLERENCE)
+
+    # Would be nice to know if this can be done faster than it currently runs,
+    # its quite slow.
+    #
+    key = (int(co[0]*m+0.5),
+           int(co[1]*m+0.5),
+           int(co[2]*m+0.5),
+           int(norm[0]*m+0.5),
+           int(norm[1]*m+0.5),
+           int(norm[2]*m+0.5),
+           int(uv[0]*m+0.5),
+           int(uv[1]*m+0.5),
+           colour[0], # these guys are already quantized
+           colour[1], # .
+           colour[2], # .
+           colour[3], # .
+           weights[0], # v
+           weights[1],
+           weights[2],
+           weights[3],
+           groups[0],
+           groups[1],
+           groups[2],
+           groups[3])
+
+    # Cache hit: reuse the existing index instead of growing the buffer
+    if key in vertex_reference:
+       return vertex_reference[key]
+    else:
+    #{
+       index = c_uint32( len(vertex_reference) )
+       vertex_reference[key] = index
+
+       # New vertex; note position and normal are swizzled from Blender's
+       # Z-up convention: (x, y, z) -> (x, z, -y).
+       v = mdl_vert()
+       v.co[0] = co[0]
+       v.co[1] = co[2]
+       v.co[2] = -co[1]
+       v.norm[0] = norm[0]
+       v.norm[1] = norm[2]
+       v.norm[2] = -norm[1]
+       v.uv[0] = uv[0]
+       v.uv[1] = uv[1]
+       v.colour[0] = colour[0]
+       v.colour[1] = colour[1]
+       v.colour[2] = colour[2]
+       v.colour[3] = colour[3]
+       v.weights[0] = weights[0]
+       v.weights[1] = weights[1]
+       v.weights[2] = weights[2]
+       v.weights[3] = weights[3]
+       v.groups[0] = groups[0]
+       v.groups[1] = groups[1]
+       v.groups[2] = groups[2]
+       v.groups[3] = groups[3]
+
+       buffer += [v]
+       return index
+    #}
+ #}
+
+
+# Compile a mesh (or use one from the cache) onto node, based on node_def
+# No return value
+#
+ def encoder_compile_mesh( node, node_def ):
+ #{
+    global g_encoder
+
+    # Writes submesh_start/submesh_count onto 'node' and appends vertex,
+    # indice and submesh data to the global encoder buffers.
+    graph = g_encoder['scene_graph']
+    graph_lookup = g_encoder['graph_lookup']
+    mesh_cache = g_encoder['mesh_cache']
+    obj = node_def['obj']
+    armature_def = None
+    can_use_cache = True
+
+    # Check for modifiers that typically change the data per-instance
+    # there is no well defined rule for the choices here, its just what i've
+    # needed while producing the game.
+    #
+    # It may be possible to detect these cases automatically.
+    #
+    for mod in obj.modifiers:
+    #{
+       if mod.type == 'DATA_TRANSFER' or mod.type == 'SHRINKWRAP' or \
+          mod.type == 'BOOLEAN' or mod.type == 'CURVE' or \
+          mod.type == 'ARRAY':
+       #{
+          can_use_cache = False
+       #}
+
+       # Remember the armature's graph node; used below to gather
+       # per-vertex bone weights.
+       if mod.type == 'ARMATURE':
+          armature_def = graph_lookup[mod.object]
+
+    # Check the cache first
+    #
+    if can_use_cache and (obj.data.name in mesh_cache):
+    #{
+       # Same underlying mesh data already compiled: just point this
+       # node at the cached node's submesh range.
+       ref = mesh_cache[obj.data.name]
+       node.submesh_start = ref.submesh_start
+       node.submesh_count = ref.submesh_count
+       return
+    #}
+
+    # Compile a whole new mesh
+    #
+    node.submesh_start = len( g_encoder['data']['submesh'] )
+    node.submesh_count = 0
+
+    # Evaluate modifiers via the depsgraph, then triangulate and compute
+    # split (per-loop) normals.
+    dgraph = bpy.context.evaluated_depsgraph_get()
+    data = obj.evaluated_get(dgraph).data
+    data.calc_loop_triangles()
+    data.calc_normals_split()
+
+    # Mesh is split into submeshes based on their material
+    #
+    mat_list = data.materials if len(data.materials) > 0 else [None]
+    for material_id, mat in enumerate(mat_list):
+    #{
+       mref = {}
+
+       sm = mdl_submesh()
+       sm.indice_start = len( g_encoder['data']['indice'] )
+       sm.vertex_start = len( g_encoder['data']['vertex'] )
+       sm.vertex_count = 0
+       sm.indice_count = 0
+       sm.material_id = encoder_process_material( mat )
+
+       # Seed the bounding box inverted so min/max below converge on the
+       # real extents.
+       for i in range(3):
+       #{
+          sm.bbx[0][i] = 999999
+          sm.bbx[1][i] = -999999
+       #}
+
+       # Keep a reference to very very very similar vertices
+       #
+       vertex_reference = {}
+
+       # Write the vertex / indice data
+       #
+       for tri_index, tri in enumerate(data.loop_triangles):
+       #{
+          if tri.material_index != material_id:
+             continue
+
+          for j in range(3):
+          #{
+             vert = data.vertices[tri.vertices[j]]
+             li = tri.loops[j]
+             vi = data.loops[li].vertex_index
+
+             # Gather vertex information
+             #
+             co = vert.co
+             norm = data.loops[li].normal
+             uv = (0,0)
+             colour = (255,255,255,255)
+             groups = [0,0,0,0]
+             weights = [0,0,0,0]
+
+             # Uvs
+             #
+             if data.uv_layers:
+                uv = data.uv_layers.active.data[li].uv
+
+             # Vertex Colours (quantized 0..255 per channel)
+             #
+             if data.vertex_colors:
+             #{
+                colour = data.vertex_colors.active.data[li].color
+                colour = (int(colour[0]*255.0),\
+                          int(colour[1]*255.0),\
+                          int(colour[2]*255.0),\
+                          int(colour[3]*255.0))
+             #}
+
+             # Weight groups: truncates to the 3 with the most influence. The
+             # fourth bone ID is never used by the shader so it is
+             # always 0
+             #
+             if armature_def:
+             #{
+                # Only vertex groups whose name matches an armature bone
+                # are considered.
+                src_groups = [_ for _ in data.vertices[vi].groups \
+                              if obj.vertex_groups[_.group].name in \
+                              armature_def['bones']]
+
+                weight_groups = sorted( src_groups, key = \
+                                        lambda a: a.weight, reverse=True )
+                tot = 0.0
+                for ml in range(3):
+                #{
+                   if len(weight_groups) > ml:
+                   #{
+                      g = weight_groups[ml]
+                      name = obj.vertex_groups[g.group].name
+                      weight = g.weight
+
+                      weights[ml] = weight
+                      groups[ml] = armature_def['bones'].index(name)
+                      tot += weight
+                   #}
+                #}
+
+                # Renormalize over the kept groups and quantize to uint16
+                # range (0..65535).
+                if len(weight_groups) > 0:
+                #{
+                   inv_norm = (1.0/tot) * 65535.0
+                   for ml in range(3):
+                   #{
+                      weights[ml] = int( weights[ml] * inv_norm )
+                      weights[ml] = min( weights[ml], 65535 )
+                      weights[ml] = max( weights[ml], 0 )
+                   #}
+                #}
+             #}
+             else:
+             #{
+                # No armature: weights[0] is set to 1 when the triangle
+                # edge starting at this loop carries a freestyle mark
+                # (the mark is only honoured when the edge's vertices
+                # actually match this loop edge).
+                li1 = tri.loops[(j+1)%3]
+                vi1 = data.loops[li1].vertex_index
+                e0 = data.edges[ data.loops[li].edge_index ]
+
+                if e0.use_freestyle_mark and \
+                   ((e0.vertices[0] == vi and e0.vertices[1] == vi1) or \
+                    (e0.vertices[0] == vi1 and e0.vertices[1] == vi)):
+                #{
+                   weights[0] = 1
+                #}
+             #}
+
+             # Add vertex and expand bound box
+             #
+             index = encoder_vertex_push( vertex_reference, co, \
+                                          norm, \
+                                          uv, \
+                                          colour, \
+                                          groups, \
+                                          weights )
+             g_encoder['data']['indice'] += [index]
+          #}
+       #}
+
+       # How many unique verts did we add in total
+       #
+       sm.vertex_count = len(g_encoder['data']['vertex']) - sm.vertex_start
+       sm.indice_count = len(g_encoder['data']['indice']) - sm.indice_start
+
+       # Make sure bounding box isn't -inf -> inf if no vertices
+       #
+       if sm.vertex_count == 0:
+          for j in range(2):
+             for i in range(3):
+                sm.bbx[j][i] = 0
+       else:
+       #{
+          for j in range(sm.vertex_count):
+          #{
+             vert = g_encoder['data']['vertex'][ sm.vertex_start + j ]
+
+             for i in range(3):
+             #{
+                sm.bbx[0][i] = min( sm.bbx[0][i], vert.co[i] )
+                sm.bbx[1][i] = max( sm.bbx[1][i], vert.co[i] )
+             #}
+          #}
+       #}
+
+       # Add submesh to encoder
+       #
+       g_encoder['data']['submesh'] += [sm]
+       node.submesh_count += 1
+
+    #}
+
+    # Save a reference to this node since we want to reuse the submesh indices
+    # later.
+    g_encoder['mesh_cache'][obj.data.name] = node
+ #}
+
+
+ def encoder_compile_ent_as( name, node, node_def ):
+ #{
+    # Encode 'node' as entity class 'name': looks the class up in
+    # globals(), instantiates it, lets it serialize itself via
+    # encode_obj(), then appends the bytes (4-byte aligned) to the
+    # entdata buffer, recording the start offset on the node.
+    global g_encoder
+
+    if name == 'classtype_none':
+    #{
+       node.offset = 0
+       node.classtype = 0
+       return
+    #}
+    elif name not in globals():
+    #{
+       # NOTE(review): this branch returns without setting node.offset /
+       # node.classtype — presumably they keep whatever default the node
+       # struct has; confirm callers tolerate that.
+       print( "Classtype '" +name + "' is unknown!" )
+       return
+    #}
+
+    buffer = g_encoder['data']['entdata']
+    node.offset = len(buffer)
+
+    cl = globals()[ name ]
+    inst = cl()
+    inst.encode_obj( node, node_def )
+
+    # Serialize the (ctypes) instance and keep entdata 4-byte aligned
+    buffer.extend( bytearray(inst) )
+    bytearray_align_to( buffer, 4 )
+ #}
+
+# Compiles animation data into model and gives us some extra node_def entries
+#
+def encoder_compile_armature( node, node_def ):
+#{
+ global g_encoder
+
+ entdata = g_encoder['data']['entdata']
+ animdata = g_encoder['data']['anim']
+ keyframedata = g_encoder['data']['keyframe']
+ mesh_cache = g_encoder['mesh_cache']
+ obj = node_def['obj']
+ bones = node_def['bones']
+
+ # extra info
+ node_def['anim_start'] = len(animdata)
+ node_def['anim_count'] = 0
+
+ # Compile anims
+ #
+ if obj.animation_data:
+ #{
+ # So we can restore later
+ #
+ previous_frame = bpy.context.scene.frame_current
+ previous_action = obj.animation_data.action
+ POSE_OR_REST_CACHE = obj.data.pose_position
+ obj.data.pose_position = 'POSE'
+
+ for NLALayer in obj.animation_data.nla_tracks:
+ #{
+ for NLAStrip in NLALayer.strips:
+ #{
+ # set active
+ #
+ for a in bpy.data.actions:
+ #{
+ if a.name == NLAStrip.name:
+ #{
+ obj.animation_data.action = a
+ break
+ #}
+ #}