+ cache = g_encoder['texture_cache']
+ buffer = g_encoder['data']['texture']
+ pack = g_encoder['data']['pack']
+
+ name = get_texture_resource_name( img )
+
+ if name in cache:
+ return cache[name]
+
+ cache[name] = len( buffer )
+
+ tex = mdl_texture()
+ tex.pstr_name = encoder_process_pstr( name )
+
+ if g_encoder['pack_textures']:
+ #{
+ tex.pack_offset = len( pack )
+ pack.extend( qoi_encode( img ) )
+ tex.pack_length = len( pack ) - tex.pack_offset
+ #}
+ else:
+ tex.pack_offset = 0
+
+ buffer += [ tex ]
+ return cache[name]
+#}
+
def material_tex_image(v):
#{
    """Return a shader-graph mapping fragment binding an Image Texture node.

    v -- property name to store under the node's 'image' attribute; coerced
         to str (matching the original f-string behaviour for non-strings).
    """
    return {
        "Image Texture":
        {
            "image": str(v)
        }
    }
#}
+
# Template describing which shader-graph links material_info() should walk.
#
# Keys name Blender shader nodes (matched against node.name with any '.001'
# style duplicate suffix stripped); nested dicts describe input sockets to
# follow on each node, and string leaves are the property names whose values
# get collected into the material_info() result.
cxr_graph_mapping = \
{
    # Default shader setup
    "Principled BSDF":
    {
        "Base Color":
        {
            "Image Texture":
            {
                "image": "tex_diffuse"
            },
            "Mix":
            {
                "Color1": material_tex_image("tex_diffuse"),
                "Color2": material_tex_image("tex_decal")
            },
        },
        "Normal":
        {
            "Normal Map":
            {
                "Color": material_tex_image("tex_normal")
            }
        }
    }
}
+
+# https://harrygodden.com/git/?p=convexer.git;a=blob;f=__init__.py;#l1164
+#
def material_info(mat):
#{
    """Gather exportable properties from *mat*'s shader node graph.

    Walks mat.node_tree using cxr_graph_mapping as a template and returns a
    dict of {property_name: value} for every leaf the graph matches.
    """
    info = {}

    # Using the cv_graph_mapping as a reference, go through the shader
    # graph and gather all $props from it.
    #
    def _graph_read( node_def, node=None, depth=0 ):
    #{
        nonlocal mat
        nonlocal info

        # Find rootnodes
        #
        if node == None:
        #{
            _graph_read.extracted = []

            # Locate the material node whose name matches a root key of the
            # mapping, then narrow node_def down to that entry.
            # NOTE(review): node_def is reassigned inside this loop while it
            # is being iterated; with more than one root key the later
            # iterations would index the already-narrowed dict. Presumably
            # relies on the mapping having a single root — TODO confirm.
            for node_idname in node_def:
            #{
                for n in mat.node_tree.nodes:
                #{
                    if n.name == node_idname:
                    #{
                        node_def = node_def[node_idname]
                        node = n
                        break
                    #}
                #}
            #}
        #}

        for link in node_def:
        #{
            link_def = node_def[link]

            if isinstance( link_def, dict ):
            #{
                # A dict entry names an input socket to follow
                node_link = node.inputs[link]

                if node_link.is_linked:
                #{
                    # look for definitions for the connected node type
                    #
                    from_node = node_link.links[0].from_node

                    # Strip Blender's '.001' duplicate suffix before matching
                    node_name = from_node.name.split('.')[0]
                    if node_name in link_def:
                    #{
                        from_node_def = link_def[ node_name ]

                        _graph_read( from_node_def, from_node, depth+1 )
                    #}

                    # No definition! :(
                    # TODO: Make a warning for this?
                #}
                else:
                #{
                    # Unlinked socket: record its default value when the
                    # mapping requests it under the 'default' key
                    if "default" in link_def:
                    #{
                        prop = link_def['default']
                        info[prop] = node_link.default_value
                    #}
                #}
            #}
            else:
            #{
                # String leaf: read the named attribute straight off the node
                prop = link_def
                info[prop] = getattr( node, link )
            #}
        #}
    #}

    _graph_read( cxr_graph_mapping )
    return info
#}
+
+# Add a material to the material buffer. Returns 0 (None ID) if invalid
+#
def encoder_process_material( mat ):
#{
    """Encode *mat* into the model's material buffer and return its index.

    Results are cached by material name, so encoding the same material twice
    returns the same index. A null material maps to the reserved ID 0.
    """
    global g_encoder

    if mat is None:
        return 0

    cache = g_encoder['material_cache']
    buffer = g_encoder['data']['material']

    if mat.name in cache:
        return cache[mat.name]

    # Reserve the slot up-front so the returned index is stable
    cache[mat.name] = len( buffer )

    dest = mdl_material()
    dest.pstr_name = encoder_process_pstr( mat.name )

    # Fold boolean surface attributes into a bitmask:
    #   0x1 skate_surface, 0x2 collision,
    #   0x8 grind_surface (implies 0x1), 0x4 grow_grass
    flags = 0x00
    if mat.cv_data.collision:     flags |= 0x2
    if mat.cv_data.skate_surface: flags |= 0x1
    if mat.cv_data.grind_surface: flags |= (0x8|0x1)
    if mat.cv_data.grow_grass:    flags |= 0x4
    dest.flags = flags

    # Enum string -> numeric ID tables. Unknown values leave the field at
    # its ctypes default, exactly like the original if-chains did.
    SURFACE_PROPS = { 'concrete': 0, 'wood': 1, 'grass': 2 }
    SHADER_IDS = { 'standard': 0, 'standard_cutout': 1, 'terrain_blend': 2,
                   'vertex_blend': 3, 'water': 4 }

    shader = mat.cv_data.shader

    if mat.cv_data.surface_prop in SURFACE_PROPS:
        dest.surface_prop = SURFACE_PROPS[ mat.cv_data.surface_prop ]
    if shader in SHADER_IDS:
        dest.shader = SHADER_IDS[ shader ]

    # Gamma-encode one colour channel (approximate linear -> sRGB)
    def _gamma( ch ):
        return pow( ch, 1.0/2.2 )

    if shader == 'terrain_blend':
    #{
        dest.colour[0] = _gamma( mat.cv_data.sand_colour[0] )
        dest.colour[1] = _gamma( mat.cv_data.sand_colour[1] )
        dest.colour[2] = _gamma( mat.cv_data.sand_colour[2] )
        dest.colour[3] = 1.0

        dest.colour1[0] = mat.cv_data.blend_offset[0]
        dest.colour1[1] = mat.cv_data.blend_offset[1]
    #}

    if shader == 'vertex_blend':
    #{
        dest.colour1[0] = mat.cv_data.blend_offset[0]
        dest.colour1[1] = mat.cv_data.blend_offset[1]
    #}

    if shader == 'water':
    #{
        dest.colour[0] = _gamma( mat.cv_data.shore_colour[0] )
        dest.colour[1] = _gamma( mat.cv_data.shore_colour[1] )
        dest.colour[2] = _gamma( mat.cv_data.shore_colour[2] )
        dest.colour[3] = 1.0
        dest.colour1[0] = _gamma( mat.cv_data.ocean_colour[0] )
        dest.colour1[1] = _gamma( mat.cv_data.ocean_colour[1] )
        dest.colour1[2] = _gamma( mat.cv_data.ocean_colour[2] )
        dest.colour1[3] = 1.0
    #}

    inf = material_info( mat )

    # All opaque-ish shader variants take the diffuse texture if present
    if shader in ('standard', 'standard_cutout',
                  'terrain_blend', 'vertex_blend'):
    #{
        if 'tex_diffuse' in inf:
            dest.tex_diffuse = encoder_process_texture(inf['tex_diffuse'])
    #}

    buffer += [dest]
    return cache[mat.name]
#}
+
+# Create a tree structure containing all the objects in the collection
+#
def encoder_build_scene_graph( collection ):
#{
    """Build g_encoder's scene graph from *collection*'s object hierarchy.

    Populates g_encoder['scene_graph'] with nested {obj, uid, depth, parent,
    children} dicts and g_encoder['graph_lookup'] (object -> tree node),
    assigning every object and bone a unique uid from the encoder counter.
    """
    global g_encoder

    print( " creating scene graph" )

    # initialize root
    #
    graph = g_encoder['scene_graph']
    graph_lookup = g_encoder['graph_lookup']
    graph["obj"] = None
    graph["depth"] = 0
    graph["children"] = []
    graph["uid"] = 0
    graph["parent"] = None

    # Hand out sequential uids from the encoder-wide counter
    def _new_uid():
    #{
        global g_encoder
        uid = g_encoder['uid_count']
        g_encoder['uid_count'] += 1
        return uid
    #}

    for obj in collection.all_objects:
    #{
        # Only start from root objects; children are reached via recursion
        if obj.parent: continue

        # Attach object n at depth d under parent tree node p
        def _extend( p, n, d ):
        #{
            uid = _new_uid()
            tree = {}
            tree["uid"] = uid
            tree["children"] = []
            tree["depth"] = d
            tree["obj"] = n
            tree["parent"] = p
            # The uid is also written back onto the object's custom data
            n.cv_data.uid = uid

            # Descend into amature
            #
            if n.type == 'ARMATURE':
            #{
                tree["bones"] = [None] # None is the root transform
                tree["ik_count"] = 0
                tree["collider_count"] = 0

                # Here also collects some information about constraints, ik and
                # counts colliders for the armature.
                #
                def _extendb( p, n, d ):
                #{
                    nonlocal tree

                    btree = {}
                    btree["bone"] = n
                    btree["linked_armature"] = tree
                    btree["uid"] = _new_uid()
                    btree["children"] = []
                    btree["depth"] = d
                    btree["parent"] = p
                    tree["bones"] += [n.name]

                    for c in n.children:
                    #{
                        _extendb( btree, c, d+1 )
                    #}

                    # Record IK targets from this bone's pose constraints.
                    # NOTE(review): a second IK constraint on the same bone
                    # would overwrite ik_target/ik_pole while still bumping
                    # ik_count — assumes at most one per bone; TODO confirm.
                    for c in tree['obj'].pose.bones[n.name].constraints:
                    #{
                        if c.type == 'IK':
                        #{
                            btree["ik_target"] = c.subtarget
                            btree["ik_pole"] = c.pole_subtarget
                            tree["ik_count"] += 1
                        #}
                    #}

                    if n.cv_data.collider:
                        tree['collider_count'] += 1

                    btree['deform'] = n.use_deform
                    p['children'] += [btree]
                #}

                for b in n.data.bones:
                    if not b.parent:
                        _extendb( tree, b, d+1 )
            #}

            # Recurse into children of this object
            #
            for obj1 in n.children:
            #{
                nonlocal collection
                # Only descend into children that belong to this collection
                for c1 in obj1.users_collection:
                #{
                    if c1 == collection:
                    #{
                        _extend( tree, obj1, d+1 )
                        break
                    #}
                #}
            #}

            p["children"] += [tree]
            graph_lookup[n] = tree

        #}

        _extend( graph, obj, 1 )

    #}
#}
+
+
+# Kind of a useless thing i made but it looks cool and adds complexity!!1
+#
def encoder_graph_iterator( root ):
#{
    """Yield every descendant of *root*, depth-first, parents before children."""
    for child in root['children']:
    #{
        yield child
        for descendant in encoder_graph_iterator( child ):
            yield descendant
    #}
#}
+
+
+# Push a vertex into the model file, or return a cached index (c_uint32)
+#
def encoder_vertex_push( vertex_reference, co,norm,uv,colour,groups,weights ):
#{
    """Deduplicate and append a vertex; return its index as a c_uint32.

    vertex_reference -- dict mapping quantized vertex keys to previously
                        assigned indices (one per submesh).
    co, norm, uv     -- position, normal and texture coordinate floats.
    colour, groups, weights -- per-vertex attributes, already quantized.
    """
    global g_encoder
    buffer = g_encoder['data']['vertex']

    # Floats are quantized to 4 decimal places so near-identical vertices
    # collapse onto one key.
    TOLERENCE = 4
    m = float(10**TOLERENCE)

    # Would be nice to know if this can be done faster than it currently runs,
    # its quite slow.
    #
    # NOTE(review): int(x*m+0.5) truncates toward zero, so negative values
    # round asymmetrically vs positive ones; only affects dedup granularity,
    # not output correctness.
    key = (int(co[0]*m+0.5),
           int(co[1]*m+0.5),
           int(co[2]*m+0.5),
           int(norm[0]*m+0.5),
           int(norm[1]*m+0.5),
           int(norm[2]*m+0.5),
           int(uv[0]*m+0.5),
           int(uv[1]*m+0.5),
           colour[0], # these guys are already quantized
           colour[1], # .
           colour[2], # .
           colour[3], # .
           weights[0], # v
           weights[1],
           weights[2],
           weights[3],
           groups[0],
           groups[1],
           groups[2],
           groups[3])

    if key in vertex_reference:
        return vertex_reference[key]
    else:
    #{
        index = c_uint32( len(vertex_reference) )
        vertex_reference[key] = index

        v = mdl_vert()
        # Axis conversion on position and normal: (x,y,z) -> (x,z,-y),
        # presumably Blender Z-up to the engine's Y-up — TODO confirm.
        v.co[0] = co[0]
        v.co[1] = co[2]
        v.co[2] = -co[1]
        v.norm[0] = norm[0]
        v.norm[1] = norm[2]
        v.norm[2] = -norm[1]
        v.uv[0] = uv[0]
        v.uv[1] = uv[1]
        v.colour[0] = colour[0]
        v.colour[1] = colour[1]
        v.colour[2] = colour[2]
        v.colour[3] = colour[3]
        v.weights[0] = weights[0]
        v.weights[1] = weights[1]
        v.weights[2] = weights[2]
        v.weights[3] = weights[3]
        v.groups[0] = groups[0]
        v.groups[1] = groups[1]
        v.groups[2] = groups[2]
        v.groups[3] = groups[3]

        buffer += [v]
        return index
    #}
#}
+
+
+# Compile a mesh (or use one from the cache) onto node, based on node_def
+# No return value
+#
+def encoder_compile_mesh( node, node_def ):
+#{
+ global g_encoder
+
+ graph = g_encoder['scene_graph']
+ graph_lookup = g_encoder['graph_lookup']
+ mesh_cache = g_encoder['mesh_cache']
+ obj = node_def['obj']
+ armature_def = None
+ can_use_cache = True
+
+ # Check for modifiers that typically change the data per-instance
+ # there is no well defined rule for the choices here, its just what i've
+ # needed while producing the game.
+ #
+ # It may be possible to detect these cases automatically.
+ #
+ for mod in obj.modifiers:
+ #{
+ if mod.type == 'DATA_TRANSFER' or mod.type == 'SHRINKWRAP' or \
+ mod.type == 'BOOLEAN' or mod.type == 'CURVE' or \
+ mod.type == 'ARRAY':
+ #{
+ can_use_cache = False
+ #}
+
+ if mod.type == 'ARMATURE':
+ armature_def = graph_lookup[mod.object]
+
+ # Check the cache first
+ #
+ if can_use_cache and (obj.data.name in mesh_cache):
+ #{
+ ref = mesh_cache[obj.data.name]
+ node.submesh_start = ref.submesh_start
+ node.submesh_count = ref.submesh_count
+ return
+ #}
+
+ # Compile a whole new mesh
+ #
+ node.submesh_start = len( g_encoder['data']['submesh'] )
+ node.submesh_count = 0
+
+ dgraph = bpy.context.evaluated_depsgraph_get()
+ data = obj.evaluated_get(dgraph).data
+ data.calc_loop_triangles()
+ data.calc_normals_split()
+
+ # Mesh is split into submeshes based on their material
+ #
+ mat_list = data.materials if len(data.materials) > 0 else [None]
+ for material_id, mat in enumerate(mat_list):
+ #{
+ mref = {}
+
+ sm = mdl_submesh()
+ sm.indice_start = len( g_encoder['data']['indice'] )
+ sm.vertex_start = len( g_encoder['data']['vertex'] )
+ sm.vertex_count = 0
+ sm.indice_count = 0
+ sm.material_id = encoder_process_material( mat )
+
+ for i in range(3):
+ #{
+ sm.bbx[0][i] = 999999
+ sm.bbx[1][i] = -999999
+ #}
+
+ # Keep a reference to very very very similar vertices
+ #
+ vertex_reference = {}
+
+ # Write the vertex / indice data
+ #
+ for tri_index, tri in enumerate(data.loop_triangles):
+ #{
+ if tri.material_index != material_id:
+ continue
+
+ for j in range(3):
+ #{
+ vert = data.vertices[tri.vertices[j]]
+ li = tri.loops[j]
+ vi = data.loops[li].vertex_index