model fmt & heisenbug
[carveJwlIkooP6JGAAIwe30JlM.git] / blender_export.py
index 2ed183aa075b7708b8f3b8b021700ea1e7e49ea5..a7e714c2fae4a76661b19f7a9fcfa0e1f9412be1 100644 (file)
@@ -9,9 +9,9 @@ from mathutils import *
 from gpu_extras.batch import batch_for_shader
 
 bl_info = {
-   "name":"Carve exporter",
+   "name":"Skate Rift model compiler",
    "author": "Harry Godden (hgn)",
-   "version": (0,1),
+   "version": (0,2),
    "blender":(3,1,0),
    "location":"Export",
    "descriptin":"",
@@ -20,16 +20,19 @@ bl_info = {
    "category":"Import/Export",
 }
 
-class mdl_vert(Structure):
-   _pack_ = 1
+class mdl_vert(Structure):              # 48 bytes. Quite large. Could compress
+#{                                      # the normals and uvs to i16s. Not a
+   _pack_ = 1                           # real issue, yet.
    _fields_ = [("co",c_float*3),
                ("norm",c_float*3),
                ("uv",c_float*2),
                ("colour",c_uint8*4),
                ("weights",c_uint16*4),
                ("groups",c_uint8*4)]
+#}
 
 class mdl_submesh(Structure):
+#{
    _pack_ = 1
    _fields_ = [("indice_start",c_uint32),
                ("indice_count",c_uint32),
@@ -37,12 +40,16 @@ class mdl_submesh(Structure):
                ("vertex_count",c_uint32),
                ("bbx",(c_float*3)*2),
                ("material_id",c_uint32)]        # index into the material array
+#}
 
 class mdl_material(Structure):
+#{
    _pack_ = 1
    _fields_ = [("pstr_name",c_uint32)]
+#}
 
 class mdl_node(Structure):
+#{
    _pack_ = 1
    _fields_ = [("co",c_float*3),
                ( "q",c_float*4),
@@ -54,19 +61,18 @@ class mdl_node(Structure):
                ("offset",c_uint32),
                ("parent",c_uint32),
                ("pstr_name",c_uint32)]
+#}
 
 class mdl_header(Structure):
+#{
    _pack_ = 1
    _fields_ = [("identifier",c_uint32),
                ("version",c_uint32),
                ("file_length",c_uint32),
                ("pad0",c_uint32),
 
-               ("vertex_count",c_uint32),
-               ("vertex_offset",c_uint32),
-
-               ("indice_count",c_uint32),
-               ("indice_offset",c_uint32),
+               ("node_count",c_uint32),
+               ("node_offset",c_uint32),
 
                ("submesh_count",c_uint32),
                ("submesh_offset",c_uint32),
@@ -74,84 +80,161 @@ class mdl_header(Structure):
                ("material_count",c_uint32),
                ("material_offset",c_uint32),
 
-               ("node_count",c_uint32),
-               ("node_offset",c_uint32),
-
                ("anim_count",c_uint32),
                ("anim_offset",c_uint32),
-               
-               ("strings_length",c_uint32),
-               ("strings_offset",c_uint32),
 
-               ("entdata_length",c_uint32),
+               ("entdata_size",c_uint32),
                ("entdata_offset",c_uint32),
+               
+               ("strings_size",c_uint32),
+               ("strings_offset",c_uint32),
 
                ("keyframe_count",c_uint32),
-               ("keyframe_offset",c_uint32)]
-               
+               ("keyframe_offset",c_uint32),
+
+               ("vertex_count",c_uint32),
+               ("vertex_offset",c_uint32),
+
+               ("indice_count",c_uint32),
+               ("indice_offset",c_uint32),]
+#}
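+
+# Editor's note: illustrative sketch only (not referenced by the exporter) of
+# how a reader could use the (offset, count) pairs above to slice a chunk
+# straight out of the file bytes; 'blob' is assumed to hold an entire .mdl file.
+#
+def _example_read_nodes( blob ):
+#{
+   h = mdl_header.from_buffer_copy( blob )
+   return (mdl_node * h.node_count).from_buffer_copy( blob, h.node_offset )
+#}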
 
 class mdl_animation(Structure):
+#{
    _pack_ = 1
    _fields_ = [("pstr_name",c_uint32),
                ("length",c_uint32),
                ("rate",c_float),
                ("offset",c_uint32)]
+#}
 
 class mdl_keyframe(Structure):
+#{
    _pack_ = 1
    _fields_ = [("co",c_float*3),
                ("q",c_float*4),
                ("s",c_float*3)]
+#}
 
 # Entity types
 # ==========================================
+#
+# ctypes _fields_ defines the data which is filled in by:
+#  def encode_obj( _, node, node_def ):
+#
+# gizmos get drawn into the viewport via:
+#  @staticmethod
+#  def editor_interface( object ):
+#
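+# An illustrative sketch (editor's note, not used by the compiler) of the shape
+# each classtype below follows:
+#
+#   class classtype_example(Structure):
+#   #{
+#      _pack_ = 1
+#      _fields_ = [("some_value",c_uint32)]
+#
+#      def encode_obj(_, node, node_def):
+#      #{
+#         node.classtype = 99    # hypothetical ID
+#         _.some_value = node_def['obj'].cv_data.intp
+#      #}
+#   #}
+#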
 
 class classtype_gate(Structure):
+#{
    _pack_ = 1
    _fields_ = [("target",c_uint32),
                ("dims",c_float*3)]
 
-class classtype_block(Structure):
-   _pack_ = 1
-   _fields_ = [("bbx",(c_float*3)*2)]
+   def encode_obj(_, node,node_def):
+   #{
+      node.classtype = 1
+
+      obj = node_def['obj']
+
+      if obj.cv_data.target != None:
+         _.target = obj.cv_data.target.cv_data.uid
+
+      if obj.type == 'MESH':
+      #{
+         _.dims[0] = obj.data.cv_data.v0[0]
+         _.dims[1] = obj.data.cv_data.v0[1]
+         _.dims[2] = obj.data.cv_data.v0[2]
+      #}
+      else:
+      #{
+         _.dims[0] = obj.cv_data.v0[0]
+         _.dims[1] = obj.cv_data.v0[1]
+         _.dims[2] = obj.cv_data.v0[2]
+      #}
+   #}
+#}
 
 class classtype_spawn(Structure):
+#{
    _pack_ = 1
-   _fields_ = [("temp",c_uint32)]
+   _fields_ = [("pstr_alias",c_uint32)]
+
+   def encode_obj(_, node,node_def):
+   #{
+      node.classtype = 3
+      _.pstr_alias = encoder_process_pstr( node_def['obj'].cv_data.strp )
+   #}
+#}
 
 class classtype_water(Structure):
+#{
    _pack_ = 1
    _fields_ = [("temp",c_uint32)]
 
-class classtype_car_path(Structure):
-   _pack_ = 1
-   _fields_ = [("target",c_uint32),
-               ("target1",c_uint32)]
-
-class classtype_instance(Structure):
-   _pack_ = 1
-   _fields_ = [("pstr_file",c_uint32)]
-
-class classtype_capsule(Structure):
-   _pack_ = 1
-   _fields_ = [("height",c_float),
-               ("radius",c_float)]
+   def encode_obj(_, node,node_def):
+   #{
+      node.classtype = 4
+      # no data, spooky
+   #}
+#}
 
 class classtype_route_node(Structure):
+#{
    _pack_ = 1
    _fields_ = [("target",c_uint32),
                ("target1",c_uint32)]
 
+   def encode_obj(_, node,node_def):
+   #{
+      node.classtype = 8
+      obj = node_def['obj']
+
+      if obj.cv_data.target != None:
+         _.target = obj.cv_data.target.cv_data.uid
+      if obj.cv_data.target1 != None: 
+         _.target1 = obj.cv_data.target1.cv_data.uid
+   #}
+#}
+
 class classtype_route(Structure):
+#{
    _pack_ = 1
    _fields_ = [("id_start",c_uint32),
                ("colour",c_float*3)]
 
+   def encode_obj(_, node,node_def):
+   #{
+      node.classtype = 9
+      obj = node_def['obj']
+
+      _.colour[0] = obj.cv_data.colour[0]
+      _.colour[1] = obj.cv_data.colour[1]
+      _.colour[2] = obj.cv_data.colour[2]
+
+      if obj.cv_data.target != None: 
+         _.id_start = obj.cv_data.target.cv_data.uid
+   #}
+#}
+
 class classtype_skin(Structure):
+#{
    _pack_ = 1
    _fields_ = [("skeleton",c_uint32)]
 
+   def encode_obj(_, node,node_def):
+   #{
+      node.classtype = 12
+      
+      armature_def = node_def['linked_armature']
+      _.skeleton = armature_def['obj'].cv_data.uid
+   #}
+#}
+
 class classtype_skeleton(Structure):
+#{
    _pack_ = 1
    _fields_ = [("channels",c_uint32),
                ("ik_count",c_uint32),
@@ -159,7 +242,20 @@ class classtype_skeleton(Structure):
                ("anim_start",c_uint32),
                ("anim_count",c_uint32)]
 
+   def encode_obj(_, node,node_def):
+   #{
+      node.classtype = 11
+      
+      _.channels        = len( node_def['bones'] )
+      _.ik_count        = node_def['ik_count']
+      _.collider_count  = node_def['collider_count']
+      _.anim_start      = node_def['anim_start']
+      _.anim_count      = node_def['anim_count']
+   #}
+#}
+
 class classtype_bone(Structure):
+#{
    _pack_ = 1
    _fields_ = [("deform",c_uint32),
                ("ik_target",c_uint32),
@@ -169,87 +265,153 @@ class classtype_bone(Structure):
                ("angle_limits",(c_float*3)*2),
                ("hitbox",(c_float*3)*2)]
 
+   def encode_obj(_, node,node_def):
+   #{
+      node.classtype = 10
+
+      armature_def = node_def['linked_armature']
+      obj = node_def['bone']
+      
+      _.deform = node_def['deform']
+      
+      if 'ik_target' in node_def:
+      #{
+         _.ik_target = armature_def['bones'].index( node_def['ik_target'] )
+         _.ik_pole   = armature_def['bones'].index( node_def['ik_pole'] )
+      #}
+      
+      # For ragdolls
+      #
+      if obj.cv_data.collider:
+      #{
+         _.collider = 1
+         _.hitbox[0][0] =  obj.cv_data.v0[0]
+         _.hitbox[0][1] =  obj.cv_data.v0[2]
+         _.hitbox[0][2] = -obj.cv_data.v1[1]
+         _.hitbox[1][0] =  obj.cv_data.v1[0]
+         _.hitbox[1][1] =  obj.cv_data.v1[2]
+         _.hitbox[1][2] = -obj.cv_data.v0[1]
+      #}
+
+      if obj.cv_data.con0:
+      #{
+         _.use_limits = 1 
+         _.angle_limits[0][0] =  obj.cv_data.mins[0]
+         _.angle_limits[0][1] =  obj.cv_data.mins[2]
+         _.angle_limits[0][2] = -obj.cv_data.maxs[1]
+         _.angle_limits[1][0] =  obj.cv_data.maxs[0]
+         _.angle_limits[1][1] =  obj.cv_data.maxs[2]
+         _.angle_limits[1][2] = -obj.cv_data.mins[1]
+      #}
+   #}
+#}
+
+
+# TO BE REPLACED
+#
 class classtype_achievement_box(Structure):
+#{
    _pack_ = 1
    _fields_ = [("pstr_name",c_uint32),
                ("trigger",c_uint32)]
 
+   def encode_obj(_, node,node_def ):
+   #{
+      node.classtype = 0
+   #}
+#}
+
 class classtype_audio(Structure):
+#{
    _pack_ = 1
    _fields_ = [("pstr_file",c_uint32),
                ("flags",c_uint32),
                ("volume",c_float)]
 
-# Exporter
-# ==============================================================================
+   def encode_obj(_, node,node_def ):
+   #{
+      node.classtype = 14
 
-def write_model(collection_name):
-   print( F"Model graph | Create mode '{collection_name}'" )
+      obj = node_def['obj']
 
-   header = mdl_header()
-   header.identifier = 0xABCD0000
-   header.version = 0
-   header.vertex_count = 0
-   header.indice_count = 0
-   header.submesh_count = 0
-   header.node_count = 0
-   header.material_count = 0
-   header.file_length = 0
-
-   header.strings_length = 0
-   header.entdata_length = 0
-   header.keyframe_count = 0
+      _.pstr_file = encoder_process_pstr( obj.cv_data.strp )
+      _.flags = obj.cv_data.intp
+      _.volume = obj.cv_data.fltp
+   #}
    
-   mesh_cache = {}
-   string_cache = {}
-   material_cache = {}
+   @staticmethod
+   def editor_interface(yada):
+   #{
+      pass
+   #}
 
-   strings_buffer = b''
-   
-   material_buffer = []
-   submesh_buffer = []
-   vertex_buffer = []
-   indice_buffer = []
-   node_buffer = []
-   entdata_buffer = []
+   @staticmethod
+   def draw_scene_helpers(yada):
+   #{
+      pass
+   #}
+#}
 
-   anim_buffer = []
-   animdata_buffer = []
 
-   def emplace_string( s ):
-      nonlocal string_cache, strings_buffer
-
-      if s in string_cache:
-         return string_cache[s]
-      
-      string_cache[s] = len( strings_buffer )
-      strings_buffer += (s+'\0').encode('utf-8')
-      return string_cache[s]
+# Current encoder state
+#
+g_encoder = None
 
-   def emplace_material( mat ):
-      nonlocal material_cache, material_buffer
 
-      if mat == None:
-         return 0
+# Reset encoder
+#
+def encoder_init():
+#{
+   global g_encoder
 
-      if mat.name in material_cache:
-         return material_cache[mat.name]
+   g_encoder = \
+   {
+      # The actual file header
+      #
+      'header': mdl_header(),
 
-      material_cache[mat.name] = header.material_count
-      dest = mdl_material()
-      dest.pstr_name = emplace_string( mat.name )
-      material_buffer += [dest]
+      # Compiled data chunks (each can be read optionally by the client)
+      #
+      'data':
+      {
+         #1---------------------------------
+         'node': [],      # Metadata 'chunk'
+         'submesh': [],
+         'material': [],
+         'anim': [],
+         'entdata': bytearray(), # variable width
+         'strings': bytearray(), # .
+         #2---------------------------------
+         'keyframe': [],  # Animations
+         #3---------------------------------
+         'vertex': [],    # Mesh data
+         'indice': [],
+      },
+
+      # All objects of the model in their final hierarchy
+      #
+      "uid_count": 1,
+      "scene_graph":{},
+      "graph_lookup":{},
+      
+      # Allows us to reuse definitions
+      #
+      'string_cache':{},
+      'mesh_cache': {},
+      'material_cache': {},
+   }
 
-      header.material_count += 1
-      return material_cache[mat.name]
+   g_encoder['header'].identifier = 0xABCD0000
+   g_encoder['header'].version = 1
 
-   # Create root or empty node and materials
-   # this is to designate id 0 as 'NULL'
+   # Add fake NoneID material
    #
-   none_material = c_uint32(69)
+   none_material = c_uint32(1234)
    none_material.name = ""
-   emplace_material( none_material )
+   encoder_process_material( none_material )
 
+   # Add root node
+   #
    root = mdl_node()
    root.co[0] = 0
    root.co[1] = 0
@@ -261,744 +423,862 @@ def write_model(collection_name):
    root.s[0] = 1
    root.s[1] = 1
    root.s[2] = 1
-   root.pstr_name = emplace_string('')
+   root.pstr_name = encoder_process_pstr('')
    root.submesh_start = 0
    root.submesh_count = 0
    root.offset = 0
    root.classtype = 0
-   node_buffer += [root]
-
-   # Do exporting
-   #
-   print( "  assigning ids" )
-   collection = bpy.data.collections[collection_name]
-
-   # Scene graph
-   # ==========================================
-   
-   header.node_count = 0
-   def _uid():
-      nonlocal header
-      uid = header.node_count
-      header.node_count += 1
-      return uid
-
-   print( "  creating scene graph" )
-   graph = {}
-   graph["obj"] = None
-   graph["depth"] = 0
-   graph["children"] = []
-   graph["uid"] = _uid()
-   graph["parent"] = None
-
-   graph_lookup = {} # object can lookup its graph def here
-
-   for obj in collection.all_objects:
-      if not obj.parent:
-
-         def _extend( p, n, d ):
-            uid = _uid()
-            tree = {}
-            tree["uid"] = uid
-            tree["children"] = []
-            tree["depth"] = d
-            tree["obj"] = n
-            tree["parent"] = p
-            n.cv_data.uid = uid
-
-            if n.type == 'ARMATURE':
-               tree["bones"] = [None] # None is the root transform
-               tree["ik_count"] = 0
-               tree["collider_count"] = 0
-
-               def _extendb( p, n, d ):
-                  nonlocal tree
-
-                  btree = {}
-                  btree["bone"] = n
-                  btree["uid"] = _uid()
-                  btree["children"] = []
-                  btree["depth"] = d
-                  btree["parent"] = p
-                  tree["bones"] += [n.name]
-
-                  for c in n.children:
-                     _extendb( btree, c, d+1 )
-
-                  for c in tree['obj'].pose.bones[n.name].constraints:
-                     if c.type == 'IK':
-                        btree["target"] = c.subtarget
-                        btree["pole"] = c.pole_subtarget
-                        tree["ik_count"] += 1
-
-                  if n.cv_data.collider:
-                     tree['collider_count'] += 1
-
-                  btree['deform'] = n.use_deform
-                  p['children'] += [btree]
-
-               for b in n.data.bones:
-                  if not b.parent:
-                     _extendb( tree, b, d+1 )
-
-            for obj1 in n.children:
-               nonlocal collection
-               for c1 in obj1.users_collection:
-                  if c1 == collection:
-                     _extend( tree, obj1, d+1 )
-                     break
-
-            p["children"] += [tree]
-            graph_lookup[n] = tree
-
-         _extend( graph, obj, 1 )
-
-
-   def _graph_iter(p):
-      for c in p['children']:
-         yield c
-         yield from _graph_iter(c)
-
-   it = _graph_iter(graph)
-
    root.parent = 0xffffffff
 
-   # Compile
-   # ==============================================
-   it = _graph_iter(graph)
-   print( "  compiling data" )
-   for node_def in it:
-      if 'obj' in node_def:
-         obj = node_def['obj']
-         objt = obj.type
-         objco = obj.location
-      elif 'bone' in node_def:
-         obj = node_def['bone']
-         objt = 'BONE'
-         objco = obj.head_local
-
-      depth = node_def['depth']
-      uid = node_def['uid']
-
-      node = mdl_node()
-      node.co[0] =  objco[0]
-      node.co[1] =  objco[2]
-      node.co[2] = -objco[1]
-      
-      # Convert rotation quat to our space type
-      quat = obj.matrix_local.to_quaternion()
-      node.q[0] =  quat[1]
-      node.q[1] =  quat[3]
-      node.q[2] = -quat[2]
-      node.q[3] =  quat[0]
-      
-      if objt == 'BONE':
-         node.s[0] =  obj.tail_local[0] - node.co[0]
-         node.s[1] =  obj.tail_local[2] - node.co[1]
-         node.s[2] = -obj.tail_local[1] - node.co[2]
-      else:
-         node.s[0] = obj.scale[0]
-         node.s[1] = obj.scale[2]
-         node.s[2] = obj.scale[1]
+   g_encoder['data']['node'] += [root]
+#}
 
-      node.pstr_name = emplace_string( obj.name )
 
-      if node_def["parent"]:
-         node.parent = node_def["parent"]["uid"]
+# fill with 0x00 until a multiple of align. Returns how many bytes it added
+#
+def bytearray_align_to( buffer, align, offset=0 ):
+#{
+   count = 0
 
-      if objt == 'BONE':
-         classtype = 'k_classtype_bone'
-      elif objt == 'ARMATURE':
-         classtype = 'k_classtype_skeleton'
-      else:
-         classtype = obj.cv_data.classtype
-      
-      # Process type: MESH
-      # =================================================================
-      #
+   while ((len(buffer)+offset) % align) != 0:
+   #{
+      buffer.extend( b'\0' )
+      count += 1
+   #}
 
-      # Dont use the cache if we have modifiers that affect the normals
-      #
-      compile_mesh = False
-      if objt == 'MESH':
-         armature_def = None
-         compile_mesh = True
-         can_use_cache = True
+   return count
+#}
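+
+# e.g. (editor's note) aligning a 3-byte buffer to 4 appends a single 0x00:
+#
+#   buf = bytearray(b'abc')
+#   bytearray_align_to( buf, 4 )   # returns 1; len(buf) is now 4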
 
-         for mod in obj.modifiers:
-            if mod.type == 'DATA_TRANSFER' or mod.type == 'SHRINKWRAP' or \
-               mod.type == 'BOOLEAN' or mod.type == 'CURVE' or \
-               mod.type == 'ARRAY':
-               can_use_cache = False
+# Add a string to the string buffer, unless it already exists there, in which
+# case we just return its ID.
+#
+def encoder_process_pstr( s ):
+#{
+   global g_encoder
 
-            if mod.type == 'ARMATURE':
-               classtype = 'k_classtype_skin'
-               armature_def = graph_lookup[mod.object]
-               POSE_OR_REST_CACHE = armature_def['obj'].data.pose_position
+   cache = g_encoder['string_cache']
 
-               armature_def['obj'].data.pose_position = 'REST'
+   if s in cache:
+      return cache[s]
+   
+   cache[s] = len( g_encoder['data']['strings'] )
 
-         if can_use_cache and obj.data.name in mesh_cache:
-            ref = mesh_cache[obj.data.name]
-            node.submesh_start = ref.submesh_start
-            node.submesh_count = ref.submesh_count
-            compile_mesh = False
+   buffer = g_encoder['data']['strings']
+   buffer.extend( s.encode('utf-8') )
+   buffer.extend( b'\0' )
+   
+   bytearray_align_to( buffer, 4 )
+   return cache[s]
+#}
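+
+# e.g. (editor's note) repeated strings resolve to the same byte offset:
+#
+#   a = encoder_process_pstr( "skater" )  # appended, NUL terminated, 4-aligned
+#   b = encoder_process_pstr( "skater" )  # cache hit, b == a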
 
-      if compile_mesh:
-         node.submesh_start = header.submesh_count
-         node.submesh_count = 0
+# Add a material to the material buffer. Returns 0 (None ID) if invalid
+#
+def encoder_process_material( mat ):
+#{
+   global g_encoder
 
-         default_mat = c_uint32(69)
-         default_mat.name = ""
+   if mat == None:
+      return 0
 
-         dgraph = bpy.context.evaluated_depsgraph_get()
-         data = obj.evaluated_get(dgraph).data
-         data.calc_loop_triangles()
-         data.calc_normals_split()
+   cache = g_encoder['material_cache']
+   buffer = g_encoder['data']['material']
 
-         mat_list = data.materials if len(data.materials) > 0 else [default_mat]
-         for material_id, mat in enumerate(mat_list):
-            mref = {}
+   if mat.name in cache:
+      return cache[mat.name]
 
-            sm = mdl_submesh()
-            sm.indice_start = header.indice_count
-            sm.vertex_start = header.vertex_count
-            sm.vertex_count = 0
-            sm.indice_count = 0
-            sm.material_id = emplace_material( mat )
+   cache[mat.name] = len( buffer )
 
-            for i in range(3):
-               sm.bbx[0][i] =  999999
-               sm.bbx[1][i] = -999999
+   dest = mdl_material()
+   dest.pstr_name = encoder_process_pstr( mat.name )
+   buffer += [dest]
 
-            boffa = {}
+   return cache[mat.name]
+#}
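+
+# e.g. (editor's note) materials are likewise deduplicated by name, and None
+# maps to index 0 (the placeholder material added in encoder_init):
+#
+#   encoder_process_material( None )   # -> 0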
 
-            # Write the vertex / indice data
-            #
-            for tri_index, tri in enumerate(data.loop_triangles):
-               if tri.material_index != material_id:
-                  continue
+# Create a tree structure containing all the objects in the collection
+#
+def encoder_build_scene_graph( collection ):
+#{
+   global g_encoder
 
-               for j in range(3):
-                  vert = data.vertices[tri.vertices[j]]
-                  li = tri.loops[j]
-                  vi = data.loops[li].vertex_index
-
-                  co = vert.co
-                  norm = data.loops[li].normal
-                  uv = (0,0)
-                  colour = (255,255,255,255)
-                  groups = [0,0,0,0]
-                  weights = [0,0,0,0]
-
-                  if data.uv_layers:
-                     uv = data.uv_layers.active.data[li].uv
-
-                  if data.vertex_colors:
-                     colour = data.vertex_colors.active.data[li].color
-                     colour = (int(colour[0]*255.0),\
-                               int(colour[1]*255.0),\
-                               int(colour[2]*255.0),\
-                               int(colour[3]*255.0))
-                  
-                  # WEight groups
-                  #
-                  if armature_def:
-                     src_groups = [_ for _ in data.vertices[vi].groups \
-                                 if obj.vertex_groups[_.group].name in \
-                                    armature_def['bones']]
-
-                     weight_groups = sorted( src_groups, key = \
-                                             lambda a: a.weight, reverse=True )
-                     tot = 0.0
-                     for ml in range(3):
-                        if len(weight_groups) > ml:
-                           g = weight_groups[ml]
-                           name = obj.vertex_groups[g.group].name
-                           weight = g.weight
-
-                           weights[ml] = weight
-                           groups[ml] = armature_def['bones'].index(name)
-                           tot += weight
-                  
-                     if len(weight_groups) > 0:
-                        inv_norm = (1.0/tot) * 65535.0
-                        for ml in range(3):
-                           weights[ml] = int( weights[ml] * inv_norm )
-                           weights[ml] = min( weights[ml], 65535 )
-                           weights[ml] = max( weights[ml], 0 )
-
-                  TOLERENCE = 4
-                  m = float(10**TOLERENCE)
-
-                  key = (int(co[0]*m+0.5),\
-                         int(co[1]*m+0.5),\
-                         int(co[2]*m+0.5),\
-                         int(norm[0]*m+0.5),\
-                         int(norm[1]*m+0.5),\
-                         int(norm[2]*m+0.5),\
-                         int(uv[0]*m+0.5),\
-                         int(uv[1]*m+0.5),\
-                         colour[0],\
-                         colour[1],\
-                         colour[2],\
-                         colour[3],\
-                         weights[0],\
-                         weights[1],\
-                         weights[2],\
-                         weights[3],\
-                         groups[0],\
-                         groups[1],\
-                         groups[2],\
-                         groups[3])
-
-                  if key in boffa:
-                     indice_buffer += [boffa[key]]
-                  else:
-                     index = c_uint32(sm.vertex_count)
-                     sm.vertex_count += 1
-                     
-                     boffa[key] = index
-                     indice_buffer += [index]
-
-                     v = mdl_vert()
-                     v.co[0] =  co[0]
-                     v.co[1] =  co[2]
-                     v.co[2] = -co[1]
-                     v.norm[0] =  norm[0]
-                     v.norm[1] =  norm[2]
-                     v.norm[2] = -norm[1]
-                     v.uv[0] = uv[0]
-                     v.uv[1] = uv[1]
-                     v.colour[0] = colour[0]
-                     v.colour[1] = colour[1]
-                     v.colour[2] = colour[2]
-                     v.colour[3] = colour[3]
-                     v.weights[0] = weights[0]
-                     v.weights[1] = weights[1]
-                     v.weights[2] = weights[2]
-                     v.weights[3] = weights[3]
-                     v.groups[0] = groups[0]
-                     v.groups[1] = groups[1]
-                     v.groups[2] = groups[2]
-                     v.groups[3] = groups[3]
-
-                     vertex_buffer += [v]
-
-                     for i in range(3):
-                        sm.bbx[0][i] = min( sm.bbx[0][i], v.co[i] )
-                        sm.bbx[1][i] = max( sm.bbx[1][i], v.co[i] )
-
-                  sm.indice_count += 1
-
-            if sm.vertex_count == 0:
-               for j in range(2):
-                  for i in range(3):
-                     sm.bbx[j][i] = 0
-
-            submesh_buffer += [sm]
-            node.submesh_count += 1
-            header.submesh_count += 1
-            header.vertex_count += sm.vertex_count
-            header.indice_count += sm.indice_count
-
-         mesh_cache[obj.data.name] = node
-
-      # Process entity data
-      # ==================================================================
-      node.offset = header.entdata_length
-
-      if classtype != 'k_classtype_none':
-         disptype = classtype
-      else:
-         disptype = objt
+   print( "  creating scene graph" )
 
-      s000 = F"  [{uid: 3}/{header.node_count-1}]" + " |"*(depth-1)
-      s001 = F" L {obj.name}"
-      s002 = s000+s001
-      s003 = F"{disptype}"
-      s004 = F"{node.parent: 3}"
-      s005 = ""
+   # initialize root
+   #
+   graph = g_encoder['scene_graph']
+   graph_lookup = g_encoder['graph_lookup']
+   graph["obj"] = None
+   graph["depth"] = 0
+   graph["children"] = []
+   graph["uid"] = 0
+   graph["parent"] = None
 
-      if classtype == 'k_classtype_skin':
-         armature_def['obj'].data.pose_position = POSE_OR_REST_CACHE
-         s005 = F" [armature -> {armature_def['obj'].cv_data.uid}]"
+   def _new_uid():
+   #{
+      global g_encoder
+      uid = g_encoder['uid_count']
+      g_encoder['uid_count'] += 1
+      return uid
+   #}
 
-      scmp = F"{s002:<32} {s003:<22} {s004} {s005}"
-      print( scmp )
-      
-      if classtype == 'k_classtype_INSTANCE' or \
-         classtype == 'k_classtype_BONE' or \
-         classtype == 'k_classtype_SKELETON' or \
-         classtype == 'k_classtype_SKIN':
-         print( "ERROR: user classtype cannot be _INSTANCE or _BONE" )
-         node.classtype = 0
-         node.offset = 0
-
-      elif classtype == 'k_classtype_skin':
-         node.classtype = 12
-
-         armature = armature_def['obj']
-         header.entdata_length += sizeof( classtype_skin )
-
-         skin = classtype_skin()
-         skin.skeleton = armature.cv_data.uid
-         entdata_buffer += [skin]
-      
-      elif classtype == 'k_classtype_skeleton':
-         node.classtype = 11
-         header.entdata_length += sizeof( classtype_skeleton )
-         skeleton = classtype_skeleton()
-
-         armature_def = graph_lookup[obj]
-         armature = obj
-         bones = armature_def['bones']
-         skeleton.channels = len(bones)
-         skeleton.ik_count = armature_def["ik_count"]
-         skeleton.collider_count = armature_def["collider_count"]
+   for obj in collection.all_objects:
+   #{
+      if obj.parent: continue
+
+      def _extend( p, n, d ):
+      #{
+         uid = _new_uid()
+         tree = {}
+         tree["uid"] = uid
+         tree["children"] = []
+         tree["depth"] = d
+         tree["obj"] = n
+         tree["parent"] = p
+         n.cv_data.uid = uid
          
-         if armature.animation_data:
-            previous_frame = bpy.context.scene.frame_current
-            previous_action = armature.animation_data.action
-
-            skeleton.anim_start = len(anim_buffer)
-            skeleton.anim_count = 0
-
-            for NLALayer in obj.animation_data.nla_tracks:
-               for NLAStrip in NLALayer.strips:
-                  # Use action
-                  for a in bpy.data.actions:
-                     if a.name == NLAStrip.name:
-                        armature.animation_data.action = a
-                        break
-
-                  anim_start = int(NLAStrip.action_frame_start)
-                  anim_end   = int(NLAStrip.action_frame_end)
-
-                  # export strips
-                  anim = mdl_animation()
-                  anim.pstr_name = emplace_string( NLAStrip.action.name )
-                  anim.rate = 30.0
-                  anim.offset = header.keyframe_count
-                  anim.length = anim_end-anim_start
-                  
-                  # Export the fucking keyframes
-                  for frame in range(anim_start,anim_end):
-                     bpy.context.scene.frame_set(frame)
-                     
-                     for bone_name in bones:
-                        for pb in armature.pose.bones:
-                           if pb.name == bone_name:
-                              rb = armature.data.bones[ bone_name ]
-                              
-                              # relative bone matrix
-                              if rb.parent is not None:
-                                 offset_mtx = rb.parent.matrix_local
-                                 offset_mtx = offset_mtx.inverted_safe() @ \
-                                              rb.matrix_local
-
-                                 inv_parent = pb.parent.matrix @ offset_mtx
-                                 inv_parent.invert_safe()
-                                 fpm = inv_parent @ pb.matrix 
-                              else:
-                                 bone_mtx = rb.matrix.to_4x4()
-                                 local_inv = rb.matrix_local.inverted_safe()
-                                 fpm = bone_mtx @ local_inv @ pb.matrix
-
-                              loc, rot, sca = fpm.decompose()
-
-                              # local position
-                              final_pos = Vector(( loc[0], loc[2], -loc[1] ))
-
-                              # rotation
-                              lc_m = pb.matrix_channel.to_3x3()
-                              if pb.parent is not None:
-                                 smtx = pb.parent.matrix_channel.to_3x3()
-                                 lc_m = smtx.inverted() @ lc_m
-                              rq = lc_m.to_quaternion()
-
-                              kf = mdl_keyframe()
-                              kf.co[0] =  final_pos[0]
-                              kf.co[1] =  final_pos[1]
-                              kf.co[2] =  final_pos[2]
-
-                              kf.q[0] =  rq[1]
-                              kf.q[1] =  rq[3]
-                              kf.q[2] = -rq[2]
-                              kf.q[3] =  rq[0]
-                              
-                              # scale
-                              kf.s[0] = sca[0]
-                              kf.s[1] = sca[2]
-                              kf.s[2] = sca[1]
-
-                              animdata_buffer += [kf]
-                              header.keyframe_count += 1
-                              break
-
-                  anim_buffer += [anim]
-                  skeleton.anim_count += 1
-
-                  s000 = F"  [{uid: 3}/{header.node_count-1}]" + " |"*(depth-1)
-                  print( F"{s000} | *anim: {NLAStrip.action.name}" )
+         # Descend into armature
+         #
+         if n.type == 'ARMATURE':
+         #{
+            tree["bones"] = [None] # None is the root transform
+            tree["ik_count"] = 0
+            tree["collider_count"] = 0
             
-            bpy.context.scene.frame_set( previous_frame )
-            armature.animation_data.action = previous_action
-
-         entdata_buffer += [skeleton]
-
-      elif classtype == 'k_classtype_bone':
-         node.classtype = 10
-         header.entdata_length += sizeof( classtype_bone )
-         
-         bone = classtype_bone()
-         bone.deform = node_def['deform']
+            # This also collects some information about constraints and IK,
+            # and counts colliders for the armature.
+            #
+            def _extendb( p, n, d ):
+            #{
+               nonlocal tree
+
+               btree = {}
+               btree["bone"] = n
+               btree["linked_armature"] = tree
+               btree["uid"] = _new_uid()
+               btree["children"] = []
+               btree["depth"] = d
+               btree["parent"] = p
+               tree["bones"] += [n.name]
+
+               for c in n.children:
+               #{
+                  _extendb( btree, c, d+1 )
+               #}
+
+               for c in tree['obj'].pose.bones[n.name].constraints:
+               #{
+                  if c.type == 'IK':
+                  #{
+                     btree["ik_target"] = c.subtarget
+                     btree["ik_pole"] = c.pole_subtarget
+                     tree["ik_count"] += 1
+                  #}
+               #}
+
+               if n.cv_data.collider:
+                  tree['collider_count'] += 1
+
+               btree['deform'] = n.use_deform
+               p['children'] += [btree]
+            #}
+
+            for b in n.data.bones:
+               if not b.parent:
+                  _extendb( tree, b, d+1 )
+            #}
+         #}
          
-         if 'target' in node_def:
-            bone.ik_target = armature_def['bones'].index( node_def['target'] )
-            bone.ik_pole   = armature_def['bones'].index( node_def['pole'] )
-         else:
-            bone.ik_target = 0
-            bone.ik_pole = 0
-
-         bone.collider = 1 if obj.cv_data.collider else 0
-         if obj.cv_data.collider:
-            bone.hitbox[0][0] =  obj.cv_data.v0[0]
-            bone.hitbox[0][1] =  obj.cv_data.v0[2]
-            bone.hitbox[0][2] = -obj.cv_data.v1[1]
-            bone.hitbox[1][0] =  obj.cv_data.v1[0]
-            bone.hitbox[1][1] =  obj.cv_data.v1[2]
-            bone.hitbox[1][2] = -obj.cv_data.v0[1]
-         else:
-            bone.hitbox[0][0] = 0.0
-            bone.hitbox[0][1] = 0.0
-            bone.hitbox[0][2] = 0.0
-            bone.hitbox[1][0] = 0.0
-            bone.hitbox[1][1] = 0.0
-            bone.hitbox[1][2] = 0.0
-
-         if obj.cv_data.con0:
-            bone.use_limits = 1 
-            bone.angle_limits[0][0] =  obj.cv_data.mins[0]
-            bone.angle_limits[0][1] =  obj.cv_data.mins[2]
-            bone.angle_limits[0][2] = -obj.cv_data.maxs[1]
-            bone.angle_limits[1][0] =  obj.cv_data.maxs[0]
-            bone.angle_limits[1][1] =  obj.cv_data.maxs[2]
-            bone.angle_limits[1][2] = -obj.cv_data.mins[1]
-         else:
-            bone.use_limits = 0
-            bone.angle_limits[0][0] = 0.0
-            bone.angle_limits[0][1] = 0.0
-            bone.angle_limits[0][2] = 0.0
-            bone.angle_limits[1][0] = 0.0
-            bone.angle_limits[1][1] = 0.0
-            bone.angle_limits[1][2] = 0.0
-
-         bone.deform = node_def['deform']
-         entdata_buffer += [bone]
-
-      elif classtype == 'k_classtype_gate':
-         node.classtype = 1
-         header.entdata_length += sizeof( classtype_gate )
-
-         gate = classtype_gate()
-         gate.target = 0
-         if obj.cv_data.target != None:
-            gate.target = obj.cv_data.target.cv_data.uid
-
-         if obj.type == 'MESH':
-            gate.dims[0] = obj.data.cv_data.v0[0]
-            gate.dims[1] = obj.data.cv_data.v0[1]
-            gate.dims[2] = obj.data.cv_data.v0[2]
-         else:
-            gate.dims[0] = obj.cv_data.v0[0]
-            gate.dims[1] = obj.cv_data.v0[1]
-            gate.dims[2] = obj.cv_data.v0[2]
-
-         entdata_buffer += [gate]
+         # Recurse into children of this object
+         #
+         for obj1 in n.children:
+         #{
+            nonlocal collection
+            for c1 in obj1.users_collection:
+            #{
+               if c1 == collection:
+               #{
+                  _extend( tree, obj1, d+1 )
+                  break
+               #}
+            #}
+         #}
 
-      elif classtype == 'k_classtype_block':
-         node.classtype = 2
-         header.entdata_length += sizeof( classtype_block )
+         p["children"] += [tree]
+         graph_lookup[n] = tree
 
-         source = obj.data.cv_data
+      #}
 
-         block = classtype_block()
-         block.bbx[0][0] =  source.v0[0]
-         block.bbx[0][1] =  source.v0[2]
-         block.bbx[0][2] = -source.v1[1]
+      _extend( graph, obj, 1 )
 
-         block.bbx[1][0] =  source.v1[0]
-         block.bbx[1][1] =  source.v1[2]
-         block.bbx[1][2] = -source.v0[1]
-         entdata_buffer += [block]
+   #}
+#}
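+
+# Editor's note: a rough sketch of the resulting structure, with keys
+# abbreviated:
+#
+#   g_encoder['scene_graph'] ~= { 'obj': None, 'uid': 0, 'depth': 0,
+#                                 'children': [ { 'obj': <object>, 'uid': 1,
+#                                                 'children': [...], ... } ] }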
 
-      elif classtype == 'k_classtype_achievement_box':
-         node.classtype = 13
 
-         header.entdata_length += sizeof( classtype_achievement_box )
-         ach = classtype_achievement_box()
-         ach.pstr_name = emplace_string( obj.cv_data.strp )
-         ach.trigger = 0
+# Kind of a useless thing i made but it looks cool and adds complexity!!1
+#
+def encoder_graph_iterator( root ):
+#{
+   for c in root['children']:
+   #{
+      yield c
+      yield from encoder_graph_iterator(c)
+   #}
+#}
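+
+# e.g. (editor's note) a depth-first walk over every definition in the graph,
+# feeding each one into the compiler further below:
+#
+#   for node_def in encoder_graph_iterator( g_encoder['scene_graph'] ):
+#      encoder_process_definition( node_def )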
 
-         if obj.cv_data.target != None:
-            ach.trigger = obj.cv_data.target.cv_data.uid
 
-         entdata_buffer += [ach]
+# Push a vertex into the model file, or return a cached index (c_uint32)
+#
+def encoder_vertex_push( vertex_reference, co,norm,uv,colour,groups,weights ):
+#{
+   global g_encoder
+   buffer = g_encoder['data']['vertex']
 
-      elif classtype == 'k_classtype_audio':
-         node.classtype = 14
+   TOLERENCE = 4
+   m = float(10**TOLERENCE)
+   
+   # Would be nice to know if this can be done faster than it currently runs;
+   # it's quite slow.
+   #
+   key = (int(co[0]*m+0.5),
+          int(co[1]*m+0.5),
+          int(co[2]*m+0.5),
+          int(norm[0]*m+0.5),
+          int(norm[1]*m+0.5),
+          int(norm[2]*m+0.5),
+          int(uv[0]*m+0.5),
+          int(uv[1]*m+0.5),
+          colour[0]*m+0.5,    # these guys are already quantized
+          colour[1]*m+0.5,    # .
+          colour[2]*m+0.5,    # .
+          colour[3]*m+0.5,    # .
+          weights[0]*m+0.5,   # v
+          weights[1]*m+0.5,
+          weights[2]*m+0.5,
+          weights[3]*m+0.5,
+          groups[0]*m+0.5,
+          groups[1]*m+0.5,
+          groups[2]*m+0.5,
+          groups[3]*m+0.5)
+
+   if key in vertex_reference:
+      return vertex_reference[key]
+   else:
+   #{
+      index = c_uint32( len(vertex_reference) )
+      vertex_reference[key] = index
+
+      v = mdl_vert()
+      v.co[0]       =  co[0]
+      v.co[1]       =  co[2]
+      v.co[2]       = -co[1]
+      v.norm[0]     =  norm[0]
+      v.norm[1]     =  norm[2]
+      v.norm[2]     = -norm[1]
+      v.uv[0]       =  uv[0]
+      v.uv[1]       =  uv[1]
+      v.colour[0]   =  colour[0]
+      v.colour[1]   =  colour[1]
+      v.colour[2]   =  colour[2]
+      v.colour[3]   =  colour[3]
+      v.weights[0]  =  weights[0]
+      v.weights[1]  =  weights[1]
+      v.weights[2]  =  weights[2]
+      v.weights[3]  =  weights[3]
+      v.groups[0]   =  groups[0]
+      v.groups[1]   =  groups[1]
+      v.groups[2]   =  groups[2]
+      v.groups[3]   =  groups[3]
+
+      buffer += [v]
+      return index
+   #}
+#}
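+
+# e.g. (editor's note) pushing identical attributes twice appends one mdl_vert
+# and returns the same cached index:
+#
+#   ref = {}
+#   i0 = encoder_vertex_push( ref, (0,0,0),(0,0,1),(0,0),
+#                                  (255,255,255,255),[0,0,0,0],[0,0,0,0] )
+#   i1 = encoder_vertex_push( ref, (0,0,0),(0,0,1),(0,0),
+#                                  (255,255,255,255),[0,0,0,0],[0,0,0,0] )
+#   # i0 is i1; only one vertex was added to g_encoder['data']['vertex']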
+
+
+# Compile a mesh (or use one from the cache) onto node, based on node_def
+# No return value
+#
+def encoder_compile_mesh( node, node_def ):
+#{
+   global g_encoder
+   
+   graph         = g_encoder['scene_graph']
+   graph_lookup  = g_encoder['graph_lookup']
+   mesh_cache    = g_encoder['mesh_cache']
+   obj           = node_def['obj']
+   armature_def  = None
+   can_use_cache = True
+   
+   # Check for modifiers that typically change the data per-instance.
+   # There is no well-defined rule for the choices here; it's just what I've
+   # needed while producing the game.
+   #
+   # It may be possible to detect these cases automatically.
+   #
+   for mod in obj.modifiers:
+   #{
+      if mod.type == 'DATA_TRANSFER' or mod.type == 'SHRINKWRAP' or \
+         mod.type == 'BOOLEAN' or mod.type == 'CURVE' or \
+         mod.type == 'ARRAY':
+      #{
+         can_use_cache = False
+      #}
+
+      if mod.type == 'ARMATURE':
+         armature_def = graph_lookup[mod.object]
+
+   # Check the cache first
+   #
+   if can_use_cache and (obj.data.name in mesh_cache):
+   #{
+      ref = mesh_cache[obj.data.name]
+      node.submesh_start = ref.submesh_start
+      node.submesh_count = ref.submesh_count
+      return
+   #}
+
+   # Compile a whole new mesh
+   #
+   node.submesh_start = len( g_encoder['data']['submesh'] )
+   node.submesh_count = 0
 
-         header.entdata_length += sizeof( classtype_audio )
-         aud = classtype_audio()
-         aud.pstr_file = emplace_string( obj.cv_data.strp )
-         aud.flags = obj.cv_data.intp
-         aud.volume = obj.cv_data.fltp
+   default_mat = c_uint32(12345)
+   default_mat.name = ""
 
-         entdata_buffer += [aud]
+   dgraph = bpy.context.evaluated_depsgraph_get()
+   data = obj.evaluated_get(dgraph).data
+   data.calc_loop_triangles()
+   data.calc_normals_split()
+   
+   # Mesh is split into submeshes based on their material
+   #
+   mat_list = data.materials if len(data.materials) > 0 else [default_mat]
+   for material_id, mat in enumerate(mat_list):
+   #{
+      mref = {}
+
+      sm = mdl_submesh()
+      sm.indice_start = len( g_encoder['data']['indice'] )
+      sm.vertex_start = len( g_encoder['data']['vertex'] )
+      sm.vertex_count = 0
+      sm.indice_count = 0
+      sm.material_id = encoder_process_material( mat )
+
+      for i in range(3):
+      #{
+         sm.bbx[0][i] =  999999
+         sm.bbx[1][i] = -999999
+      #}
+      
+      # Keep a reference to very very very similar vertices
+      #
+      vertex_reference = {}
 
-      elif classtype == 'k_classtype_spawn':
-         node.classtype = 3
+      # Write the vertex / indice data
+      #
+      for tri_index, tri in enumerate(data.loop_triangles):
+      #{
+         if tri.material_index != material_id:
+            continue
+
+         for j in range(3):
+         #{
+            vert = data.vertices[tri.vertices[j]]
+            li = tri.loops[j]
+            vi = data.loops[li].vertex_index
+            
+            # Gather vertex information
+            #
+            co      = vert.co
+            norm    = data.loops[li].normal
+            uv      = (0,0)
+            colour  = (255,255,255,255)
+            groups  = [0,0,0,0]
+            weights = [0,0,0,0]
+
+            # Uvs
+            #
+            if data.uv_layers:
+               uv = data.uv_layers.active.data[li].uv
+            
+            # Vertex Colours
+            #
+            if data.vertex_colors:
+            #{
+               colour = data.vertex_colors.active.data[li].color
+               colour = (int(colour[0]*255.0),\
+                         int(colour[1]*255.0),\
+                         int(colour[2]*255.0),\
+                         int(colour[3]*255.0))
+            #}
+            
+            # Weight groups: truncates to the 3 with the most influence. The
+            #                fourth bone ID is never used by the shader so it is
+            #                always 0
+            #
+            if armature_def:
+            #{
+               src_groups = [_ for _ in data.vertices[vi].groups \
+                              if obj.vertex_groups[_.group].name in \
+                                 armature_def['bones']]
+
+               weight_groups = sorted( src_groups, key = \
+                                       lambda a: a.weight, reverse=True )
+               tot = 0.0
+               for ml in range(3):
+               #{
+                  if len(weight_groups) > ml:
+                  #{
+                     g = weight_groups[ml]
+                     name = obj.vertex_groups[g.group].name
+                     weight = g.weight
+
+                     weights[ml] = weight
+                     groups[ml] = armature_def['bones'].index(name)
+                     tot += weight
+                  #}
+               #}
+            
+               if len(weight_groups) > 0:
+               #{
+                  inv_norm = (1.0/tot) * 65535.0
+                  for ml in range(3):
+                  #{
+                     weights[ml] = int( weights[ml] * inv_norm )
+                     weights[ml] = min( weights[ml], 65535 )
+                     weights[ml] = max( weights[ml], 0 )
+                  #}
+               #}
+            
+            # Add vertex and expand bound box
+            #
+            index = encoder_vertex_push( vertex_reference, co, \
+                                                           norm, \
+                                                           uv, \
+                                                           colour, \
+                                                           groups, \
+                                                           weights )
+            g_encoder['data']['indice'] += [index]
+         #}
+      #}
+      
+      # How many unique verts did we add in total
+      #
+      sm.vertex_count = len(g_encoder['data']['vertex']) - sm.vertex_start
+      sm.indice_count = len(g_encoder['data']['indice']) - sm.indice_start
+      
+      # Make sure bounding box isn't -inf -> inf if no vertices
+      #
+      if sm.vertex_count == 0:
+         for j in range(2):
+            for i in range(3):
+               sm.bbx[j][i] = 0
+      else:
+      #{
+         for j in range(sm.vertex_count):
+         #{
+            vert = g_encoder['data']['vertex'][ sm.vertex_start + j ]
 
-      elif classtype == 'k_classtype_water':
-         node.classtype = 4
+            for i in range(3):
+            #{
+               sm.bbx[0][i] = min( sm.bbx[0][i], vert.co[i] )
+               sm.bbx[1][i] = max( sm.bbx[1][i], vert.co[i] )
+            #}
+         #}
+      #}
+      
+      # Add submesh to encoder
+      #
+      g_encoder['data']['submesh'] += [sm]
+      node.submesh_count += 1
+
+   #}
+
+   # Save a reference to this node since we want to reuse the submesh indices
+   # later.
+   g_encoder['mesh_cache'][obj.data.name] = node
+#}
+
+
+def encoder_compile_ent_as( name, node, node_def ):
+#{
+   global g_encoder
+
+   if name == 'classtype_none':
+   #{
+      node.offset = 0
+      node.classtype = 0
+      return
+   #}
+   elif name not in globals():
+   #{
+      print( "Classtype '" +name + "' is unknown!" )
+      return
+   #}
+   
+   buffer = g_encoder['data']['entdata']
+   node.offset = len(buffer)
 
-      elif classtype == 'k_classtype_car_path':
-         node.classtype = 5
-         header.entdata_length += sizeof( classtype_car_path )
+   cl = globals()[ name ]
+   inst = cl()
+   inst.encode_obj( node, node_def )
 
-         pn = classtype_car_path()
-         pn.target = 0
-         pn.target1 = 0
+   buffer.extend( bytearray(inst) )
+   bytearray_align_to( buffer, 4 )
+#}
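+
+# e.g. (editor's note) the classtype name is resolved through globals(), so
+# adding a new entity only requires defining a matching class above:
+#
+#   encoder_compile_ent_as( 'classtype_spawn', node, node_def )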
 
-         if obj.cv_data.target != None: 
-            pn.target = obj.cv_data.target.cv_data.uid
-         if obj.cv_data.target1 != None: 
-            pn.target1 = obj.cv_data.target1.cv_data.uid
+# Compiles animation data into model and gives us some extra node_def entries
+#
+def encoder_compile_armature( node, node_def ):
+#{
+   global g_encoder
+   
+   entdata       = g_encoder['data']['entdata']
+   animdata      = g_encoder['data']['anim']
+   keyframedata  = g_encoder['data']['keyframe']
+   mesh_cache    = g_encoder['mesh_cache']
+   obj           = node_def['obj']
+   bones         = node_def['bones']
+
+   # extra info
+   node_def['anim_start'] = len(animdata)
+   node_def['anim_count'] = 0
+   
+   # Compile anims
+   #
+   if obj.animation_data:
+   #{
+      # So we can restore later
+      #
+      previous_frame  = bpy.context.scene.frame_current
+      previous_action = obj.animation_data.action
+      POSE_OR_REST_CACHE = obj.data.pose_position
+      obj.data.pose_position = 'POSE'
+
+      for NLALayer in obj.animation_data.nla_tracks:
+      #{
+         for NLAStrip in NLALayer.strips:
+         #{
+            # set active
+            #
+            for a in bpy.data.actions:
+            #{
+               if a.name == NLAStrip.name:
+               #{
+                  obj.animation_data.action = a
+                  break
+               #}
+            #}
+            
+            # Clip to NLA settings
+            #
+            anim_start = int(NLAStrip.action_frame_start)
+            anim_end   = int(NLAStrip.action_frame_end)
 
-         entdata_buffer += [pn]
+            # Export strips
+            #
+            anim = mdl_animation()
+            anim.pstr_name = encoder_process_pstr( NLAStrip.action.name )
+            anim.rate = 30.0
+            anim.offset = len(keyframedata)
+            anim.length = anim_end-anim_start
+            
+            # Export the keyframes
+            for frame in range(anim_start,anim_end):
+            #{
+               bpy.context.scene.frame_set(frame)
+               
+               for bone_name in bones:
+               #{
+                  for pb in obj.pose.bones:
+                  #{
+                     if pb.name != bone_name: continue
 
-      elif obj.is_instancer:
-         target = obj.instance_collection
+                     rb = obj.data.bones[ bone_name ]
+                     
+                     # relative bone matrix
+                     if rb.parent is not None:
+                     #{
+                        offset_mtx = rb.parent.matrix_local
+                        offset_mtx = offset_mtx.inverted_safe() @ \
+                                     rb.matrix_local
+
+                        inv_parent = pb.parent.matrix @ offset_mtx
+                        inv_parent.invert_safe()
+                        fpm = inv_parent @ pb.matrix 
+                     #}
+                     else:
+                     #{
+                        bone_mtx = rb.matrix.to_4x4()
+                        local_inv = rb.matrix_local.inverted_safe()
+                        fpm = bone_mtx @ local_inv @ pb.matrix
+                     #}
+
+                     loc, rot, sca = fpm.decompose()
+
+                     # local position
+                     final_pos = Vector(( loc[0], loc[2], -loc[1] ))
+
+                     # rotation
+                     lc_m = pb.matrix_channel.to_3x3()
+                     if pb.parent is not None:
+                     #{
+                        smtx = pb.parent.matrix_channel.to_3x3()
+                        lc_m = smtx.inverted() @ lc_m
+                     #}
+                     rq = lc_m.to_quaternion()
+
+                     kf = mdl_keyframe()
+                     kf.co[0] =  final_pos[0]
+                     kf.co[1] =  final_pos[1]
+                     kf.co[2] =  final_pos[2]
+
+                     kf.q[0] =  rq[1]
+                     kf.q[1] =  rq[3]
+                     kf.q[2] = -rq[2]
+                     kf.q[3] =  rq[0]
+                     
+                     # scale
+                     kf.s[0] = sca[0]
+                     kf.s[1] = sca[2]
+                     kf.s[2] = sca[1]
 
-         node.classtype = 6
-         header.entdata_length += sizeof( classtype_instance )
+                     keyframedata += [kf]
+                     break
+                  #}
+               #}
+            #}
+            
+            # Add to animation buffer
+            #
+            animdata += [anim]
+            node_def['anim_count'] += 1
 
-         inst = classtype_instance()
-         inst.pstr_file = emplace_string( F"models/{target.name}.mdl" )
-         entdata_buffer += [inst]
+            # Report progress
+            #
+            status_name = F"            " + " |"*(node_def['depth']-1)
+            print( F"{status_name} | *anim: {NLAStrip.action.name}" )
+         #}
+      #}
+      
+      # Restore context to how it was before
+      #
+      bpy.context.scene.frame_set( previous_frame )
+      obj.animation_data.action = previous_action
+      obj.data.pose_position = POSE_OR_REST_CACHE
+   #}
+#}
 
-      elif classtype == 'k_classtype_capsule':
-         node.classtype = 7
+# We are trying to compile this node_def
+#
+def encoder_process_definition( node_def ):
+#{
+   global g_encoder
 
-      elif classtype == 'k_classtype_route_node':
-         node.classtype = 8
-         header.entdata_length += sizeof( classtype_route_node )
+   # data sources for object/bone are taken differently
+   #
+   if 'obj' in node_def:
+   #{
+      obj      = node_def['obj']
+      obj_type = obj.type
+      obj_co   = obj.location
+
+      if obj_type == 'ARMATURE':
+         obj_classtype = 'classtype_skeleton'
+      else:
+      #{
+         obj_classtype = obj.cv_data.classtype
 
-         rn = classtype_route_node()
-         if obj.cv_data.target != None: 
-            rn.target = obj.cv_data.target.cv_data.uid
-         if obj.cv_data.target1 != None: 
-            rn.target1 = obj.cv_data.target1.cv_data.uid
+         # Check for armature deform
+         #
+         for mod in obj.modifiers:
+         #{
+            if mod.type == 'ARMATURE':
+            #{
+               obj_classtype = 'classtype_skin'
 
-         entdata_buffer += [rn]
+               # Make sure to freeze armature in rest while we collect 
+               # vertex information
+               #
+               armature_def = g_encoder['graph_lookup'][mod.object]
+               POSE_OR_REST_CACHE = armature_def['obj'].data.pose_position
+               armature_def['obj'].data.pose_position = 'REST'
+               node_def['linked_armature'] = armature_def
+               break
+            #}
+         #}
+      #}
+   #}
+
+   elif 'bone' in node_def:
+   #{
+      obj      = node_def['bone']
+      obj_type = 'BONE'
+      obj_co   = obj.head_local
+      obj_classtype = 'classtype_bone'
+   #}
+
+   # Create node
+   #
+   node = mdl_node()
+   node.pstr_name = encoder_process_pstr( obj.name )
 
-      elif classtype == 'k_classtype_route':
-         node.classtype = 9
-         header.entdata_length += sizeof( classtype_route )
-         r = classtype_route()
-         r.colour[0] = obj.cv_data.colour[0]
-         r.colour[1] = obj.cv_data.colour[1]
-         r.colour[2] = obj.cv_data.colour[2]
+   if node_def["parent"]:
+      node.parent = node_def["parent"]["uid"]
 
-         if obj.cv_data.target != None: 
-            r.id_start = obj.cv_data.target.cv_data.uid
+   # Setup transform
+   #
+   node.co[0] =  obj_co[0]
+   node.co[1] =  obj_co[2]
+   node.co[2] = -obj_co[1]
+   
+   # Convert rotation quat to our space type
+   #
+   quat = obj.matrix_local.to_quaternion()
+   node.q[0] =  quat[1]
+   node.q[1] =  quat[3]
+   node.q[2] = -quat[2]
+   node.q[3] =  quat[0]
+   
+   # Bone scale is just a vector to the tail
+   #
+   if obj_type == 'BONE':
+   #{
+      node.s[0] =  obj.tail_local[0] - node.co[0]
+      node.s[1] =  obj.tail_local[2] - node.co[1]
+      node.s[2] = -obj.tail_local[1] - node.co[2]
+   #}
+   else:
+   #{
+      node.s[0] = obj.scale[0]
+      node.s[1] = obj.scale[2]
+      node.s[2] = obj.scale[1]
+   #}
+   
+   # Report status
+   #
+   tot_uid   = g_encoder['uid_count']-1
+   obj_uid   = node_def['uid']
+   obj_depth = node_def['depth']-1
 
-         entdata_buffer += [r]
+   status_id   = F"    [{obj_uid: 3}/{tot_uid}]" + " |"*obj_depth
+   status_name = status_id + F" L {obj.name}"
 
-      # classtype == 'k_classtype_none':
-      else:
-         node.classtype = 0
-         node.offset = 0
+   if obj_classtype != 'classtype_none': status_type = obj_classtype
+   else: status_type = obj_type
 
-      node_buffer += [node]
+   status_parent = F"{node.parent: 3}"
+   status_armref = ""
 
-   # Write data arrays
-   # TODO: 8 BYTE ALIGNMENT
+   if obj_classtype == 'classtype_skin':
+      status_armref = F" [armature -> {armature_def['obj'].cv_data.uid}]"
 
-   print( "Writing data" )
-   fpos = sizeof(header)
+   print(F"{status_name:<32} {status_type:<22} {status_parent} {status_armref}")
 
-   print( F"Nodes: {header.node_count}" )
-   header.node_offset = fpos
-   fpos += sizeof(mdl_node)*header.node_count
+   # Process mesh if needed
+   # 
+   if obj_type == 'MESH':
+   #{
+      encoder_compile_mesh( node, node_def )
+   #}
+   elif obj_type == 'ARMATURE':
+   #{
+      encoder_compile_armature( node, node_def )
+   #}
 
-   print( F"Submeshes: {header.submesh_count}" )
-   header.submesh_offset = fpos
-   fpos += sizeof(mdl_submesh)*header.submesh_count
+   encoder_compile_ent_as( obj_classtype, node, node_def )
 
-   print( F"Materials: {header.material_count}" )
-   header.material_offset = fpos
-   fpos += sizeof(mdl_material)*header.material_count
+   # Make sure to reset the armature we just mucked about with
+   #
+   if obj_classtype == 'classtype_skin':
+      armature_def['obj'].data.pose_position = POSE_OR_REST_CACHE
 
-   print( F"Animation count: {len(anim_buffer)}" )
-   header.anim_count = len(anim_buffer)
-   header.anim_offset = fpos
-   fpos += sizeof(mdl_animation)*header.anim_count
+   g_encoder['data']['node'] += [node]
+#}
 
-   print( F"Entdata length: {header.entdata_length}" )
-   header.entdata_offset = fpos
-   fpos += header.entdata_length
-   
-   print( F"Strings length: {len(strings_buffer)}" )
-   header.strings_offset = fpos
-   header.strings_length = len(strings_buffer)
-   fpos += header.strings_length
+# Final pass: composite all data arrays into one buffer and write the file
+#
+def encoder_write_to_file( path ):
+#{
+   global g_encoder
    
-   # Optional array things
-   print( F"Keyframe count: {header.keyframe_count}" )
-   header.keyframe_offset = fpos
-   fpos += sizeof(mdl_keyframe)*header.keyframe_count
+   # Compile down to a byte array
+   #
+   header = g_encoder['header']
+   file_pos = sizeof(header)
+   file_data = bytearray()
+   print( "  Compositing data arrays" )
    
-   print( F"Vertex count: {header.vertex_count}" )
-   header.vertex_offset = fpos
-   fpos += sizeof(mdl_vert)*header.vertex_count
+   for array_name in g_encoder['data']:
+   #{
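+      # Each named data array fills the matching mdl_header fields by name:
+      # '<array>_offset' always, plus '<array>_size' for raw bytearrays
+      # (e.g. the string table) or '<array>_count' for lists of ctypes structs.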
+      file_pos += bytearray_align_to( file_data, 16, sizeof(header) )
+      arr = g_encoder['data'][array_name]
 
-   print( F"Indice count: {header.indice_count}" )
-   header.indice_offset = fpos
-   fpos += sizeof(c_uint32)*header.indice_count
+      setattr( header, array_name + "_offset", file_pos )
 
-   header.file_length = fpos
+      print( F"    {array_name:<16} @{file_pos:> 8X}[{len(arr)}]" )
 
-   path = F"/home/harry/Documents/carve/models_src/{collection_name}.mdl"
+      if isinstance( arr, bytearray ):
+      #{
+         setattr( header, array_name + "_size", len(arr) )
+
+         file_data.extend( arr )
+         file_pos += len(arr)
+      #}
+      else:
+      #{
+         setattr( header, array_name + "_count", len(arr) )
+
+         for item in arr:
+         #{
+            bbytes = bytearray(item)
+            file_data.extend( bbytes )
+            file_pos += sizeof(item)
+         #}
+      #}
+   #}
+
+   # It is imperative that this field be sanitized in the future!
+   #
+   header.file_length = file_pos
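+   # (file_length covers the header plus all composited arrays, including
+   #  any alignment padding)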
+
+   print( "  Writing file" )
+   # Write header and data chunk to file
+   #
    fp = open( path, "wb" )
-             
    fp.write( bytearray( header ) )
+   fp.write( file_data )
+   fp.close()
+#}
+
+# Main compiler entry point; the collection is identified by its name string
+# 
+def write_model(collection_name):
+#{
+   global g_encoder
+   print( F"Model graph | Create mode '{collection_name}'" )
    
-   for node in node_buffer:
-      fp.write( bytearray(node) )
-   for sm in submesh_buffer:
-      fp.write( bytearray(sm) )
-   for mat in material_buffer:
-      fp.write( bytearray(mat) )
-   for a in anim_buffer:
-      fp.write( bytearray(a) )
-   for ed in entdata_buffer:
-      fp.write( bytearray(ed) )
-
-   fp.write( strings_buffer )
-
-   for kf in animdata_buffer:
-      fp.write( bytearray(kf) )
-
-   for v in vertex_buffer:
-      fp.write( bytearray(v) )
-   for i in indice_buffer:
-      fp.write( bytearray(i) )
+   collection = bpy.data.collections[collection_name]
 
-   fp.close()
+   encoder_init()
+   encoder_build_scene_graph( collection )
+
+   # Compile 
+   #
+   print( "  Comping objects" )
+   it = encoder_graph_iterator( g_encoder['scene_graph'] )
+   for node_def in it:
+      encoder_process_definition( node_def )
+
+   # Write 
+   #
+   # TODO: this output path is still hardcoded
+   path = F"/home/harry/Documents/carve/models_src/{collection_name}.mdl"
+   encoder_write_to_file( path )
 
    print( F"Completed {collection_name}.mdl" )
+#}
+
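+# Usage sketch (hypothetical collection name), e.g. from Blender's Python
+# console once the addon is loaded:
+#
+#   write_model( "mp_test_scene" )    # -> mp_test_scene.mdl
+#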
 
 # Clicky clicky GUI
 # ------------------------------------------------------------------------------
@@ -1158,7 +1438,7 @@ def cv_draw():
                               (0,0,1,1))
                
 
-      if obj.cv_data.classtype == 'k_classtype_gate':
+      if obj.cv_data.classtype == 'classtype_gate':
          if obj.type == 'MESH':
             dims = obj.data.cv_data.v0
          else:
@@ -1190,7 +1470,7 @@ def cv_draw():
          if obj.cv_data.target != None:
             drawbline( obj.location, obj.cv_data.target.location, sw,sw )
 
-      elif obj.cv_data.classtype == 'k_classtype_route_node':
+      elif obj.cv_data.classtype == 'classtype_route_node':
          sw = Vector((0.4,0.4,0.4,0.2))
          sw2 = Vector((1.5,0.2,0.2,0.0))
          if obj.cv_data.target != None:
@@ -1205,7 +1485,7 @@ def cv_draw():
                obj.matrix_world.to_quaternion() @ Vector((0,0,-6+1.5))
          drawbline( obj.location, p1, sw,sw2 )
 
-      elif obj.cv_data.classtype == 'k_classtype_achievement_box':
+      elif obj.cv_data.classtype == 'classtype_achievement_box':
          a = Vector((-1,-1,-1))
          b = Vector((1,1,1))
          
@@ -1242,7 +1522,7 @@ def cv_draw():
                colours += [(0,1,1,1),(0,1,1,1)]
 
 
-      elif obj.cv_data.classtype == 'k_classtype_block':
+      elif obj.cv_data.classtype == 'classtype_block':
          a = obj.data.cv_data.v0
          b = obj.data.cv_data.v1
          
@@ -1266,7 +1546,7 @@ def cv_draw():
             verts += [(v1[0],v1[1],v1[2])]
             colours += [(1,1,0,1),(1,1,0,1)]
 
-      elif obj.cv_data.classtype == 'k_classtype_capsule':
+      elif obj.cv_data.classtype == 'classtype_capsule':
          h = obj.data.cv_data.v0[0]
          r = obj.data.cv_data.v0[1]
 
@@ -1291,7 +1571,7 @@ def cv_draw():
             verts += [(v1[0],v1[1],v1[2])]
             colours += [(0.5,1,0,1),(0.5,1,0,1)]
 
-      elif obj.cv_data.classtype == 'k_classtype_spawn':
+      elif obj.cv_data.classtype == 'classtype_spawn':
          vs = [None]*4
          vs[0] = obj.matrix_world @ Vector((0,0,0))
          vs[1] = obj.matrix_world @ Vector((0,2,0))
@@ -1305,7 +1585,7 @@ def cv_draw():
             verts += [(v1[0],v1[1],v1[2])]
             colours += [(0,1,1,1),(0,1,1,1)]
       
-      elif obj.cv_data.classtype == 'k_classtype_route':
+      elif obj.cv_data.classtype == 'classtype_route':
          vs = [None]*2
          vs[0] = obj.location
          vs[1] = obj.cv_data.target.location
@@ -1336,7 +1616,7 @@ def cv_draw():
             targets = [None,None]
             targets[0] = node.cv_data.target
 
-            if node.cv_data.classtype == 'k_classtype_route_node':
+            if node.cv_data.classtype == 'classtype_route_node':
                targets[1] = node.cv_data.target1
             
             nextnode = targets[stack_i[si-1]]
@@ -1368,8 +1648,8 @@ def cv_draw():
             for sj in range(si):
                sk = (sj+1)%si
 
-               if stack[sj].cv_data.classtype == 'k_classtype_gate' and \
-                  stack[sk].cv_data.classtype == 'k_classtype_gate':
+               if stack[sj].cv_data.classtype == 'classtype_gate' and \
+                  stack[sk].cv_data.classtype == 'classtype_gate':
                   dist = (stack[sj].location-stack[sk].location).magnitude
                   drawsbpath( stack[sj], stack[sk], cc*0.4, cc, dist, dist )
 
@@ -1378,7 +1658,7 @@ def cv_draw():
 
             course_count += 1
 
-      elif obj.cv_data.classtype == 'k_classtype_car_path':
+      elif obj.cv_data.classtype == 'classtype_car_path':
          v0 = obj.matrix_world.to_quaternion() @ Vector((0,1,0))
          c0 = Vector((v0.x*0.5+0.5, v0.y*0.5+0.5, 0.0, 1.0))
          drawbhandle( obj, 1.0, (0.9,0.9,0.9,1.0) )
@@ -1406,7 +1686,7 @@ def cv_draw():
 def cv_poll_target(scene, obj):
    if obj == bpy.context.active_object:
       return False
-   if obj.cv_data.classtype == 'k_classtype_none':
+   if obj.cv_data.classtype == 'classtype_none':
       return False
    return True
 
@@ -1434,21 +1714,21 @@ class CV_OBJ_SETTINGS(bpy.types.PropertyGroup):
    classtype: bpy.props.EnumProperty(
       name="Format", 
       items = [
-      ('k_classtype_none', "k_classtype_none", "", 0),
-      ('k_classtype_gate', "k_classtype_gate", "", 1),
-      ('k_classtype_block', "k_classtype_block", "", 2),
-      ('k_classtype_spawn', "k_classtype_spawn", "", 3),
-      ('k_classtype_water', "k_classtype_water", "", 4),
-      ('k_classtype_car_path', "k_classtype_car_path", "", 5),
-      ('k_classtype_INSTANCE', "","", 6 ),
-      ('k_classtype_capsule', "k_classtype_capsule", "", 7 ),
-      ('k_classtype_route_node', "k_classtype_route_node", "", 8 ),
-      ('k_classtype_route', "k_classtype_route", "", 9 ),
-      ('k_classtype_bone',"k_classtype_bone","",10),
-      ('k_classtype_SKELETON', "","", 11 ),
-      ('k_classtype_SKIN',"","",12),
-      ('k_classtype_achievement_box',"k_classtype_achievement_box","",13),
-      ('k_classtype_audio',"k_classtype_audio","",14),
+      ('classtype_none', "classtype_none", "", 0),
+      ('classtype_gate', "classtype_gate", "", 1),
+      ('classtype_block', "classtype_block", "", 2),
+      ('classtype_spawn', "classtype_spawn", "", 3),
+      ('classtype_water', "classtype_water", "", 4),
+      ('classtype_car_path', "classtype_car_path", "", 5),
+      ('classtype_INSTANCE', "","", 6 ),
+      ('classtype_capsule', "classtype_capsule", "", 7 ),
+      ('classtype_route_node', "classtype_route_node", "", 8 ),
+      ('classtype_route', "classtype_route", "", 9 ),
+      ('classtype_bone',"classtype_bone","",10),
+      ('classtype_SKELETON', "","", 11 ),
+      ('classtype_SKIN',"","",12),
+      ('classtype_achievement_box',"classtype_achievement_box","",13),
+      ('classtype_audio',"classtype_audio","",14),
       ])
 
 class CV_BONE_SETTINGS(bpy.types.PropertyGroup):
@@ -1496,25 +1776,33 @@ class CV_OBJ_PANEL(bpy.types.Panel):
    def draw(_,context):
       active_object = bpy.context.active_object
       if active_object == None: return
+      if active_object.type == 'ARMATURE':
+      #{
+         row = _.layout.row()
+         row.enabled = False
+         row.label( text="This object has the intrinsic classtype of skeleton" )
+         return
+      #}
+
       _.layout.prop( active_object.cv_data, "classtype" )
 
-      if active_object.cv_data.classtype == 'k_classtype_gate':
+      if active_object.cv_data.classtype == 'classtype_gate':
          _.layout.prop( active_object.cv_data, "target" )
 
          mesh = active_object.data
          _.layout.label( text=F"(i) Data is stored in {mesh.name}" )
          _.layout.prop( mesh.cv_data, "v0" )
 
-      elif active_object.cv_data.classtype == 'k_classtype_car_path' or \
-           active_object.cv_data.classtype == 'k_classtype_route_node':
+      elif active_object.cv_data.classtype == 'classtype_car_path' or \
+           active_object.cv_data.classtype == 'classtype_route_node':
          _.layout.prop( active_object.cv_data, "target" )
          _.layout.prop( active_object.cv_data, "target1" )
 
-      elif active_object.cv_data.classtype == 'k_classtype_route':
+      elif active_object.cv_data.classtype == 'classtype_route':
          _.layout.prop( active_object.cv_data, "target" )
          _.layout.prop( active_object.cv_data, "colour" )
 
-      elif active_object.cv_data.classtype == 'k_classtype_block':
+      elif active_object.cv_data.classtype == 'classtype_block':
          mesh = active_object.data
 
          _.layout.label( text=F"(i) Data is stored in {mesh.name}" )
@@ -1522,14 +1810,14 @@ class CV_OBJ_PANEL(bpy.types.Panel):
          _.layout.prop( mesh.cv_data, "v1" )
          _.layout.prop( mesh.cv_data, "v2" )
          _.layout.prop( mesh.cv_data, "v3" )
-      elif active_object.cv_data.classtype == 'k_classtype_capsule':
+      elif active_object.cv_data.classtype == 'classtype_capsule':
          mesh = active_object.data
          _.layout.label( text=F"(i) Data is stored in {mesh.name}" )
          _.layout.prop( mesh.cv_data, "v0" )
-      elif active_object.cv_data.classtype == 'k_classtype_achievement_box':
+      elif active_object.cv_data.classtype == 'classtype_achievement_box':
          _.layout.prop( active_object.cv_data, "strp" )
          _.layout.prop( active_object.cv_data, "target" )
-      elif active_object.cv_data.classtype == 'k_classtype_audio':
+      elif active_object.cv_data.classtype == 'classtype_audio':
          _.layout.prop( active_object.cv_data, "strp" )
          _.layout.prop( active_object.cv_data, "intp" )
          _.layout.prop( active_object.cv_data, "fltp" )