        # global matrix, so we need to apply the global matrix to the vertices to get the correct result.
        geom_mat_co = scene_data.settings.global_matrix if do_bake_space_transform else None
        # We need to apply the inverse transpose of the global matrix when transforming normals.
    
        geom_mat_no = Matrix(scene_data.settings.global_matrix_inv_transposed) if do_bake_space_transform else None
        if geom_mat_no is not None:
            # Remove translation & scaling!
            geom_mat_no.translation = Vector()
            geom_mat_no.normalize()
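            # geom_mat_no is already the inverse transpose of the global matrix; dropping the translation and
            # normalizing away any remaining scaling leaves only the rotational part needed for direction vectors.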
    
        geom = elem_data_single_int64(root, b"Geometry", get_fbxuid_from_key(me_key))
        geom.add_string(fbx_name_class(me.name.encode(), b"Geometry"))
        geom.add_string(b"Mesh")
    
        tmpl = scene_data.templates[b"Geometry"]
        props = elem_properties(geom)
    
        # Custom properties.
        if scene_data.settings.use_custom_properties:
    
            fbx_data_element_custom_properties(props, me)
    
    
        elem_data_single_int32(geom, b"GeometryVersion", FBX_GEOMETRY_VERSION)
    
        # Vertex cos.
    
        t_co = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * len(me.vertices) * 3
    
        me.vertices.foreach_get("co", t_co)
    
        if geom_mat_co is not None:
            def _vcos_transformed_gen(raw_cos, m=None):
                # Note: we could most likely get much better performance with numpy, but this is left as a TODO for now.
                return chain(*(m * Vector(v) for v in zip(*(iter(raw_cos),) * 3)))
            t_co = _vcos_transformed_gen(t_co, geom_mat_co)
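        # zip(*(iter(raw_cos),) * 3) above regroups the flat [x0, y0, z0, x1, ...] array into (x, y, z) triples,
        # so each vertex can be matrix-transformed before chain() flattens the result back into a flat sequence.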
    
        elem_data_single_float64_array(geom, b"Vertices", t_co)
        del t_co
    
        # Polygon indices.
        #
    # We do loose edges as two-vertex faces, if enabled...
        #
    
    # Note we have to process Edges at the same time, as they are based on the polygons' loops...
    
        loop_nbr = len(me.loops)
    
        t_pvi = array.array(data_types.ARRAY_INT32, (0,)) * loop_nbr
    
        t_ls = [None] * len(me.polygons)
    
        me.loops.foreach_get("vertex_index", t_pvi)
        me.polygons.foreach_get("loop_start", t_ls)
    
        # Add "fake" faces for loose edges.
        if scene_data.settings.use_mesh_edges:
            t_le = tuple(e.vertices for e in me.edges if e.is_loose)
            t_pvi.extend(chain(*t_le))
            t_ls.extend(range(loop_nbr, loop_nbr + len(t_le), 2))
            del t_le
    
        # Edges...
    # Note: Edges are represented as loop entries here: each edge uses a single index, which refers to the
    #       polygon-vertex array. The edge goes from the vertex indexed by that entry to the next vertex on the
    #       same polygon.
    #       Advantage: only one index per edge.
    #       Drawback: only polygons' edges can be represented (that's why we have to add fake two-vert polygons
    #                 for loose edges).
    #       We also have to store a mapping from real edges to their indices in this array, for edge-mapped data
    #       (like e.g. crease).
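    # Example: for a single quad encoded as [0, 1, 2, -4] in PolygonVertexIndex (see the ^ -1 end-of-loop
    # convention below), the edge between vertices 1 and 2 is stored as index 1, i.e. the position of
    # vertex 1 in that array.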
        t_eli = array.array(data_types.ARRAY_INT32)
        edges_map = {}
        edges_nbr = 0
        if t_ls and t_pvi:
            t_ls = set(t_ls)
            todo_edges = [None] * len(me.edges) * 2
            me.edges.foreach_get("vertices", todo_edges)
            todo_edges = set((v1, v2) if v1 < v2 else (v2, v1) for v1, v2 in zip(*(iter(todo_edges),) * 2))
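        # Each remaining edge is keyed by its sorted (min, max) vertex pair, so the lookups below do not depend
        # on the edge's orientation.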
    
            li = 0
            vi = vi_start = t_pvi[0]
            for li_next, vi_next in enumerate(t_pvi[1:] + t_pvi[:1], start=1):
                if li_next in t_ls:  # End of a poly's loop.
                    vi2 = vi_start
                    vi_start = vi_next
                else:
                    vi2 = vi_next
    
                e_key = (vi, vi2) if vi < vi2 else (vi2, vi)
                if e_key in todo_edges:
                    t_eli.append(li)
                    todo_edges.remove(e_key)
                    edges_map[e_key] = edges_nbr
                    edges_nbr += 1
    
                vi = vi_next
                li = li_next
        # End of edges!
    
    # We have to bitwise-invert (^ -1) the last index of each loop, which is how FBX marks the end of a polygon.
        for ls in t_ls:
            t_pvi[ls - 1] ^= -1
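    # E.g. a quad using vertices 0, 1, 2, 3 is written as 0, 1, 2, -4 (since 3 ^ -1 == -4).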
    
        # And finally we can write data!
        elem_data_single_int32_array(geom, b"PolygonVertexIndex", t_pvi)
        elem_data_single_int32_array(geom, b"Edges", t_eli)
        del t_pvi
        del t_ls
        del t_eli
    
        # And now, layers!
    
        # Smoothing.
        if smooth_type in {'FACE', 'EDGE'}:
            t_ps = None
            _map = b""
            if smooth_type == 'FACE':
    
                t_ps = array.array(data_types.ARRAY_INT32, (0,)) * len(me.polygons)
    
                me.polygons.foreach_get("use_smooth", t_ps)
                _map = b"ByPolygon"
            else:  # EDGE
                # Write Edge Smoothing.
    
                t_ps = array.array(data_types.ARRAY_INT32, (0,)) * edges_nbr
    
                for e in me.edges:
                    if e.key not in edges_map:
                        continue  # Only loose edges, in theory!
                    t_ps[edges_map[e.key]] = not e.use_edge_sharp
                _map = b"ByEdge"
            lay_smooth = elem_data_single_int32(geom, b"LayerElementSmoothing", 0)
            elem_data_single_int32(lay_smooth, b"Version", FBX_GEOMETRY_SMOOTHING_VERSION)
            elem_data_single_string(lay_smooth, b"Name", b"")
            elem_data_single_string(lay_smooth, b"MappingInformationType", _map)
            elem_data_single_string(lay_smooth, b"ReferenceInformationType", b"Direct")
    
        elem_data_single_int32_array(lay_smooth, b"Smoothing", t_ps)  # Sigh, int32 for bool...
        del t_ps
    
        # TODO: Edge crease (LayerElementCrease).
    
        # And we are done with edges!
        del edges_map
    
        # Loop normals.
        # NOTE: this is not supported by importer currently.
        # XXX Official docs says normals should use IndexToDirect,
        #     but this does not seem well supported by apps currently...
    me.calc_normals_split()

    def _nortuples_gen(raw_nors, m):
        # Great, now normals are also expected 4D!
        gen = zip(*(iter(raw_nors),) * 3 + (_infinite_gen(1.0),))
        return gen if m is None else (m * Vector(v) for v in gen)

    t_ln = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * len(me.loops) * 3
    me.loops.foreach_get("normal", t_ln)
    t_ln = _nortuples_gen(t_ln, geom_mat_no)
    
    if 0:  # IndexToDirect variant, disabled (see XXX note above).
        # t_ln is a generator and is iterated twice below, so materialize it first.
        t_ln = tuple(t_ln)

        lay_nor = elem_data_single_int32(geom, b"LayerElementNormal", 0)
            elem_data_single_int32(lay_nor, b"Version", FBX_GEOMETRY_NORMAL_VERSION)
            elem_data_single_string(lay_nor, b"Name", b"")
            elem_data_single_string(lay_nor, b"MappingInformationType", b"ByPolygonVertex")
            elem_data_single_string(lay_nor, b"ReferenceInformationType", b"IndexToDirect")
    
    
        ln2idx = tuple(set(t_ln))
        elem_data_single_float64_array(lay_nor, b"Normals", chain(*ln2idx))
            # Normal weights, no idea what it is.
    
            t_lnw = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * len(ln2idx)
            elem_data_single_float64_array(lay_nor, b"NormalsW", t_lnw)
    
    
            ln2idx = {nor: idx for idx, nor in enumerate(ln2idx)}
    
            elem_data_single_int32_array(lay_nor, b"NormalsIndex", (ln2idx[n] for n in t_ln))
    
        else:
            lay_nor = elem_data_single_int32(geom, b"LayerElementNormal", 0)
            elem_data_single_int32(lay_nor, b"Version", FBX_GEOMETRY_NORMAL_VERSION)
            elem_data_single_string(lay_nor, b"Name", b"")
            elem_data_single_string(lay_nor, b"MappingInformationType", b"ByPolygonVertex")
            elem_data_single_string(lay_nor, b"ReferenceInformationType", b"Direct")
    
            elem_data_single_float64_array(lay_nor, b"Normals", chain(*t_ln))
    
            # Normal weights, no idea what it is.
    
            t_ln = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * len(me.loops)
            elem_data_single_float64_array(lay_nor, b"NormalsW", t_ln)
    
    
        # tspace
        tspacenumber = 0
        if scene_data.settings.use_tspace:
            tspacenumber = len(me.uv_layers)
            if tspacenumber:
    
                t_ln = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * len(me.loops) * 3
                t_lnw = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * len(me.loops)
    
                for idx, uvlayer in enumerate(me.uv_layers):
                    name = uvlayer.name
                    me.calc_tangents(name)
                    # Loop bitangents (aka binormals).
                    # NOTE: this is not supported by importer currently.
                    me.loops.foreach_get("bitangent", t_ln)
                    lay_nor = elem_data_single_int32(geom, b"LayerElementBinormal", idx)
                    elem_data_single_int32(lay_nor, b"Version", FBX_GEOMETRY_BINORMAL_VERSION)
                    elem_data_single_string_unicode(lay_nor, b"Name", name)
                    elem_data_single_string(lay_nor, b"MappingInformationType", b"ByPolygonVertex")
                    elem_data_single_string(lay_nor, b"ReferenceInformationType", b"Direct")
    
                    elem_data_single_float64_array(lay_nor, b"Binormals", chain(*_nortuples_gen(t_ln, geom_mat_no)))
    
                    # Binormal weights, no idea what it is.
    
                    elem_data_single_float64_array(lay_nor, b"BinormalsW", t_lnw)
    
    
                    # Loop tangents.
                    # NOTE: this is not supported by importer currently.
                    me.loops.foreach_get("tangent", t_ln)
                    lay_nor = elem_data_single_int32(geom, b"LayerElementTangent", idx)
                    elem_data_single_int32(lay_nor, b"Version", FBX_GEOMETRY_TANGENT_VERSION)
                    elem_data_single_string_unicode(lay_nor, b"Name", name)
                    elem_data_single_string(lay_nor, b"MappingInformationType", b"ByPolygonVertex")
                    elem_data_single_string(lay_nor, b"ReferenceInformationType", b"Direct")
    
                elem_data_single_float64_array(lay_nor, b"Tangents", chain(*_nortuples_gen(t_ln, geom_mat_no)))
    
                    # Tangent weights, no idea what it is.
    
                    elem_data_single_float64_array(lay_nor, b"TangentsW", t_lnw)
    
                me.free_tangents()
    
        me.free_normals_split()
    
    
        # Write VertexColor Layers
        # note, no programs seem to use this info :/
        vcolnumber = len(me.vertex_colors)
        if vcolnumber:
            def _coltuples_gen(raw_cols):
                return zip(*(iter(raw_cols),) * 3 + (_infinite_gen(1.0),))  # We need a fake alpha...
    
    
            t_lc = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * len(me.loops) * 3
    
            for colindex, collayer in enumerate(me.vertex_colors):
                collayer.data.foreach_get("color", t_lc)
                lay_vcol = elem_data_single_int32(geom, b"LayerElementColor", colindex)
                elem_data_single_int32(lay_vcol, b"Version", FBX_GEOMETRY_VCOLOR_VERSION)
                elem_data_single_string_unicode(lay_vcol, b"Name", collayer.name)
                elem_data_single_string(lay_vcol, b"MappingInformationType", b"ByPolygonVertex")
                elem_data_single_string(lay_vcol, b"ReferenceInformationType", b"IndexToDirect")
    
                col2idx = tuple(set(_coltuples_gen(t_lc)))
                elem_data_single_float64_array(lay_vcol, b"Colors", chain(*col2idx))  # Flatten again...
    
                col2idx = {col: idx for idx, col in enumerate(col2idx)}
    
                elem_data_single_int32_array(lay_vcol, b"ColorIndex", (col2idx[c] for c in _coltuples_gen(t_lc)))
    
                del col2idx
            del t_lc
            del _coltuples_gen
    
        # Write UV layers.
        # Note: LayerElementTexture is deprecated since FBX 2011 - luckily!
        #       Textures are now only related to materials, in FBX!
        uvnumber = len(me.uv_layers)
        if uvnumber:
            def _uvtuples_gen(raw_uvs):
                return zip(*(iter(raw_uvs),) * 2)
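        # zip(*(iter(raw_uvs),) * 2) regroups the flat [u0, v0, u1, v1, ...] array into (u, v) pairs, which are
        # then de-duplicated for the IndexToDirect reference mode used below.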
    
    
            t_luv = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * len(me.loops) * 2
    
            for uvindex, uvlayer in enumerate(me.uv_layers):
                uvlayer.data.foreach_get("uv", t_luv)
                lay_uv = elem_data_single_int32(geom, b"LayerElementUV", uvindex)
                elem_data_single_int32(lay_uv, b"Version", FBX_GEOMETRY_UV_VERSION)
                elem_data_single_string_unicode(lay_uv, b"Name", uvlayer.name)
                elem_data_single_string(lay_uv, b"MappingInformationType", b"ByPolygonVertex")
                elem_data_single_string(lay_uv, b"ReferenceInformationType", b"IndexToDirect")
    
                uv2idx = tuple(set(_uvtuples_gen(t_luv)))
                elem_data_single_float64_array(lay_uv, b"UV", chain(*uv2idx))  # Flatten again...
    
                uv2idx = {uv: idx for idx, uv in enumerate(uv2idx)}
    
                elem_data_single_int32_array(lay_uv, b"UVIndex", (uv2idx[uv] for uv in _uvtuples_gen(t_luv)))
    
                del uv2idx
            del t_luv
            del _uvtuples_gen
    
        # Face's materials.
        me_fbxmats_idx = None
        if me in scene_data.mesh_mat_indices:
            me_fbxmats_idx = scene_data.mesh_mat_indices[me]
            me_blmats = me.materials
            if me_fbxmats_idx and me_blmats:
                lay_mat = elem_data_single_int32(geom, b"LayerElementMaterial", 0)
                elem_data_single_int32(lay_mat, b"Version", FBX_GEOMETRY_MATERIAL_VERSION)
                elem_data_single_string(lay_mat, b"Name", b"")
                nbr_mats = len(me_fbxmats_idx)
                if nbr_mats > 1:
    
                    t_pm = array.array(data_types.ARRAY_INT32, (0,)) * len(me.polygons)
    
                    me.polygons.foreach_get("material_index", t_pm)
    
                    # We have to validate mat indices, and map them to FBX indices.
                    blmats_to_fbxmats_idxs = [me_fbxmats_idx[m] for m in me_blmats]
                    mat_idx_limit = len(blmats_to_fbxmats_idxs)
                    def_mat = blmats_to_fbxmats_idxs[0]
    
                    _gen = (blmats_to_fbxmats_idxs[m] if m < mat_idx_limit else def_mat for m in t_pm)
    
                    t_pm = array.array(data_types.ARRAY_INT32, _gen)
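                    # Any polygon whose material_index falls outside the mesh's material list is remapped to
                    # def_mat (the FBX index of the first material).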
    
                    elem_data_single_string(lay_mat, b"MappingInformationType", b"ByPolygon")
    
                    # XXX Logically, should be "Direct" reference type, since we do not have any index array, and have one
                    #     value per polygon...
                    #     But looks like FBX expects it to be IndexToDirect here (maybe because materials are already
                    #     indices??? *sigh*).
                    elem_data_single_string(lay_mat, b"ReferenceInformationType", b"IndexToDirect")
    
                    elem_data_single_int32_array(lay_mat, b"Materials", t_pm)
                    del t_pm
                else:
                    elem_data_single_string(lay_mat, b"MappingInformationType", b"AllSame")
                    elem_data_single_string(lay_mat, b"ReferenceInformationType", b"IndexToDirect")
                    elem_data_single_int32_array(lay_mat, b"Materials", [0])
    
        # And the "layer TOC"...
    
        layer = elem_data_single_int32(geom, b"Layer", 0)
        elem_data_single_int32(layer, b"Version", FBX_GEOMETRY_LAYER_VERSION)
        lay_nor = elem_empty(layer, b"LayerElement")
        elem_data_single_string(lay_nor, b"Type", b"LayerElementNormal")
        elem_data_single_int32(lay_nor, b"TypedIndex", 0)
        if smooth_type in {'FACE', 'EDGE'}:
            lay_smooth = elem_empty(layer, b"LayerElement")
            elem_data_single_string(lay_smooth, b"Type", b"LayerElementSmoothing")
            elem_data_single_int32(lay_smooth, b"TypedIndex", 0)
        if vcolnumber:
            lay_vcol = elem_empty(layer, b"LayerElement")
            elem_data_single_string(lay_vcol, b"Type", b"LayerElementColor")
            elem_data_single_int32(lay_vcol, b"TypedIndex", 0)
        if uvnumber:
            lay_uv = elem_empty(layer, b"LayerElement")
            elem_data_single_string(lay_uv, b"Type", b"LayerElementUV")
            elem_data_single_int32(lay_uv, b"TypedIndex", 0)
        if me_fbxmats_idx is not None:
            lay_mat = elem_empty(layer, b"LayerElement")
            elem_data_single_string(lay_mat, b"Type", b"LayerElementMaterial")
            elem_data_single_int32(lay_mat, b"TypedIndex", 0)
    
        # Add other uv and/or vcol layers...
        for vcolidx, uvidx, tspaceidx in zip_longest(range(1, vcolnumber), range(1, uvnumber), range(1, tspacenumber),
                                                     fillvalue=0):
            layer = elem_data_single_int32(geom, b"Layer", max(vcolidx, uvidx))
            elem_data_single_int32(layer, b"Version", FBX_GEOMETRY_LAYER_VERSION)
            if vcolidx:
                lay_vcol = elem_empty(layer, b"LayerElement")
                elem_data_single_string(lay_vcol, b"Type", b"LayerElementColor")
                elem_data_single_int32(lay_vcol, b"TypedIndex", vcolidx)
            if uvidx:
                lay_uv = elem_empty(layer, b"LayerElement")
                elem_data_single_string(lay_uv, b"Type", b"LayerElementUV")
                elem_data_single_int32(lay_uv, b"TypedIndex", uvidx)
            if tspaceidx:
                lay_binor = elem_empty(layer, b"LayerElement")
                elem_data_single_string(lay_binor, b"Type", b"LayerElementBinormal")
                elem_data_single_int32(lay_binor, b"TypedIndex", tspaceidx)
                lay_tan = elem_empty(layer, b"LayerElement")
                elem_data_single_string(lay_tan, b"Type", b"LayerElementTangent")
                elem_data_single_int32(lay_tan, b"TypedIndex", tspaceidx)
    
    
    def fbx_data_material_elements(root, mat, scene_data):
        """
        Write the Material data block.
        """
        ambient_color = (0.0, 0.0, 0.0)
        if scene_data.data_world:
            ambient_color = next(iter(scene_data.data_world.keys())).ambient_color
    
        mat_key, _objs = scene_data.data_materials[mat]
        # Approximation...
        mat_type = b"phong" if mat.specular_shader in {'COOKTORR', 'PHONG', 'BLINN'} else b"lambert"
    
        fbx_mat = elem_data_single_int64(root, b"Material", get_fbxuid_from_key(mat_key))
        fbx_mat.add_string(fbx_name_class(mat.name.encode(), b"Material"))
        fbx_mat.add_string(b"")
    
        elem_data_single_int32(fbx_mat, b"Version", FBX_MATERIAL_VERSION)
        # those are not yet properties, it seems...
        elem_data_single_string(fbx_mat, b"ShadingModel", mat_type)
        elem_data_single_int32(fbx_mat, b"MultiLayer", 0)  # Should be bool...
    
        tmpl = scene_data.templates[b"Material"]
        props = elem_properties(fbx_mat)
        elem_props_template_set(tmpl, props, "p_string", b"ShadingModel", mat_type.decode())
        elem_props_template_set(tmpl, props, "p_color_rgb", b"EmissiveColor", mat.diffuse_color)
        elem_props_template_set(tmpl, props, "p_number", b"EmissiveFactor", mat.emit)
        elem_props_template_set(tmpl, props, "p_color_rgb", b"AmbientColor", ambient_color)
        elem_props_template_set(tmpl, props, "p_number", b"AmbientFactor", mat.ambient)
        elem_props_template_set(tmpl, props, "p_color_rgb", b"DiffuseColor", mat.diffuse_color)
        elem_props_template_set(tmpl, props, "p_number", b"DiffuseFactor", mat.diffuse_intensity)
    
        elem_props_template_set(tmpl, props, "p_color_rgb", b"TransparentColor",
                                mat.diffuse_color if mat.use_transparency else (1.0, 1.0, 1.0))
    
        elem_props_template_set(tmpl, props, "p_number", b"TransparencyFactor",
                                1.0 - mat.alpha if mat.use_transparency else 0.0)
        elem_props_template_set(tmpl, props, "p_number", b"Opacity", mat.alpha if mat.use_transparency else 1.0)
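        # Note: as set above, Opacity stays consistent with TransparencyFactor (Opacity == 1.0 - TransparencyFactor).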
        elem_props_template_set(tmpl, props, "p_vector_3d", b"NormalMap", (0.0, 0.0, 0.0))
        # Not sure about those...
    
        """
    
        b"Bump": ((0.0, 0.0, 0.0), "p_vector_3d"),
        b"BumpFactor": (1.0, "p_number"),
        b"DisplacementColor": ((0.0, 0.0, 0.0), "p_color_rgb"),
        b"DisplacementFactor": (0.0, "p_number"),
        """
        if mat_type == b"phong":
            elem_props_template_set(tmpl, props, "p_color_rgb", b"SpecularColor", mat.specular_color)
            elem_props_template_set(tmpl, props, "p_number", b"SpecularFactor", mat.specular_intensity / 2.0)
            # See Material template about those two!
            elem_props_template_set(tmpl, props, "p_number", b"Shininess", (mat.specular_hardness - 1.0) / 5.10)
            elem_props_template_set(tmpl, props, "p_number", b"ShininessExponent", (mat.specular_hardness - 1.0) / 5.10)
            elem_props_template_set(tmpl, props, "p_color_rgb", b"ReflectionColor", mat.mirror_color)
            elem_props_template_set(tmpl, props, "p_number", b"ReflectionFactor",
                                    mat.raytrace_mirror.reflect_factor if mat.raytrace_mirror.use else 0.0)
    
        # Custom properties.
        if scene_data.settings.use_custom_properties:
    
            fbx_data_element_custom_properties(props, mat)
    
    
    
    def _gen_vid_path(img, scene_data):
        msetts = scene_data.settings.media_settings
        fname_rel = bpy_extras.io_utils.path_reference(img.filepath, msetts.base_src, msetts.base_dst, msetts.path_mode,
                                                       msetts.subdir, msetts.copy_set, img.library)
        fname_abs = os.path.normpath(os.path.abspath(os.path.join(msetts.base_dst, fname_rel)))
        return fname_abs, fname_rel
    
    
    def fbx_data_texture_file_elements(root, tex, scene_data):
        """
        Write the (file) Texture data block.
        """
        # XXX All this is very fuzzy to me currently...
        #     Textures do not seem to use properties as much as they could.
        #     For now assuming most logical and simple stuff.
    
        tex_key, _mats = scene_data.data_textures[tex]
        img = tex.texture.image
        fname_abs, fname_rel = _gen_vid_path(img, scene_data)
    
        fbx_tex = elem_data_single_int64(root, b"Texture", get_fbxuid_from_key(tex_key))
        fbx_tex.add_string(fbx_name_class(tex.name.encode(), b"Texture"))
        fbx_tex.add_string(b"")
    
        elem_data_single_string(fbx_tex, b"Type", b"TextureVideoClip")
        elem_data_single_int32(fbx_tex, b"Version", FBX_TEXTURE_VERSION)
        elem_data_single_string(fbx_tex, b"TextureName", fbx_name_class(tex.name.encode(), b"Texture"))
        elem_data_single_string(fbx_tex, b"Media", fbx_name_class(img.name.encode(), b"Video"))
        elem_data_single_string_unicode(fbx_tex, b"FileName", fname_abs)
        elem_data_single_string_unicode(fbx_tex, b"RelativeFilename", fname_rel)
    
        alpha_source = 0  # None
        if img.use_alpha:
            if tex.texture.use_calculate_alpha:
                alpha_source = 1  # RGBIntensity as alpha.
            else:
                alpha_source = 2  # Black, i.e. alpha channel.
        # BlendMode not useful for now, only affects layered textures afaics.
        mapping = 0  # None.
        if tex.texture_coords in {'ORCO'}:  # XXX Others?
            if tex.mapping in {'FLAT'}:
                mapping = 1  # Planar
            elif tex.mapping in {'CUBE'}:
                mapping = 4  # Box
            elif tex.mapping in {'TUBE'}:
                mapping = 3  # Cylindrical
            elif tex.mapping in {'SPHERE'}:
                mapping = 2  # Spherical
        elif tex.texture_coords in {'UV'}:
            # XXX *HOW* do we link to correct UVLayer???
            mapping = 6  # UV
        wrap_mode = 1  # Clamp
        if tex.texture.extension in {'REPEAT'}:
            wrap_mode = 0  # Repeat
    
        tmpl = scene_data.templates[b"TextureFile"]
        props = elem_properties(fbx_tex)
        elem_props_template_set(tmpl, props, "p_enum", b"AlphaSource", alpha_source)
        elem_props_template_set(tmpl, props, "p_bool", b"PremultiplyAlpha",
                                img.alpha_mode in {'STRAIGHT'})  # Or is it PREMUL?
        elem_props_template_set(tmpl, props, "p_enum", b"CurrentMappingType", mapping)
        elem_props_template_set(tmpl, props, "p_enum", b"WrapModeU", wrap_mode)
        elem_props_template_set(tmpl, props, "p_enum", b"WrapModeV", wrap_mode)
        elem_props_template_set(tmpl, props, "p_vector_3d", b"Translation", tex.offset)
        elem_props_template_set(tmpl, props, "p_vector_3d", b"Scaling", tex.scale)
        elem_props_template_set(tmpl, props, "p_bool", b"UseMipMap", tex.texture.use_mipmap)
    
        # Custom properties.
        if scene_data.settings.use_custom_properties:
    
            fbx_data_element_custom_properties(props, tex.texture)
    
    def fbx_data_video_elements(root, vid, scene_data):
        """
        Write the actual image data block.
        """
        vid_key, _texs = scene_data.data_videos[vid]
        fname_abs, fname_rel = _gen_vid_path(vid, scene_data)
    
        fbx_vid = elem_data_single_int64(root, b"Video", get_fbxuid_from_key(vid_key))
        fbx_vid.add_string(fbx_name_class(vid.name.encode(), b"Video"))
        fbx_vid.add_string(b"Clip")
    
        elem_data_single_string(fbx_vid, b"Type", b"Clip")
        # XXX No Version???
        elem_data_single_string_unicode(fbx_vid, b"FileName", fname_abs)
        elem_data_single_string_unicode(fbx_vid, b"RelativeFilename", fname_rel)
    
        if scene_data.settings.media_settings.embed_textures:
            try:
                with open(vid.filepath, 'br') as f:
                    elem_data_single_byte_array(fbx_vid, b"Content", f.read())
            except Exception as e:
            print("WARNING: embedding file {} failed ({})".format(vid.filepath, e))
                elem_data_single_byte_array(fbx_vid, b"Content", b"")
        else:
            elem_data_single_byte_array(fbx_vid, b"Content", b"")
    
    
    def fbx_data_armature_elements(root, armature, scene_data):
        """
        Write:
            * Bones "data" (NodeAttribute::LimbNode, contains pretty much nothing!).
            * Deformers (i.e. Skin), bind between an armature and a mesh.
            ** SubDeformers (i.e. Cluster), one per bone/vgroup pair.
            * BindPose.
        Note armature itself has no data, it is a mere "Null" Model...
        """
    
        # Bones "data".
        tmpl = scene_data.templates[b"Bone"]
        for bo in armature.data.bones:
            _bo_key, bo_data_key, _arm = scene_data.data_bones[bo]
            fbx_bo = elem_data_single_int64(root, b"NodeAttribute", get_fbxuid_from_key(bo_data_key))
            fbx_bo.add_string(fbx_name_class(bo.name.encode(), b"NodeAttribute"))
            fbx_bo.add_string(b"LimbNode")
    
            elem_data_single_string(fbx_bo, b"TypeFlags", b"Skeleton")
    
    
            props = elem_properties(fbx_bo)
            elem_props_template_set(tmpl, props, "p_number", b"Size", (bo.tail_local - bo.head_local).length)
    
            # Custom properties.
            if scene_data.settings.use_custom_properties:
    
                fbx_data_element_custom_properties(props, bo)
    
    
        # Deformers and BindPoses.
        # Note: we might also use Deformers for our "parent to vertex" stuff???
        deformer = scene_data.data_deformers.get(armature, None)
        if deformer is not None:
            for me, (skin_key, obj, clusters) in deformer.items():
                # BindPose.
            # We assume the bind pose for our bones is their "Editmode" pose...
                # All matrices are expected in global (world) space.
                bindpose_key = get_blender_armature_bindpose_key(armature, me)
                fbx_pose = elem_data_single_int64(root, b"Pose", get_fbxuid_from_key(bindpose_key))
                fbx_pose.add_string(fbx_name_class(me.name.encode(), b"Pose"))
                fbx_pose.add_string(b"BindPose")
    
                elem_data_single_string(fbx_pose, b"Type", b"BindPose")
                elem_data_single_int32(fbx_pose, b"Version", FBX_POSE_BIND_VERSION)
                elem_data_single_int32(fbx_pose, b"NbPoseNodes", 1 + len(armature.data.bones))
    
                # First node is mesh/object.
    
                mat_world_obj = fbx_object_matrix(scene_data, obj, global_space=True)
    
                fbx_posenode = elem_empty(fbx_pose, b"PoseNode")
                elem_data_single_int64(fbx_posenode, b"Node", get_fbxuid_from_key(scene_data.objects[obj]))
                elem_data_single_float64_array(fbx_posenode, b"Matrix", matrix_to_array(mat_world_obj))
                # And all bones of armature!
                mat_world_bones = {}
                for bo in armature.data.bones:
    
                    bomat = fbx_object_matrix(scene_data, bo, armature, global_space=True)
    
                    mat_world_bones[bo] = bomat
                    fbx_posenode = elem_empty(fbx_pose, b"PoseNode")
                    elem_data_single_int64(fbx_posenode, b"Node", get_fbxuid_from_key(scene_data.objects[bo]))
                    elem_data_single_float64_array(fbx_posenode, b"Matrix", matrix_to_array(bomat))
    
                # Deformer.
                fbx_skin = elem_data_single_int64(root, b"Deformer", get_fbxuid_from_key(skin_key))
                fbx_skin.add_string(fbx_name_class(armature.name.encode(), b"Deformer"))
                fbx_skin.add_string(b"Skin")
    
                elem_data_single_int32(fbx_skin, b"Version", FBX_DEFORMER_SKIN_VERSION)
                elem_data_single_float64(fbx_skin, b"Link_DeformAcuracy", 50.0)  # Only vague idea what it is...
    
                for bo, clstr_key in clusters.items():
                    # Find which vertices are affected by this bone/vgroup pair, and matching weights.
    
                    indices = []
                    weights = []
    
                    vg_idx = obj.vertex_groups[bo.name].index
                    for idx, v in enumerate(me.vertices):
                        vert_vg = [vg for vg in v.groups if vg.group == vg_idx]
                        if not vert_vg:
                            continue
                        indices.append(idx)
                        weights.append(vert_vg[0].weight)
    
                    # Create the cluster.
                    fbx_clstr = elem_data_single_int64(root, b"Deformer", get_fbxuid_from_key(clstr_key))
                    fbx_clstr.add_string(fbx_name_class(bo.name.encode(), b"SubDeformer"))
                    fbx_clstr.add_string(b"Cluster")
    
                    elem_data_single_int32(fbx_clstr, b"Version", FBX_DEFORMER_CLUSTER_VERSION)
                    # No idea what that user data might be...
                    fbx_userdata = elem_data_single_string(fbx_clstr, b"UserData", b"")
                    fbx_userdata.add_string(b"")
                    if indices:
                        elem_data_single_int32_array(fbx_clstr, b"Indexes", indices)
                        elem_data_single_float64_array(fbx_clstr, b"Weights", weights)
                    # Transform and TransformLink matrices...
                    # They seem to be mostly the same as BindPose ones???
                    # WARNING! Even though official FBX API presents Transform in global space,
                    #          **it is stored in bone space in FBX data!** See:
                    #          http://area.autodesk.com/forum/autodesk-fbx/fbx-sdk/why-the-values-return-
                    #                 by-fbxcluster-gettransformmatrix-x-not-same-with-the-value-in-ascii-fbx-file/
    
                    elem_data_single_float64_array(fbx_clstr, b"Transform",
                                                   matrix_to_array(mat_world_bones[bo].inverted() * mat_world_obj))
                    elem_data_single_float64_array(fbx_clstr, b"TransformLink", matrix_to_array(mat_world_bones[bo]))
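                    # I.e. as computed above: Transform maps the mesh from its own space into the bone's local
                    # space at bind time, while TransformLink is the bone's world-space bind matrix.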
    
    
    def fbx_data_object_elements(root, obj, scene_data):
        """
        Write the Object (Model) data blocks.
        Note we handle "Model" part of bones as well here!
        """
        obj_type = b"Null"  # default, sort of empty...
    
        if isinstance(obj, Bone):
    
            obj_type = b"LimbNode"
        elif (obj.type == 'MESH'):
            obj_type = b"Mesh"
        elif (obj.type == 'LAMP'):
            obj_type = b"Light"
        elif (obj.type == 'CAMERA'):
            obj_type = b"Camera"
        obj_key = scene_data.objects[obj]
        model = elem_data_single_int64(root, b"Model", get_fbxuid_from_key(obj_key))
        model.add_string(fbx_name_class(obj.name.encode(), b"Model"))
        model.add_string(obj_type)
    
        elem_data_single_int32(model, b"Version", FBX_MODELS_VERSION)
    
        # Object transform info.
    
        loc, rot, scale, matrix, matrix_rot = fbx_object_tx(scene_data, obj)
    
        rot = tuple(units_convert_iter(rot, "radian", "degree"))
    
    
        tmpl = scene_data.templates[b"Model"]
        # For now add only loc/rot/scale...
        props = elem_properties(model)
        elem_props_template_set(tmpl, props, "p_lcl_translation", b"Lcl Translation", loc)
        elem_props_template_set(tmpl, props, "p_lcl_rotation", b"Lcl Rotation", rot)
        elem_props_template_set(tmpl, props, "p_lcl_scaling", b"Lcl Scaling", scale)
    
        # TODO: "constraints" (limit loc/rot/scale, and target-to-object).
    
        # Custom properties.
        if scene_data.settings.use_custom_properties:
    
            fbx_data_element_custom_properties(props, obj)
    
    
        # Those settings would obviously need to be edited in a complete version of the exporter, and may depend on
        # object type, etc.
        elem_data_single_int32(model, b"MultiLayer", 0)
        elem_data_single_int32(model, b"MultiTake", 0)
        elem_data_single_bool(model, b"Shading", True)
        elem_data_single_string(model, b"Culling", b"CullingOff")
    
    
        if isinstance(obj, Object) and obj.type == 'CAMERA':
    
            # Why, oh why are FBX cameras such a mess???
            # And WHY add camera data HERE??? Not even sure this is needed...
            render = scene_data.scene.render
            width = render.resolution_x * 1.0
            height = render.resolution_y * 1.0
            elem_props_template_set(tmpl, props, "p_enum", b"ResolutionMode", 0)  # Don't know what it means
            elem_props_template_set(tmpl, props, "p_number", b"AspectW", width)
            elem_props_template_set(tmpl, props, "p_number", b"AspectH", height)
            elem_props_template_set(tmpl, props, "p_bool", b"ViewFrustum", True)
            elem_props_template_set(tmpl, props, "p_enum", b"BackgroundMode", 0)  # Don't know what it means
            elem_props_template_set(tmpl, props, "p_bool", b"ForegroundTransparent", True)
    
    
    
    def fbx_data_animation_elements(root, scene_data):
        """
        Write animation data.
        """
        animations = scene_data.animations
        if not animations:
            return
        scene = scene_data.scene
    
        fps = scene.render.fps / scene.render.fps_base
        def keys_to_ktimes(keys):
            return (int(v) for v in units_convert_iter((f / fps for f, _v in keys), "second", "ktime"))
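    # Each key's frame is converted to seconds (frame / fps) and then to FBX 'ktime' units
    # (46186158000 ktime per second, as defined by the FBX SDK).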
    
        astack_key, alayers = animations
        acn_tmpl = scene_data.templates[b"AnimationCurveNode"]
    
        # Animation stack.
        astack = elem_data_single_int64(root, b"AnimationStack", get_fbxuid_from_key(astack_key))
        astack.add_string(fbx_name_class(scene.name.encode(), b"AnimStack"))
        astack.add_string(b"")
    
        for obj, (alayer_key, acurvenodes) in alayers.items():
            # Animation layer.
            alayer = elem_data_single_int64(root, b"AnimationLayer", get_fbxuid_from_key(alayer_key))
            alayer.add_string(fbx_name_class(obj.name.encode(), b"AnimLayer"))
            alayer.add_string(b"")
    
            for fbx_prop, (acurvenode_key, acurves) in acurvenodes.items():
                # Animation curve node.
                acurvenode = elem_data_single_int64(root, b"AnimationCurveNode", get_fbxuid_from_key(acurvenode_key))
                acurvenode.add_string(fbx_name_class(fbx_prop.encode(), b"AnimCurveNode"))
                acurvenode.add_string(b"")
    
                acn_props = elem_properties(acurvenode)
    
    
                for fbx_item, (acurve_key, def_value, keys) in acurves.items():
                    elem_props_template_set(acn_tmpl, acn_props, "p_number", fbx_item.encode(), def_value, animatable=True)
    
    
                    # Only create Animation curve if needed!
                    if keys:
                        acurve = elem_data_single_int64(root, b"AnimationCurve", get_fbxuid_from_key(acurve_key))
                        acurve.add_string(fbx_name_class(b"", b"AnimCurve"))
                        acurve.add_string(b"")
    
                        # key attributes...
    
                        # flags...
                        keyattr_flags = (1 << 3 |   # interpolation mode, 1 = constant, 2 = linear, 3 = cubic.
                                         1 << 8 |   # tangent mode, 8 = auto, 9 = TCB, 10 = user, 11 = generic break,
                                         1 << 13 |  # tangent mode, 12 = generic clamp, 13 = generic time independent,
                                         1 << 14 |  # tangent mode, 13 + 14 = generic clamp progressive.
                                         0,
                                        )
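                        # With the bits above, keyattr_flags evaluates to 24840: cubic interpolation with
                        # auto, generic time-independent and clamp-progressive tangent flags.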
                        # Maybe values controlling TCB & co???
                        keyattr_datafloat = (0.0, 0.0, 9.419963346924634e-30, 0.0)
    
                        # And now, the *real* data!
    
                        elem_data_single_float64(acurve, b"Default", def_value)
    
                        elem_data_single_int32(acurve, b"KeyVer", FBX_ANIM_KEY_VERSION)
                        elem_data_single_int64_array(acurve, b"KeyTime", keys_to_ktimes(keys))
                        elem_data_single_float32_array(acurve, b"KeyValueFloat", (v for _f, v in keys))
                        elem_data_single_int32_array(acurve, b"KeyAttrFlags", keyattr_flags)
                        elem_data_single_float32_array(acurve, b"KeyAttrDataFloat", keyattr_datafloat)
    
                        elem_data_single_int32_array(acurve, b"KeyAttrRefCount", (len(keys),))
    
    ##### Top-level FBX data container. #####
    
    # Helper container gathering some data we need multiple times:
    #     * templates.
    #     * objects.
    #     * connections.
    #     * takes.
    FBXData = namedtuple("FBXData", (
        "templates", "templates_users", "connections",
    
        "settings", "scene", "objects", "animations",
    
        "data_lamps", "data_cameras", "data_meshes", "mesh_mat_indices",
        "data_bones", "data_deformers",
        "data_world", "data_materials", "data_textures", "data_videos",
    ))
    
    
    def fbx_mat_properties_from_texture(tex):
        """
        Returns a set of FBX material properties that are affected by the given texture.
        Quite obviously, this is a fuzzy and far-from-perfect mapping! Amounts of influence are completely lost, for example.
        Note tex is actually expected to be a texture slot.
        """
        # Tex influence does not exist in FBX, so assume influence < 0.5 = no influence... :/
        INFLUENCE_THRESHOLD = 0.5
    
        # Mapping Blender -> FBX (blend_use_name, blend_fact_name, fbx_name).
        blend_to_fbx = (
            # Lambert & Phong...
            ("diffuse", "diffuse", b"DiffuseFactor"),
            ("color_diffuse", "diffuse_color", b"DiffuseColor"),
            ("alpha", "alpha", b"TransparencyFactor"),
            ("diffuse", "diffuse", b"TransparentColor"),  # Uses diffuse color in Blender!
            ("emit", "emit", b"EmissiveFactor"),
            ("diffuse", "diffuse", b"EmissiveColor"),  # Uses diffuse color in Blender!
            ("ambient", "ambient", b"AmbientFactor"),
            #("", "", b"AmbientColor"),  # World stuff in Blender, for now ignore...
    
            ("normal", "normal", b"NormalMap"),
            # Note: unsure about those... :/
    
            #("", "", b"Bump"),
            #("", "", b"BumpFactor"),
            #("", "", b"DisplacementColor"),
            #("", "", b"DisplacementFactor"),
            # Phong only.
            ("specular", "specular", b"SpecularFactor"),
            ("color_spec", "specular_color", b"SpecularColor"),
            # See Material template about those two!
            ("hardness", "hardness", b"Shininess"),
            ("hardness", "hardness", b"ShininessExponent"),
            ("mirror", "mirror", b"ReflectionColor"),
            ("raymir", "raymir", b"ReflectionFactor"),
        )
    
        tex_fbx_props = set()
        for use_map_name, name_factor, fbx_prop_name in blend_to_fbx:
            if getattr(tex, "use_map_" + use_map_name) and getattr(tex, name_factor + "_factor") >= INFLUENCE_THRESHOLD:
                tex_fbx_props.add(fbx_prop_name)
    
        return tex_fbx_props
    
    
    def fbx_skeleton_from_armature(scene, settings, armature, objects, data_bones, data_deformers, arm_parents):
        """
        Create skeleton from armature/bones (NodeAttribute/LimbNode and Model/LimbNode), and for each deformed mesh,
        create Pose/BindPose(with sub PoseNode) and Deformer/Skin(with Deformer/SubDeformer/Cluster).
        Also supports "parent to bone" (simple parent to Model/LimbNode).
        arm_parents is a set of tuples (armature, object) for all successful armature bindings.
        """
        arm = armature.data
        bones = {}
        for bo in arm.bones:
            key, data_key = get_blender_bone_key(armature, bo)
            objects[bo] = key
            data_bones[bo] = (key, data_key, armature)
            bones[bo.name] = bo
    
        for obj in objects.keys():
    
            if not isinstance(obj, Object):
    
                continue
            if obj.type not in {'MESH'}:
                continue
            if obj.parent != armature:
                continue
    
            # Always handled by an Armature modifier...
            found = False
            for mod in obj.modifiers:
                if mod.type not in {'ARMATURE'}:
                    continue
                # We only support vertex groups binding method, not bone envelopes one!
                if mod.object == armature and mod.use_vertex_groups:
                    found = True
                    break
    
            if not found:
                continue
    
            # Now we have a mesh using this armature. First, find out which bones are concerned!
            # XXX Assuming here non-used bones can have no cluster, this has to be checked!
            used_bones = tuple(bones[vg.name] for vg in obj.vertex_groups if vg.name in bones)
            if not used_bones:
                continue
    
        # Note: bind poses have no relations at all (no connections), so no need for any preprocessing for them.
    
            # Create skin & clusters relations (note skins are connected to geometry, *not* model!).
            me = obj.data
            clusters = {bo: get_blender_bone_cluster_key(armature, me, bo) for bo in used_bones}
            data_deformers.setdefault(armature, {})[me] = (get_blender_armature_skin_key(armature, me), obj, clusters)
    
            # We don't want a regular parent relationship for those in FBX...
            arm_parents.add((armature, obj))
    
    
    
    def fbx_animations_simplify(scene_data, animdata):
        """
        Simplifies FCurves!
        """
        fac = scene_data.settings.bake_anim_simplify_factor
        step = scene_data.settings.bake_anim_step
        # So that, with default factor and step values (1), we get:
        max_frame_diff = step * fac * 10  # max step of 10 frames.
        value_diff_fac = fac / 1000  # min value evolution: 0.1% of whole range.
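        # E.g. with the defaults (fac == 1, step == 1): max_frame_diff == 10 frames and
        # value_diff_fac == 0.001, i.e. 0.1% of each channel's value range.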
    
        for obj, keys in animdata.items():
            if not keys:
                continue
            extremums = [(min(values), max(values)) for values in zip(*(k[1] for k in keys))]
            min_diffs = [max((mx - mn) * value_diff_fac, 0.000001) for mn, mx in extremums]
            p_currframe, p_key, p_key_write = keys[0]
            p_keyed = [(p_currframe - max_frame_diff, val) for val in p_key]
            for currframe, key, key_write in keys:
                #if obj.name == "Cube":
                    #print(currframe, key, key_write)
                for idx, (val, p_val) in enumerate(zip(key, p_key)):
                    p_keyedframe, p_keyedval = p_keyed[idx]
                    if val == p_val:
                        # Never write keyframe when value is exactly the same as prev one!
                        continue
                    if abs(val - p_val) >= min_diffs[idx]:
                        # If enough difference from previous sampled value, key this value *and* the previous one!
                        key_write[idx] = True
                        p_key_write[idx] = True
                        p_keyed[idx] = (currframe, val)
    
                    elif (abs(val - p_keyedval) >= min_diffs[idx]) or (currframe - p_keyedframe >= max_frame_diff):
    
                        # Else, if enough difference from previous keyed value (or max gap between keys is reached),
                        # key this value only!
                        key_write[idx] = True
                        p_keyed[idx] = (currframe, val)
                p_currframe, p_key, p_key_write = currframe, key, key_write
    
            # Always key last sampled values (we ignore curves with a single valid key anyway).
            p_key_write[:] = [True] * len(p_key_write)
    
    
    
    def fbx_animations_objects(scene_data):
        """
        Generate animation data from objects.
        """
        objects = scene_data.objects
        bake_step = scene_data.settings.bake_anim_step
        scene = scene_data.scene
    
        # FBX mapping info: Property affected, and name of the "sub" property (to distinguish e.g. vector's channels).
        fbx_names = (
            ("Lcl Translation", "d|X"), ("Lcl Translation", "d|Y"), ("Lcl Translation", "d|Z"),
            ("Lcl Rotation", "d|X"), ("Lcl Rotation", "d|Y"), ("Lcl Rotation", "d|Z"),
            ("Lcl Scaling", "d|X"), ("Lcl Scaling", "d|Y"), ("Lcl Scaling", "d|Z"),
        )
    
        back_currframe = scene.frame_current
        animdata = {obj: [] for obj in objects.keys()}
    
        currframe = scene.frame_start
        while currframe < scene.frame_end:
            scene.frame_set(int(currframe), currframe - int(currframe))
            for obj in objects.keys():
                if isinstance(obj, Bone):
                    continue  # TODO!
                # We compute baked loc/rot/scale for all objects.
                loc, rot, scale, _m, _mr = fbx_object_tx(scene_data, obj)
                tx = tuple(loc) + tuple(units_convert_iter(rot, "radian", "degree")) + tuple(scale)
                animdata[obj].append((currframe, tx, [False] * len(tx)))
            currframe += bake_step
    
        scene.frame_set(back_currframe, 0.0)
    
        fbx_animations_simplify(scene_data, animdata)
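        # Each animdata[obj] entry is now a (frame, transform_tuple, write_flags) triple; the simplification pass
        # above has marked in write_flags which of the nine loc/rot/scale channels actually need a keyframe.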
    
        animations = {}
    
        # And now, produce final data (usable by FBX export code)...
        for obj, keys in animdata.items():
            if not keys:
                continue
            curves = [[] for k in keys[0][1]]
            for currframe, key, key_write in keys:
                #if obj.name == "Cube":
                    #print(currframe, key, key_write)
                for idx, (val, wrt) in enumerate(zip(key, key_write)):
                    if wrt:
                        curves[idx].append((currframe, val))
    
            loc, rot, scale, _m, _mr = fbx_object_tx(scene_data, obj)
            tx = tuple(loc) + tuple(units_convert_iter(rot, "radian", "degree")) + tuple(scale)
        # For each channel: (curve key, default value, keyframes); an empty keyframe list means the channel is not animated.
            final_keys = {}
            for idx, c in enumerate(curves):
                fbx_group, fbx_item = fbx_names[idx]
                fbx_item_key = get_blender_anim_curve_key(obj, fbx_group, fbx_item)
                if fbx_group not in final_keys:
                    final_keys[fbx_group] = (get_blender_anim_curve_node_key(obj, fbx_group), {})
                final_keys[fbx_group][1][fbx_item] = (fbx_item_key, tx[idx], c if len(c) > 1 else [])
            # And now, remove anim groups (i.e. groups of curves affecting a single FBX property) with no curve at all!
            del_groups = []
            for grp, (_k, data) in final_keys.items():
                if True in (bool(d[2]) for d in data.values()):
                    continue
                del_groups.append(grp)
            for grp in del_groups:
                del final_keys[grp]
    
            if final_keys:
                animations[obj] = (get_blender_anim_layer_key(obj), final_keys)
    
        return (get_blender_anim_stack_key(scene), animations) if animations else None
    
    
    
    def fbx_data_from_scene(scene, settings):
        """
        Do some pre-processing over scene's data...
        """
        objtypes = settings.object_types
    
        ##### Gathering data...
    
        # This is rather simple for now; maybe we could end up generating templates with the most-used values
        # instead of the default ones?
        objects = {obj: get_blenderID_key(obj) for obj in scene.objects if obj.type in objtypes}
        data_lamps = {obj.data: get_blenderID_key(obj.data) for obj in objects if obj.type == 'LAMP'}
        # Unfortunately, FBX camera data contains object-level data (like position, orientation, etc.)...
        data_cameras = {obj: get_blenderID_key(obj.data) for obj in objects if obj.type == 'CAMERA'}
    
        data_meshes = {obj.data: (get_blenderID_key(obj.data), obj) for obj in objects if obj.type == 'MESH'}
    
    
        # Armatures!
        data_bones = {}
        data_deformers = {}
        arm_parents = set()
        for obj in tuple(objects.keys()):
            if obj.type not in {'ARMATURE'}:
                continue
            fbx_skeleton_from_armature(scene, settings, obj, objects, data_bones, data_deformers, arm_parents)
    
        # Some world settings are embedded in FBX materials...
        if scene.world:
            data_world = {scene.world: get_blenderID_key(scene.world)}
        else:
            data_world = {}
    
        # TODO: Check all the mat stuff works even when mats are linked to Objects
        #       (we can then have the same mesh used with different materials...).
        #       *Should* work, as FBX always links its materials to Models (i.e. objects).
        #       XXX However, material indices would probably break...
        data_materials = {}
        for obj in objects:
            # Only meshes for now!
    
            if not isinstance(obj, Object) or obj.type not in {'MESH'}:
    
                continue
            for mat_s in obj.material_slots:
                mat = mat_s.material
            # Note: theoretically, FBX supports any kind of material, even GLSL shaders etc.
            # However, I doubt anything other than Lambert/Phong is really portable!